// Copyright (C) 2003 Dolphin Project / 2012 PPSSPP Project

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
#include "base/mutex.h"
#include "Common/Common.h"
#include "Common/MemoryUtil.h"

#include "Common/MemArena.h"

#include "Common/ChunkFile.h"

#include "Core/MemMap.h"
#include "Core/HDRemaster.h"
#include "Core/MIPS/MIPS.h"
#include "Core/HLE/HLE.h"

#include "Core/Core.h"
#include "Core/Debugger/SymbolMap.h"
#include "Core/Debugger/Breakpoints.h"
#include "Core/Config.h"
#include "Core/HLE/ReplaceTables.h"
// The base pointer to the auto-mirrored arena.

// 64-bit: Pointers to low-mem (sub-0x10000000) mirror
// 32-bit: Same as the corresponding physical/virtual pointers.
u8 *m_pPhysicalScratchPad;
u8 *m_pUncachedScratchPad;
// 64-bit: Pointers to high-mem mirrors
// 32-bit: Same as above
u8 *m_pKernelRAM;	// RAM mirrored up to "kernel space". Fully accessible at all times currently.

// VRAM is mirrored 4 times. The second and fourth mirrors are swizzled.
// In practice, a game accessing the mirrors most likely is deswizzling the depth buffer.

// Holds the ending address of the PSP's user space.
// Required for HD Remasters to work properly.
// This replaces RAM_NORMAL_SIZE at runtime.

// Used to store the PSP model on game startup.

recursive_mutex g_shutdownLock;
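
// g_shutdownLock is taken both by MemoryInitedLock (defined near the bottom of
// this file) and by Shutdown(), so code holding a MemoryInitedLock can touch
// PSP memory without the views being torn down underneath it.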
// We don't declare the IO region in here since it's handled by other means.
static MemoryView views[] =
{
	{&m_pScratchPad, &m_pPhysicalScratchPad, 0x00010000, SCRATCHPAD_SIZE, 0},
	{NULL, &m_pUncachedScratchPad, 0x40010000, SCRATCHPAD_SIZE, MV_MIRROR_PREVIOUS},
	{&m_pVRAM, &m_pPhysicalVRAM1, 0x04000000, 0x00200000, 0},
	{NULL, &m_pPhysicalVRAM2, 0x04200000, 0x00200000, MV_MIRROR_PREVIOUS},
	{NULL, &m_pPhysicalVRAM3, 0x04400000, 0x00200000, MV_MIRROR_PREVIOUS},
	{NULL, &m_pPhysicalVRAM4, 0x04600000, 0x00200000, MV_MIRROR_PREVIOUS},
	{NULL, &m_pUncachedVRAM1, 0x44000000, 0x00200000, MV_MIRROR_PREVIOUS},
	{NULL, &m_pUncachedVRAM2, 0x44200000, 0x00200000, MV_MIRROR_PREVIOUS},
	{NULL, &m_pUncachedVRAM3, 0x44400000, 0x00200000, MV_MIRROR_PREVIOUS},
	{NULL, &m_pUncachedVRAM4, 0x44600000, 0x00200000, MV_MIRROR_PREVIOUS},
	{&m_pRAM, &m_pPhysicalRAM, 0x08000000, g_MemorySize, MV_IS_PRIMARY_RAM},	// only from 0x08800000 is it usable (last 24 megs)
	{NULL, &m_pUncachedRAM, 0x48000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
	{NULL, &m_pKernelRAM, 0x88000000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_PRIMARY_RAM},
	// Starts at memory + 31 MB.
	{&m_pRAM2, &m_pPhysicalRAM2, 0x09F00000, g_MemorySize, MV_IS_EXTRA1_RAM},
	{NULL, &m_pUncachedRAM2, 0x49F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM},
	{NULL, &m_pKernelRAM2, 0x89F00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA1_RAM},
	// Starts at memory + 31 * 2 MB.
	{&m_pRAM3, &m_pPhysicalRAM3, 0x0BE00000, g_MemorySize, MV_IS_EXTRA2_RAM},
	{NULL, &m_pUncachedRAM3, 0x4BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM},
	{NULL, &m_pKernelRAM3, 0x8BE00000, g_MemorySize, MV_MIRROR_PREVIOUS | MV_IS_EXTRA2_RAM},

	// TODO: There are a few swizzled mirrors of VRAM, not sure about the best way to
	// handle them.
};

static const int num_views = sizeof(views) / sizeof(MemoryView);
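
// Each row of views[] pairs two out-pointers (one for the mapped view, one for
// its physical/low twin; NULL where only the mirror matters) with the PSP
// virtual address, the size and the flags. The 0x4xxxxxxx rows are uncached
// mirrors and the 0x8xxxxxxx rows kernel-mode mirrors of the same RAM, so an
// MV_MIRROR_PREVIOUS row ends up backed by the same host memory as the row it
// mirrors.
//
// Illustrative only (not code used elsewhere in the emulator): once the views
// are mapped, an uncached address reaches the same underlying byte as its
// cached twin, e.g.
//
//   u8 *a = m_pVRAM + 0x123;          // via the cached 0x04000000 view
//   u8 *b = GetPointer(0x44000123);   // via the uncached mirror - same byte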
inline static bool CanIgnoreView(const MemoryView &view) {
	// Basically, 32-bit platforms can ignore views that are masked out anyway.
	return (view.flags & MV_MIRROR_PREVIOUS) && (view.virtual_address & ~MEMVIEW32_MASK) != 0;
}
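
// For example, the uncached VRAM mirror at 0x44000000 has mirror bits outside
// MEMVIEW32_MASK, so on a 32-bit build it would land on the same masked range
// as the cached 0x04000000 view anyway and doesn't need a mapping of its own
// (assuming MEMVIEW32_MASK keeps only the low address bits).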
// yeah, this could also be done in like two bitwise ops...
#define SKIP(a_flags, b_flags)
//	if (!(a_flags & MV_WII_ONLY) && (b_flags & MV_WII_ONLY))
//	if (!(a_flags & MV_FAKE_VMEM) && (b_flags & MV_FAKE_VMEM))
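
// Memory_TryBase attempts to map every entry of views[] at the candidate
// `base`. On success it returns true with all the out-pointers filled in; on
// failure it releases whatever it had mapped so far and returns false so the
// caller (MemoryMap_Setup below) can retry with a different base.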
static bool Memory_TryBase(u32 flags) {
	// OK, we know where to find free space. Now grab it!
	// We just mimic the popular BAT setup.

#if defined(_XBOX)
	void *ptr;
#elif !defined(__SYMBIAN32__)
	size_t position = 0;
	size_t last_position = 0;
#endif

	// Zero all the pointers to be sure.
	for (int i = 0; i < num_views; i++)
	{
		if (views[i].out_ptr_low)
			*views[i].out_ptr_low = 0;
		if (views[i].out_ptr)
			*views[i].out_ptr = 0;
	}

	int i;
	for (i = 0; i < num_views; i++)
	{
		const MemoryView &view = views[i];
		if (view.size == 0)
			continue;
		SKIP(flags, view.flags);
		if (!CanIgnoreView(view)) {
			memmap->Commit(view.virtual_address & MEMVIEW32_MASK, view.size);
		}
		*(view.out_ptr) = (u8*)base + (view.virtual_address & MEMVIEW32_MASK);
		if (!CanIgnoreView(view)) {
			*(view.out_ptr_low) = (u8*)(base + view.virtual_address);
			ptr = VirtualAlloc(base + (view.virtual_address & MEMVIEW32_MASK), view.size, MEM_COMMIT, PAGE_READWRITE);
		}
		*(view.out_ptr) = (u8*)base + (view.virtual_address & MEMVIEW32_MASK);
		if (view.flags & MV_MIRROR_PREVIOUS) {
			position = last_position;
		} else {
			*(view.out_ptr_low) = (u8*)g_arena.CreateView(position, view.size);
			if (!*view.out_ptr_low)
				goto bail;
		}
#if defined(_ARCH_64)
		*view.out_ptr = (u8*)g_arena.CreateView(
			position, view.size, base + view.virtual_address);
#else
		if (CanIgnoreView(view)) {
			// No need to create multiple identical views.
			*view.out_ptr = *views[i - 1].out_ptr;
		} else {
			*view.out_ptr = (u8*)g_arena.CreateView(
				position, view.size, base + (view.virtual_address & MEMVIEW32_MASK));
			if (!*view.out_ptr)
				goto bail;
		}
#endif
		last_position = position;
		position += g_arena.roundup(view.size);
	}

	return true;
#if !defined(__SYMBIAN32__)
bail:
	// Argh! ERROR! Free what we grabbed so far so we can try again.
	for (int j = 0; j <= i; j++)
	{
		if (views[j].size == 0)
			continue;
		SKIP(flags, views[j].flags);
		if (views[j].out_ptr_low && *views[j].out_ptr_low)
		{
			g_arena.ReleaseView(*views[j].out_ptr_low, views[j].size);
			*views[j].out_ptr_low = NULL;
		}
		if (*views[j].out_ptr)
		{
			if (!CanIgnoreView(views[j])) {
				g_arena.ReleaseView(*views[j].out_ptr, views[j].size);
			}
			*views[j].out_ptr = NULL;
		}
	}
	return false;
#endif
}
void MemoryMap_Setup(u32 flags)
{
	// Find a base to reserve 256MB
#if defined(_XBOX)
	base = (u8*)VirtualAlloc(0, 0x10000000, MEM_RESERVE|MEM_LARGE_PAGES, PAGE_READWRITE);
#elif defined(__SYMBIAN32__)
	memmap = new RChunk();
	memmap->CreateDisconnectedLocal(0, 0, 0x10000000);
	base = memmap->Base();
#endif

	size_t total_mem = 0;

	for (int i = 0; i < num_views; i++)
	{
		if (views[i].size == 0)
			continue;
		SKIP(flags, views[i].flags);
		if (!CanIgnoreView(views[i]))
			total_mem += g_arena.roundup(views[i].size);
	}

	// Grab some pagefile backed memory out of the void ...
	g_arena.GrabLowMemSpace(total_mem);

	// 32-bit Windows retrieves base a different way
#if defined(_M_X64) || !defined(_WIN32)
	// This really shouldn't fail - in 64-bit, there will always be enough address space.
	// Linux32 is fine with the x64 method, although limited to 32-bit with no automirrors.
	base = MemArena::Find4GBBase();
#endif

	// Now, create views in high memory where there's plenty of space.
#if defined(_WIN32) && !defined(_M_X64)
	// Try a whole range of possible bases. Return once we got a valid one.
	int base_attempts = 0;
	u32 max_base_addr = 0x7FFF0000 - 0x10000000;

	for (u32 base_addr = 0x01000000; base_addr < max_base_addr; base_addr += 0x400000)
	{
		base_attempts++;
		base = (u8 *)base_addr;
		if (Memory_TryBase(flags))
		{
			INFO_LOG(MEMMAP, "Found valid memory base at %p after %i tries.", base, base_attempts);
			base_attempts = 0;
			break;
		}
	}

	if (base_attempts)
		PanicAlert("No possible memory base pointer found!");
#else
	// Try base we retrieved earlier
	if (!Memory_TryBase(flags))
	{
		ERROR_LOG(MEMMAP, "MemoryMap_Setup: Failed finding a memory base.");
		PanicAlert("MemoryMap_Setup: Failed finding a memory base.");
	}
#endif
}

void MemoryMap_Shutdown(u32 flags)
{
#ifdef __SYMBIAN32__
	memmap->Decommit(0, memmap->MaxSize());
#else
	for (int i = 0; i < num_views; i++)
	{
		if (views[i].size == 0)
			continue;
		SKIP(flags, views[i].flags);
		if (views[i].out_ptr_low && *views[i].out_ptr_low)
			g_arena.ReleaseView(*views[i].out_ptr_low, views[i].size);
		if (*views[i].out_ptr && (!views[i].out_ptr_low || *views[i].out_ptr != *views[i].out_ptr_low))
			g_arena.ReleaseView(*views[i].out_ptr, views[i].size);
		*views[i].out_ptr = NULL;
		if (views[i].out_ptr_low)
			*views[i].out_ptr_low = NULL;
	}
	g_arena.ReleaseSpace();
#endif
}

void Init()
{
	u32 flags = 0;

	// On some 32 bit platforms, you can only map < 32 megs at a time.
	const static int MAX_MMAP_SIZE = 31 * 1024 * 1024;
	_dbg_assert_msg_(MEMMAP, g_MemorySize < MAX_MMAP_SIZE * 3, "ACK - too much memory for three mmap views.");
	for (size_t i = 0; i < ARRAY_SIZE(views); i++) {
		if (views[i].flags & MV_IS_PRIMARY_RAM)
			views[i].size = std::min((int)g_MemorySize, MAX_MMAP_SIZE);
		if (views[i].flags & MV_IS_EXTRA1_RAM)
			views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE, 0), MAX_MMAP_SIZE);
		if (views[i].flags & MV_IS_EXTRA2_RAM)
			views[i].size = std::min(std::max((int)g_MemorySize - MAX_MMAP_SIZE * 2, 0), MAX_MMAP_SIZE);
	}
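
	// For example (assuming RAM_NORMAL_SIZE is 32 MB and RAM_DOUBLE_SIZE is 64 MB):
	// the stock configuration ends up as 31 MB primary + 1 MB extra1 + 0 MB extra2,
	// while the doubled configuration gets 31 + 31 + 2 MB across the three groups.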

	MemoryMap_Setup(flags);

	INFO_LOG(MEMMAP, "Memory system initialized. RAM at %p (mirror at 0 @ %p, uncached @ %p)",
		m_pRAM, m_pPhysicalRAM, m_pUncachedRAM);
}
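
// DoState saves/restores memory contents. The "Memory" section is versioned:
// older saves (version 1) imply the stock memory size, newer ones also record
// the PSP model so g_MemorySize can be restored (re-initializing the map if it
// grew) before the RAM, VRAM and scratchpad arrays are read back.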

void DoState(PointerWrap &p)
{
	auto s = p.Section("Memory", 1, 2);
	if (!s)
		return;

	if (s < 2) {
		if (!g_RemasterMode)
			g_MemorySize = RAM_NORMAL_SIZE;
		g_PSPModel = PSP_MODEL_FAT;
	} else {
		u32 oldMemorySize = g_MemorySize;
		p.Do(g_PSPModel);
		p.DoMarker("PSPModel");
		if (!g_RemasterMode) {
			g_MemorySize = g_PSPModel == PSP_MODEL_FAT ? RAM_NORMAL_SIZE : RAM_DOUBLE_SIZE;
			if (oldMemorySize < g_MemorySize) {
				Shutdown();
				Init();
			}
		}
	}

	p.DoArray(GetPointer(PSP_GetKernelMemoryBase()), g_MemorySize);
	p.DoMarker("RAM");

	p.DoArray(m_pVRAM, VRAM_SIZE);
	p.DoMarker("VRAM");
	p.DoArray(m_pScratchPad, SCRATCHPAD_SIZE);
	p.DoMarker("ScratchPad");
}

void Shutdown()
{
	lock_guard guard(g_shutdownLock);
	u32 flags = 0;
	MemoryMap_Shutdown(flags);
	base = NULL;
	DEBUG_LOG(MEMMAP, "Memory system shut down.");
}

void Clear()
{
	if (m_pRAM)
		memset(GetPointerUnchecked(PSP_GetKernelMemoryBase()), 0, g_MemorySize);
	if (m_pScratchPad)
		memset(m_pScratchPad, 0, SCRATCHPAD_SIZE);
	if (m_pVRAM)
		memset(m_pVRAM, 0, VRAM_SIZE);
}

// MemMap.h is included in a lot of places; to avoid include pollution there,
// these lock helpers are defined out-of-line here.
MemoryInitedLock::MemoryInitedLock()
{
	g_shutdownLock.lock();
}

MemoryInitedLock::~MemoryInitedLock()
{
	g_shutdownLock.unlock();
}

MemoryInitedLock Lock()
{
	return MemoryInitedLock();
}
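
// Hypothetical usage, for illustration only: code that wants to touch PSP
// memory from another thread can hold the lock so Shutdown() can't unmap the
// views mid-access.
//
//   auto guard = Memory::Lock();
//   if (Memory::IsValidAddress(addr))
//       value = Memory::Read_U32(addr);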

__forceinline static Opcode Read_Instruction(u32 address, bool resolveReplacements, Opcode inst)
{
	if (!MIPS_IS_EMUHACK(inst.encoding)) {
		return inst;
	}

	if (MIPS_IS_RUNBLOCK(inst.encoding) && MIPSComp::jit) {
		JitBlockCache *bc = MIPSComp::jit->GetBlockCache();
		int block_num = bc->GetBlockNumberFromEmuHackOp(inst, true);
		if (block_num >= 0) {
			inst = bc->GetOriginalFirstOp(block_num);
			if (resolveReplacements && MIPS_IS_REPLACEMENT(inst)) {
				u32 op;
				if (GetReplacedOpAt(address, &op)) {
					if (MIPS_IS_EMUHACK(op)) {
						ERROR_LOG(HLE, "WTF 1");
					}
					return Opcode(op);
				} else {
					ERROR_LOG(HLE, "Replacement, but no replacement op? %08x", inst.encoding);
				}
			}
		}
		return inst;
	} else if (resolveReplacements && MIPS_IS_REPLACEMENT(inst.encoding)) {
		u32 op;
		if (GetReplacedOpAt(address, &op)) {
			if (MIPS_IS_EMUHACK(op)) {
				ERROR_LOG(HLE, "WTF 2");
			}
			return Opcode(op);
		}
		return inst;
	} else {
		return inst;
	}
}

Opcode Read_Instruction(u32 address, bool resolveReplacements)
{
	Opcode inst = Opcode(Read_U32(address));
	return Read_Instruction(address, resolveReplacements, inst);
}

Opcode ReadUnchecked_Instruction(u32 address, bool resolveReplacements)
{
	Opcode inst = Opcode(ReadUnchecked_U32(address));
	return Read_Instruction(address, resolveReplacements, inst);
}
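
// These wrappers are what the debugger and disassembler should use to see the
// game's real MIPS opcode: the JIT overwrites the first word of compiled blocks
// (and of replaced functions) with EMUHACK pseudo-ops, and Read_Instruction()
// maps those back to the original encoding. Illustrative call, not code from
// this file:
//
//   Opcode op = Memory::Read_Instruction(currentMIPS->pc, true);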

Opcode Read_Opcode_JIT(u32 address)
{
	Opcode inst = Opcode(Read_U32(address));
	if (MIPS_IS_RUNBLOCK(inst.encoding) && MIPSComp::jit) {
		JitBlockCache *bc = MIPSComp::jit->GetBlockCache();
		int block_num = bc->GetBlockNumberFromEmuHackOp(inst, true);
		if (block_num >= 0) {
			return bc->GetOriginalFirstOp(block_num);
		} else {
			return inst;
		}
	} else {
		return inst;
	}
}

// WARNING! No checks!
// We assume that _Address is cached.
void Write_Opcode_JIT(const u32 _Address, const Opcode &_Value)
{
	Memory::WriteUnchecked_U32(_Value.encoding, _Address);
}
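
// Read_Opcode_JIT / Write_Opcode_JIT are the JIT-facing pair: they read back
// the original first op of a compiled block and patch opcodes in place
// (presumably from the block cache when blocks are created or invalidated),
// and unlike Read_Instruction above they don't try to resolve HLE replacement
// ops.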

void Memset(const u32 _Address, const u8 _iValue, const u32 _iLength)
{
	u8 *ptr = GetPointer(_Address);
	if (ptr != NULL) {
		memset(ptr, _iValue, _iLength);
	} else {
		for (size_t i = 0; i < _iLength; i++)
			Write_U8(_iValue, (u32)(_Address + i));
	}

#ifndef MOBILE_DEVICE
	CBreakPoints::ExecMemCheck(_Address, true, _iLength, currentMIPS->pc);
#endif
}