/*
    Copyright © 1995-2015, The AROS Development Team. All rights reserved.
    $Id$

    Desc: MungWall memory anti-trashing checker
    Lang: english
*/
#include <exec/alerts.h>
#include <exec/rawfmt.h>
#include <proto/exec.h>

#include "etask.h"
#include "exec_intern.h"
#include "exec_util.h"
#include "memory.h"
#include "mungwall.h"
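/*
 * DSCAN() wraps the verbose allocation-list debug output in MungWall_Scan().
 * Defined empty here, it compiles that output out; define it as its argument
 * to enable it.
 */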
#define DSCAN(x)
/*
 * Build a wall around the allocated chunk.
 * Returns updated pointer to the beginning of the chunk (actually a pointer to a usable area)
 *
 * 'res' is a pointer to the beginning of a raw memory block (inside which walls will be constructed)
 * 'origSize' is ORIGINAL allocation size (before adding mungwall size)
 */
APTR MungWall_Build(APTR res, APTR pool, IPTR origSize, ULONG requirements, struct TraceLocation *loc, struct ExecBase *SysBase)
{
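    /*
     * Layout of the raw block when mungwall is enabled (a sketch inferred from
     * the code below; MUNGWALL_BLOCK_SHIFT covers the header, the pre-wall and
     * any padding):
     *
     *   res                          res + MUNGWALL_BLOCK_SHIFT
     *   |                            |
     *   [ header | pre-wall/padding ][ user data: origSize ][ post-wall: MUNGWALL_SIZE ]
     */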
    if ((PrivExecBase(SysBase)->IntFlags & EXECF_MungWall) && res)
    {
        struct MungwallHeader *header = res;

        D(bug("[MungWall] Allocated %u bytes at 0x%p\n", origSize, res + MUNGWALL_BLOCK_SHIFT);)

        /*
         * Build pre-wall starting from header, and up to MUNGWALL_BLOCK_SHIFT.
         * This will also fill header padding. Will help in detecting issues.
         */
        memset(res, 0xDB, MUNGWALL_BLOCK_SHIFT);
        /*
         * Now save allocation info in the header (there is room of MUNGWALLHEADER_SIZE
         * bytes before the wall for such stuff, see above).
         */
        header->mwh_magicid   = MUNGWALL_HEADER_ID;
        header->mwh_fault     = FALSE;
        header->mwh_allocsize = origSize;
        header->mwh_pool      = pool;
        header->mwh_AllocFunc = loc->function;
        header->mwh_Owner     = GET_THIS_TASK;
        header->mwh_Caller    = loc->caller;
        /* Skip to the start of data space */
        res += MUNGWALL_BLOCK_SHIFT;

        /* Fill the block with weird stuff to exploit bugs in applications */
        if (!(requirements & MEMF_CLEAR))
            MUNGE_BLOCK(res, MEMFILL_ALLOC, origSize);
        /*
         * Initialize post-wall.
         * Fill only MUNGWALL_SIZE bytes; this is what we are guaranteed to have in the end.
         * We can't assume anything about padding. AllocMem() does guarantee that the actual
         * allocation start and size is a multiple of MEMCHUNK_TOTAL, but for example
         * AllocPooled() doesn't. Pooled allocations are only IPTR-aligned (see InternalFreePooled();
         * in AROS pooled allocations are vectored, the pool pointer is stored in an IPTR preceding
         * the actual block, in an AllocVec()-alike manner).
         * IPTR alignment is 100% correct since the original AmigaOS(tm) autodocs say that even
         * an AllocMem() result is longword-aligned. AROS introduces the additional assumption that
         * an AllocMem() result is aligned at least up to AROS_WORSTALIGN (a CPU-specific value, see
         * exec/memory.h), however this is still not true for other allocation functions (AllocVec() etc).
         */
        memset(res + origSize, 0xDB, MUNGWALL_SIZE);
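        /*
         * AllocMemList is modified and walked elsewhere under Forbid()/Permit()
         * as well (see MungWall_Check() and MungWall_Scan()), so disabling task
         * switching is assumed to be sufficient protection here.
         */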
        Forbid();
        AddHead((struct List *)&PrivExecBase(SysBase)->AllocMemList, (struct Node *)&header->mwh_node);
        Permit();
    }

    return res;
}
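/*
 * Format a human-readable description of a mungwall fault into 'buffer':
 * which allocation was damaged, who allocated it, and which walls (if any)
 * were broken. Used when reporting an AT_MUNGWALL alert.
 */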
char *FormatMWContext(char *buffer, struct MungwallContext *ctx, struct ExecBase *SysBase)
{
    buffer = NewRawDoFmt("In %s, block at 0x%p, size %lu", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->freeFunc, (APTR)ctx->hdr + MUNGWALL_BLOCK_SHIFT, ctx->hdr->mwh_allocsize) - 1;
    buffer = NewRawDoFmt("\nAllocated using %s ", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->hdr->mwh_AllocFunc) - 1;
    buffer = FormatTask(buffer, "by task 0x%p (%s)", ctx->hdr->mwh_Owner, SysBase);
    buffer = FormatLocation(buffer, " at 0x%p", ctx->hdr->mwh_Caller, SysBase);

    if (ctx->hdr->mwh_magicid != MUNGWALL_HEADER_ID)
        buffer = NewRawDoFmt("\nMUNGWALL_HEADER_ID mismatch", (VOID_FUNC)RAWFMTFUNC_STRING, buffer) - 1;

    if (ctx->freeSize)
        buffer = NewRawDoFmt("\nFreeMem size %lu mismatch", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->freeSize) - 1;

    if (ctx->pre_start)
        buffer = NewRawDoFmt("\nPre-wall broken at 0x%p - 0x%p", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->pre_start, ctx->pre_end) - 1;

    if (ctx->post_start)
        buffer = NewRawDoFmt("\nPost-wall broken at 0x%p - 0x%p", (VOID_FUNC)RAWFMTFUNC_STRING, buffer, ctx->post_start, ctx->post_end) - 1;

    return buffer;
}
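/*
 * Scan 'size' bytes of a wall at 'ptr' for bytes that differ from the fill
 * pattern. Returns the address of the first damaged byte (or NULL if the wall
 * is intact) and stores the address of the last damaged byte in *endptr.
 */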
static APTR CheckWall(UBYTE *ptr, UBYTE fill, IPTR size, APTR *endptr)
{
    APTR start = NULL;
    APTR end   = NULL;

    while (size--)
    {
        if (*ptr != fill)
        {
            if (!start)
                start = ptr;
            end = ptr;
        }

        ptr++;
    }

    *endptr = end;
    return start;
}
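/*
 * Validate a single mungwall header: check the magic ID, the recorded
 * allocation size against 'byteSize' (if given), and both walls. If anything
 * is wrong, the header is marked faulty and an AN_MemoryInsane alert with a
 * MungwallContext is thrown; a header already marked faulty is not reported again.
 */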
static void CheckHeader(struct MungwallHeader *header, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct MungwallContext mwdata;

    /* Do not report the fault twice on the same header */
    if (header->mwh_fault)
        return;

    if (header->mwh_magicid != MUNGWALL_HEADER_ID)
        header->mwh_fault = TRUE;

    if (byteSize && (header->mwh_allocsize != byteSize))
    {
        mwdata.freeSize = byteSize;
        header->mwh_fault = TRUE;
    }
    else
        mwdata.freeSize = 0;

    mwdata.pre_start = CheckWall((UBYTE *)header + MUNGWALLHEADER_SIZE, 0xDB, MUNGWALL_SIZE, &mwdata.pre_end);
    if (mwdata.pre_start)
        header->mwh_fault = TRUE;

    mwdata.post_start = CheckWall((UBYTE *)header + MUNGWALL_BLOCK_SHIFT + header->mwh_allocsize, 0xDB, MUNGWALL_SIZE, &mwdata.post_end);
    if (mwdata.post_start)
        header->mwh_fault = TRUE;
    if (header->mwh_fault)
    {
        /* Throw an alert with context */
        mwdata.hdr = header;
        mwdata.freeFunc = loc->function;
        Exec_ExtAlert(AN_MemoryInsane, loc->caller, loc->stack, AT_MUNGWALL, &mwdata, SysBase);

        /*
         * Our entry can be freed by another process while we are sitting in Alert().
         * This is 100% safe as long as we don't touch the entry after Alert().
         * A potentially dangerous case is list iteration in MungWall_Scan().
         * What to do then? Use a semaphore? Won't do, because of RemTask(NULL),
         * during which SysBase->ThisTask becomes garbage, thus a semaphore can't
         * be used.
         */
    }
}
/*
 * Check integrity of the walls around the specified block.
 *
 * 'memoryBlock' is the address of the block from the user's point of view
 * (i.e. what was returned by the allocation call)
 * 'byteSize' is the length of the block (again, from the user's point of view)
 *
 * Returns the address of the raw block (what really needs to be deallocated)
 */
APTR MungWall_Check(APTR memoryBlock, IPTR byteSize, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    if (PrivExecBase(SysBase)->IntFlags & EXECF_MungWall)
    {
        struct MungwallHeader *header;

        D(bug("[MungWall] Freeing %u bytes at 0x%p\n", byteSize, memoryBlock);)

        /* Align size and block to the requirements (needed because of AllocAbs) */
        byteSize += (IPTR)memoryBlock & (MEMCHUNK_TOTAL - 1);
        memoryBlock = (APTR)AROS_ROUNDDOWN2((IPTR)memoryBlock, MEMCHUNK_TOTAL);

        /* Take address of mungwall header */
        header = memoryBlock - MUNGWALL_BLOCK_SHIFT;

        /*
         * Remove from PrivExecBase->AllocMemList.
         * Do it before checking, otherwise AvailMem() can hit into it and cause a deadlock
         * while the alert is displayed.
         */
        Forbid();
        Remove((struct Node *)header);
        Permit();

        /* Reset fault state in order to see who is freeing the bad entry */
        header->mwh_fault = FALSE;

        CheckHeader(header, byteSize, loc, SysBase);
        /*
         * Fill the block with weird stuff to exploit bugs in applications.
         *
         * DOH! There's some _BAD_ code around that assumes memory can still be
         * accessed after freeing by just preventing task switching. In AROS,
         * RemTask(NULL) suffers from this problem because DOS processes are
         * created with their TCB placed in the tc_MemEntry list.
         * The workaround is to avoid munging when the current task is in the
         * TS_REMOVED state (RemTask() sets it). However RemTask() still needs
         * reengineering before memory protection can be used. With MP, deallocating
         * memory can cause immediate blocking of access to it, so RemTask() needs
         * to move the stack to some safe place and make sure that the task
         * structure is not accessed after freeing it.
         */
        if (GET_THIS_TASK->tc_State != TS_REMOVED)
            MUNGE_BLOCK(memoryBlock, MEMFILL_FREE, byteSize);

        /* Return real start of the block to deallocate */
        memoryBlock = header;
    }

    return memoryBlock;
}
/*
 * Scan the whole allocations list, optionally removing entries
 * belonging to a particular pool.
 */
void MungWall_Scan(APTR pool, struct TraceLocation *loc, struct ExecBase *SysBase)
{
    struct IntExecBase *sysBase = PrivExecBase(SysBase);

    if (sysBase->IntFlags & EXECF_MungWall)
    {
        struct MungwallHeader *allocnode;
        struct MungwallHeader *tmp;
        DSCAN(bug("[Mungwall] Scan(), caller %s, SysBase 0x%p\n", loc->function, SysBase);)
        Forbid();

        ForeachNodeSafe(&sysBase->AllocMemList, allocnode, tmp)
        {
            DSCAN(bug("[Mungwall] allocnode 0x%p, next 0x%p, %s(%lu)\n", allocnode, tmp, allocnode->mwh_AllocFunc, allocnode->mwh_allocsize);)

            if (pool && (allocnode->mwh_pool == pool))
            {
                /*
                 * If a pool address is given, remove entries belonging to it.
                 * The caller is DeletePool() and they are going to be freed anyway.
                 * Additionally we reset the fault state on them. This will cause
                 * one more alert and we can track where the memory was freed.
                 * This will give us a hint on who was owning it.
                 */
                Remove((struct Node *)allocnode);
                allocnode->mwh_fault = FALSE;
            }

            CheckHeader(allocnode, 0, loc, SysBase);
        }

        Permit();
    }
}