Cosmetic: A few comments added
[ode.git] / ode / src / util.cpp
blob 81952bef390e389fd5459f017fc2b6cf1b546cbc
1 /*************************************************************************
2 * *
3 * Open Dynamics Engine, Copyright (C) 2001,2002 Russell L. Smith. *
4 * All rights reserved. Email: russ@q12.org Web: www.q12.org *
5 * *
6 * This library is free software; you can redistribute it and/or *
7 * modify it under the terms of EITHER: *
8 * (1) The GNU Lesser General Public License as published by the Free *
9 * Software Foundation; either version 2.1 of the License, or (at *
10 * your option) any later version. The text of the GNU Lesser *
11 * General Public License is included with this library in the *
12 * file LICENSE.TXT. *
13 * (2) The BSD-style license that is included with this library in *
14 * the file LICENSE-BSD.TXT. *
15 * *
16 * This library is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files *
19 * LICENSE.TXT and LICENSE-BSD.TXT for more details. *
20 * *
21 *************************************************************************/
23 #include <ode/ode.h>
24 #include "config.h"
25 #include "objects.h"
26 #include "joints/joint.h"
27 #include "util.h"
29 static void InternalFreeWorldProcessContext (dxWorldProcessContext *context);
31 //****************************************************************************
32 // Malloc based world stepping memory manager
34 /*extern */dxWorldProcessMemoryManager g_WorldProcessMallocMemoryManager(dAlloc, dRealloc, dFree);
35 /*extern */dxWorldProcessMemoryReserveInfo g_WorldProcessDefaultReserveInfo(dWORLDSTEP_RESERVEFACTOR_DEFAULT, dWORLDSTEP_RESERVESIZE_DEFAULT);
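// Note: the two globals above are only the built-in defaults (malloc-based
// allocation and the default reserve policy). Per-world overrides are meant to
// go through the public API -- in this generation of ODE that would be
// dWorldSetStepMemoryManager() and dWorldSetStepMemoryReservationPolicy() --
// rather than by touching these objects directly; passing NULL to those setters
// presumably falls back to the defaults defined here.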
38 //****************************************************************************
39 // dxWorldProcessContext implementation
41 void dxWorldProcessContext::CleanupContext()
43 ResetState();
44 ClearPreallocations();
45 FreePreallocationsContext();
48 void dxWorldProcessContext::SavePreallocations(int islandcount, int const *islandsizes, dxBody *const *bodies, dxJoint *const *joints)
50 m_IslandCount = islandcount;
51 m_pIslandSizes = islandsizes;
52 m_pBodies = bodies;
53 m_pJoints = joints;
56 void dxWorldProcessContext::RetrievePreallocations(int &islandcount, int const *&islandsizes, dxBody *const *&bodies, dxJoint *const *&joints)
58 islandcount = m_IslandCount;
59 islandsizes = m_pIslandSizes;
60 bodies = m_pBodies;
61 joints = m_pJoints;
64 void dxWorldProcessContext::OffsetPreallocations(size_t stOffset)
66 // m_IslandCount = -- no offset for count
67 m_pIslandSizes = m_pIslandSizes ? (int const *)((size_t)m_pIslandSizes + stOffset) : NULL;
68 m_pBodies = m_pBodies ? (dxBody *const *)((size_t)m_pBodies + stOffset) : NULL;
69 m_pJoints = m_pJoints ? (dxJoint *const *)((size_t)m_pJoints + stOffset) : NULL;
72 void dxWorldProcessContext::CopyPreallocations(const dxWorldProcessContext *othercontext)
74 m_IslandCount = othercontext->m_IslandCount;
75 m_pIslandSizes = othercontext->m_pIslandSizes;
76 m_pBodies = othercontext->m_pBodies;
77 m_pJoints = othercontext->m_pJoints;
80 void dxWorldProcessContext::ClearPreallocations()
82 m_IslandCount = 0;
83 m_pIslandSizes = NULL;
84 m_pBodies = NULL;
85 m_pJoints = NULL;
88 void dxWorldProcessContext::FreePreallocationsContext()
90 if (m_pPreallocationcContext) {
91 InternalFreeWorldProcessContext(m_pPreallocationcContext);
92 m_pPreallocationcContext = NULL;
97 //****************************************************************************
98 // Auto disabling
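// A minimal usage sketch of the public auto-disable API that feeds the per-body
// adis parameters consulted below (illustrative values only; the dBody* setters
// are the standard ODE ones):
//
//   dBodySetAutoDisableFlag (body, 1);                  // opt the body in
//   dBodySetAutoDisableAverageSamplesCount (body, 10);  // velocity samples to average
//   dBodySetAutoDisableLinearThreshold (body, 0.01);    // "slow enough" linear speed
//   dBodySetAutoDisableAngularThreshold (body, 0.01);   // "slow enough" angular speed
//   dBodySetAutoDisableSteps (body, 10);                // stay idle this many steps...
//   dBodySetAutoDisableTime (body, 0.5);                // ...and this much simulated time
//
// Note that the stored thresholds are compared against squared average speeds below.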
100 void dInternalHandleAutoDisabling (dxWorld *world, dReal stepsize)
102 dxBody *bb;
103 for ( bb=world->firstbody; bb; bb=(dxBody*)bb->next )
105 // don't freeze objects mid-air (patch 1586738)
106 if ( bb->firstjoint == NULL ) continue;
108 // nothing to do unless this body is currently enabled and has
109 // the auto-disable flag set
110 if ( (bb->flags & (dxBodyAutoDisable|dxBodyDisabled)) != dxBodyAutoDisable ) continue;
112 // if sampling / threshold testing is disabled, we can never sleep.
113 if ( bb->adis.average_samples == 0 ) continue;
116 // see if the body is idle
119 #ifndef dNODEBUG
120 // sanity check
121 if ( bb->average_counter >= bb->adis.average_samples )
123 dUASSERT( bb->average_counter < bb->adis.average_samples, "buffer overflow" );
125 // something is going wrong, reset the average-calculations
126 bb->average_ready = 0; // not ready for average calculation
127 bb->average_counter = 0; // reset the buffer index
129 #endif // dNODEBUG
131 // sample the linear and angular velocity
132 bb->average_lvel_buffer[bb->average_counter][0] = bb->lvel[0];
133 bb->average_lvel_buffer[bb->average_counter][1] = bb->lvel[1];
134 bb->average_lvel_buffer[bb->average_counter][2] = bb->lvel[2];
135 bb->average_avel_buffer[bb->average_counter][0] = bb->avel[0];
136 bb->average_avel_buffer[bb->average_counter][1] = bb->avel[1];
137 bb->average_avel_buffer[bb->average_counter][2] = bb->avel[2];
138 bb->average_counter++;
140 // buffer ready test
141 if ( bb->average_counter >= bb->adis.average_samples )
143 bb->average_counter = 0; // fill the buffer from the beginning
144 bb->average_ready = 1; // this body is ready now for average calculation
147 int idle = 0; // Assume it's in motion unless we have samples to disprove it.
149 // enough samples?
150 if ( bb->average_ready )
152 idle = 1; // Initial assumption: IDLE
154 // the sample buffers are filled and ready for calculation
155 dVector3 average_lvel, average_avel;
157 // Store first velocity samples
158 average_lvel[0] = bb->average_lvel_buffer[0][0];
159 average_avel[0] = bb->average_avel_buffer[0][0];
160 average_lvel[1] = bb->average_lvel_buffer[0][1];
161 average_avel[1] = bb->average_avel_buffer[0][1];
162 average_lvel[2] = bb->average_lvel_buffer[0][2];
163 average_avel[2] = bb->average_avel_buffer[0][2];
165 // If we're not in "instantaneous mode"
166 if ( bb->adis.average_samples > 1 )
168 // add remaining velocities together
169 for ( unsigned int i = 1; i < bb->adis.average_samples; ++i )
171 average_lvel[0] += bb->average_lvel_buffer[i][0];
172 average_avel[0] += bb->average_avel_buffer[i][0];
173 average_lvel[1] += bb->average_lvel_buffer[i][1];
174 average_avel[1] += bb->average_avel_buffer[i][1];
175 average_lvel[2] += bb->average_lvel_buffer[i][2];
176 average_avel[2] += bb->average_avel_buffer[i][2];
179 // make average
180 dReal r1 = dReal( 1.0 ) / dReal( bb->adis.average_samples );
182 average_lvel[0] *= r1;
183 average_avel[0] *= r1;
184 average_lvel[1] *= r1;
185 average_avel[1] *= r1;
186 average_lvel[2] *= r1;
187 average_avel[2] *= r1;
190 // threshold test
191 dReal av_lspeed, av_aspeed;
192 av_lspeed = dCalcVectorDot3( average_lvel, average_lvel );
193 if ( av_lspeed > bb->adis.linear_average_threshold )
195 idle = 0; // average linear velocity is too high for idle
197 else
199 av_aspeed = dCalcVectorDot3( average_avel, average_avel );
200 if ( av_aspeed > bb->adis.angular_average_threshold )
202 idle = 0; // average angular velocity is too high for idle
207 // if it's idle, accumulate steps and time.
208 // these counters won't overflow because this code doesn't run for disabled bodies.
209 if (idle) {
210 bb->adis_stepsleft--;
211 bb->adis_timeleft -= stepsize;
213 else {
214 // Reset countdowns
215 bb->adis_stepsleft = bb->adis.idle_steps;
216 bb->adis_timeleft = bb->adis.idle_time;
219 // disable the body if it's idle for a long enough time
220 if ( bb->adis_stepsleft <= 0 && bb->adis_timeleft <= 0 )
222 bb->flags |= dxBodyDisabled; // set the disable flag
224 // disabling bodies should also include resetting the velocity
225 // should prevent jittering in big "islands"
226 bb->lvel[0] = 0;
227 bb->lvel[1] = 0;
228 bb->lvel[2] = 0;
229 bb->avel[0] = 0;
230 bb->avel[1] = 0;
231 bb->avel[2] = 0;
237 //****************************************************************************
238 // body rotation
240 // return sin(x)/x. this has a singularity at 0 so special handling is needed
241 // for small arguments.
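// (the two-term form below comes from sin(x)/x = 1 - x^2/6 + x^4/120 - ...;
// the constant 0.1666... is simply 1/6.)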
243 static inline dReal sinc (dReal x)
245 // if |x| < 1e-4 then use a Taylor series expansion. this two-term expansion
246 // is actually accurate to one LS bit within this range if double precision
247 // is being used - so don't worry!
248 if (dFabs(x) < 1.0e-4) return REAL(1.0) - x*x*REAL(0.166666666666666666667);
249 else return dSin(x)/x;
253 // given a body b, apply its linear and angular rotation over the time
254 // interval h, thereby adjusting its position and orientation.
256 void dxStepBody (dxBody *b, dReal h)
258 // cap the angular velocity
259 if (b->flags & dxBodyMaxAngularSpeed) {
260 const dReal max_ang_speed = b->max_angular_speed;
261 const dReal aspeed = dCalcVectorDot3( b->avel, b->avel );
262 if (aspeed > max_ang_speed*max_ang_speed) {
263 const dReal coef = max_ang_speed/dSqrt(aspeed);
264 dScaleVector3(b->avel, coef);
267 // end of angular velocity cap
270 // handle linear velocity
271 for (int j=0; j<3; j++) b->posr.pos[j] += h * b->lvel[j];
273 if (b->flags & dxBodyFlagFiniteRotation) {
274 dVector3 irv; // infinitesimal rotation vector
275 dQuaternion q; // quaternion for finite rotation
277 if (b->flags & dxBodyFlagFiniteRotationAxis) {
278 // split the angular velocity vector into a component along the finite
279 // rotation axis, and a component orthogonal to it.
280 dVector3 frv; // finite rotation vector
281 dReal k = dCalcVectorDot3 (b->finite_rot_axis,b->avel);
282 frv[0] = b->finite_rot_axis[0] * k;
283 frv[1] = b->finite_rot_axis[1] * k;
284 frv[2] = b->finite_rot_axis[2] * k;
285 irv[0] = b->avel[0] - frv[0];
286 irv[1] = b->avel[1] - frv[1];
287 irv[2] = b->avel[2] - frv[2];
289 // make a rotation quaternion q that corresponds to frv * h.
290 // compare this with the full-finite-rotation case below.
291 h *= REAL(0.5);
292 dReal theta = k * h;
293 q[0] = dCos(theta);
294 dReal s = sinc(theta) * h;
295 q[1] = frv[0] * s;
296 q[2] = frv[1] * s;
297 q[3] = frv[2] * s;
299 else {
300 // make a rotation quaternion q that corresponds to w * h
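// (for reference: the desired unit quaternion is q = [cos(theta), sin(theta)*w/|w|]
// with theta = |w|*h/2. after h is halved below, s = sinc(theta)*h reduces to
// sin(theta)/|w|, so scaling avel by s yields the vector part without an explicit
// division by |w| -- which stays well behaved as |w| approaches zero.)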
301 dReal wlen = dSqrt (b->avel[0]*b->avel[0] + b->avel[1]*b->avel[1] +
302 b->avel[2]*b->avel[2]);
303 h *= REAL(0.5);
304 dReal theta = wlen * h;
305 q[0] = dCos(theta);
306 dReal s = sinc(theta) * h;
307 q[1] = b->avel[0] * s;
308 q[2] = b->avel[1] * s;
309 q[3] = b->avel[2] * s;
312 // do the finite rotation
313 dQuaternion q2;
314 dQMultiply0 (q2,q,b->q);
315 for (int j=0; j<4; j++) b->q[j] = q2[j];
317 // do the infinitesimal rotation if required
318 if (b->flags & dxBodyFlagFiniteRotationAxis) {
319 dReal dq[4];
320 dWtoDQ (irv,b->q,dq);
321 for (int j=0; j<4; j++) b->q[j] += h * dq[j];
324 else {
325 // the normal way - do an infinitesimal rotation
326 dReal dq[4];
327 dWtoDQ (b->avel,b->q,dq);
328 for (int j=0; j<4; j++) b->q[j] += h * dq[j];
331 // normalize the quaternion and convert it to a rotation matrix
332 dNormalize4 (b->q);
333 dQtoR (b->q,b->posr.R);
335 // notify all attached geoms that this body has moved
336 for (dxGeom *geom = b->geom; geom; geom = dGeomGetBodyNext (geom))
337 dGeomMoved (geom);
339 // notify the user
340 if (b->moved_callback)
341 b->moved_callback(b);
344 // damping
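// (the scales and thresholds used here are per-body parameters; a minimal usage
// sketch with the standard public setters and illustrative values:
//   dBodySetLinearDamping (body, 0.01);            // velocity scaled by (1 - 0.01) each step
//   dBodySetAngularDamping (body, 0.05);
//   dBodySetLinearDampingThreshold (body, 0.001);  // only damp when moving faster than this
// dBodySetDamping() sets both scales at once, and world-level defaults exist as well.)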
345 if (b->flags & dxBodyLinearDamping) {
346 const dReal lin_threshold = b->dampingp.linear_threshold;
347 const dReal lin_speed = dCalcVectorDot3( b->lvel, b->lvel );
348 if ( lin_speed > lin_threshold) {
349 const dReal k = 1 - b->dampingp.linear_scale;
350 dScaleVector3(b->lvel, k);
353 if (b->flags & dxBodyAngularDamping) {
354 const dReal ang_threshold = b->dampingp.angular_threshold;
355 const dReal ang_speed = dCalcVectorDot3( b->avel, b->avel );
356 if ( ang_speed > ang_threshold) {
357 const dReal k = 1 - b->dampingp.angular_scale;
358 dScaleVector3(b->avel, k);
364 //****************************************************************************
365 // island processing
367 // This estimates dynamic memory requirements for dxProcessIslands
368 static size_t EstimateIslandsProcessingMemoryRequirements(dxWorld *world, size_t &sesize)
370 size_t res = 0;
372 size_t islandcounts = dEFFICIENT_SIZE(world->nb * 2 * sizeof(int));
373 res += islandcounts;
375 size_t bodiessize = dEFFICIENT_SIZE(world->nb * sizeof(dxBody*));
376 size_t jointssize = dEFFICIENT_SIZE(world->nj * sizeof(dxJoint*));
377 res += bodiessize + jointssize;
379 sesize = (bodiessize < jointssize) ? bodiessize : jointssize;
380 return res;
383 static size_t BuildIslandsAndEstimateStepperMemoryRequirements(dxWorldProcessContext *context,
384 dxWorld *world, dReal stepsize, dmemestimate_fn_t stepperestimate)
386 const int sizeelements = 2;
387 size_t maxreq = 0;
389 // handle auto-disabling of bodies
390 dInternalHandleAutoDisabling (world,stepsize);
392 int nb = world->nb, nj = world->nj;
393 // Make array for island body/joint counts
394 int *islandsizes = context->AllocateArray<int>(2 * nb);
395 int *sizescurr;
397 // make arrays for body and joint lists (for a single island) to go into
398 dxBody **body = context->AllocateArray<dxBody *>(nb);
399 dxJoint **joint = context->AllocateArray<dxJoint *>(nj);
401 BEGIN_STATE_SAVE(context, stackstate) {
402 // allocate a stack of unvisited bodies in the island. the maximum size of
403 // the stack can be the lesser of the number of bodies or joints, because
404 // new bodies are only ever added to the stack by going through untagged
405 // joints. all the bodies in the stack must be tagged!
406 int stackalloc = (nj < nb) ? nj : nb;
407 dxBody **stack = context->AllocateArray<dxBody *>(stackalloc);
410 // set all body/joint tags to 0
411 for (dxBody *b=world->firstbody; b; b=(dxBody*)b->next) b->tag = 0;
412 for (dxJoint *j=world->firstjoint; j; j=(dxJoint*)j->next) j->tag = 0;
415 sizescurr = islandsizes;
416 dxBody **bodystart = body;
417 dxJoint **jointstart = joint;
418 for (dxBody *bb=world->firstbody; bb; bb=(dxBody*)bb->next) {
419 // get bb = the next enabled, untagged body, and tag it
420 if (!bb->tag) {
421 if (!(bb->flags & dxBodyDisabled)) {
422 bb->tag = 1;
424 dxBody **bodycurr = bodystart;
425 dxJoint **jointcurr = jointstart;
427 // tag all bodies and joints starting from bb.
428 *bodycurr++ = bb;
430 int stacksize = 0;
431 dxBody *b = bb;
433 while (true) {
434 // traverse and tag all body's joints, add untagged connected bodies
435 // to stack
436 for (dxJointNode *n=b->firstjoint; n; n=n->next) {
437 dxJoint *njoint = n->joint;
438 if (!njoint->tag) {
439 if (njoint->isEnabled()) {
440 njoint->tag = 1;
441 *jointcurr++ = njoint;
443 dxBody *nbody = n->body;
444 // Body disabled flag is not checked here. This is how auto-enable works.
445 if (nbody && nbody->tag <= 0) {
446 nbody->tag = 1;
447 // Make sure all bodies are in the enabled state.
448 nbody->flags &= ~dxBodyDisabled;
449 stack[stacksize++] = nbody;
451 } else {
452 njoint->tag = -1; // Used in Step to prevent search over disabled joints (not needed for QuickStep so far)
456 dIASSERT(stacksize <= world->nb);
457 dIASSERT(stacksize <= world->nj);
459 if (stacksize == 0) {
460 break;
463 b = stack[--stacksize]; // pop body off stack
464 *bodycurr++ = b; // put body on body list
467 int bcount = bodycurr - bodystart;
468 int jcount = jointcurr - jointstart;
469 sizescurr[0] = bcount;
470 sizescurr[1] = jcount;
471 sizescurr += sizeelements;
473 size_t islandreq = stepperestimate(bodystart, bcount, jointstart, jcount);
474 maxreq = (maxreq > islandreq) ? maxreq : islandreq;
476 bodystart = bodycurr;
477 jointstart = jointcurr;
478 } else {
479 bb->tag = -1; // Not used so far (assigned to retain consistency with joints)
483 } END_STATE_SAVE(context, stackstate);
485 # ifndef dNODEBUG
486 // if debugging, check that all objects (except for disabled bodies,
487 // unconnected joints, and joints that are connected to disabled bodies)
488 // were tagged.
490 for (dxBody *b=world->firstbody; b; b=(dxBody*)b->next) {
491 if (b->flags & dxBodyDisabled) {
492 if (b->tag > 0) dDebug (0,"disabled body tagged");
494 else {
495 if (b->tag <= 0) dDebug (0,"enabled body not tagged");
498 for (dxJoint *j=world->firstjoint; j; j=(dxJoint*)j->next) {
499 if ( (( j->node[0].body && (j->node[0].body->flags & dxBodyDisabled)==0 ) ||
500 (j->node[1].body && (j->node[1].body->flags & dxBodyDisabled)==0) )
501 &&
502 j->isEnabled() ) {
503 if (j->tag <= 0) dDebug (0,"attached enabled joint not tagged");
505 else {
506 if (j->tag > 0) dDebug (0,"unattached or disabled joint tagged");
510 # endif
512 int islandcount = (sizescurr - islandsizes) / sizeelements;
513 context->SavePreallocations(islandcount, islandsizes, body, joint);
515 return maxreq;
518 // this groups all joints and bodies in a world into islands. all objects
519 // in an island are reachable by going through connected bodies and joints.
520 // each island can be simulated separately.
521 // note that joints that are not attached to anything will not be included
522 // in any island, and so they do not affect the simulation.
524 // this function starts new islands from unvisited bodies. however, it will
525 // never start a new island from a disabled body. thus islands of disabled
526 // bodies will not be included in the simulation. disabled bodies are
527 // re-enabled if they are found to be part of an active island.
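// Rough calling sequence as seen from the world stepping entry points (a sketch;
// estimate_fn / stepper_fn are placeholders for the dmemestimate_fn_t /
// dstepper_fn_t pair supplied by the concrete stepper, e.g. step.cpp or
// quickstep.cpp):
//
//   if (dxReallocateWorldProcessContext (world, stepsize, estimate_fn))
//     dxProcessIslands (world, stepsize, stepper_fn);
//
// i.e. memory is (re)allocated and the islands are recorded first, then this
// function replays the recorded islands and runs the stepper on each of them.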
529 void dxProcessIslands (dxWorld *world, dReal stepsize, dstepper_fn_t stepper)
531 const int sizeelements = 2;
533 dxStepWorkingMemory *wmem = world->wmem;
534 dIASSERT(wmem != NULL);
536 dxWorldProcessContext *context = wmem->GetWorldProcessingContext();
538 int islandcount;
539 int const *islandsizes;
540 dxBody *const *body;
541 dxJoint *const *joint;
542 context->RetrievePreallocations(islandcount, islandsizes, body, joint);
544 dxBody *const *bodystart = body;
545 dxJoint *const *jointstart = joint;
547 int const *const sizesend = islandsizes + islandcount * sizeelements;
548 for (int const *sizescurr = islandsizes; sizescurr != sizesend; sizescurr += sizeelements) {
549 int bcount = sizescurr[0];
550 int jcount = sizescurr[1];
552 BEGIN_STATE_SAVE(context, stepperstate) {
553 // now do something with body and joint lists
554 stepper (context,world,bodystart,bcount,jointstart,jcount,stepsize);
555 } END_STATE_SAVE(context, stepperstate);
557 bodystart += bcount;
558 jointstart += jcount;
561 context->CleanupContext();
562 dIASSERT(context->IsStructureValid());
565 //****************************************************************************
566 // World processing context management
568 static size_t AdjustArenaSizeForReserveRequirements(size_t arenareq, float rsrvfactor, unsigned rsrvminimum)
570 float scaledarena = arenareq * rsrvfactor;
571 size_t adjustedarena = (scaledarena < SIZE_MAX) ? (size_t)scaledarena : SIZE_MAX;
572 size_t boundedarena = (adjustedarena > rsrvminimum) ? adjustedarena : (size_t)rsrvminimum;
573 return dEFFICIENT_SIZE(boundedarena);
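// (worked example of the adjustment above, with made-up numbers: for
// arenareq = 100000, rsrvfactor = 1.25f and rsrvminimum = 65536 the scaled size
// is 125000, which already exceeds the minimum and is then rounded up to the
// next EFFICIENT_ALIGNMENT boundary by dEFFICIENT_SIZE.)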
576 static dxWorldProcessContext *InternalReallocateWorldProcessContext (
577 dxWorldProcessContext *oldcontext, size_t memreq,
578 const dxWorldProcessMemoryManager *memmgr, float rsrvfactor, unsigned rsrvminimum)
580 dxWorldProcessContext *context = oldcontext;
581 bool allocsuccess = false;
583 size_t oldarenasize;
584 void *pOldArena;
586 do {
587 size_t oldmemsize = oldcontext ? oldcontext->GetMemorySize() : 0;
588 if (!oldcontext || oldmemsize < memreq) {
589 oldarenasize = oldcontext ? dxWorldProcessContext::MakeArenaSize(oldmemsize) : 0;
590 pOldArena = oldcontext ? oldcontext->m_pArenaBegin : NULL;
592 if (!dxWorldProcessContext::IsArenaPossible(memreq)) {
593 break;
596 size_t arenareq = dxWorldProcessContext::MakeArenaSize(memreq);
597 size_t arenareq_with_reserve = AdjustArenaSizeForReserveRequirements(arenareq, rsrvfactor, rsrvminimum);
598 size_t memreq_with_reserve = memreq + (arenareq_with_reserve - arenareq);
600 if (oldcontext) {
602 if (oldcontext->m_pAllocCurrent != oldcontext->m_pAllocBegin) {
604 // Save old efficient offset and meaningful data size in case
605 // reallocation returns the block at a different efficient offset
606 size_t oldcontextofs = (size_t)oldcontext - (size_t)pOldArena;
607 size_t datasize = (size_t)oldcontext->m_pAllocCurrent - (size_t)oldcontext;
609 // Extra EFFICIENT_ALIGNMENT bytes might be needed after re-allocation with different alignment
610 size_t shrunkarenasize = dEFFICIENT_SIZE(datasize + oldcontextofs) + EFFICIENT_ALIGNMENT;
611 if (shrunkarenasize < oldarenasize) {
613 void *pShrunkOldArena = oldcontext->m_pArenaMemMgr->m_fnShrink(pOldArena, oldarenasize, shrunkarenasize);
614 if (!pShrunkOldArena) {
615 break;
618 // In case shrinking is not supported and the memory manager had to allocate-copy-free
619 if (pShrunkOldArena != pOldArena) {
620 dxWorldProcessContext *shrunkcontext = (dxWorldProcessContext *)dEFFICIENT_PTR(pShrunkOldArena);
622 // Perform data shift in case the efficient alignment of the new block
623 // does not match that of the old block
624 size_t shrunkcontextofs = (size_t)shrunkcontext - (size_t)pShrunkOldArena;
625 size_t offsetdiff = oldcontextofs - shrunkcontextofs;
626 if (offsetdiff != 0) {
627 memmove(shrunkcontext, (void *)((size_t)shrunkcontext + offsetdiff), datasize);
630 // Make sure allocation pointers are valid - that is necessary to
631 // be able to calculate size and free old arena later
632 size_t shrunkdatasize = dxWorldProcessContext::MakeBufferSize(shrunkarenasize);
633 void *blockbegin = dEFFICIENT_PTR(shrunkcontext + 1);
634 void *blockend = dOFFSET_EFFICIENTLY(blockbegin, shrunkdatasize);
635 shrunkcontext->m_pAllocBegin = blockbegin;
636 shrunkcontext->m_pAllocEnd = blockend;
637 shrunkcontext->m_pAllocCurrent = blockend; // -- set to end to prevent possibility of further allocation
638 shrunkcontext->m_pArenaBegin = pShrunkOldArena;
640 size_t stOffset = ((size_t)pShrunkOldArena - (size_t)pOldArena) - offsetdiff;
641 shrunkcontext->OffsetPreallocations(stOffset);
643 oldcontext = shrunkcontext;
645 // Reassign to old arena variables for potential freeing at exit
646 pOldArena = pShrunkOldArena;
649 // Reassign to old arena variables for potential freeing at exit
650 oldarenasize = shrunkarenasize;
653 } else {
654 oldcontext->m_pArenaMemMgr->m_fnFree(pOldArena, oldarenasize);
655 oldcontext = NULL;
657 // Zero variables to avoid another freeing on exit
658 pOldArena = NULL;
659 oldarenasize = 0;
663 // Allocate new arena
664 void *pNewArena = memmgr->m_fnAlloc(arenareq_with_reserve);
665 if (!pNewArena) {
666 break;
669 context = (dxWorldProcessContext *)dEFFICIENT_PTR(pNewArena);
671 void *blockbegin = dEFFICIENT_PTR(context + 1);
672 void *blockend = dOFFSET_EFFICIENTLY(blockbegin, memreq_with_reserve);
674 context->m_pAllocBegin = blockbegin;
675 context->m_pAllocEnd = blockend;
676 context->m_pArenaBegin = pNewArena;
677 context->m_pAllocCurrent = blockbegin;
679 if (oldcontext) {
680 context->CopyPreallocations(oldcontext);
681 } else {
682 context->ClearPreallocations();
685 context->m_pArenaMemMgr = memmgr;
686 context->m_pPreallocationcContext = oldcontext;
689 allocsuccess = true;
690 } while (false);
692 if (!allocsuccess) {
693 if (pOldArena) {
694 dIASSERT(oldcontext);
695 oldcontext->m_pArenaMemMgr->m_fnFree(pOldArena, oldarenasize);
697 context = NULL;
700 return context;
703 static void InternalFreeWorldProcessContext (dxWorldProcessContext *context)
705 size_t memsize = context->GetMemorySize();
706 size_t arenasize = dxWorldProcessContext::MakeArenaSize(memsize);
708 void *pArenaBegin = context->m_pArenaBegin;
709 context->m_pArenaMemMgr->m_fnFree(pArenaBegin, arenasize);
713 bool dxReallocateWorldProcessContext (dxWorld *world,
714 dReal stepsize, dmemestimate_fn_t stepperestimate)
716 dxStepWorkingMemory *wmem = AllocateOnDemand(world->wmem);
717 if (!wmem) return false;
719 dxWorldProcessContext *oldcontext = wmem->GetWorldProcessingContext();
720 dIASSERT (!oldcontext || oldcontext->IsStructureValid());
722 const dxWorldProcessMemoryReserveInfo *reserveinfo = wmem->SureGetMemoryReserveInfo();
723 const dxWorldProcessMemoryManager *memmgr = wmem->SureGetMemoryManager();
725 dxWorldProcessContext *context = oldcontext;
727 size_t sesize;
728 size_t islandsreq = EstimateIslandsProcessingMemoryRequirements(world, sesize);
729 dIASSERT(islandsreq == dEFFICIENT_SIZE(islandsreq));
730 dIASSERT(sesize == dEFFICIENT_SIZE(sesize));
732 size_t stepperestimatereq = islandsreq + sesize;
733 context = InternalReallocateWorldProcessContext(context, stepperestimatereq, memmgr, 1.0f, reserveinfo->m_uiReserveMinimum);
735 if (context)
737 size_t stepperreq = BuildIslandsAndEstimateStepperMemoryRequirements(context, world, stepsize, stepperestimate);
738 dIASSERT(stepperreq == dEFFICIENT_SIZE(stepperreq));
740 size_t memreq = stepperreq + islandsreq;
741 context = InternalReallocateWorldProcessContext(context, memreq, memmgr, reserveinfo->m_fReserveFactor, reserveinfo->m_uiReserveMinimum);
744 wmem->SetWorldProcessingContext(context);
745 return context != NULL;
748 dxWorldProcessContext *dxReallocateTemporayWorldProcessContext(dxWorldProcessContext *oldcontext,
749 size_t memreq, const dxWorldProcessMemoryManager *memmgr/*=NULL*/, const dxWorldProcessMemoryReserveInfo *reserveinfo/*=NULL*/)
751 dxWorldProcessContext *context = oldcontext;
753 if (context && context->GetMemorySize() < memreq) {
754 dIASSERT(!context->IsPreallocationsContextAssigned());
756 InternalFreeWorldProcessContext(context);
757 context = NULL;
760 if (context == NULL) {
761 const dxWorldProcessMemoryManager *surememmgr = memmgr ? memmgr : &g_WorldProcessMallocMemoryManager;
762 const dxWorldProcessMemoryReserveInfo *surereserveinfo = reserveinfo ? reserveinfo : &g_WorldProcessDefaultReserveInfo;
763 context = InternalReallocateWorldProcessContext(context, memreq, surememmgr, surereserveinfo->m_fReserveFactor, surereserveinfo->m_uiReserveMinimum);
766 return context;
769 void dxFreeWorldProcessContext (dxWorldProcessContext *context)
771 // Free the old arena in case the context is freed after reallocation without
772 // a call to the world stepping function
773 context->FreePreallocationsContext();
775 // Assert validity after the old arena is freed, as validation includes
776 // checking that the old arena is absent
777 dUASSERT (context->IsStructureValid(), "invalid context structure");
779 InternalFreeWorldProcessContext(context);