/*************************************************************************
 *                                                                       *
 * Open Dynamics Engine, Copyright (C) 2001,2002 Russell L. Smith.       *
 * All rights reserved.  Email: russ@q12.org   Web: www.q12.org          *
 *                                                                       *
 * This library is free software; you can redistribute it and/or         *
 * modify it under the terms of EITHER:                                  *
 *   (1) The GNU Lesser General Public License as published by the Free  *
 *       Software Foundation; either version 2.1 of the License, or (at  *
 *       your option) any later version. The text of the GNU Lesser      *
 *       General Public License is included with this library in the     *
 *       file LICENSE.TXT.                                                *
 *   (2) The BSD-style license that is included with this library in     *
 *       the file LICENSE-BSD.TXT.                                        *
 *                                                                       *
 * This library is distributed in the hope that it will be useful,       *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the files    *
 * LICENSE.TXT and LICENSE-BSD.TXT for more details.                     *
 *                                                                       *
 *************************************************************************/

#include "objects.h"
#include "joints/joint.h"
#include "util.h"

static void InternalFreeWorldProcessContext (dxWorldProcessContext *context);

//****************************************************************************
// Malloc based world stepping memory manager

/*extern */dxWorldProcessMemoryManager g_WorldProcessMallocMemoryManager(dAlloc, dRealloc, dFree);
/*extern */dxWorldProcessMemoryReserveInfo g_WorldProcessDefaultReserveInfo(dWORLDSTEP_RESERVEFACTOR_DEFAULT, dWORLDSTEP_RESERVESIZE_DEFAULT);

//****************************************************************************
// dxWorldProcessContext implementation

void dxWorldProcessContext::CleanupContext()
{
  ClearPreallocations();
  FreePreallocationsContext();
}

void dxWorldProcessContext::SavePreallocations(int islandcount, int const *islandsizes, dxBody *const *bodies, dxJoint *const *joints)
{
  m_IslandCount = islandcount;
  m_pIslandSizes = islandsizes;
  m_pBodies = bodies;
  m_pJoints = joints;
}

void dxWorldProcessContext::RetrievePreallocations(int &islandcount, int const *&islandsizes, dxBody *const *&bodies, dxJoint *const *&joints)
{
  islandcount = m_IslandCount;
  islandsizes = m_pIslandSizes;
  bodies = m_pBodies;
  joints = m_pJoints;
}

void dxWorldProcessContext::OffsetPreallocations(size_t stOffset)
{
  // m_IslandCount = -- no offset for count
  m_pIslandSizes = m_pIslandSizes ? (int const *)((size_t)m_pIslandSizes + stOffset) : NULL;
  m_pBodies = m_pBodies ? (dxBody *const *)((size_t)m_pBodies + stOffset) : NULL;
  m_pJoints = m_pJoints ? (dxJoint *const *)((size_t)m_pJoints + stOffset) : NULL;
}

void dxWorldProcessContext::CopyPreallocations(const dxWorldProcessContext *othercontext)
{
  m_IslandCount = othercontext->m_IslandCount;
  m_pIslandSizes = othercontext->m_pIslandSizes;
  m_pBodies = othercontext->m_pBodies;
  m_pJoints = othercontext->m_pJoints;
}

void dxWorldProcessContext::ClearPreallocations()
{
  m_IslandCount = 0;
  m_pIslandSizes = NULL;
  m_pBodies = NULL;
  m_pJoints = NULL;
}

void dxWorldProcessContext::FreePreallocationsContext()
{
  if (m_pPreallocationcContext) {
    InternalFreeWorldProcessContext(m_pPreallocationcContext);
    m_pPreallocationcContext = NULL;
  }
}

//****************************************************************************
// Auto disabling

void dInternalHandleAutoDisabling (dxWorld *world, dReal stepsize)
{
  dxBody *bb;
  for ( bb=world->firstbody; bb; bb=(dxBody*)bb->next )
  {
    // don't freeze objects mid-air (patch 1586738)
    if ( bb->firstjoint == NULL ) continue;

    // nothing to do unless this body is currently enabled and has
    // the auto-disable flag set
    if ( (bb->flags & (dxBodyAutoDisable|dxBodyDisabled)) != dxBodyAutoDisable ) continue;

    // if sampling / threshold testing is disabled, we can never sleep.
    if ( bb->adis.average_samples == 0 ) continue;

    // see if the body is idle

    if ( bb->average_counter >= bb->adis.average_samples )
    {
      dUASSERT( bb->average_counter < bb->adis.average_samples, "buffer overflow" );

      // something is going wrong, reset the average-calculations
      bb->average_ready = 0; // not ready for average calculation
      bb->average_counter = 0; // reset the buffer index
    }

    // sample the linear and angular velocity
    bb->average_lvel_buffer[bb->average_counter][0] = bb->lvel[0];
    bb->average_lvel_buffer[bb->average_counter][1] = bb->lvel[1];
    bb->average_lvel_buffer[bb->average_counter][2] = bb->lvel[2];
    bb->average_avel_buffer[bb->average_counter][0] = bb->avel[0];
    bb->average_avel_buffer[bb->average_counter][1] = bb->avel[1];
    bb->average_avel_buffer[bb->average_counter][2] = bb->avel[2];
    bb->average_counter++;

    if ( bb->average_counter >= bb->adis.average_samples )
    {
      bb->average_counter = 0; // fill the buffer from the beginning
      bb->average_ready = 1; // this body is ready now for average calculation
    }

    int idle = 0; // Assume it's in motion unless we have samples to disprove it.

    if ( bb->average_ready )
    {
      idle = 1; // Initial assumption: IDLE

      // the sample buffers are filled and ready for calculation
      dVector3 average_lvel, average_avel;

      // Store first velocity samples
      average_lvel[0] = bb->average_lvel_buffer[0][0];
      average_avel[0] = bb->average_avel_buffer[0][0];
      average_lvel[1] = bb->average_lvel_buffer[0][1];
      average_avel[1] = bb->average_avel_buffer[0][1];
      average_lvel[2] = bb->average_lvel_buffer[0][2];
      average_avel[2] = bb->average_avel_buffer[0][2];

      // If we're not in "instantaneous mode"
      if ( bb->adis.average_samples > 1 )
      {
        // add remaining velocities together
        for ( unsigned int i = 1; i < bb->adis.average_samples; ++i )
        {
          average_lvel[0] += bb->average_lvel_buffer[i][0];
          average_avel[0] += bb->average_avel_buffer[i][0];
          average_lvel[1] += bb->average_lvel_buffer[i][1];
          average_avel[1] += bb->average_avel_buffer[i][1];
          average_lvel[2] += bb->average_lvel_buffer[i][2];
          average_avel[2] += bb->average_avel_buffer[i][2];
        }

        dReal r1 = dReal( 1.0 ) / dReal( bb->adis.average_samples );

        average_lvel[0] *= r1;
        average_avel[0] *= r1;
        average_lvel[1] *= r1;
        average_avel[1] *= r1;
        average_lvel[2] *= r1;
        average_avel[2] *= r1;
      }

      dReal av_lspeed, av_aspeed;
      av_lspeed = dCalcVectorDot3( average_lvel, average_lvel );
      if ( av_lspeed > bb->adis.linear_average_threshold )
      {
        idle = 0; // average linear velocity is too high for idle
      }
      else
      {
        av_aspeed = dCalcVectorDot3( average_avel, average_avel );
        if ( av_aspeed > bb->adis.angular_average_threshold )
        {
          idle = 0; // average angular velocity is too high for idle
        }
      }
    }

    // if it's idle, accumulate steps and time.
    // these counters won't overflow because this code doesn't run for disabled bodies.
    if ( idle ) {
      bb->adis_stepsleft--;
      bb->adis_timeleft -= stepsize;
    }
    else {
      bb->adis_stepsleft = bb->adis.idle_steps;
      bb->adis_timeleft = bb->adis.idle_time;
    }

    // disable the body if it's idle for a long enough time
    if ( bb->adis_stepsleft <= 0 && bb->adis_timeleft <= 0 )
    {
      bb->flags |= dxBodyDisabled; // set the disable flag

      // disabling bodies should also include resetting the velocity
      // should prevent jittering in big "islands"
      bb->lvel[0] = 0;
      bb->lvel[1] = 0;
      bb->lvel[2] = 0;
      bb->avel[0] = 0;
      bb->avel[1] = 0;
      bb->avel[2] = 0;
    }
  }
}
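
/*
 * For reference, bodies opt in to this mechanism through the public
 * dBodySetAutoDisable* accessors. A minimal sketch (illustrative only,
 * not part of this file; parameter values are arbitrary):
 *
 *   dBodyID body = dBodyCreate (world);
 *   dBodySetAutoDisableFlag (body, 1);                 // allow auto-disable
 *   dBodySetAutoDisableAverageSamplesCount (body, 10); // velocity samples to average
 *   dBodySetAutoDisableLinearThreshold (body, 0.01);   // idle linear velocity limit
 *   dBodySetAutoDisableAngularThreshold (body, 0.01);  // idle angular velocity limit
 *   dBodySetAutoDisableSteps (body, 10);               // idle steps before disabling
 *   dBodySetAutoDisableTime (body, 0.5);               // idle seconds before disabling
 */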

//****************************************************************************

// return sin(x)/x. this has a singularity at 0 so special handling is needed
// for small arguments.

static inline dReal sinc (dReal x)
{
  // if |x| < 1e-4 then use a taylor series expansion. this two term expansion
  // is actually accurate to one LS bit within this range if double precision
  // is being used - so don't worry!
  if (dFabs(x) < 1.0e-4) return REAL(1.0) - x*x*REAL(0.166666666666666666667);
  else return dSin(x)/x;
}
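
// (For reference: sin(x)/x = 1 - x^2/6 + x^4/120 - ..., so for |x| < 1e-4 the
// first omitted term is below 1e-16/120, i.e. well under one bit of a double
// value near 1.0.)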

// given a body b, apply its linear and angular velocity over the time
// interval h, thereby adjusting its position and orientation.

void dxStepBody (dxBody *b, dReal h)
{
  // cap the angular velocity
  if (b->flags & dxBodyMaxAngularSpeed) {
    const dReal max_ang_speed = b->max_angular_speed;
    const dReal aspeed = dCalcVectorDot3( b->avel, b->avel );
    if (aspeed > max_ang_speed*max_ang_speed) {
      const dReal coef = max_ang_speed/dSqrt(aspeed);
      dScaleVector3(b->avel, coef);
    }
  }
  // end of angular velocity cap

  // handle linear velocity
  for (int j=0; j<3; j++) b->posr.pos[j] += h * b->lvel[j];

  if (b->flags & dxBodyFlagFiniteRotation) {
    dVector3 irv;   // infinitesimal rotation vector
    dQuaternion q;  // quaternion for finite rotation

    if (b->flags & dxBodyFlagFiniteRotationAxis) {
      // split the angular velocity vector into a component along the finite
      // rotation axis, and a component orthogonal to it.
      dVector3 frv; // finite rotation vector
      dReal k = dCalcVectorDot3 (b->finite_rot_axis,b->avel);
      frv[0] = b->finite_rot_axis[0] * k;
      frv[1] = b->finite_rot_axis[1] * k;
      frv[2] = b->finite_rot_axis[2] * k;
      irv[0] = b->avel[0] - frv[0];
      irv[1] = b->avel[1] - frv[1];
      irv[2] = b->avel[2] - frv[2];

      // make a rotation quaternion q that corresponds to frv * h.
      // compare this with the full-finite-rotation case below.
      dReal h2 = h * REAL(0.5);
      dReal theta = k * h2;
      q[0] = dCos(theta);
      dReal s = sinc(theta) * h2;
      q[1] = frv[0] * s;
      q[2] = frv[1] * s;
      q[3] = frv[2] * s;
    }
    else {
      // make a rotation quaternion q that corresponds to w * h
      dReal wlen = dSqrt (b->avel[0]*b->avel[0] + b->avel[1]*b->avel[1] +
                          b->avel[2]*b->avel[2]);
      dReal h2 = h * REAL(0.5);
      dReal theta = wlen * h2;
      q[0] = dCos(theta);
      dReal s = sinc(theta) * h2;
      q[1] = b->avel[0] * s;
      q[2] = b->avel[1] * s;
      q[3] = b->avel[2] * s;
    }

    // do the finite rotation
    dQuaternion q2;
    dQMultiply0 (q2,q,b->q);
    for (int j=0; j<4; j++) b->q[j] = q2[j];

    // do the infinitesimal rotation if required
    if (b->flags & dxBodyFlagFiniteRotationAxis) {
      dReal dq[4];
      dWtoDQ (irv,b->q,dq);
      for (int j=0; j<4; j++) b->q[j] += h * dq[j];
    }
  }
  else {
    // the normal way - do an infinitesimal rotation
    dReal dq[4];
    dWtoDQ (b->avel,b->q,dq);
    for (int j=0; j<4; j++) b->q[j] += h * dq[j];
  }

  // normalize the quaternion and convert it to a rotation matrix
  dNormalize4 (b->q);
  dQtoR (b->q,b->posr.R);

  // notify all attached geoms that this body has moved
  for (dxGeom *geom = b->geom; geom; geom = dGeomGetBodyNext (geom))
    dGeomMoved (geom);

  if (b->moved_callback)
    b->moved_callback(b);

  if (b->flags & dxBodyLinearDamping) {
    const dReal lin_threshold = b->dampingp.linear_threshold;
    const dReal lin_speed = dCalcVectorDot3( b->lvel, b->lvel );
    if ( lin_speed > lin_threshold ) {
      const dReal k = 1 - b->dampingp.linear_scale;
      dScaleVector3(b->lvel, k);
    }
  }

  if (b->flags & dxBodyAngularDamping) {
    const dReal ang_threshold = b->dampingp.angular_threshold;
    const dReal ang_speed = dCalcVectorDot3( b->avel, b->avel );
    if ( ang_speed > ang_threshold ) {
      const dReal k = 1 - b->dampingp.angular_scale;
      dScaleVector3(b->avel, k);
    }
  }
}
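
/*
 * For reference, the per-body options exercised above are configured through
 * the public API. A minimal sketch (illustrative only, not part of this file;
 * values are arbitrary):
 *
 *   dBodySetFiniteRotationMode (body, 1);        // use the finite-rotation path
 *   dBodySetFiniteRotationAxis (body, 0, 0, 1);  // optional preferred axis
 *   dBodySetMaxAngularSpeed (body, 20.0);        // cap |avel| before integration
 *   dBodySetLinearDamping (body, 0.01);          // per-step velocity scaling
 *   dBodySetAngularDamping (body, 0.01);
 */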

//****************************************************************************

// This estimates dynamic memory requirements for dxProcessIslands
static size_t EstimateIslandsProcessingMemoryRequirements(dxWorld *world, size_t &sesize)
{
  size_t res = 0;

  size_t islandcounts = dEFFICIENT_SIZE(world->nb * 2 * sizeof(int));
  res += islandcounts;

  size_t bodiessize = dEFFICIENT_SIZE(world->nb * sizeof(dxBody*));
  size_t jointssize = dEFFICIENT_SIZE(world->nj * sizeof(dxJoint*));
  res += bodiessize + jointssize;

  sesize = (bodiessize < jointssize) ? bodiessize : jointssize;
  return res;
}

static size_t BuildIslandsAndEstimateStepperMemoryRequirements(dxWorldProcessContext *context,
  dxWorld *world, dReal stepsize, dmemestimate_fn_t stepperestimate)
{
  const int sizeelements = 2;

  size_t maxreq = 0;

  // handle auto-disabling of bodies
  dInternalHandleAutoDisabling (world,stepsize);

  int nb = world->nb, nj = world->nj;

  // Make array for island body/joint counts
  int *islandsizes = context->AllocateArray<int>(2 * nb);
  int *sizescurr;

  // make arrays for body and joint lists (for a single island) to go into
  dxBody **body = context->AllocateArray<dxBody *>(nb);
  dxJoint **joint = context->AllocateArray<dxJoint *>(nj);

  BEGIN_STATE_SAVE(context, stackstate) {
    // allocate a stack of unvisited bodies in the island. the maximum size of
    // the stack can be the lesser of the number of bodies or joints, because
    // new bodies are only ever added to the stack by going through untagged
    // joints. all the bodies in the stack must be tagged!
    int stackalloc = (nj < nb) ? nj : nb;
    dxBody **stack = context->AllocateArray<dxBody *>(stackalloc);

    // set all body/joint tags to 0
    for (dxBody *b=world->firstbody; b; b=(dxBody*)b->next) b->tag = 0;
    for (dxJoint *j=world->firstjoint; j; j=(dxJoint*)j->next) j->tag = 0;

    sizescurr = islandsizes;
    dxBody **bodystart = body;
    dxJoint **jointstart = joint;
    for (dxBody *bb=world->firstbody; bb; bb=(dxBody*)bb->next) {
      // get bb = the next enabled, untagged body, and tag it
      if (!bb->tag) {
        if (!(bb->flags & dxBodyDisabled)) {
          bb->tag = 1;

          dxBody **bodycurr = bodystart;
          dxJoint **jointcurr = jointstart;

          // tag all bodies and joints starting from bb.
          *bodycurr++ = bb;

          int stacksize = 0;
          dxBody *b = bb;

          while (true) {
            // traverse and tag all body's joints, add untagged connected bodies
            // to stack
            for (dxJointNode *n=b->firstjoint; n; n=n->next) {
              dxJoint *njoint = n->joint;
              if (!njoint->tag) {
                if (njoint->isEnabled()) {
                  njoint->tag = 1;
                  *jointcurr++ = njoint;

                  dxBody *nbody = n->body;
                  // Body disabled flag is not checked here. This is how auto-enable works.
                  if (nbody && nbody->tag <= 0) {
                    nbody->tag = 1;
                    // Make sure all bodies are in the enabled state.
                    nbody->flags &= ~dxBodyDisabled;
                    stack[stacksize++] = nbody;
                  }
                }
                else {
                  njoint->tag = -1; // Used in Step to prevent search over disabled joints (not needed for QuickStep so far)
                }
              }
            }
            dIASSERT(stacksize <= world->nb);
            dIASSERT(stacksize <= world->nj);

            if (stacksize == 0) {
              break;
            }

            b = stack[--stacksize]; // pop body off stack
            *bodycurr++ = b;        // put body on body list
          }

          int bcount = bodycurr - bodystart;
          int jcount = jointcurr - jointstart;
          sizescurr[0] = bcount;
          sizescurr[1] = jcount;
          sizescurr += sizeelements;

          size_t islandreq = stepperestimate(bodystart, bcount, jointstart, jcount);
          maxreq = (maxreq > islandreq) ? maxreq : islandreq;

          bodystart = bodycurr;
          jointstart = jointcurr;
        }
        else {
          bb->tag = -1; // Not used so far (assigned to retain consistency with joints)
        }
      }
    }
  } END_STATE_SAVE(context, stackstate);

  // if debugging, check that all objects (except for disabled bodies,
  // unconnected joints, and joints that are connected to disabled bodies)
  // were tagged
  for (dxBody *b=world->firstbody; b; b=(dxBody*)b->next) {
    if (b->flags & dxBodyDisabled) {
      if (b->tag > 0) dDebug (0,"disabled body tagged");
    }
    else {
      if (b->tag <= 0) dDebug (0,"enabled body not tagged");
    }
  }
  for (dxJoint *j=world->firstjoint; j; j=(dxJoint*)j->next) {
    if ( (( j->node[0].body && (j->node[0].body->flags & dxBodyDisabled)==0 ) ||
      (j->node[1].body && (j->node[1].body->flags & dxBodyDisabled)==0) )
      && j->isEnabled() ) {
      if (j->tag <= 0) dDebug (0,"attached enabled joint not tagged");
    }
    else {
      if (j->tag > 0) dDebug (0,"unattached or disabled joint tagged");
    }
  }

  int islandcount = (sizescurr - islandsizes) / sizeelements;
  context->SavePreallocations(islandcount, islandsizes, body, joint);

  return maxreq;
}
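
// The preallocations saved above form three parallel arrays: islandsizes
// holds a (body count, joint count) pair per island, while the body and joint
// arrays store each island's members back to back in the same order.
// dxProcessIslands below walks the pairs to slice the arrays island by island.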

// this groups all joints and bodies in a world into islands. all objects
// in an island are reachable by going through connected bodies and joints.
// each island can be simulated separately.
// note that joints that are not attached to anything will not be included
// in any island, and so they do not affect the simulation.
//
// this function starts new islands from unvisited bodies. however, it will
// never start a new island from a disabled body. thus islands of disabled
// bodies will not be included in the simulation. disabled bodies are
// re-enabled if they are found to be part of an active island.

void dxProcessIslands (dxWorld *world, dReal stepsize, dstepper_fn_t stepper)
{
  const int sizeelements = 2;

  dxStepWorkingMemory *wmem = world->wmem;
  dIASSERT(wmem != NULL);

  dxWorldProcessContext *context = wmem->GetWorldProcessingContext();

  int islandcount;
  int const *islandsizes;
  dxBody *const *body;
  dxJoint *const *joint;
  context->RetrievePreallocations(islandcount, islandsizes, body, joint);

  dxBody *const *bodystart = body;
  dxJoint *const *jointstart = joint;

  int const *const sizesend = islandsizes + islandcount * sizeelements;
  for (int const *sizescurr = islandsizes; sizescurr != sizesend; sizescurr += sizeelements) {
    int bcount = sizescurr[0];
    int jcount = sizescurr[1];

    BEGIN_STATE_SAVE(context, stepperstate) {
      // now do something with body and joint lists
      stepper (context,world,bodystart,bcount,jointstart,jcount,stepsize);
    } END_STATE_SAVE(context, stepperstate);

    bodystart += bcount;
    jointstart += jcount;
  }

  context->CleanupContext();
  dIASSERT(context->IsStructureValid());
}
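
/*
 * Typical call sequence (a sketch; the actual callers are the world stepping
 * entry points, which pair a stepper with its matching memory estimator):
 *
 *   if (dxReallocateWorldProcessContext (world, stepsize, stepper_estimate_fn)) {
 *     dxProcessIslands (world, stepsize, stepper_fn);
 *   }
 */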

//****************************************************************************
// World processing context management

static size_t AdjustArenaSizeForReserveRequirements(size_t arenareq, float rsrvfactor, unsigned rsrvminimum)
{
  float scaledarena = arenareq * rsrvfactor;
  size_t adjustedarena = (scaledarena < SIZE_MAX) ? (size_t)scaledarena : SIZE_MAX;
  size_t boundedarena = (adjustedarena > rsrvminimum) ? adjustedarena : (size_t)rsrvminimum;
  return dEFFICIENT_SIZE(boundedarena);
}
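
// Example: with a reserve factor of 1.25 and a 64 KiB minimum, a 100 KiB
// request scales to 125 KiB (already above the minimum) and is then rounded
// up to the allocation-efficient boundary, while a 16 KiB request would be
// bumped straight to the 64 KiB minimum.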

static dxWorldProcessContext *InternalReallocateWorldProcessContext (
  dxWorldProcessContext *oldcontext, size_t memreq,
  const dxWorldProcessMemoryManager *memmgr, float rsrvfactor, unsigned rsrvminimum)
{
  dxWorldProcessContext *context = oldcontext;
  bool allocsuccess = false;

  size_t oldarenasize;
  void *pOldArena;

  do {
    size_t oldmemsize = oldcontext ? oldcontext->GetMemorySize() : 0;
    if (!oldcontext || oldmemsize < memreq) {
      oldarenasize = oldcontext ? dxWorldProcessContext::MakeArenaSize(oldmemsize) : 0;
      pOldArena = oldcontext ? oldcontext->m_pArenaBegin : NULL;

      if (!dxWorldProcessContext::IsArenaPossible(memreq)) {
        break;
      }

      size_t arenareq = dxWorldProcessContext::MakeArenaSize(memreq);
      size_t arenareq_with_reserve = AdjustArenaSizeForReserveRequirements(arenareq, rsrvfactor, rsrvminimum);
      size_t memreq_with_reserve = memreq + (arenareq_with_reserve - arenareq);

      if (oldcontext) {
        if (oldcontext->m_pAllocCurrent != oldcontext->m_pAllocBegin) {

          // Save old efficient offset and meaningful data size for the case where
          // reallocation puts the block at a different efficient offset
          size_t oldcontextofs = (size_t)oldcontext - (size_t)pOldArena;
          size_t datasize = (size_t)oldcontext->m_pAllocCurrent - (size_t)oldcontext;

          // Extra EFFICIENT_ALIGNMENT bytes might be needed after re-allocation with different alignment
          size_t shrunkarenasize = dEFFICIENT_SIZE(datasize + oldcontextofs) + EFFICIENT_ALIGNMENT;
          if (shrunkarenasize < oldarenasize) {

            void *pShrunkOldArena = oldcontext->m_pArenaMemMgr->m_fnShrink(pOldArena, oldarenasize, shrunkarenasize);
            if (!pShrunkOldArena) {
              break;
            }

            // In case shrinking is not supported and the memory manager had to allocate-copy-free
            if (pShrunkOldArena != pOldArena) {
              dxWorldProcessContext *shrunkcontext = (dxWorldProcessContext *)dEFFICIENT_PTR(pShrunkOldArena);

              // Perform a data shift in case the efficient alignment of the new block
              // does not match that of the old block
              size_t shrunkcontextofs = (size_t)shrunkcontext - (size_t)pShrunkOldArena;
              size_t offsetdiff = oldcontextofs - shrunkcontextofs;
              if (offsetdiff != 0) {
                memmove(shrunkcontext, (void *)((size_t)shrunkcontext + offsetdiff), datasize);
              }

              // Make sure allocation pointers are valid - that is necessary to
              // be able to calculate size and free old arena later
              size_t shrunkdatasize = dxWorldProcessContext::MakeBufferSize(shrunkarenasize);
              void *blockbegin = dEFFICIENT_PTR(shrunkcontext + 1);
              void *blockend = dOFFSET_EFFICIENTLY(blockbegin, shrunkdatasize);
              shrunkcontext->m_pAllocBegin = blockbegin;
              shrunkcontext->m_pAllocEnd = blockend;
              shrunkcontext->m_pAllocCurrent = blockend; // -- set to end to prevent possibility of further allocation
              shrunkcontext->m_pArenaBegin = pShrunkOldArena;

              size_t stOffset = ((size_t)pShrunkOldArena - (size_t)pOldArena) - offsetdiff;
              shrunkcontext->OffsetPreallocations(stOffset);

              oldcontext = shrunkcontext;

              // Reassign to old arena variables for potential freeing at exit
              pOldArena = pShrunkOldArena;
            }

            // Reassign to old arena variables for potential freeing at exit
            oldarenasize = shrunkarenasize;
          }
        }
        else {
          oldcontext->m_pArenaMemMgr->m_fnFree(pOldArena, oldarenasize);
          oldcontext = NULL;

          // Zero variables to avoid another freeing on exit
          pOldArena = NULL;
          oldarenasize = 0;
        }
      }

      // Allocate new arena
      void *pNewArena = memmgr->m_fnAlloc(arenareq_with_reserve);
      if (!pNewArena) {
        break;
      }

      context = (dxWorldProcessContext *)dEFFICIENT_PTR(pNewArena);

      void *blockbegin = dEFFICIENT_PTR(context + 1);
      void *blockend = dOFFSET_EFFICIENTLY(blockbegin, memreq_with_reserve);

      context->m_pAllocBegin = blockbegin;
      context->m_pAllocEnd = blockend;
      context->m_pArenaBegin = pNewArena;
      context->m_pAllocCurrent = blockbegin;

      if (oldcontext) {
        context->CopyPreallocations(oldcontext);
      }
      else {
        context->ClearPreallocations();
      }

      context->m_pArenaMemMgr = memmgr;
      context->m_pPreallocationcContext = oldcontext;
    }

    allocsuccess = true;
  } while (false);

  if (!allocsuccess) {
    if (pOldArena) {
      dIASSERT(oldcontext);
      oldcontext->m_pArenaMemMgr->m_fnFree(pOldArena, oldarenasize);
    }
    context = NULL;
  }

  return context;
}

static void InternalFreeWorldProcessContext (dxWorldProcessContext *context)
{
  size_t memsize = context->GetMemorySize();
  size_t arenasize = dxWorldProcessContext::MakeArenaSize(memsize);

  void *pArenaBegin = context->m_pArenaBegin;
  context->m_pArenaMemMgr->m_fnFree(pArenaBegin, arenasize);
}

bool dxReallocateWorldProcessContext (dxWorld *world,
  dReal stepsize, dmemestimate_fn_t stepperestimate)
{
  dxStepWorkingMemory *wmem = AllocateOnDemand(world->wmem);
  if (!wmem) return false;

  dxWorldProcessContext *oldcontext = wmem->GetWorldProcessingContext();
  dIASSERT (!oldcontext || oldcontext->IsStructureValid());

  const dxWorldProcessMemoryReserveInfo *reserveinfo = wmem->SureGetMemoryReserveInfo();
  const dxWorldProcessMemoryManager *memmgr = wmem->SureGetMemoryManager();

  dxWorldProcessContext *context = oldcontext;

  size_t sesize;
  size_t islandsreq = EstimateIslandsProcessingMemoryRequirements(world, sesize);
  dIASSERT(islandsreq == dEFFICIENT_SIZE(islandsreq));
  dIASSERT(sesize == dEFFICIENT_SIZE(sesize));

  size_t stepperestimatereq = islandsreq + sesize;
  context = InternalReallocateWorldProcessContext(context, stepperestimatereq, memmgr, 1.0f, reserveinfo->m_uiReserveMinimum);

  if (context)
  {
    size_t stepperreq = BuildIslandsAndEstimateStepperMemoryRequirements(context, world, stepsize, stepperestimate);
    dIASSERT(stepperreq == dEFFICIENT_SIZE(stepperreq));

    size_t memreq = stepperreq + islandsreq;
    context = InternalReallocateWorldProcessContext(context, memreq, memmgr, reserveinfo->m_fReserveFactor, reserveinfo->m_uiReserveMinimum);
  }

  wmem->SetWorldProcessingContext(context);
  return context != NULL;
}
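
// Note the two-pass sizing above: the arena is first grown just enough for
// island bookkeeping (reserve factor 1.0), islands are then built to obtain
// the stepper's worst-case per-island estimate, and only after that is the
// arena grown to its final size using the caller-configured reserve policy.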

dxWorldProcessContext *dxReallocateTemporayWorldProcessContext(dxWorldProcessContext *oldcontext,
  size_t memreq, const dxWorldProcessMemoryManager *memmgr/*=NULL*/, const dxWorldProcessMemoryReserveInfo *reserveinfo/*=NULL*/)
{
  dxWorldProcessContext *context = oldcontext;

  if (context && context->GetMemorySize() < memreq) {
    dIASSERT(!context->IsPreallocationsContextAssigned());

    InternalFreeWorldProcessContext(context);
    context = NULL;
  }

  if (context == NULL) {
    const dxWorldProcessMemoryManager *surememmgr = memmgr ? memmgr : &g_WorldProcessMallocMemoryManager;
    const dxWorldProcessMemoryReserveInfo *surereserveinfo = reserveinfo ? reserveinfo : &g_WorldProcessDefaultReserveInfo;
    context = InternalReallocateWorldProcessContext(context, memreq, surememmgr, surereserveinfo->m_fReserveFactor, surereserveinfo->m_uiReserveMinimum);
  }

  return context;
}

void dxFreeWorldProcessContext (dxWorldProcessContext *context)
{
  // Free old arena for the case if context is freed after reallocation without
  // a call to world stepping function
  context->FreePreallocationsContext();

  // Assert validity after old arena is freed as validation includes checking for
  // old arena to be absent
  dUASSERT (context->IsStructureValid(), "invalid context structure");

  InternalFreeWorldProcessContext(context);
}