<?php
/**
 * Job queue runner utility methods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 */

use MediaWiki\MediaWikiServices;
use MediaWiki\Logger\LoggerFactory;
use Liuggio\StatsdClient\Factory\StatsdDataFactory;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerInterface;
use Wikimedia\ScopedCallback;

/**
 * Job queue runner utility methods
 *
 * @ingroup JobQueue
 */
class JobRunner implements LoggerAwareInterface {
	/** @var callable|null Debug output handler */
	protected $debug;

	/**
	 * @var LoggerInterface $logger
	 */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors

	/**
	 * @param callable $debug Optional debug output handler
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}

	/**
	 * @param LoggerInterface $logger
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}

	/**
	 * @param LoggerInterface $logger
	 */
	public function __construct( LoggerInterface $logger = null ) {
		if ( $logger === null ) {
			$logger = LoggerFactory::getInstance( 'runJobs' );
		}
		$this->setLogger( $logger );
	}
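
	// Illustrative sketch only (not part of the original file): a caller such as a
	// maintenance script might wire the runner up roughly like this. The logging
	// channel matches the default above; the debug handler shown here is made up.
	//
	//     $runner = new JobRunner( LoggerFactory::getInstance( 'runJobs' ) );
	//     $runner->setDebugHandler( function ( $line ) {
	//         fwrite( STDERR, $line ); // e.g. stream debug output to STDERR
	//     } );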

	/**
	 * Run jobs of the specified number/type for the specified time
	 *
	 * The response map has a 'job' field that lists status of each job, including:
	 *   - type   : the job type
	 *   - status : ok/failed
	 *   - error  : any error message string
	 *   - time   : the job run time in ms
	 * The response map also has:
	 *   - backoffs : the (job type => seconds) map of backoff times
	 *   - elapsed  : the total time spent running tasks in ms
	 *   - reached  : the reason the script finished, one of (none-ready, job-limit, time-limit,
	 *     memory-limit)
	 *
	 * This method outputs status information only if a debug handler was set.
	 * Any exceptions are caught and logged, but are not reported as output.
	 *
	 * @param array $options Map of parameters:
	 *    - type     : the job type (or false for the default types)
	 *    - maxJobs  : maximum number of jobs to run
	 *    - maxTime  : maximum time in seconds before stopping
	 *    - throttle : whether to respect job backoff configuration
	 * @return array Summary response that can easily be JSON serialized
	 */
	public function run( array $options ) {
		global $wgJobClasses, $wgTrxProfilerLimits;

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		$type = isset( $options['type'] ) ? $options['type'] : false;
		$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
		$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}
		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'replica-lag-limit';
			return $response;
		}

		// Flush any pending DB writes for sanity
		$lbFactory->commitAll( __METHOD__ );

		// Catch huge single updates that lead to replica DB lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );

		// Some job types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // time since jobs started running
		$lastCheckTime = 1; // timestamp of last replica DB check
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}
			$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				WebRequest::overrideRequestId( $job->getRequestId() );

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
					$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
				}

				// Back off of certain jobs for a while (for throttling and for errors)
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, self::ERROR_BACKOFF_TTL ); // too many errors
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type'   => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error'  => $info['error'],
					'time'   => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB replica DBs get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					try {
						$lbFactory->waitForReplication( [
							'ifWritesSince' => $lastCheckTime,
							'timeout' => self::MAX_ALLOWED_LAG
						] );
					} catch ( DBReplicationWaitError $e ) {
						$response['reached'] = 'replica-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}
				// Don't let any queue replica DBs/backups fall behind
				if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
					$group->waitForBackups();
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}
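
	// Illustrative sketch only (not part of the original file): the summary returned by
	// run() can be JSON serialized directly, as the docblock above notes. The option
	// values and job type below are hypothetical.
	//
	//     $summary = ( new JobRunner() )->run( [
	//         'type' => 'refreshLinks',
	//         'maxJobs' => 100,
	//         'maxTime' => 30,
	//         'throttle' => true,
	//     ] );
	//     // e.g. [ 'jobs' => [ ... ], 'reached' => 'job-limit', 'backoffs' => [], 'elapsed' => 1234 ]
	//     echo json_encode( $summary ) . "\n";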

	/**
	 * @param Job $job
	 * @param LBFactory $lbFactory
	 * @param StatsdDataFactory $stats
	 * @param float $popTime
	 * @return array Map of status/error/timeMs
	 */
	private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg );
		$this->debugCallback( $msg );

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
			$lbFactory->beginMasterChanges( $fnameTrxOwner );
			$status = $job->run();
			$error = $job->getLastError();
			$this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
			// Run any deferred update tasks; doUpdates() manages transactions itself
			DeferredUpdates::doUpdates();
		} catch ( Exception $e ) {
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
		}
		// Always attempt to call teardown() even if Job throws exception.
		try {
			$job->teardown( $status );
		} catch ( Exception $e ) {
			MWExceptionHandler::logException( $e );
		}

		// Commit all outstanding connections that are in a transaction
		// to get a fresh repeatable read snapshot on every connection.
		// Note that jobs are still responsible for handling replica DB lag.
		$lbFactory->flushReplicaSnapshots( __METHOD__ );
		// Clear out title cache data from prior snapshots
		MediaWikiServices::getInstance()->getLinkCache()->clear();
		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
		if ( $rootTimestamp ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->logger->error( $msg );
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t=$timeMs good";
			$this->logger->info( $msg );
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}

	/**
	 * @return int|null Max memory RSS in kilobytes
	 */
	private function getMaxRssKb() {
		$info = wfGetRusage() ?: [];
		// see https://linux.die.net/man/2/getrusage
		return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
	}

	/**
	 * @param Job $job
	 * @return int Seconds for this runner to avoid doing more jobs of this type
	 * @see $wgJobBackoffThrottling
	 */
	private function getBackoffTimeToWait( Job $job ) {
		global $wgJobBackoffThrottling;

		if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
			$job instanceof DuplicateJob // no work was done
		) {
			return 0; // not throttled
		}

		$itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
		if ( $itemsPerSecond <= 0 ) {
			return 0; // not throttled
		}

		$seconds = 0;
		if ( $job->workItemCount() > 0 ) {
			$exactSeconds = $job->workItemCount() / $itemsPerSecond;
			// use randomized rounding
			$seconds = floor( $exactSeconds );
			$remainder = $exactSeconds - $seconds;
			$seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
		}

		return (int)$seconds;
	}
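
	// Worked example (illustrative, numbers hypothetical): with
	// $wgJobBackoffThrottling['htmlCacheUpdate'] = 20 (items per second), a job
	// carrying 50 work items gives $exactSeconds = 50 / 20 = 2.5. Randomized rounding
	// then returns 2 or 3 seconds with equal probability, so the backoff still
	// averages 2.5 seconds over many runs.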

	/**
	 * Get the previous backoff expiries from persistent storage
	 * On I/O or lock acquisition failure this returns the original $backoffs.
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array Map of (job type => backoff expiry timestamp)
	 */
	private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		if ( is_file( $file ) ) {
			$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
			$handle = fopen( $file, 'rb' );
			if ( !flock( $handle, LOCK_SH | $noblock ) ) {
				fclose( $handle );
				return $backoffs; // don't wait on lock
			}
			$content = stream_get_contents( $handle );
			flock( $handle, LOCK_UN );
			fclose( $handle );
			$ctime = microtime( true );
			$cBackoffs = json_decode( $content, true ) ?: [];
			foreach ( $cBackoffs as $type => $timestamp ) {
				if ( $timestamp < $ctime ) {
					unset( $cBackoffs[$type] ); // expired
				}
			}
		} else {
			$cBackoffs = [];
		}

		return $cBackoffs;
	}
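
	// Illustrative file contents (values hypothetical): the JSON blob in
	// mw-runJobs-backoffs.json is a flat map of job type to UNIX expiry timestamp,
	// e.g. {"htmlCacheUpdate": 1500000123.4, "refreshLinks": 1500000098.7}.
	// Entries whose expiry is already in the past are pruned on load, as above.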

	/**
	 * Merge the current backoff expiries from persistent storage
	 *
	 * The $deltas map is set to an empty array on success.
	 * On I/O or lock acquisition failure this returns the original $backoffs.
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param array $deltas Map of (job type => seconds)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array The new backoffs, accounting for $backoffs and the latest file data
	 */
	private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
		if ( !$deltas ) {
			return $this->loadBackoffs( $backoffs, $mode );
		}

		$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		$handle = fopen( $file, 'wb+' );
		if ( !flock( $handle, LOCK_EX | $noblock ) ) {
			fclose( $handle );
			return $backoffs; // don't wait on lock
		}
		$ctime = microtime( true );
		$content = stream_get_contents( $handle );
		$cBackoffs = json_decode( $content, true ) ?: [];
		foreach ( $deltas as $type => $seconds ) {
			$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
				? $cBackoffs[$type] + $seconds
				: $ctime + $seconds;
		}
		foreach ( $cBackoffs as $type => $timestamp ) {
			if ( $timestamp < $ctime ) {
				unset( $cBackoffs[$type] ); // expired
			}
		}
		ftruncate( $handle, 0 );
		fwrite( $handle, json_encode( $cBackoffs ) );
		flock( $handle, LOCK_UN );
		fclose( $handle );

		$deltas = [];

		return $cBackoffs;
	}

	/**
	 * Make sure that this script is not too close to the memory usage limit.
	 * It is better to die in between jobs than OOM right in the middle of one.
	 * @return bool
	 */
	private function checkMemoryOK() {
		static $maxBytes = null;
		if ( $maxBytes === null ) {
			$m = [];
			if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
				list( , $num, $unit ) = $m;
				$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
				$maxBytes = $num * $conv[strtolower( $unit )];
			} else {
				$maxBytes = 0;
			}
		}
		$usedBytes = memory_get_usage();
		if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
			$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
			$this->debugCallback( $msg );
			$this->logger->error( $msg );

			return false;
		}

		return true;
	}
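
	// Worked example (illustrative): if ini_get( 'memory_limit' ) returns '256M', the
	// regexp above yields $num = 256 and $unit = 'm', so $maxBytes = 256 * 1048576
	// = 268435456. The runner then refuses to pop another job once memory_get_usage()
	// reaches 95% of that (about 255013683 bytes) rather than risking an OOM mid-job.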

	/**
	 * Log the job message
	 * @param string $msg The message to log
	 */
	private function debugCallback( $msg ) {
		if ( $this->debug ) {
			call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
		}
	}

	/**
	 * Issue a commit on all masters who are currently in a transaction and have
	 * made changes to the database. It also supports sometimes waiting for the
	 * local wiki's replica DBs to catch up. See the documentation for
	 * $wgJobSerialCommitThreshold for more.
	 *
	 * @param LBFactory $lbFactory
	 * @param Job $job
	 * @param string $fnameTrxOwner
	 * @throws DBError
	 */
	private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
		global $wgJobSerialCommitThreshold;

		$time = false;
		$lb = $lbFactory->getMainLB( wfWikiID() );
		if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
			// We need natively blocking fast locks
			if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
				$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
				if ( $time < $wgJobSerialCommitThreshold ) {
					$dbwSerial = false;
				}
			} else {
				$dbwSerial = false;
			}
		} else {
			// There are no replica DBs or writes are all to foreign DB (we don't handle that)
			$dbwSerial = false;
		}

		if ( !$dbwSerial ) {
			$lbFactory->commitMasterChanges( $fnameTrxOwner );
			return;
		}

		$ms = intval( 1000 * $time );
		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->logger->info( $msg );
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
			$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
		} );

		// Wait for the replica DBs to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForAll( $pos );
		}

		// Actually commit the DB master changes
		$lbFactory->commitMasterChanges( $fnameTrxOwner );
		ScopedCallback::consume( $unlocker );
	}
}