<?php
/**
 * Moves blobs indexed by trackBlobs.php to a specified list of destination
 * clusters, and recompresses them in the process.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup Maintenance ExternalStorage
 */

use MediaWiki\Logger\LegacyLogger;
use MediaWiki\MediaWikiServices;
use MediaWiki\Shell\Shell;
use MediaWiki\Storage\SqlBlobStore;
use MediaWiki\Title\Title;
use MediaWiki\WikiMap\WikiMap;
use Wikimedia\AtEase\AtEase;

$optionsWithArgs = RecompressTracked::getOptionsWithArgs();
require __DIR__ . '/../CommandLineInc.php';
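
// Note (explanatory, not in the upstream file): CommandLineInc.php parses the command
// line into the global $options and $args arrays used below, honouring the
// $optionsWithArgs list set above for options that take a value.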

if ( count( $args ) < 1 ) {
	echo "Usage: php recompressTracked.php [options] <cluster> [... <cluster>...]
Moves blobs indexed by trackBlobs.php to a specified list of destination clusters,
and recompresses them in the process. Restartable.

Options:
    --procs <procs>         Set the number of child processes (default 1)
    --copy-only             Copy only, do not update the text table. Restart
                            without this option to complete.
    --debug-log <file>      Log debugging data to the specified file
    --info-log <file>       Log progress messages to the specified file
    --critical-log <file>   Log error messages to the specified file
";
	exit( 1 );
}

$job = RecompressTracked::newFromCommandLine( $args, $options );
$job->execute();
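
// Example invocation (a sketch only: "cluster24" and "cluster25" are hypothetical
// external storage cluster names; substitute clusters configured for your wiki).
// A typical restartable run first copies the blobs, then re-runs without
// --copy-only to update the text table and complete the moves:
//
//   php maintenance/storage/recompressTracked.php --procs 4 --copy-only cluster24 cluster25
//   php maintenance/storage/recompressTracked.php --procs 4 cluster24 cluster25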

/**
 * Maintenance script that moves blobs indexed by trackBlobs.php to a specified
 * list of destination clusters, and recompresses them in the process.
 *
 * @ingroup Maintenance ExternalStorage
 */
class RecompressTracked {
	/** @var string[] */
	public $destClusters;
	/** @var int */
	public $batchSize = 1000;
	/** @var int */
	public $orphanBatchSize = 1000;
	/** @var int */
	public $reportingInterval = 10;
	/** @var int */
	public $numProcs = 1;
	/** @var int */
	public $numBatches = 0;
	/** @var string */
	public $pageBlobClass;
	/** @var string */
	public $orphanBlobClass;
	/** @var resource[] */
	public $childPipes;
	/** @var resource[] */
	public $childProcs;
	/** @var int */
	public $prevChildId;
	/** @var bool */
	public $copyOnly = false;
	/** @var bool */
	public $isChild = false;
	/** @var int|false */
	public $childId = false;
	/** @var bool */
	public $noCount = false;
	public ?string $debugLog = null;
	public ?string $infoLog = null;
	public ?string $criticalLog = null;
	/** @var ExternalStoreDB */
	public $store;
	/** @var SqlBlobStore */
	private $blobStore;

	/** @var string[] */
	private static $optionsWithArgs = [
		'procs',
		'child-id',
		'debug-log',
		'info-log',
		'critical-log'
	];

	/** @var string[] */
	private static $cmdLineOptionMap = [
		'no-count' => 'noCount',
		'procs' => 'numProcs',
		'copy-only' => 'copyOnly',
		'child' => 'isChild',
		'child-id' => 'childId',
		'debug-log' => 'debugLog',
		'info-log' => 'infoLog',
		'critical-log' => 'criticalLog',
	];

	public static function getOptionsWithArgs() {
		return self::$optionsWithArgs;
	}

	public static function newFromCommandLine( $args, $options ) {
		$jobOptions = [ 'destClusters' => $args ];
		foreach ( self::$cmdLineOptionMap as $cmdOption => $classOption ) {
			if ( isset( $options[$cmdOption] ) ) {
				$jobOptions[$classOption] = $options[$cmdOption];
			}
		}

		return new self( $jobOptions );
	}

	public function __construct( $options ) {
		foreach ( $options as $name => $value ) {
			$this->$name = $value;
		}
		$esFactory = MediaWikiServices::getInstance()->getExternalStoreFactory();
		$this->store = $esFactory->getStore( 'DB' );
		if ( !$this->isChild ) {
			$GLOBALS['wgDebugLogPrefix'] = "RCT M: ";
		} elseif ( $this->childId !== false ) {
			$GLOBALS['wgDebugLogPrefix'] = "RCT {$this->childId}: ";
		}
		$this->pageBlobClass = function_exists( 'xdiff_string_bdiff' ) ?
			DiffHistoryBlob::class : ConcatenatedGzipHistoryBlob::class;
		$this->orphanBlobClass = ConcatenatedGzipHistoryBlob::class;

		$this->blobStore = MediaWikiServices::getInstance()
			->getBlobStoreFactory()
			->newSqlBlobStore();
	}

	public function debug( $msg ) {
		wfDebug( "$msg" );
		if ( $this->debugLog ) {
			$this->logToFile( $msg, $this->debugLog );
		}
	}

	public function info( $msg ) {
		echo "$msg\n";
		if ( $this->infoLog ) {
			$this->logToFile( $msg, $this->infoLog );
		}
	}

	public function critical( $msg ) {
		echo "$msg\n";
		if ( $this->criticalLog ) {
			$this->logToFile( $msg, $this->criticalLog );
		}
	}

	private function logToFile( $msg, $file ) {
		$header = '[' . date( 'd\TH:i:s' ) . '] ' . wfHostname() . ' ' . posix_getpid();
		if ( $this->childId !== false ) {
			$header .= "({$this->childId})";
		}
		$header .= ' ' . WikiMap::getCurrentWikiDbDomain()->getId();
		LegacyLogger::emit( sprintf( "%-50s %s\n", $header, $msg ), $file );
	}

	/**
	 * Wait until the selected replica DB has caught up to the master.
	 * This allows us to use the replica DB for things that were committed in a
	 * previous part of this batch process.
	 */
	private function syncDBs() {
		MediaWikiServices::getInstance()->getDBLoadBalancerFactory()->waitForReplication( [ 'timeout' => 100_000 ] );
	}

	/**
	 * Execute parent or child depending on the isChild option
	 */
	public function execute() {
		if ( $this->isChild ) {
			$this->executeChild();
		} else {
			$this->executeParent();
		}
	}

	/**
	 * Execute the parent process
	 */
	public function executeParent() {
		if ( !$this->checkTrackingTable() ) {
			return;
		}

		$this->syncDBs();
		$this->startChildProcs();
		$this->doAllPages();
		$this->doAllOrphans();
		$this->killChildProcs();
	}

	/**
	 * Make sure the tracking table exists and isn't empty
	 * @return bool
	 */
	private function checkTrackingTable() {
		$row = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase()->newSelectQueryBuilder()
			->select( '*' )
			->from( 'blob_tracking' )
			->caller( __METHOD__ )->fetchRow();
		if ( !$row ) {
			$this->info( "Warning: blob_tracking table contains no rows, skipping this wiki." );

			return false;
		}

		return true;
	}

	/**
	 * Start the worker processes.
	 * These processes will listen on stdin for commands.
	 * This is necessary because text recompression is slow: loading, compressing and
	 * writing are all slow.
	 */
	private function startChildProcs() {
		$wiki = WikiMap::getCurrentWikiId();

		$cmd = 'php ' . Shell::escape( __FILE__ );
		foreach ( self::$cmdLineOptionMap as $cmdOption => $classOption ) {
			if ( $cmdOption == 'child-id' ) {
				continue;
			}
			if ( in_array( $cmdOption, self::$optionsWithArgs ) && isset( $this->$classOption ) ) {
				// @phan-suppress-next-line PhanTypeMismatchArgument False positive
				$cmd .= " --$cmdOption " . Shell::escape( $this->$classOption );
			} elseif ( $this->$classOption ) {
				$cmd .= " --$cmdOption";
			}
		}
		$cmd .= ' --child' .
			' --wiki ' . Shell::escape( $wiki ) .
			' ' . Shell::escape( ...$this->destClusters );

		$this->childPipes = $this->childProcs = [];
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$pipes = [];
			$spec = [
				[ 'pipe', 'r' ],
				[ 'file', 'php://stdout', 'w' ],
				[ 'file', 'php://stderr', 'w' ]
			];
			AtEase::suppressWarnings();
			$proc = proc_open( "$cmd --child-id $i", $spec, $pipes );
			AtEase::restoreWarnings();
			if ( !$proc ) {
				$this->critical( "Error opening child process: $cmd" );
				exit( 1 );
			}
			$this->childProcs[$i] = $proc;
			$this->childPipes[$i] = $pipes[0];
		}
		$this->prevChildId = -1;
	}

	/**
	 * Gracefully terminate the child processes
	 */
	private function killChildProcs() {
		$this->info( "Waiting for child processes to finish..." );
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$this->dispatchToChild( $i, 'quit' );
		}
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$status = proc_close( $this->childProcs[$i] );
			if ( $status ) {
				$this->critical( "Warning: child #$i exited with status $status" );
			}
		}
		$this->info( "Done." );
	}
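
	// Note on the parent/child protocol as implemented below: each command sent to a
	// child is a single newline-terminated line of space-separated tokens, e.g.
	// "doPage 123" or "doOrphanList 1 2 3". executeChild() reads these lines from STDIN
	// and dispatches on the first token ('doPage', 'doOrphanList' or 'quit').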

	/**
	 * Dispatch a command to the next available child process.
	 * This may block until a child process finishes its work and becomes available.
	 * @param array|string ...$args
	 */
	private function dispatch( ...$args ) {
		$pipes = $this->childPipes;
		$x = [];
		$y = [];
		$numPipes = stream_select( $x, $pipes, $y, 3600 );
		if ( !$numPipes ) {
			$this->critical( "Error waiting to write to child process. Aborting" );
			exit( 1 );
		}
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$childId = ( $i + $this->prevChildId + 1 ) % $this->numProcs;
			if ( isset( $pipes[$childId] ) ) {
				$this->prevChildId = $childId;
				$this->dispatchToChild( $childId, $args );

				return;
			}
		}
		$this->critical( "Unreachable" );
		exit( 1 );
	}

	/**
	 * Dispatch a command to a specified child process
	 * @param int $childId
	 * @param array|string $args
	 */
	private function dispatchToChild( $childId, $args ) {
		$args = (array)$args;
		$cmd = implode( ' ', $args );
		fwrite( $this->childPipes[$childId], "$cmd\n" );
	}

	/**
	 * Move all tracked pages to the new clusters
	 */
	private function doAllPages() {
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();
		$i = 0;
		$startId = 0;
		if ( $this->noCount ) {
			$numPages = '[unknown]';
		} else {
			$numPages = $dbr->newSelectQueryBuilder()
				->select( 'COUNT(DISTINCT bt_page)' )
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0 ] )
				->caller( __METHOD__ )->fetchField();
		}
		if ( $this->copyOnly ) {
			$this->info( "Copying pages..." );
		} else {
			$this->info( "Moving pages..." );
		}
		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( [ 'bt_page' ] )
				->distinct()
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0, $dbr->expr( 'bt_page', '>', $startId ) ] )
				->orderBy( 'bt_page' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}
			foreach ( $res as $row ) {
				$startId = $row->bt_page;
				$this->dispatch( 'doPage', $row->bt_page );
				$i++;
			}
			$this->report( 'pages', $i, $numPages );
		}
		$this->report( 'pages', $i, $numPages );
		if ( $this->copyOnly ) {
			$this->info( "All page copies queued." );
		} else {
			$this->info( "All page moves queued." );
		}
	}

	/**
	 * Display a progress report
	 * @param string $label
	 * @param int $current
	 * @param int|string $end Total count, or '[unknown]' when --no-count is used
	 */
	private function report( $label, $current, $end ) {
		$this->numBatches++;
		if ( $current == $end || $this->numBatches >= $this->reportingInterval ) {
			$this->numBatches = 0;
			$this->info( "$label: $current / $end" );
			MediaWikiServices::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
		}
	}

	/**
	 * Move all orphan text to the new clusters
	 */
	private function doAllOrphans() {
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();
		$startId = 0;
		$i = 0;
		if ( $this->noCount ) {
			$numOrphans = '[unknown]';
		} else {
			$numOrphans = $dbr->newSelectQueryBuilder()
				->select( 'COUNT(DISTINCT bt_text_id)' )
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0, 'bt_page' => 0 ] )
				->caller( __METHOD__ )->fetchField();
			if ( !$numOrphans ) {
				return;
			}
		}
		if ( $this->copyOnly ) {
			$this->info( "Copying orphans..." );
		} else {
			$this->info( "Moving orphans..." );
		}

		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( [ 'bt_text_id' ] )
				->distinct()
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0, 'bt_page' => 0, $dbr->expr( 'bt_text_id', '>', $startId ) ] )
				->orderBy( 'bt_text_id' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}
			$ids = [];
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				$ids[] = $row->bt_text_id;
				$i++;
			}

			// Need to send enough orphan IDs to the child at a time to fill a blob,
			// so orphanBatchSize needs to be at least ~100.
			// batchSize can be smaller or larger.
			while ( count( $ids ) > $this->orphanBatchSize ) {
				$args = array_slice( $ids, 0, $this->orphanBatchSize );
				$ids = array_slice( $ids, $this->orphanBatchSize );
				array_unshift( $args, 'doOrphanList' );
				$this->dispatch( ...$args );
			}
			if ( count( $ids ) ) {
				$args = $ids;
				array_unshift( $args, 'doOrphanList' );
				$this->dispatch( ...$args );
			}

			$this->report( 'orphans', $i, $numOrphans );
		}
		$this->report( 'orphans', $i, $numOrphans );
		$this->info( "All orphans queued." );
	}

	/**
	 * Main entry point for worker processes
	 */
	public function executeChild() {
		$this->debug( 'starting' );
		$this->syncDBs();

		while ( !feof( STDIN ) ) {
			$line = rtrim( fgets( STDIN ) );
			if ( $line == '' ) {
				continue;
			}
			$this->debug( $line );
			$args = explode( ' ', $line );
			$cmd = array_shift( $args );
			switch ( $cmd ) {
				case 'doPage':
					$this->doPage( intval( $args[0] ) );
					break;
				case 'doOrphanList':
					$this->doOrphanList( array_map( 'intval', $args ) );
					break;
				case 'quit':
					return;
			}
			MediaWikiServices::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
		}
	}

	/**
	 * Move tracked text in a given page
	 *
	 * @param int $pageId
	 */
	private function doPage( $pageId ) {
		$title = Title::newFromID( $pageId );
		if ( $title ) {
			$titleText = $title->getPrefixedText();
		} else {
			$titleText = '[deleted]';
		}
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();

		// Finish any incomplete transactions
		if ( !$this->copyOnly ) {
			$this->finishIncompleteMoves( [ 'bt_page' => $pageId ] );
			$this->syncDBs();
		}

		$startId = 0;
		$trx = new CgzCopyTransaction( $this, $this->pageBlobClass );

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( '*' )
				->from( 'blob_tracking' )
				->join( 'text', null, 'bt_text_id=old_id' )
				->where( [
					'bt_page' => $pageId,
					$dbr->expr( 'bt_text_id', '>', $startId ),
					'bt_moved' => 0,
					'bt_new_url' => null,
				] )
				->orderBy( 'bt_text_id' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}

			$lastTextId = 0;
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				if ( $lastTextId == $row->bt_text_id ) {
					// Duplicate (null edit)
					continue;
				}
				$lastTextId = $row->bt_text_id;
				// Load the text
				$text = $this->blobStore->expandBlob( $row->old_text, $row->old_flags );
				if ( $text === false ) {
					$this->critical( "Error loading {$row->bt_rev_id}/{$row->bt_text_id}" );
					continue;
				}

				// Queue it
				if ( !$trx->addItem( $text, $row->bt_text_id ) ) {
					$this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
					$trx->commit();
					$trx = new CgzCopyTransaction( $this, $this->pageBlobClass );
					$lbFactory->waitForReplication();
				}
			}
		}

		$this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
		$trx->commit();
	}

	/**
	 * Atomic move operation.
	 *
	 * Write the new URL to the text table and set the bt_moved flag.
	 *
	 * This is done in a single transaction to provide restartable behavior
	 * without data loss.
	 *
	 * The transaction is kept short to reduce locking.
	 *
	 * @param int $textId
	 * @param string $url
	 */
	public function moveTextRow( $textId, $url ) {
		if ( $this->copyOnly ) {
			$this->critical( "Internal error: can't call moveTextRow() in --copy-only mode" );
			exit( 1 );
		}
		$dbw = MediaWikiServices::getInstance()->getConnectionProvider()->getPrimaryDatabase();

		$dbw->begin( __METHOD__ );
		$dbw->newUpdateQueryBuilder()
			->update( 'text' )
			->set( [
				'old_text' => $url,
				'old_flags' => 'external,utf-8',
			] )
			->where( [
				'old_id' => $textId
			] )
			->caller( __METHOD__ )
			->execute();
		$dbw->newUpdateQueryBuilder()
			->update( 'blob_tracking' )
			->set( [ 'bt_moved' => 1 ] )
			->where( [ 'bt_text_id' => $textId ] )
			->caller( __METHOD__ )
			->execute();
		$dbw->commit( __METHOD__ );
	}

	/**
	 * Moves are done in two phases: bt_new_url and then bt_moved.
	 * - bt_new_url indicates that the text has been copied to the new cluster.
	 * - bt_moved indicates that the text table has been updated.
	 *
	 * This function completes any moves that only have done bt_new_url. This
	 * can happen when the script is interrupted, or when --copy-only is used.
	 *
	 * @param array $conds
	 */
	private function finishIncompleteMoves( $conds ) {
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();
		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();

		$startId = 0;
		$conds = array_merge( $conds, [
			'bt_moved' => 0,
			$dbr->expr( 'bt_new_url', '!=', null ),
		] );
		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( '*' )
				->from( 'blob_tracking' )
				->where( $conds )
				->andWhere( $dbr->expr( 'bt_text_id', '>', $startId ) )
				->orderBy( 'bt_text_id' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}
			$this->debug( 'Incomplete: ' . $res->numRows() . ' rows' );
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				$this->moveTextRow( $row->bt_text_id, $row->bt_new_url );
				if ( $row->bt_text_id % 10 == 0 ) {
					$lbFactory->waitForReplication();
				}
			}
		}
	}
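
	// The destination cluster for each committed blob comes from getTargetCluster()
	// below, which walks $this->destClusters with next()/reset(), so successive blobs
	// are spread round-robin across the clusters given on the command line.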

	/**
	 * Returns the name of the next target cluster
	 * @return string
	 */
	public function getTargetCluster() {
		$cluster = next( $this->destClusters );
		if ( $cluster === false ) {
			$cluster = reset( $this->destClusters );
		}

		return $cluster;
	}

	/**
	 * Move a list of orphan text_ids to the new cluster
	 *
	 * @param array $textIds
	 */
	private function doOrphanList( $textIds ) {
		// Finish incomplete moves
		if ( !$this->copyOnly ) {
			$this->finishIncompleteMoves( [ 'bt_text_id' => $textIds ] );
			$this->syncDBs();
		}

		$trx = new CgzCopyTransaction( $this, $this->orphanBlobClass );

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$res = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase()->newSelectQueryBuilder()
			->select( [ 'old_id', 'old_text', 'old_flags' ] )
			->distinct()
			->from( 'text' )
			->join( 'blob_tracking', null, 'bt_text_id=old_id' )
			->where( [ 'old_id' => $textIds, 'bt_moved' => 0 ] )
			->caller( __METHOD__ )->fetchResultSet();

		foreach ( $res as $row ) {
			$text = $this->blobStore->expandBlob( $row->old_text, $row->old_flags );
			if ( $text === false ) {
				$this->critical( "Error: cannot load revision text for old_id={$row->old_id}" );
				continue;
			}

			if ( !$trx->addItem( $text, $row->old_id ) ) {
				$this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
				$trx->commit();
				$trx = new CgzCopyTransaction( $this, $this->orphanBlobClass );
				$lbFactory->waitForReplication();
			}
		}
		$this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
		$trx->commit();
	}
}

/**
 * Class to represent a recompression operation for a single CGZ blob
 */
class CgzCopyTransaction {
	/** @var RecompressTracked */
	public $parent;
	/** @var string */
	public $blobClass;
	/** @var ConcatenatedGzipHistoryBlob|false */
	public $cgz;
	/** @var string[] */
	public $referrers;
	/** @var array */
	private $texts;

	/**
	 * Create a transaction from a RecompressTracked object
	 * @param RecompressTracked $parent
	 * @param string $blobClass
	 */
	public function __construct( $parent, $blobClass ) {
		$this->blobClass = $blobClass;
		$this->cgz = false;
		$this->texts = [];
		$this->parent = $parent;
	}

	/**
	 * Add text.
	 * Returns false when the blob is full and ready to be committed.
	 * @param string $text
	 * @param int $textId
	 * @return bool
	 */
	public function addItem( $text, $textId ) {
		if ( !$this->cgz ) {
			$class = $this->blobClass;
			$this->cgz = new $class;
		}
		$hash = $this->cgz->addItem( $text );
		$this->referrers[$textId] = $hash;
		$this->texts[$textId] = $text;

		return $this->cgz->isHappy();
	}
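
	// The return value above is the history blob's own isHappy() check: it reports
	// whether the blob can accept more items, and the callers in doPage() and
	// doOrphanList() commit the transaction as soon as it returns false.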

	public function getSize() {
		return count( $this->texts );
	}

	/**
	 * Recompress text after some aberrant modification
	 */
	public function recompress() {
		$class = $this->blobClass;
		$this->cgz = new $class;
		$this->referrers = [];
		foreach ( $this->texts as $textId => $text ) {
			$hash = $this->cgz->addItem( $text );
			$this->referrers[$textId] = $hash;
		}
	}

	/**
	 * Commit the blob.
	 * Does nothing if no text items have been added.
	 * May skip the move if --copy-only is set.
	 */
	public function commit() {
		$originalCount = count( $this->texts );
		if ( !$originalCount ) {
			return;
		}

		/* Check to see if the target text_ids have been moved already.
		 *
		 * We originally read from the replica DB, so this can happen when a single
		 * text_id is shared between multiple pages. It's rare, but possible
		 * if a delete/move/undelete cycle splits up a null edit.
		 *
		 * We do a locking read to prevent closer-run race conditions.
		 */
		$dbw = MediaWikiServices::getInstance()->getConnectionProvider()->getPrimaryDatabase();
		$dbw->begin( __METHOD__ );
		$res = $dbw->newSelectQueryBuilder()
			->select( [ 'bt_text_id', 'bt_moved' ] )
			->forUpdate()
			->from( 'blob_tracking' )
			->where( [ 'bt_text_id' => array_keys( $this->referrers ) ] )
			->caller( __METHOD__ )->fetchResultSet();
		$dirty = false;
		foreach ( $res as $row ) {
			if ( $row->bt_moved ) {
				# This row has already been moved, remove it
				$this->parent->debug( "TRX: conflict detected in old_id={$row->bt_text_id}" );
				unset( $this->texts[$row->bt_text_id] );
				$dirty = true;
			}
		}

		// Recompress the blob if necessary
		if ( $dirty ) {
			if ( !count( $this->texts ) ) {
				// All have been moved already
				if ( $originalCount > 1 ) {
					// This is suspicious, make noise
					$this->parent->critical(
						"Warning: concurrent operation detected, are there two conflicting " .
						"processes running, doing the same job?" );
				}

				return;
			}
			$this->recompress();
		}

		// Insert the data into the destination cluster
		$targetCluster = $this->parent->getTargetCluster();
		$store = $this->parent->store;
		$targetDB = $store->getPrimary( $targetCluster );
		$targetDB->begin( __METHOD__ );
		$baseUrl = $this->parent->store->store( $targetCluster, serialize( $this->cgz ) );

		// Write the new URLs to the blob_tracking table
		foreach ( $this->referrers as $textId => $hash ) {
			$url = $baseUrl . '/' . $hash;
			$dbw->newUpdateQueryBuilder()
				->update( 'blob_tracking' )
				->set( [ 'bt_new_url' => $url ] )
				->where( [
					'bt_text_id' => $textId,
					'bt_moved' => 0, # Check for concurrent conflicting update
				] )
				->caller( __METHOD__ )
				->execute();
		}

		$targetDB->commit( __METHOD__ );
		// Critical section here: interruption at this point causes blob duplication
		// Reversing the order of the commits would cause data loss instead
		$dbw->commit( __METHOD__ );

		// Write the new URLs to the text table and set the moved flag
		if ( !$this->parent->copyOnly ) {
			foreach ( $this->referrers as $textId => $hash ) {
				$url = $baseUrl . '/' . $hash;
				$this->parent->moveTextRow( $textId, $url );
			}
		}
	}
}