Merge "rest: Return a 400 for invalid render IDs"
mediawiki.git: maintenance/storage/recompressTracked.php
<?php
/**
 * Moves blobs indexed by trackBlobs.php to a specified list of destination
 * clusters, and recompresses them in the process.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup Maintenance ExternalStorage
 */

use MediaWiki\Logger\LegacyLogger;
use MediaWiki\MediaWikiServices;
use MediaWiki\Shell\Shell;
use MediaWiki\Storage\SqlBlobStore;
use MediaWiki\Title\Title;
use MediaWiki\WikiMap\WikiMap;
use Wikimedia\AtEase\AtEase;
$optionsWithArgs = RecompressTracked::getOptionsWithArgs();
require __DIR__ . '/../CommandLineInc.php';

if ( count( $args ) < 1 ) {
	echo "Usage: php recompressTracked.php [options] <cluster> [... <cluster>...]
Moves blobs indexed by trackBlobs.php to a specified list of destination clusters,
and recompresses them in the process. Restartable.

Options:
	--procs <procs>       Set the number of child processes (default 1)
	--copy-only           Copy only, do not update the text table. Restart
	                      without this option to complete.
	--debug-log <file>    Log debugging data to the specified file
	--info-log <file>     Log progress messages to the specified file
	--critical-log <file> Log error messages to the specified file
";
	exit( 1 );
}
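
// Example invocation (illustrative cluster names; the real names depend on the wiki's
// external storage configuration, e.g. $wgExternalServers):
//   php recompressTracked.php --procs 8 newcluster1 newcluster2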
$job = RecompressTracked::newFromCommandLine( $args, $options );
$job->execute();

/**
 * Maintenance script that moves blobs indexed by trackBlobs.php to a specified
 * list of destination clusters, and recompresses them in the process.
 *
 * @ingroup Maintenance ExternalStorage
 */
class RecompressTracked {
	/** @var string[] */
	public $destClusters;
	/** @var int */
	public $batchSize = 1000;
	/** @var int */
	public $orphanBatchSize = 1000;
	/** @var int */
	public $reportingInterval = 10;
	/** @var int */
	public $numProcs = 1;
	/** @var int */
	public $numBatches = 0;
	/** @var string */
	public $pageBlobClass;
	/** @var string */
	public $orphanBlobClass;
	/** @var resource[] */
	public $childPipes;
	/** @var resource[] */
	public $childProcs;
	/** @var int */
	public $prevChildId;
	/** @var bool */
	public $copyOnly = false;
	/** @var bool */
	public $isChild = false;
	/** @var int|false */
	public $childId = false;
	/** @var bool */
	public $noCount = false;
	public ?string $debugLog = null;
	public ?string $infoLog = null;
	public ?string $criticalLog = null;
	/** @var ExternalStoreDB */
	public $store;
	/** @var SqlBlobStore */
	private $blobStore;

	/** @var string[] */
	private static $optionsWithArgs = [
		'procs',
		'child-id',
		'debug-log',
		'info-log',
		'critical-log'
	];

	/** @var string[] */
	private static $cmdLineOptionMap = [
		'no-count' => 'noCount',
		'procs' => 'numProcs',
		'copy-only' => 'copyOnly',
		'child' => 'isChild',
		'child-id' => 'childId',
		'debug-log' => 'debugLog',
		'info-log' => 'infoLog',
		'critical-log' => 'criticalLog',
	];

	public static function getOptionsWithArgs() {
		return self::$optionsWithArgs;
	}

	public static function newFromCommandLine( $args, $options ) {
		$jobOptions = [ 'destClusters' => $args ];
		foreach ( self::$cmdLineOptionMap as $cmdOption => $classOption ) {
			if ( isset( $options[$cmdOption] ) ) {
				$jobOptions[$classOption] = $options[$cmdOption];
			}
		}

		return new self( $jobOptions );
	}

	public function __construct( $options ) {
		foreach ( $options as $name => $value ) {
			$this->$name = $value;
		}
		$esFactory = MediaWikiServices::getInstance()->getExternalStoreFactory();
		$this->store = $esFactory->getStore( 'DB' );
		if ( !$this->isChild ) {
			$GLOBALS['wgDebugLogPrefix'] = "RCT M: ";
		} elseif ( $this->childId !== false ) {
			$GLOBALS['wgDebugLogPrefix'] = "RCT {$this->childId}: ";
		}
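		// Use the delta-compressed DiffHistoryBlob class for page blobs when the xdiff
		// extension is available; otherwise fall back to plain concatenated-gzip blobs.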
		$this->pageBlobClass = function_exists( 'xdiff_string_bdiff' ) ?
			DiffHistoryBlob::class : ConcatenatedGzipHistoryBlob::class;
		$this->orphanBlobClass = ConcatenatedGzipHistoryBlob::class;

		$this->blobStore = MediaWikiServices::getInstance()
			->getBlobStoreFactory()
			->newSqlBlobStore();
	}

	public function debug( $msg ) {
		wfDebug( "$msg" );
		if ( $this->debugLog ) {
			$this->logToFile( $msg, $this->debugLog );
		}
	}

	public function info( $msg ) {
		echo "$msg\n";
		if ( $this->infoLog ) {
			$this->logToFile( $msg, $this->infoLog );
		}
	}

	public function critical( $msg ) {
		echo "$msg\n";
		if ( $this->criticalLog ) {
			$this->logToFile( $msg, $this->criticalLog );
		}
	}

	private function logToFile( $msg, $file ) {
		$header = '[' . date( 'd\TH:i:s' ) . '] ' . wfHostname() . ' ' . posix_getpid();
		if ( $this->childId !== false ) {
			$header .= "({$this->childId})";
		}
		$header .= ' ' . WikiMap::getCurrentWikiDbDomain()->getId();
		LegacyLogger::emit( sprintf( "%-50s %s\n", $header, $msg ), $file );
	}

	/**
	 * Wait until the selected replica DB has caught up to the master.
	 * This allows us to use the replica DB for things that were committed in a
	 * previous part of this batch process.
	 */
	private function syncDBs() {
		MediaWikiServices::getInstance()->getDBLoadBalancerFactory()
			->waitForReplication( [ 'timeout' => 100_000 ] );
	}

	/**
	 * Execute parent or child depending on the isChild option
	 */
	public function execute() {
		if ( $this->isChild ) {
			$this->executeChild();
		} else {
			$this->executeParent();
		}
	}

	/**
	 * Execute the parent process
	 */
	public function executeParent() {
		if ( !$this->checkTrackingTable() ) {
			return;
		}

		$this->syncDBs();
		$this->startChildProcs();
		$this->doAllPages();
		$this->doAllOrphans();
		$this->killChildProcs();
	}

	/**
	 * Make sure the tracking table exists and isn't empty
	 * @return bool
	 */
	private function checkTrackingTable() {
		$row = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase()
			->newSelectQueryBuilder()
			->select( '*' )
			->from( 'blob_tracking' )
			->caller( __METHOD__ )->fetchRow();
		if ( !$row ) {
			$this->info( "Warning: blob_tracking table contains no rows, skipping this wiki." );

			return false;
		}

		return true;
	}

	/**
	 * Start the worker processes.
	 * These processes will listen on stdin for commands.
	 * This is necessary because text recompression is slow: loading, compressing and
	 * writing are all slow.
	 */
	private function startChildProcs() {
		$wiki = WikiMap::getCurrentWikiId();

		$cmd = 'php ' . Shell::escape( __FILE__ );
		foreach ( self::$cmdLineOptionMap as $cmdOption => $classOption ) {
			if ( $cmdOption == 'child-id' ) {
				continue;
			}
			// @phan-suppress-next-line MediaWikiNoIssetIfDefined
			if ( in_array( $cmdOption, self::$optionsWithArgs ) && isset( $this->$classOption ) ) {
				// @phan-suppress-next-line PhanTypeMismatchArgument False positive
				$cmd .= " --$cmdOption " . Shell::escape( $this->$classOption );
			} elseif ( $this->$classOption ) {
				$cmd .= " --$cmdOption";
			}
		}
		$cmd .= ' --child' .
			' --wiki ' . Shell::escape( $wiki ) .
			' ' . Shell::escape( ...$this->destClusters );

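		// The assembled child command looks roughly like this (illustrative values only):
		//   php recompressTracked.php --procs 8 --child --wiki somewiki newcluster1 newcluster2
		// proc_open() below appends "--child-id <n>" for each worker.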
		$this->childPipes = $this->childProcs = [];
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$pipes = [];
			$spec = [
				[ 'pipe', 'r' ],
				[ 'file', 'php://stdout', 'w' ],
				[ 'file', 'php://stderr', 'w' ]
			];
			AtEase::suppressWarnings();
			$proc = proc_open( "$cmd --child-id $i", $spec, $pipes );
			AtEase::restoreWarnings();
			if ( !$proc ) {
				$this->critical( "Error opening child process: $cmd" );
				exit( 1 );
			}
			$this->childProcs[$i] = $proc;
			$this->childPipes[$i] = $pipes[0];
		}
		$this->prevChildId = -1;
	}

	/**
	 * Gracefully terminate the child processes
	 */
	private function killChildProcs() {
		$this->info( "Waiting for child processes to finish..." );
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$this->dispatchToChild( $i, 'quit' );
		}
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$status = proc_close( $this->childProcs[$i] );
			if ( $status ) {
				$this->critical( "Warning: child #$i exited with status $status" );
			}
		}
		$this->info( "Done." );
	}

	/**
	 * Dispatch a command to the next available child process.
	 * This may block until a child process finishes its work and becomes available.
	 * @param array|string ...$args
	 */
	private function dispatch( ...$args ) {
		$pipes = $this->childPipes;
		$x = [];
		$y = [];
		$numPipes = stream_select( $x, $pipes, $y, 3600 );
		if ( !$numPipes ) {
			$this->critical( "Error waiting to write to child process. Aborting" );
			exit( 1 );
		}
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$childId = ( $i + $this->prevChildId + 1 ) % $this->numProcs;
			if ( isset( $pipes[$childId] ) ) {
				$this->prevChildId = $childId;
				$this->dispatchToChild( $childId, $args );

				return;
			}
		}
		$this->critical( "Unreachable" );
		exit( 1 );
	}
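
	// Wire protocol between parent and children: each command is written to the child's
	// stdin as a single space-separated line, e.g. "doPage 12345" or
	// "doOrphanList 101 102 103"; a bare "quit" line asks the child to exit
	// (see executeChild() below).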

	/**
	 * Dispatch a command to a specified child process
	 * @param int $childId
	 * @param array|string $args
	 */
	private function dispatchToChild( $childId, $args ) {
		$args = (array)$args;
		$cmd = implode( ' ', $args );
		fwrite( $this->childPipes[$childId], "$cmd\n" );
	}

	/**
	 * Move all tracked pages to the new clusters
	 */
	private function doAllPages() {
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();
		$i = 0;
		$startId = 0;
		if ( $this->noCount ) {
			$numPages = '[unknown]';
		} else {
			$numPages = $dbr->newSelectQueryBuilder()
				->select( 'COUNT(DISTINCT bt_page)' )
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0 ] )
				->caller( __METHOD__ )->fetchField();
		}
		if ( $this->copyOnly ) {
			$this->info( "Copying pages..." );
		} else {
			$this->info( "Moving pages..." );
		}
		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( [ 'bt_page' ] )
				->distinct()
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0, $dbr->expr( 'bt_page', '>', $startId ) ] )
				->orderBy( 'bt_page' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}
			foreach ( $res as $row ) {
				$startId = $row->bt_page;
				$this->dispatch( 'doPage', $row->bt_page );
				$i++;
			}
			$this->report( 'pages', $i, $numPages );
		}
		$this->report( 'pages', $i, $numPages );
		if ( $this->copyOnly ) {
			$this->info( "All page copies queued." );
		} else {
			$this->info( "All page moves queued." );
		}
	}

	/**
	 * Display a progress report
	 * @param string $label
	 * @param int $current
	 * @param int $end
	 */
	private function report( $label, $current, $end ) {
		$this->numBatches++;
		if ( $current == $end || $this->numBatches >= $this->reportingInterval ) {
			$this->numBatches = 0;
			$this->info( "$label: $current / $end" );
			MediaWikiServices::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
		}
	}

	/**
	 * Move all orphan text to the new clusters
	 */
	private function doAllOrphans() {
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();
		$startId = 0;
		$i = 0;
		if ( $this->noCount ) {
			$numOrphans = '[unknown]';
		} else {
			$numOrphans = $dbr->newSelectQueryBuilder()
				->select( 'COUNT(DISTINCT bt_text_id)' )
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0, 'bt_page' => 0 ] )
				->caller( __METHOD__ )->fetchField();
			if ( !$numOrphans ) {
				return;
			}
		}
		if ( $this->copyOnly ) {
			$this->info( "Copying orphans..." );
		} else {
			$this->info( "Moving orphans..." );
		}

		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( [ 'bt_text_id' ] )
				->distinct()
				->from( 'blob_tracking' )
				->where( [ 'bt_moved' => 0, 'bt_page' => 0, $dbr->expr( 'bt_text_id', '>', $startId ) ] )
				->orderBy( 'bt_text_id' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}
			$ids = [];
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				$ids[] = $row->bt_text_id;
				$i++;
			}
			// Need to send enough orphan IDs to the child at a time to fill a blob,
			// so orphanBatchSize needs to be at least ~100.
			// batchSize can be smaller or larger.
			while ( count( $ids ) > $this->orphanBatchSize ) {
				$args = array_slice( $ids, 0, $this->orphanBatchSize );
				$ids = array_slice( $ids, $this->orphanBatchSize );
				array_unshift( $args, 'doOrphanList' );
				$this->dispatch( ...$args );
			}
			if ( count( $ids ) ) {
				$args = $ids;
				array_unshift( $args, 'doOrphanList' );
				$this->dispatch( ...$args );
			}

			$this->report( 'orphans', $i, $numOrphans );
		}
		$this->report( 'orphans', $i, $numOrphans );
		$this->info( "All orphans queued." );
	}

	/**
	 * Main entry point for worker processes
	 */
	public function executeChild() {
		$this->debug( 'starting' );
		$this->syncDBs();

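		// Read commands from the parent on stdin, one per line, until EOF or an
		// explicit "quit" command is received.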
		while ( !feof( STDIN ) ) {
			$line = rtrim( fgets( STDIN ) );
			if ( $line == '' ) {
				continue;
			}
			$this->debug( $line );
			$args = explode( ' ', $line );
			$cmd = array_shift( $args );
			switch ( $cmd ) {
				case 'doPage':
					$this->doPage( intval( $args[0] ) );
					break;
				case 'doOrphanList':
					$this->doOrphanList( array_map( 'intval', $args ) );
					break;
				case 'quit':
					return;
			}
			MediaWikiServices::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
		}
	}

	/**
	 * Move tracked text in a given page
	 *
	 * @param int $pageId
	 */
	private function doPage( $pageId ) {
		$title = Title::newFromID( $pageId );
		if ( $title ) {
			$titleText = $title->getPrefixedText();
		} else {
			$titleText = '[deleted]';
		}
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();

		// Finish any incomplete transactions
		if ( !$this->copyOnly ) {
			$this->finishIncompleteMoves( [ 'bt_page' => $pageId ] );
			$this->syncDBs();
		}

		$startId = 0;
		$trx = new CgzCopyTransaction( $this, $this->pageBlobClass );

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( '*' )
				->from( 'blob_tracking' )
				->join( 'text', null, 'bt_text_id=old_id' )
				->where( [
					'bt_page' => $pageId,
					$dbr->expr( 'bt_text_id', '>', $startId ),
					'bt_moved' => 0,
					'bt_new_url' => null,
				] )
				->orderBy( 'bt_text_id' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}

			$lastTextId = 0;
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				if ( $lastTextId == $row->bt_text_id ) {
					// Duplicate (null edit)
					continue;
				}
				$lastTextId = $row->bt_text_id;
				// Load the text
				$text = $this->blobStore->expandBlob( $row->old_text, $row->old_flags );
				if ( $text === false ) {
					$this->critical( "Error loading {$row->bt_rev_id}/{$row->bt_text_id}" );
					continue;
				}

				// Queue it
				if ( !$trx->addItem( $text, $row->bt_text_id ) ) {
					$this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
					$trx->commit();
					$trx = new CgzCopyTransaction( $this, $this->pageBlobClass );
					$lbFactory->waitForReplication();
				}
			}
		}

		$this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
		$trx->commit();
	}

	/**
	 * Atomic move operation.
	 *
	 * Write the new URL to the text table and set the bt_moved flag.
	 *
	 * This is done in a single transaction to provide restartable behavior
	 * without data loss.
	 *
	 * The transaction is kept short to reduce locking.
	 *
	 * @param int $textId
	 * @param string $url
	 */
	public function moveTextRow( $textId, $url ) {
		if ( $this->copyOnly ) {
			$this->critical( "Internal error: can't call moveTextRow() in --copy-only mode" );
			exit( 1 );
		}
		$dbw = MediaWikiServices::getInstance()->getConnectionProvider()->getPrimaryDatabase();

		$dbw->begin( __METHOD__ );
		$dbw->newUpdateQueryBuilder()
			->update( 'text' )
			->set( [
				'old_text' => $url,
				'old_flags' => 'external,utf-8',
			] )
			->where( [
				'old_id' => $textId
			] )
			->caller( __METHOD__ )
			->execute();
		$dbw->newUpdateQueryBuilder()
			->update( 'blob_tracking' )
			->set( [ 'bt_moved' => 1 ] )
			->where( [ 'bt_text_id' => $textId ] )
			->caller( __METHOD__ )
			->execute();
		$dbw->commit( __METHOD__ );
	}

	/**
	 * Moves are done in two phases: bt_new_url and then bt_moved.
	 *  - bt_new_url indicates that the text has been copied to the new cluster.
	 *  - bt_moved indicates that the text table has been updated.
	 *
	 * This function completes any moves that only have done bt_new_url. This
	 * can happen when the script is interrupted, or when --copy-only is used.
	 *
	 * @param array $conds
	 */
	private function finishIncompleteMoves( $conds ) {
		$dbr = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase();
		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();

		$startId = 0;
		$conds = array_merge( $conds, [
			'bt_moved' => 0,
			$dbr->expr( 'bt_new_url', '!=', null ),
		] );
		while ( true ) {
			$res = $dbr->newSelectQueryBuilder()
				->select( '*' )
				->from( 'blob_tracking' )
				->where( $conds )
				->andWhere( $dbr->expr( 'bt_text_id', '>', $startId ) )
				->orderBy( 'bt_text_id' )
				->limit( $this->batchSize )
				->caller( __METHOD__ )->fetchResultSet();
			if ( !$res->numRows() ) {
				break;
			}
			$this->debug( 'Incomplete: ' . $res->numRows() . ' rows' );
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				$this->moveTextRow( $row->bt_text_id, $row->bt_new_url );
				if ( $row->bt_text_id % 10 == 0 ) {
					$lbFactory->waitForReplication();
				}
			}
		}
	}

	/**
	 * Returns the name of the next target cluster
	 * @return string
	 */
	public function getTargetCluster() {
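		// Clusters are used in round-robin order: next() advances the shared array
		// pointer, and reset() wraps back to the first cluster once the end is reached.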
		$cluster = next( $this->destClusters );
		if ( $cluster === false ) {
			$cluster = reset( $this->destClusters );
		}

		return $cluster;
	}

	/**
	 * Move an orphan text_id to the new cluster
	 *
	 * @param array $textIds
	 */
	private function doOrphanList( $textIds ) {
		// Finish incomplete moves
		if ( !$this->copyOnly ) {
			$this->finishIncompleteMoves( [ 'bt_text_id' => $textIds ] );
			$this->syncDBs();
		}

		$trx = new CgzCopyTransaction( $this, $this->orphanBlobClass );

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$res = MediaWikiServices::getInstance()->getConnectionProvider()->getReplicaDatabase()
			->newSelectQueryBuilder()
			->select( [ 'old_id', 'old_text', 'old_flags' ] )
			->distinct()
			->from( 'text' )
			->join( 'blob_tracking', null, 'bt_text_id=old_id' )
			->where( [ 'old_id' => $textIds, 'bt_moved' => 0 ] )
			->caller( __METHOD__ )->fetchResultSet();

		foreach ( $res as $row ) {
			$text = $this->blobStore->expandBlob( $row->old_text, $row->old_flags );
			if ( $text === false ) {
				$this->critical( "Error: cannot load revision text for old_id={$row->old_id}" );
				continue;
			}

			if ( !$trx->addItem( $text, $row->old_id ) ) {
				$this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
				$trx->commit();
				$trx = new CgzCopyTransaction( $this, $this->orphanBlobClass );
				$lbFactory->waitForReplication();
			}
		}
		$this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
		$trx->commit();
	}
}

/**
 * Class to represent a recompression operation for a single CGZ blob
 */
class CgzCopyTransaction {
	/** @var RecompressTracked */
	public $parent;
	/** @var string */
	public $blobClass;
	/** @var ConcatenatedGzipHistoryBlob|false */
	public $cgz;
	/** @var string[] */
	public $referrers;
	/** @var array */
	private $texts;

	/**
	 * Create a transaction from a RecompressTracked object
	 * @param RecompressTracked $parent
	 * @param string $blobClass
	 */
	public function __construct( $parent, $blobClass ) {
		$this->blobClass = $blobClass;
		$this->cgz = false;
		$this->texts = [];
		$this->parent = $parent;
	}

	/**
	 * Add text.
	 * Returns false if it's ready to commit.
	 * @param string $text
	 * @param int $textId
	 * @return bool
	 */
	public function addItem( $text, $textId ) {
		if ( !$this->cgz ) {
			$class = $this->blobClass;
			$this->cgz = new $class;
		}
		$hash = $this->cgz->addItem( $text );
		$this->referrers[$textId] = $hash;
		$this->texts[$textId] = $text;

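		// isHappy() returns false once the blob no longer wants more items (it is
		// "ready to commit", per the doc comment above), telling the caller to commit
		// this transaction and start a new one.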
		return $this->cgz->isHappy();
	}

	public function getSize() {
		return count( $this->texts );
	}

	/**
	 * Recompress text after some aberrant modification
	 */
	public function recompress() {
		$class = $this->blobClass;
		$this->cgz = new $class;
		$this->referrers = [];
		foreach ( $this->texts as $textId => $text ) {
			$hash = $this->cgz->addItem( $text );
			$this->referrers[$textId] = $hash;
		}
	}

	/**
	 * Commit the blob.
	 * Does nothing if no text items have been added.
	 * May skip the move if --copy-only is set.
	 */
	public function commit() {
		$originalCount = count( $this->texts );
		if ( !$originalCount ) {
			return;
		}

		/* Check to see if the target text_ids have been moved already.
		 *
		 * We originally read from the replica DB, so this can happen when a single
		 * text_id is shared between multiple pages. It's rare, but possible
		 * if a delete/move/undelete cycle splits up a null edit.
		 *
		 * We do a locking read to prevent closer-run race conditions.
		 */
		$dbw = MediaWikiServices::getInstance()->getConnectionProvider()->getPrimaryDatabase();
		$dbw->begin( __METHOD__ );
		$res = $dbw->newSelectQueryBuilder()
			->select( [ 'bt_text_id', 'bt_moved' ] )
			->forUpdate()
			->from( 'blob_tracking' )
			->where( [ 'bt_text_id' => array_keys( $this->referrers ) ] )
			->caller( __METHOD__ )->fetchResultSet();
		$dirty = false;
		foreach ( $res as $row ) {
			if ( $row->bt_moved ) {
				# This row has already been moved, remove it
				$this->parent->debug( "TRX: conflict detected in old_id={$row->bt_text_id}" );
				unset( $this->texts[$row->bt_text_id] );
				$dirty = true;
			}
		}

		// Recompress the blob if necessary
		if ( $dirty ) {
			if ( !count( $this->texts ) ) {
				// All have been moved already
				if ( $originalCount > 1 ) {
					// This is suspicious, make noise
					$this->parent->critical(
						"Warning: concurrent operation detected, are there two conflicting " .
						"processes running, doing the same job?" );
				}

				return;
			}
			$this->recompress();
		}

		// Insert the data into the destination cluster
		$targetCluster = $this->parent->getTargetCluster();
		$store = $this->parent->store;
		$targetDB = $store->getPrimary( $targetCluster );
		$targetDB->begin( __METHOD__ );
		$baseUrl = $this->parent->store->store( $targetCluster, serialize( $this->cgz ) );
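		// $baseUrl is the external-store URL of the whole blob (for ExternalStoreDB,
		// roughly "DB://<cluster>/<id>"); individual items are addressed below by
		// appending "/<item hash>".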

		// Write the new URLs to the blob_tracking table
		foreach ( $this->referrers as $textId => $hash ) {
			$url = $baseUrl . '/' . $hash;
			$dbw->newUpdateQueryBuilder()
				->update( 'blob_tracking' )
				->set( [ 'bt_new_url' => $url ] )
				->where( [
					'bt_text_id' => $textId,
					'bt_moved' => 0, # Check for concurrent conflicting update
				] )
				->caller( __METHOD__ )
				->execute();
		}

		$targetDB->commit( __METHOD__ );
		// Critical section here: interruption at this point causes blob duplication
		// Reversing the order of the commits would cause data loss instead
		$dbw->commit( __METHOD__ );

		// Write the new URLs to the text table and set the moved flag
		if ( !$this->parent->copyOnly ) {
			foreach ( $this->referrers as $textId => $hash ) {
				$url = $baseUrl . '/' . $hash;
				$this->parent->moveTextRow( $textId, $url );
			}
		}
	}
}