includes/libs/filebackend/FileOpBatch.php
<?php
/**
 * Helper class for representing batch file operations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup FileBackend
 * @author Aaron Schulz
 */

/**
 * Helper class for representing batch file operations.
 * Do not use this class from places outside FileBackend.
 *
 * Methods should avoid throwing exceptions at all costs.
 *
 * @ingroup FileBackend
 * @since 1.20
 */
class FileOpBatch {
	/* Timeout related parameters */
	const MAX_BATCH_SIZE = 1000; // integer

	/**
	 * Attempt to perform a series of file operations.
	 * Callers are responsible for handling file locking.
	 *
	 * $opts is an array of options, including:
	 *   - force        : Errors that would normally cause a rollback do not.
	 *                    The remaining operations are still attempted if any fail.
	 *   - nonJournaled : Don't log this operation batch in the file journal.
	 *   - concurrency  : Try to do this many operations in parallel when possible.
	 *
	 * The resulting StatusValue will be "OK" unless:
	 *   - a) unexpected operation errors occurred (network partitions, disk full...)
	 *   - b) significant operation errors occurred and 'force' was not set
	 *
	 * @param FileOp[] $performOps List of FileOp operations
	 * @param array $opts Batch operation options
	 * @param FileJournal $journal Journal to log operations to
	 * @return StatusValue
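	 *
	 * @par Example (illustrative sketch only; building $performOps and obtaining
	 *   $journal are assumed to be done by the caller, normally FileBackendStore):
	 * @code
	 * // $performOps: FileOp[] built by the backend; $journal: a FileJournal instance
	 * $status = FileOpBatch::attempt(
	 *     $performOps,
	 *     [ 'force' => false, 'nonJournaled' => false, 'concurrency' => 4 ],
	 *     $journal
	 * );
	 * if ( !$status->isOK() ) {
	 *     // inspect $status->success and $status->failCount to see which ops failed
	 * }
	 * @endcode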
	 */
	public static function attempt( array $performOps, array $opts, FileJournal $journal ) {
		$status = StatusValue::newGood();

		$n = count( $performOps );
		if ( $n > self::MAX_BATCH_SIZE ) {
			$status->fatal( 'backend-fail-batchsize', $n, self::MAX_BATCH_SIZE );

			return $status;
		}

		$batchId = $journal->getTimestampedUUID();
		$ignoreErrors = !empty( $opts['force'] );
		$journaled = empty( $opts['nonJournaled'] );
		$maxConcurrency = isset( $opts['concurrency'] ) ? $opts['concurrency'] : 1;

		$entries = []; // file journal entry list
		$predicates = FileOp::newPredicates(); // account for previous ops in prechecks
		$curBatch = []; // concurrent FileOp sub-batch accumulation
		$curBatchDeps = FileOp::newDependencies(); // paths used in FileOp sub-batch
		$pPerformOps = []; // ordered list of concurrent FileOp sub-batches
		$lastBackend = null; // last op backend name

		// Do pre-checks for each operation; abort on failure...
		foreach ( $performOps as $index => $fileOp ) {
			$backendName = $fileOp->getBackend()->getName();
			$fileOp->setBatchId( $batchId ); // transaction ID
			// Decide if this op can be done concurrently within this sub-batch
			// or if a new concurrent sub-batch must be started after this one...
			if ( $fileOp->dependsOn( $curBatchDeps )
				|| count( $curBatch ) >= $maxConcurrency
				|| ( $backendName !== $lastBackend && count( $curBatch ) )
			) {
				$pPerformOps[] = $curBatch; // push this batch
				$curBatch = []; // start a new sub-batch
				$curBatchDeps = FileOp::newDependencies();
			}
			$lastBackend = $backendName;
			$curBatch[$index] = $fileOp; // keep index
			// Update list of affected paths in this batch
			$curBatchDeps = $fileOp->applyDependencies( $curBatchDeps );
			// Simulate performing the operation...
			$oldPredicates = $predicates;
			$subStatus = $fileOp->precheck( $predicates ); // updates $predicates
			$status->merge( $subStatus );
			if ( $subStatus->isOK() ) {
				if ( $journaled ) { // journal log entries
					$entries = array_merge( $entries,
						$fileOp->getJournalEntries( $oldPredicates, $predicates ) );
				}
			} else { // operation failed?
				$status->success[$index] = false;
				++$status->failCount;
				if ( !$ignoreErrors ) {
					return $status; // abort
				}
			}
		}

		// Push the last sub-batch
		if ( count( $curBatch ) ) {
			$pPerformOps[] = $curBatch;
		}

		// Log the operations in the file journal...
		if ( count( $entries ) ) {
			$subStatus = $journal->logChangeBatch( $entries, $batchId );
			if ( !$subStatus->isOK() ) {
				$status->merge( $subStatus );

				return $status; // abort
			}
		}

		if ( $ignoreErrors ) { // treat precheck() fatals as mere warnings
			$status->setResult( true, $status->value );
		}

		// Attempt each operation (in parallel if allowed and possible)...
		self::runParallelBatches( $pPerformOps, $status );

		return $status;
	}

	/**
	 * Attempt a list of file operation sub-batches in series.
	 *
	 * The operations *in* each sub-batch will be done in parallel.
	 * The caller is responsible for making sure the operations
	 * within any given sub-batch do not depend on each other.
	 * This will abort remaining ops on failure.
	 *
	 * @param array $pPerformOps Batches of file ops (batches use original indexes)
	 * @param StatusValue $status
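	 *
	 * @par Example shape of $pPerformOps (illustrative only; $copyOp, $storeOp, and
	 *   $deleteOp stand in for FileOp instances, keyed by their original indexes):
	 * @code
	 * [
	 *     [ 0 => $copyOp, 1 => $storeOp ], // first sub-batch, attempted in parallel
	 *     [ 2 => $deleteOp ]               // second sub-batch, attempted after the first
	 * ]
	 * @endcode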
	 */
	protected static function runParallelBatches( array $pPerformOps, StatusValue $status ) {
		$aborted = false; // set to true on unexpected errors
		foreach ( $pPerformOps as $performOpsBatch ) {
			/** @var FileOp[] $performOpsBatch */
			if ( $aborted ) { // check batch op abort flag...
				// We can't continue (even with $ignoreErrors) as $predicates is wrong.
				// Log the remaining ops as failed for recovery...
				foreach ( $performOpsBatch as $i => $fileOp ) {
					$status->success[$i] = false;
					++$status->failCount;
					$performOpsBatch[$i]->logFailure( 'attempt_aborted' );
				}
				continue;
			}
			/** @var StatusValue[] $statuses */
			$statuses = [];
			$opHandles = [];
			// Get the backend; all sub-batch ops belong to a single backend
			/** @var FileBackendStore $backend */
			$backend = reset( $performOpsBatch )->getBackend();
			// Get the operation handles or actually do it if there is just one.
			// If attemptAsync() returns a StatusValue, it was either due to an error
			// or the backend does not support async ops and did it synchronously.
			foreach ( $performOpsBatch as $i => $fileOp ) {
				if ( !isset( $status->success[$i] ) ) { // didn't already fail in precheck()
					// Parallel ops may be disabled in config due to missing dependencies
					// (e.g. needing popen()). When they are, $performOpsBatch has size 1.
					$subStatus = ( count( $performOpsBatch ) > 1 )
						? $fileOp->attemptAsync()
						: $fileOp->attempt();
					if ( $subStatus->value instanceof FileBackendStoreOpHandle ) {
						$opHandles[$i] = $subStatus->value; // deferred
					} else {
						$statuses[$i] = $subStatus; // done already
					}
				}
			}
			// Try to do all the operations concurrently...
			$statuses = $statuses + $backend->executeOpHandlesInternal( $opHandles );
			// Marshall and merge all the responses (blocking)...
			foreach ( $performOpsBatch as $i => $fileOp ) {
				if ( !isset( $status->success[$i] ) ) { // didn't already fail in precheck()
					$subStatus = $statuses[$i];
					$status->merge( $subStatus );
					if ( $subStatus->isOK() ) {
						$status->success[$i] = true;
						++$status->successCount;
					} else {
						$status->success[$i] = false;
						++$status->failCount;
						$aborted = true; // set abort flag; we can't continue