author    Pierre Schmitz <pierre@archlinux.de>  2014-12-27 15:41:37 +0100
committer Pierre Schmitz <pierre@archlinux.de>  2014-12-31 11:43:28 +0100
commit    c1f9b1f7b1b77776192048005dcc66dcf3df2bfb (patch)
tree      2b38796e738dd74cb42ecd9bfd151803108386bc /includes/filebackend/FileOpBatch.php
parent    b88ab0086858470dd1f644e64cb4e4f62bb2be9b (diff)
Update to MediaWiki 1.24.1
Diffstat (limited to 'includes/filebackend/FileOpBatch.php')
-rw-r--r--  includes/filebackend/FileOpBatch.php  21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/includes/filebackend/FileOpBatch.php b/includes/filebackend/FileOpBatch.php
index 785c0bc9..b0d83e01 100644
--- a/includes/filebackend/FileOpBatch.php
+++ b/includes/filebackend/FileOpBatch.php
@@ -55,13 +55,13 @@ class FileOpBatch {
* @return Status
*/
public static function attempt( array $performOps, array $opts, FileJournal $journal ) {
- wfProfileIn( __METHOD__ );
+ $section = new ProfileSection( __METHOD__ );
$status = Status::newGood();
$n = count( $performOps );
if ( $n > self::MAX_BATCH_SIZE ) {
$status->fatal( 'backend-fail-batchsize', $n, self::MAX_BATCH_SIZE );
- wfProfileOut( __METHOD__ );
+
return $status;
}
@@ -107,7 +107,6 @@ class FileOpBatch {
$status->success[$index] = false;
++$status->failCount;
if ( !$ignoreErrors ) {
- wfProfileOut( __METHOD__ );
return $status; // abort
}
}
@@ -121,7 +120,6 @@ class FileOpBatch {
if ( count( $entries ) ) {
$subStatus = $journal->logChangeBatch( $entries, $batchId );
if ( !$subStatus->isOK() ) {
- wfProfileOut( __METHOD__ );
return $subStatus; // abort
}
}
@@ -133,7 +131,6 @@ class FileOpBatch {
// Attempt each operation (in parallel if allowed and possible)...
self::runParallelBatches( $pPerformOps, $status );
- wfProfileOut( __METHOD__ );
return $status;
}
@@ -145,9 +142,8 @@ class FileOpBatch {
* within any given sub-batch do not depend on each other.
* This will abort remaining ops on failure.
*
- * @param Array $pPerformOps
+ * @param array $pPerformOps Batches of file ops (batches use original indexes)
* @param Status $status
- * @return bool Success
*/
protected static function runParallelBatches( array $pPerformOps, Status $status ) {
$aborted = false; // set to true on unexpected errors
@@ -156,6 +152,8 @@ class FileOpBatch {
// We can't continue (even with $ignoreErrors) as $predicates is wrong.
// Log the remaining ops as failed for recovery...
foreach ( $performOpsBatch as $i => $fileOp ) {
+ $status->success[$i] = false;
+ ++$status->failCount;
$performOpsBatch[$i]->logFailure( 'attempt_aborted' );
}
continue;
@@ -168,9 +166,9 @@ class FileOpBatch {
// If attemptAsync() returns a Status, it was either due to an error
// or the backend does not support async ops and did it synchronously.
foreach ( $performOpsBatch as $i => $fileOp ) {
- if ( !$fileOp->failed() ) { // failed => already has Status
- // If the batch is just one operation, it's faster to avoid
- // pipelining as that can involve creating new TCP connections.
+ if ( !isset( $status->success[$i] ) ) { // didn't already fail in precheck()
+ // Parallel ops may be disabled in config due to missing dependencies,
+ // (e.g. needing popen()). When they are, $performOpsBatch has size 1.
$subStatus = ( count( $performOpsBatch ) > 1 )
? $fileOp->attemptAsync()
: $fileOp->attempt();
@@ -185,7 +183,7 @@ class FileOpBatch {
$statuses = $statuses + $backend->executeOpHandlesInternal( $opHandles );
// Marshall and merge all the responses (blocking)...
foreach ( $performOpsBatch as $i => $fileOp ) {
- if ( !$fileOp->failed() ) { // failed => already has Status
+ if ( !isset( $status->success[$i] ) ) { // didn't already fail in precheck()
$subStatus = $statuses[$i];
$status->merge( $subStatus );
if ( $subStatus->isOK() ) {
@@ -199,6 +197,5 @@ class FileOpBatch {
}
}
}
- return $status;
}
}
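
The main mechanical change above is replacing paired wfProfileIn()/wfProfileOut() calls with a single ProfileSection object whose lifetime covers the method, so each early return no longer needs its own wfProfileOut() call. Below is a minimal, self-contained sketch of that scope-based pattern; ScopedProfileSection is a hypothetical stand-in for MediaWiki's ProfileSection, and the wfProfileIn()/wfProfileOut() functions here are simple stubs rather than the real profiler hooks.

<?php
// Stub standing in for MediaWiki's profiler entry hook.
function wfProfileIn( $name ) {
	echo "profile in:  $name\n";
}

// Stub standing in for MediaWiki's profiler exit hook.
function wfProfileOut( $name ) {
	echo "profile out: $name\n";
}

// Hypothetical stand-in for ProfileSection: enter on construction,
// leave when the object goes out of scope.
class ScopedProfileSection {
	private $name;

	public function __construct( $name ) {
		$this->name = $name;
		wfProfileIn( $name );
	}

	public function __destruct() {
		// Runs when the section object leaves scope, including on early
		// return or an uncaught exception, guaranteeing a matching "out".
		wfProfileOut( $this->name );
	}
}

function attemptBatch( array $ops ) {
	$section = new ScopedProfileSection( __FUNCTION__ );
	if ( count( $ops ) > 1000 ) {
		return false; // no explicit wfProfileOut() needed on this early exit
	}
	// ... perform the operations ...
	return true;
}

attemptBatch( array( 'op1', 'op2' ) );

The destructor-based exit is what lets the patch delete the wfProfileOut() calls before each return statement in attempt() without leaving profiling sections open.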