From c1f9b1f7b1b77776192048005dcc66dcf3df2bfb Mon Sep 17 00:00:00 2001 From: Pierre Schmitz Date: Sat, 27 Dec 2014 15:41:37 +0100 Subject: Update to MediaWiki 1.24.1 --- includes/jobqueue/JobQueueFederated.php | 559 ++++++++++++++++++++++++++++++++ 1 file changed, 559 insertions(+) create mode 100644 includes/jobqueue/JobQueueFederated.php (limited to 'includes/jobqueue/JobQueueFederated.php') diff --git a/includes/jobqueue/JobQueueFederated.php b/includes/jobqueue/JobQueueFederated.php new file mode 100644 index 00000000..c4301eed --- /dev/null +++ b/includes/jobqueue/JobQueueFederated.php @@ -0,0 +1,559 @@ + JobQueue) reverse sorted by weight */ + protected $partitionQueues = array(); + + /** @var BagOStuff */ + protected $cache; + + /** @var int Maximum number of partitions to try */ + protected $maxPartitionsTry; + + const CACHE_TTL_SHORT = 30; // integer; seconds to cache info without re-validating + const CACHE_TTL_LONG = 300; // integer; seconds to cache info that is kept up to date + + /** + * @param array $params Possible keys: + * - sectionsByWiki : A map of wiki IDs to section names. + * Wikis will default to using the section "default". + * - partitionsBySection : Map of section names to maps of (partition name => weight). + * A section called 'default' must be defined if not all wikis + * have explicitly defined sections. + * - configByPartition : Map of queue partition names to configuration arrays. + * These configuration arrays are passed to JobQueue::factory(). + * The options set here are overriden by those passed to this + * the federated queue itself (e.g. 'order' and 'claimTTL'). + * - partitionsNoPush : List of partition names that can handle pop() but not push(). + * This can be used to migrate away from a certain partition. + * - maxPartitionsTry : Maximum number of times to attempt job insertion using + * different partition queues. 
	 * This improves availability during failure, at the cost of added
	 * latency and somewhat less reliable job de-duplication mechanisms.
	 * @throws MWException
	 */
	protected function __construct( array $params ) {
		parent::__construct( $params );
		// Map the local wiki to a section, falling back to "default"
		$section = isset( $params['sectionsByWiki'][$this->wiki] )
			? $params['sectionsByWiki'][$this->wiki]
			: 'default';
		if ( !isset( $params['partitionsBySection'][$section] ) ) {
			throw new MWException( "No configuration for section '$section'." );
		}
		$this->maxPartitionsTry = isset( $params['maxPartitionsTry'] )
			? $params['maxPartitionsTry']
			: 2;
		// Get the full partition map (partition name => weight),
		// reverse sorted by weight so heavier partitions come first
		$partitionMap = $params['partitionsBySection'][$section];
		arsort( $partitionMap, SORT_NUMERIC );
		// Get the partitions jobs can actually be pushed to
		$partitionPushMap = $partitionMap;
		if ( isset( $params['partitionsNoPush'] ) ) {
			foreach ( $params['partitionsNoPush'] as $partition ) {
				unset( $partitionPushMap[$partition] );
			}
		}
		// Get the config to pass to merge into each partition queue config;
		// strip the federation-only settings that partitions don't understand
		$baseConfig = $params;
		foreach ( array( 'class', 'sectionsByWiki', 'maxPartitionsTry',
			'partitionsBySection', 'configByPartition', 'partitionsNoPush' ) as $o
		) {
			unset( $baseConfig[$o] ); // partition queue doesn't care about this
		}
		// Get the partition queue objects
		foreach ( $partitionMap as $partition => $w ) {
			if ( !isset( $params['configByPartition'][$partition] ) ) {
				throw new MWException( "No configuration for partition '$partition'." );
			}
			$this->partitionQueues[$partition] = JobQueue::factory(
				$baseConfig + $params['configByPartition'][$partition] );
		}
		// Ring of all partitions
		$this->partitionRing = new HashRing( $partitionMap );
		// Get the ring of partitions to push jobs into
		if ( count( $partitionPushMap ) === count( $partitionMap ) ) {
			$this->partitionPushRing = clone $this->partitionRing; // faster
		} else {
			$this->partitionPushRing = new HashRing( $partitionPushMap );
		}
		// Aggregate cache some per-queue values if there are multiple partition queues
		$this->cache = count( $partitionMap ) > 1 ? wfGetMainCache() : new EmptyBagOStuff();
	}

	/**
	 * @return array List of queue orders this federated queue can provide
	 */
	protected function supportedOrders() {
		// No FIFO due to partitioning, though "rough timestamp order" is supported
		return array( 'undefined', 'random', 'timestamp' );
	}

	/**
	 * @return string
	 */
	protected function optimalOrder() {
		return 'undefined'; // defer to the partitions
	}

	/**
	 * @return bool
	 */
	protected function supportsDelayedJobs() {
		return true; // defer checks to the partitions
	}

	/**
	 * Check emptiness across all partitions, preferring a cached
	 * 'true'/'false' answer when one is available.
	 * @return bool
	 */
	protected function doIsEmpty() {
		$key = $this->getCacheKey( 'empty' );

		$isEmpty = $this->cache->get( $key );
		if ( $isEmpty === 'true' ) {
			return true;
		} elseif ( $isEmpty === 'false' ) {
			return false;
		}

		// The queue is empty only if every reachable partition is empty
		$empty = true;
		$failed = 0;
		foreach ( $this->partitionQueues as $queue ) {
			try {
				$empty = $empty && $queue->doIsEmpty();
			} catch ( JobQueueError $e ) {
				++$failed;
				MWExceptionHandler::logException( $e );
			}
		}
		$this->throwErrorIfAllPartitionsDown( $failed );

		// NOTE(review): add() rather than set() — presumably so a value stored
		// concurrently (e.g. 'false' from a push) is not clobbered; confirm
		// against BagOStuff::add() semantics.
		$this->cache->add( $key, $empty ? 'true' : 'false', self::CACHE_TTL_LONG );

		return $empty;
	}

	/**
	 * @return int Total number of queued jobs across all partitions
	 */
	protected function doGetSize() {
		return $this->getCrossPartitionSum( 'size', 'doGetSize' );
	}

	/**
	 * @return int Total number of acquired (claimed) jobs across all partitions
	 */
	protected function doGetAcquiredCount() {
		return $this->getCrossPartitionSum( 'acquiredcount', 'doGetAcquiredCount' );
	}

	/**
	 * @return int Total number of delayed jobs across all partitions
	 */
	protected function doGetDelayedCount() {
		return $this->getCrossPartitionSum( 'delayedcount', 'doGetDelayedCount' );
	}

	/**
	 * @return int Total number of abandoned jobs across all partitions
	 */
	protected function doGetAbandonedCount() {
		return $this->getCrossPartitionSum( 'abandonedcount', 'doGetAbandonedCount' );
	}

	/**
	 * Sum a per-partition statistic over all partition queues, caching
	 * the aggregate briefly (CACHE_TTL_SHORT). Partitions that throw
	 * JobQueueError are skipped (logged), unless all of them are down.
	 *
	 * @param string $type Aggregate cache key property (e.g. "size")
	 * @param string $method JobQueue method to invoke on each partition
	 * @return int
	 */
	protected function getCrossPartitionSum( $type, $method ) {
		$key = $this->getCacheKey( $type );

		$count = $this->cache->get( $key );
		if ( $count !== false ) {
			return $count;
		}

		$failed = 0;
		foreach ( $this->partitionQueues as $queue ) {
			try {
				$count += $queue->$method();
			} catch ( JobQueueError $e ) {
				++$failed;
				MWExceptionHandler::logException( $e );
			}
		}
		$this->throwErrorIfAllPartitionsDown( $failed );

		$this->cache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

	/**
	 * Push a batch of jobs, retrying up to $maxPartitionsTry times while
	 * any live partitions remain; partitions that fail get ejected from
	 * the (local copy of the) push ring between attempts.
	 *
	 * @param array $jobs
	 * @param int $flags
	 * @throws JobQueueError If jobs remain uninserted after all attempts
	 */
	protected function doBatchPush( array $jobs, $flags ) {
		// Local ring variable that may be changed to point to a new ring on failure
		$partitionRing = $this->partitionPushRing;
		// Try to insert the jobs and update $partitionsTry on any failures.
		// Retry to insert any remaining jobs again, ignoring the bad partitions.
		$jobsLeft = $jobs;
		// @codingStandardsIgnoreStart Generic.CodeAnalysis.ForLoopWithTestFunctionCall.NotAllowed
		for ( $i = $this->maxPartitionsTry; $i > 0 && count( $jobsLeft ); --$i ) {
			// @codingStandardsIgnoreEnd
			try {
				$partitionRing->getLiveRing();
			} catch ( UnexpectedValueException $e ) {
				break; // all servers down; nothing to insert to
			}
			$jobsLeft = $this->tryJobInsertions( $jobsLeft, $partitionRing, $flags );
		}
		if ( count( $jobsLeft ) ) {
			throw new JobQueueError(
				"Could not insert job(s), {$this->maxPartitionsTry} partitions tried." );
		}
	}

	/**
	 * Attempt one round of job insertions over the live partitions.
	 *
	 * @param array $jobs
	 * @param HashRing $partitionRing Ring of pushable partitions; failing
	 *   partitions are ejected from its live ring in place
	 * @param int $flags
	 * @throws JobQueueError If a partition fails and none can be ejected
	 * @return array List of Job objects that could not be inserted
	 */
	protected function tryJobInsertions( array $jobs, HashRing &$partitionRing, $flags ) {
		$jobsLeft = array();

		// Because jobs are spread across partitions, per-job de-duplication needs
		// to use a consistent hash to avoid allowing duplicate jobs per partition.
		// When inserting a batch of de-duplicated jobs, QOS_ATOMIC is disregarded.
		$uJobsByPartition = array(); // (partition name => job list)
		/** @var Job $job */
		foreach ( $jobs as $key => $job ) {
			if ( $job->ignoreDuplicates() ) {
				$sha1 = sha1( serialize( $job->getDeduplicationInfo() ) );
				$uJobsByPartition[$partitionRing->getLiveLocation( $sha1 )][] = $job;
				unset( $jobs[$key] );
			}
		}
		// Get the batches of jobs that are not de-duplicated
		if ( $flags & self::QOS_ATOMIC ) {
			$nuJobBatches = array( $jobs ); // all or nothing
		} else {
			// Split the jobs into batches and spread them out over servers if there
			// are many jobs. This helps keep the partitions even. Otherwise, send all
			// the jobs to a single partition queue to avoid the extra connections.
			$nuJobBatches = array_chunk( $jobs, 300 );
		}

		// Insert the de-duplicated jobs into the queues...
		foreach ( $uJobsByPartition as $partition => $jobBatch ) {
			/** @var JobQueue $queue */
			$queue = $this->partitionQueues[$partition];
			try {
				$ok = true;
				$queue->doBatchPush( $jobBatch, $flags | self::QOS_ATOMIC );
			} catch ( JobQueueError $e ) {
				$ok = false;
				MWExceptionHandler::logException( $e );
			}
			if ( $ok ) {
				// Queue is definitely not empty now; refresh the cached flag
				$key = $this->getCacheKey( 'empty' );
				$this->cache->set( $key, 'false', self::CACHE_TTL_LONG );
			} else {
				if ( !$partitionRing->ejectFromLiveRing( $partition, 5 ) ) { // blacklist
					throw new JobQueueError( "Could not insert job(s), no partitions available." );
				}
				$jobsLeft = array_merge( $jobsLeft, $jobBatch ); // not inserted
			}
		}

		// Insert the jobs that are not de-duplicated into the queues...
		foreach ( $nuJobBatches as $jobBatch ) {
			// Weighted random pick among the live partitions
			$partition = ArrayUtils::pickRandom( $partitionRing->getLiveLocationWeights() );
			$queue = $this->partitionQueues[$partition];
			try {
				$ok = true;
				$queue->doBatchPush( $jobBatch, $flags | self::QOS_ATOMIC );
			} catch ( JobQueueError $e ) {
				$ok = false;
				MWExceptionHandler::logException( $e );
			}
			if ( $ok ) {
				$key = $this->getCacheKey( 'empty' );
				$this->cache->set( $key, 'false', self::CACHE_TTL_LONG );
			} else {
				if ( !$partitionRing->ejectFromLiveRing( $partition, 5 ) ) { // blacklist
					throw new JobQueueError( "Could not insert job(s), no partitions available." );
				}
				$jobsLeft = array_merge( $jobsLeft, $jobBatch ); // not inserted
			}
		}

		return $jobsLeft;
	}

	/**
	 * Pop a job from a weight-biased random live partition, trying the
	 * remaining partitions until one yields a job.
	 *
	 * @throws JobQueueError If every partition threw an error
	 * @return Job|bool Job object, or false if all partitions were empty
	 */
	protected function doPop() {
		$partitionsTry = $this->partitionRing->getLiveLocationWeights(); // (partition => weight)

		$failed = 0;
		while ( count( $partitionsTry ) ) {
			$partition = ArrayUtils::pickRandom( $partitionsTry );
			if ( $partition === false ) {
				break; // all partitions at 0 weight
			}

			/** @var JobQueue $queue */
			$queue = $this->partitionQueues[$partition];
			try {
				$job = $queue->pop();
			} catch ( JobQueueError $e ) {
				++$failed;
				MWExceptionHandler::logException( $e );
				$job = false;
			}
			if ( $job ) {
				// Remember the source partition so ack() can route back to it
				$job->metadata['QueuePartition'] = $partition;

				return $job;
			} else {
				unset( $partitionsTry[$partition] ); // blacklist partition
			}
		}
		$this->throwErrorIfAllPartitionsDown( $failed );

		// Every partition came up empty; cache that fact
		$key = $this->getCacheKey( 'empty' );
		$this->cache->set( $key, 'true', self::CACHE_TTL_LONG );

		return false;
	}

	/**
	 * Acknowledge a job on the partition that pop() took it from.
	 *
	 * @param Job $job
	 * @throws MWException If the job lacks 'QueuePartition' metadata
	 * @return mixed Result of the partition queue's ack()
	 */
	protected function doAck( Job $job ) {
		if ( !isset( $job->metadata['QueuePartition'] ) ) {
			throw new MWException( "The given job has no defined partition name."
			);
		}

		return $this->partitionQueues[$job->metadata['QueuePartition']]->ack( $job );
	}

	/**
	 * Check for an older duplicate of a root job on the partition that
	 * the root job signature hashes to; if that partition errors out,
	 * eject it from the live ring and retry once on the next location.
	 *
	 * @param Job $job
	 * @return bool
	 */
	protected function doIsRootJobOldDuplicate( Job $job ) {
		$params = $job->getRootJobParams();
		$sigature = $params['rootJobSignature'];
		$partition = $this->partitionPushRing->getLiveLocation( $sigature );
		try {
			return $this->partitionQueues[$partition]->doIsRootJobOldDuplicate( $job );
		} catch ( JobQueueError $e ) {
			if ( $this->partitionPushRing->ejectFromLiveRing( $partition, 5 ) ) {
				$partition = $this->partitionPushRing->getLiveLocation( $sigature );
				return $this->partitionQueues[$partition]->doIsRootJobOldDuplicate( $job );
			}
		}

		return false;
	}

	/**
	 * Register a root job de-duplication marker on the partition that
	 * the root job signature hashes to; if that partition errors out,
	 * eject it from the live ring and retry once on the next location.
	 *
	 * @param Job $job
	 * @return bool
	 */
	protected function doDeduplicateRootJob( Job $job ) {
		$params = $job->getRootJobParams();
		$sigature = $params['rootJobSignature'];
		$partition = $this->partitionPushRing->getLiveLocation( $sigature );
		try {
			return $this->partitionQueues[$partition]->doDeduplicateRootJob( $job );
		} catch ( JobQueueError $e ) {
			if ( $this->partitionPushRing->ejectFromLiveRing( $partition, 5 ) ) {
				$partition = $this->partitionPushRing->getLiveLocation( $sigature );
				return $this->partitionQueues[$partition]->doDeduplicateRootJob( $job );
			}
		}

		return false;
	}

	/**
	 * Delete all jobs on every reachable partition.
	 *
	 * @throws JobQueueError If all partitions are down
	 * @return bool
	 */
	protected function doDelete() {
		$failed = 0;
		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			try {
				$queue->doDelete();
			} catch ( JobQueueError $e ) {
				++$failed;
				MWExceptionHandler::logException( $e );
			}
		}
		$this->throwErrorIfAllPartitionsDown( $failed );
		return true;
	}

	/**
	 * Wait for backups/replication on every reachable partition.
	 *
	 * @throws JobQueueError If all partitions are down
	 */
	protected function doWaitForBackups() {
		$failed = 0;
		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			try {
				$queue->waitForBackups();
			} catch ( JobQueueError $e ) {
				++$failed;
				MWExceptionHandler::logException( $e );
			}
		}
		$this->throwErrorIfAllPartitionsDown( $failed );
	}

	/**
	 * Collect the periodic tasks of all partitions, namespacing each
	 * task name as "<partition>:<task>" to avoid collisions.
	 *
	 * @return array
	 */
	protected function doGetPeriodicTasks() {
		$tasks = array();
		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $partition => $queue ) {
			foreach ( $queue->getPeriodicTasks() as $task => $def ) {
				$tasks["{$partition}:{$task}"] = $def;
			}
		}

		return $tasks;
	}

	/**
	 * Purge the aggregate stat cache keys and flush each partition's caches.
	 */
	protected function doFlushCaches() {
		static $types = array(
			'empty',
			'size',
			'acquiredcount',
			'delayedcount',
			'abandonedcount'
		);

		foreach ( $types as $type ) {
			$this->cache->delete( $this->getCacheKey( $type ) );
		}

		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			$queue->doFlushCaches();
		}
	}

	/**
	 * @return Iterator Iterator over the queued jobs of all partitions
	 */
	public function getAllQueuedJobs() {
		$iterator = new AppendIterator();

		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			$iterator->append( $queue->getAllQueuedJobs() );
		}

		return $iterator;
	}

	/**
	 * @return Iterator Iterator over the delayed jobs of all partitions
	 */
	public function getAllDelayedJobs() {
		$iterator = new AppendIterator();

		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			$iterator->append( $queue->getAllDelayedJobs() );
		}

		return $iterator;
	}

	/**
	 * @return string Coalesce key derived from the wiki ID and partition names
	 */
	public function getCoalesceLocationInternal() {
		return "JobQueueFederated:wiki:{$this->wiki}" .
			sha1( serialize( array_keys( $this->partitionQueues ) ) );
	}

	/**
	 * Find which of the given sibling queue types have jobs, aggregating
	 * over all partitions and short-circuiting once every type is seen.
	 *
	 * @param array $types List of queue types
	 * @throws JobQueueError If all partitions are down
	 * @return array|null List of types with jobs, or null if any partition
	 *   does not support the operation
	 */
	protected function doGetSiblingQueuesWithJobs( array $types ) {
		$result = array();

		$failed = 0;
		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			try {
				$nonEmpty = $queue->doGetSiblingQueuesWithJobs( $types );
				if ( is_array( $nonEmpty ) ) {
					$result = array_unique( array_merge( $result, $nonEmpty ) );
				} else {
					return null; // not supported on all partitions; bail
				}
				if ( count( $result ) == count( $types ) ) {
					break; // short-circuit
				}
			} catch ( JobQueueError $e ) {
				++$failed;
				MWExceptionHandler::logException( $e );
			}
		}
		$this->throwErrorIfAllPartitionsDown( $failed );

		return array_values( $result );
	}

	/**
	 * Sum the per-type sizes of the given sibling queues over all partitions.
	 *
	 * @param array $types List of queue types
	 * @throws JobQueueError If all partitions are down
	 * @return array|null Map of (type => size), or null if any partition
	 *   does not support the operation
	 */
	protected function doGetSiblingQueueSizes( array $types ) {
		$result = array();
		$failed = 0;
		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			try {
				$sizes = $queue->doGetSiblingQueueSizes( $types );
				if ( is_array( $sizes ) ) {
					foreach ( $sizes as $type => $size ) {
						$result[$type] = isset( $result[$type] ) ? $result[$type] + $size : $size;
					}
				} else {
					return null; // not supported on all partitions; bail
				}
			} catch ( JobQueueError $e ) {
				++$failed;
				MWExceptionHandler::logException( $e );
			}
		}
		$this->throwErrorIfAllPartitionsDown( $failed );

		return $result;
	}

	/**
	 * Throw an error if no partitions are available
	 *
	 * @param int $down The number of partitions that are down
	 * @return void
	 * @throws JobQueueError
	 */
	protected function throwErrorIfAllPartitionsDown( $down ) {
		if ( $down >= count( $this->partitionQueues ) ) {
			throw new JobQueueError( 'No queue partitions available.' );
		}
	}

	/**
	 * Apply a testing key prefix to every partition queue.
	 *
	 * @param string $key
	 */
	public function setTestingPrefix( $key ) {
		/** @var JobQueue $queue */
		foreach ( $this->partitionQueues as $queue ) {
			$queue->setTestingPrefix( $key );
		}
	}

	/**
	 * Build a wiki- and job-type-specific cache key for the given property.
	 *
	 * @param string $property
	 * @return string
	 */
	private function getCacheKey( $property ) {
		list( $db, $prefix ) = wfSplitWikiID( $this->wiki );

		return wfForeignMemcKey( $db, $prefix, 'jobqueue', $this->type, $property );
	}
}