MediaWiki 1.30.2
includes/jobqueue/JobRunner.php
<?php
/**
 * Job queue runner utility methods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup JobQueue
 */

use MediaWiki\MediaWikiServices;
use MediaWiki\Logger\LoggerFactory;
use Liuggio\StatsdClient\Factory\StatsdDataFactory;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerInterface;
use Wikimedia\ScopedCallback;
use Wikimedia\Rdbms\LBFactory;
use Wikimedia\Rdbms\DBError;
use Wikimedia\Rdbms\DBReplicationWaitError;

/**
 * Job queue runner utility methods
 *
 * @ingroup JobQueue
 * @since 1.24
 */
class JobRunner implements LoggerAwareInterface {
	/** @var Config */
	protected $config;
	/** @var callable|null Debug output handler */
	protected $debug;

	/**
	 * @var LoggerInterface $logger
	 */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check replica DB lag every this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
	const READONLY_BACKOFF_TTL = 30; // seconds to back off a queue due to read-only errors

	/**
	 * @param callable $debug Optional debug output handler
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}

	/**
	 * @param LoggerInterface $logger
	 * @return void
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}

	/**
	 * @param LoggerInterface $logger
	 */
	public function __construct( LoggerInterface $logger = null ) {
		if ( $logger === null ) {
			$logger = LoggerFactory::getInstance( 'runJobs' );
		}
		$this->setLogger( $logger );
		$this->config = MediaWikiServices::getInstance()->getMainConfig();
	}

	/**
	 * Run jobs of the specified number/type for the specified time
	 *
	 * The response map has a 'jobs' field that lists the status of each job run, including:
	 *   - type   : the job type
	 *   - status : ok/failed
	 *   - error  : any error message string
	 *   - time   : the job run time in ms
	 * The response map also has:
	 *   - backoffs : the (job type => seconds) map of backoff times
	 *   - elapsed  : the total time spent running tasks in ms
	 *   - reached  : the reason the script finished, one of (none-ready, none-possible,
	 *     read-only, replica-lag-limit, job-limit, time-limit, memory-limit)
	 *
	 * This method outputs status information only if a debug handler was set.
	 * Any exceptions are caught and logged, but are not reported as output.
	 *
	 * @param array $options Map of parameters:
	 *    - type     : the job type (or false for the default types)
	 *    - maxJobs  : maximum number of jobs to run
	 *    - maxTime  : maximum time in seconds before stopping
	 *    - throttle : whether to respect job backoff configuration
	 * @return array Summary response that can easily be JSON serialized
	 */
	public function run( array $options ) {
		$jobClasses = $this->config->get( 'JobClasses' );
		$profilerLimits = $this->config->get( 'TrxProfilerLimits' );

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		$type = isset( $options['type'] ) ? $options['type'] : false;
		$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
		$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $jobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}
		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'replica-lag-limit';
			return $response;
		}

		// Flush any pending DB writes for sanity
		$lbFactory->commitAll( __METHOD__ );

		// Catch huge single updates that lead to replica DB lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $profilerLimits['JobRunner'], __METHOD__ );

		// Some job types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // UNIX timestamp when the run started
		$lastCheckTime = 1; // timestamp of last replica DB check
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}
			$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				WebRequest::overrideRequestId( $job->getRequestId() );

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
					$lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
				}

				// Back off of certain jobs for a while (for throttling and for errors)
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, $this->getErrorBackoffTTL( $info['error'] ) );
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type'   => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error'  => $info['error'],
					'time'   => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB replica DBs get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					try {
						$lbFactory->waitForReplication( [
							'ifWritesSince' => $lastCheckTime,
							'timeout' => self::MAX_ALLOWED_LAG
						] );
					} catch ( DBReplicationWaitError $e ) {
						$response['reached'] = 'replica-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}
				// Don't let any queue replica DBs/backups fall behind
				if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
					$group->waitForBackups();
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}
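
	/*
	 * A minimal usage sketch (not part of this class): how a caller such as the
	 * runJobs.php maintenance script might invoke run() with the options documented
	 * above. The option values and the 'refreshLinks' job type are illustrative only.
	 *
	 *   $runner = new JobRunner( LoggerFactory::getInstance( 'runJobs' ) );
	 *   $runner->setDebugHandler( function ( $line ) {
	 *       print $line; // echo progress lines as they are produced
	 *   } );
	 *   $response = $runner->run( [
	 *       'type' => 'refreshLinks', // or false for the default job types
	 *       'maxJobs' => 100,
	 *       'maxTime' => 30, // seconds
	 *       'throttle' => true,
	 *   ] );
	 *   // $response['reached'] is one of: none-ready, none-possible, read-only,
	 *   // replica-lag-limit, job-limit, time-limit, memory-limit
	 *   print json_encode( $response, JSON_PRETTY_PRINT );
	 */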

	/**
	 * @param string $error
	 * @return int TTL in seconds
	 */
	private function getErrorBackoffTTL( $error ) {
		return strpos( $error, 'DBReadOnlyError' ) !== false
			? self::READONLY_BACKOFF_TTL
			: self::ERROR_BACKOFF_TTL;
	}

	/**
	 * @param Job $job
	 * @param LBFactory $lbFactory
	 * @param StatsdDataFactory $stats
	 * @param float $popTime
	 * @return array Map of status/error/timeMs
	 */
	private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg, [
			'job_type' => $job->getType(),
		] );
		$this->debugCallback( $msg );

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
			$lbFactory->beginMasterChanges( $fnameTrxOwner );
			$status = $job->run();
			$error = $job->getLastError();
			$this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
			// Important: this must be the last deferred update added (T100085, T154425)
			DeferredUpdates::addCallableUpdate( [ JobQueueGroup::class, 'pushLazyJobs' ] );
			// Run any deferred update tasks; doUpdates() manages transactions itself
			DeferredUpdates::doUpdates();
		} catch ( Exception $e ) {
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
		}
		// Always attempt to call teardown() even if Job throws exception.
		try {
			$job->teardown( $status );
		} catch ( Exception $e ) {
			MWExceptionHandler::logException( $e );
		}

		// Commit all outstanding connections that are in a transaction
		// to get a fresh repeatable read snapshot on every connection.
		// Note that jobs are still responsible for handling replica DB lag.
		$lbFactory->flushReplicaSnapshots( __METHOD__ );
		// Clear out title cache data from prior snapshots
		MediaWikiServices::getInstance()->getLinkCache()->clear();
		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
		if ( $rootTimestamp ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			$msg = $job->toString() . " t={job_duration} error={job_error}";
			$this->logger->error( $msg, [
				'job_type' => $job->getType(),
				'job_duration' => $timeMs,
				'job_error' => $error,
			] );

			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t={job_duration} good";
			$this->logger->info( $msg, [
				'job_type' => $job->getType(),
				'job_duration' => $timeMs,
			] );

			$msg = $job->toString() . " t=$timeMs good";
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}
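
	/*
	 * For reference, the statsd keys emitted above follow the patterns in the code,
	 * e.g. for a hypothetical 'refreshLinks' job the runner would emit
	 * 'jobqueue.pickup_delay.refreshLinks', 'jobqueue.pickup_root_age.refreshLinks',
	 * 'jobqueue.run.refreshLinks' and 'jobqueue.rss_delta.refreshLinks'. The map
	 * returned to run() looks roughly like (values illustrative):
	 *
	 *   [ 'status' => true, 'error' => '', 'timeMs' => 42 ] // error is non-empty on failure
	 */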

	/**
	 * @return int|null Max memory RSS in kilobytes
	 */
	private function getMaxRssKb() {
		$info = wfGetRusage() ?: [];
		// see https://linux.die.net/man/2/getrusage
		return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
	}

	/**
	 * @param Job $job
	 * @return int Seconds for this runner to avoid doing more jobs of this type
	 * @see $wgJobBackoffThrottling
	 */
	private function getBackoffTimeToWait( Job $job ) {
		$throttling = $this->config->get( 'JobBackoffThrottling' );

		if ( !isset( $throttling[$job->getType()] ) || $job instanceof DuplicateJob ) {
			return 0; // not throttled
		}

		$itemsPerSecond = $throttling[$job->getType()];
		if ( $itemsPerSecond <= 0 ) {
			return 0; // not throttled
		}

		$seconds = 0;
		if ( $job->workItemCount() > 0 ) {
			$exactSeconds = $job->workItemCount() / $itemsPerSecond;
			// use randomized rounding
			$seconds = floor( $exactSeconds );
			$remainder = $exactSeconds - $seconds;
			$seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
		}

		return (int)$seconds;
	}
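
	/*
	 * A sketch of how $wgJobBackoffThrottling drives the calculation above; the job
	 * type and rate are illustrative, not recommended values. With a setting such as
	 *
	 *   $wgJobBackoffThrottling = [
	 *       'htmlCacheUpdate' => 20, // allow roughly 20 work items per second per runner
	 *   ];
	 *
	 * a popped htmlCacheUpdate job carrying 50 work items yields 50 / 20 = 2.5 seconds,
	 * which randomized rounding turns into 2 seconds half of the time and 3 seconds the
	 * other half, so the average backoff still matches the configured rate.
	 */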

	/**
	 * Get the previous backoff expiries from persistent storage
	 * On I/O or lock acquisition failure this returns the original $backoffs.
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array Map of (job type => backoff expiry timestamp)
	 */
	private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		if ( is_file( $file ) ) {
			$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
			$handle = fopen( $file, 'rb' );
			if ( !flock( $handle, LOCK_SH | $noblock ) ) {
				fclose( $handle );
				return $backoffs; // don't wait on lock
			}
			$content = stream_get_contents( $handle );
			flock( $handle, LOCK_UN );
			fclose( $handle );
			$ctime = microtime( true );
			$cBackoffs = json_decode( $content, true ) ?: [];
			foreach ( $cBackoffs as $type => $timestamp ) {
				if ( $timestamp < $ctime ) {
					unset( $cBackoffs[$type] );
				}
			}
		} else {
			$cBackoffs = [];
		}

		return $cBackoffs;
	}

	/**
	 * Merge the given backoff deltas into the persistent backoff expiries
	 *
	 * The $deltas map is set to an empty array on success.
	 * On I/O or lock acquisition failure this returns the original $backoffs.
	 *
	 * @param array $backoffs Map of (job type => UNIX timestamp)
	 * @param array $deltas Map of (job type => seconds)
	 * @param string $mode Lock wait mode - "wait" or "nowait"
	 * @return array The new backoffs, accounting for $backoffs and the latest file data
	 */
	private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
		if ( !$deltas ) {
			return $this->loadBackoffs( $backoffs, $mode );
		}

		$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		$handle = fopen( $file, 'wb+' );
		if ( !flock( $handle, LOCK_EX | $noblock ) ) {
			fclose( $handle );
			return $backoffs; // don't wait on lock
		}
		$ctime = microtime( true );
		$content = stream_get_contents( $handle );
		$cBackoffs = json_decode( $content, true ) ?: [];
		foreach ( $deltas as $type => $seconds ) {
			$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
				? $cBackoffs[$type] + $seconds
				: $ctime + $seconds;
		}
		foreach ( $cBackoffs as $type => $timestamp ) {
			if ( $timestamp < $ctime ) {
				unset( $cBackoffs[$type] );
			}
		}
		ftruncate( $handle, 0 );
		fwrite( $handle, json_encode( $cBackoffs ) );
		flock( $handle, LOCK_UN );
		fclose( $handle );

		$deltas = [];

		return $cBackoffs;
	}
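
	/*
	 * The backoff state shared between runners is a small JSON map in the system temp
	 * directory, e.g. /tmp/mw-runJobs-backoffs.json. A snapshot might look like the
	 * following (job types and timestamps are illustrative UNIX times with sub-second
	 * precision):
	 *
	 *   { "htmlCacheUpdate": 1504051200.25, "refreshLinks": 1504051230.75 }
	 *
	 * Each value is the expiry time before which this runner (and concurrent runners
	 * reading the same file under flock) should avoid popping jobs of that type.
	 */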

	/**
	 * Make sure that this script is not too close to the memory usage limit.
	 * It is better to die in between jobs than OOM right in the middle of one.
	 * @return bool
	 */
	private function checkMemoryOK() {
		static $maxBytes = null;
		if ( $maxBytes === null ) {
			$m = [];
			if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
				list( , $num, $unit ) = $m;
				$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
				$maxBytes = $num * $conv[strtolower( $unit )];
			} else {
				$maxBytes = 0;
			}
		}
		$usedBytes = memory_get_usage();
		if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
			$msg = "Detected excessive memory usage ({used_bytes}/{max_bytes}).";
			$this->logger->error( $msg, [
				'used_bytes' => $usedBytes,
				'max_bytes' => $maxBytes,
			] );

			$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
			$this->debugCallback( $msg );

			return false;
		}

		return true;
	}
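
	/*
	 * Worked example for the check above, using an assumed php.ini value: with
	 * memory_limit = 256M the regex captures ('256', 'M'), giving
	 * 256 * 1048576 = 268435456 bytes, and the runner refuses to pop another job once
	 * memory_get_usage() reaches 95% of that, i.e. about 255013683 bytes. A
	 * memory_limit of -1 (unlimited) does not match the regex, so $maxBytes stays 0
	 * and the check is skipped.
	 */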

	/**
	 * Send the message to the debug handler, if one was set
	 * @param string $msg The message to log
	 */
	private function debugCallback( $msg ) {
		if ( $this->debug ) {
			call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
		}
	}

	/**
	 * Issue a commit on all masters that are currently in a transaction and have
	 * made changes to the database. Depending on $wgJobSerialCommitThreshold, it
	 * may also wait for the local wiki's replica DBs to catch up; see the
	 * documentation for that setting for details.
	 *
	 * @param LBFactory $lbFactory
	 * @param Job $job
	 * @param string $fnameTrxOwner
	 * @throws DBError
	 */
	private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
		$syncThreshold = $this->config->get( 'JobSerialCommitThreshold' );

		$time = false;
		$lb = $lbFactory->getMainLB( wfWikiID() );
		if ( $syncThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
			// We need natively blocking fast locks
			if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
				$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
				if ( $time < $syncThreshold ) {
					$dbwSerial = false;
				}
			} else {
				$dbwSerial = false;
			}
		} else {
			// There are no replica DBs or writes are all to foreign DB (we don't handle that)
			$dbwSerial = false;
		}

		if ( !$dbwSerial ) {
			$lbFactory->commitMasterChanges(
				$fnameTrxOwner,
				// Abort if any transaction was too big
				[ 'maxWriteDuration' => $this->config->get( 'MaxJobDBWriteDuration' ) ]
			);

			return;
		}

		$ms = intval( 1000 * $time );

		$msg = $job->toString() . " COMMIT ENQUEUED [{job_commit_write_ms}ms of writes]";
		$this->logger->info( $msg, [
			'job_type' => $job->getType(),
			'job_commit_write_ms' => $ms,
		] );

		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
			$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
		} );

		// Wait for the replica DBs to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForAll( $pos );
		}

		// Actually commit the DB master changes
		$lbFactory->commitMasterChanges(
			$fnameTrxOwner,
			// Abort if any transaction was too big
			[ 'maxWriteDuration' => $this->config->get( 'MaxJobDBWriteDuration' ) ]
		);
		ScopedCallback::consume( $unlocker );
	}
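
	/*
	 * Configuration sketch for the serialized-commit path above; the threshold value
	 * is illustrative. With something like
	 *
	 *   $wgJobSerialCommitThreshold = 0.5; // seconds of estimated write time
	 *
	 * any job whose pending master writes are estimated to take at least half a second
	 * commits under the 'jobrunner-serial-commit' named lock and waits for replicas
	 * first, while cheaper jobs (or a setting of false) take the plain commit path.
	 */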
}