author    Aaron Schulz <aschulz@wikimedia.org>  2019-03-08 17:36:21 -0800
committer Aaron Schulz <aschulz@wikimedia.org>  2019-03-09 01:53:31 +0000
commit    7dc2f3993272d55f1b43d1ce6fd7a3be50bf39d2 (patch)
tree      e8876e207457e70c5923a0402b36d47b42017f56
parent    8f242b26e7f14aea198cf72f85c42f767bc095af (diff)
download  mediawikicore-7dc2f3993272d55f1b43d1ce6fd7a3be50bf39d2.tar.gz
          mediawikicore-7dc2f3993272d55f1b43d1ce6fd7a3be50bf39d2.zip
Make HTMLCacheUpdateJob avoid waiting on replication for no reason
Change-Id: Ica381dad0bdb0555add1c7925e0e48991e7e0964
-rw-r--r--  includes/jobqueue/jobs/HTMLCacheUpdateJob.php | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/includes/jobqueue/jobs/HTMLCacheUpdateJob.php b/includes/jobqueue/jobs/HTMLCacheUpdateJob.php
index 7d0ada5e9744..73fa947790bb 100644
--- a/includes/jobqueue/jobs/HTMLCacheUpdateJob.php
+++ b/includes/jobqueue/jobs/HTMLCacheUpdateJob.php
@@ -135,9 +135,8 @@ class HTMLCacheUpdateJob extends Job {
$ticket = $factory->getEmptyTransactionTicket( __METHOD__ );
// Update page_touched (skipping pages already touched since the root job).
// Check $wgUpdateRowsPerQuery for sanity; batch jobs are sized by that already.
- foreach ( array_chunk( $pageIds, $wgUpdateRowsPerQuery ) as $batch ) {
- $factory->commitAndWaitForReplication( __METHOD__, $ticket );
-
+ $batches = array_chunk( $pageIds, $wgUpdateRowsPerQuery );
+ foreach ( $batches as $batch ) {
$dbw->update( 'page',
[ 'page_touched' => $dbw->timestamp( $touchTimestamp ) ],
[ 'page_id' => $batch,
@@ -146,6 +145,9 @@ class HTMLCacheUpdateJob extends Job {
],
__METHOD__
);
+ if ( count( $batches ) > 1 ) {
+ $factory->commitAndWaitForReplication( __METHOD__, $ticket );
+ }
}
// Get the list of affected pages (races only mean something else did the purge)
$titleArray = TitleArray::newFromResult( $dbw->select(
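
For reference, below is a minimal sketch of how the batch loop reads with this patch applied, reconstructed only from the hunks above. The variables ($factory, $dbw, $ticket, $pageIds, $touchTimestamp, $wgUpdateRowsPerQuery) are defined earlier in the same method and are assumed here, and the extra page_touched condition in the WHERE clause is elided just as it is in the diff context. The effect of the change is that commitAndWaitForReplication() no longer runs unconditionally at the top of every iteration; it now runs after each batch's update, and only when the job actually spans more than one batch, so the common single-batch job no longer waits on replication at all.

	// Sketch of the post-patch loop; surrounding method setup is assumed, not shown.
	$ticket = $factory->getEmptyTransactionTicket( __METHOD__ );
	$batches = array_chunk( $pageIds, $wgUpdateRowsPerQuery );
	foreach ( $batches as $batch ) {
		$dbw->update( 'page',
			[ 'page_touched' => $dbw->timestamp( $touchTimestamp ) ],
			[
				'page_id' => $batch,
				// additional page_touched condition elided in the hunk above
			],
			__METHOD__
		);
		// Only wait for replication between batches; a single-batch job
		// (the common case) skips the wait entirely.
		if ( count( $batches ) > 1 ) {
			$factory->commitAndWaitForReplication( __METHOD__, $ticket );
		}
	}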