
Commit 4b0f7d6

Revert "For inplace update, send nontransactional invalidations."
This reverts commit 95c5acb (v17) and counterparts in each other non-master branch.  If released, that commit would have caused a worst-in-years minor release regression, via undetected LWLock self-deadlock.  This commit and its self-deadlock fix warrant more bake time in the master branch.

Reported by Alexander Lakhin.

Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
1 parent 5e503e1 commit 4b0f7d6

11 files changed: +117 / -289 lines changed


src/backend/access/heap/heapam.c

Lines changed: 7 additions & 36 deletions

@@ -6146,24 +6146,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
 		elog(ERROR, "wrong tuple length");
 
-	/*
-	 * Construct shared cache inval if necessary.  Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys.  But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 */
-	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
-	/*
-	 * Unlink relcache init files as needed.  If unlinking, acquire
-	 * RelCacheInitLock until after associated invalidations.  By doing this
-	 * in advance, if we checkpoint and then crash between inplace
-	 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
-	 * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
-	 * neglect to PANIC on EIO.
-	 */
-	PreInplace_Inval();
-
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
 
@@ -6207,28 +6189,17 @@ heap_inplace_update_and_unlock(Relation relation,
 		PageSetLSN(BufferGetPage(buffer), recptr);
 	}
 
-	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
-	/*
-	 * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
-	 * do this before UnlockTuple().
-	 *
-	 * If we're mutating a tuple visible only to this transaction, there's an
-	 * equivalent transactional inval from the action that created the tuple,
-	 * and this inval is superfluous.
-	 */
-	AtInplace_Inval();
-
 	END_CRIT_SECTION();
-	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
 
-	AcceptInvalidationMessages();	/* local processing of just-sent inval */
+	heap_inplace_unlock(relation, oldtup, buffer);
 
 	/*
-	 * Queue a transactional inval.  The immediate invalidation we just sent
-	 * is the only one known to be necessary.  To reduce risk from the
-	 * transition to immediate invalidation, continue sending a transactional
-	 * invalidation like we've long done.  Third-party code might rely on it.
+	 * Send out shared cache inval if necessary.  Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys.  But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
+	 *
+	 * XXX ROLLBACK discards the invalidation.  See test inplace-inval.spec.
 	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);
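
Taken together, the context lines above give the post-revert shape of the tail of heap_inplace_update_and_unlock().  The following is a condensed sketch for orientation only, not the verbatim function body; the in-place overwrite and WAL-logging steps inside the critical section are summarized in a comment, and only the calls visible in the hunks above are quoted directly.

	/* NO EREPORT(ERROR) from here till changes are logged */
	START_CRIT_SECTION();

	/*
	 * ... overwrite the catalog tuple in place, MarkBufferDirty(), and, for
	 * a WAL-logged relation, XLogInsert() an XLOG_HEAP_INPLACE record and
	 * PageSetLSN() ...
	 */

	END_CRIT_SECTION();

	/* drop the buffer and tuple locks; no invalidation has been sent yet */
	heap_inplace_unlock(relation, oldtup, buffer);

	/* queue the shared cache inval transactionally, as before the change */
	if (!IsBootstrapProcessingMode())
		CacheInvalidateHeapTuple(relation, tuple, NULL);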

src/backend/access/transam/xact.c

Lines changed: 8 additions & 18 deletions

@@ -1247,24 +1247,14 @@ RecordTransactionCommit(void)
 
 		/*
 		 * Transactions without an assigned xid can contain invalidation
-		 * messages.  While inplace updates do this, this is not known to be
-		 * necessary; see comment at inplace CacheInvalidateHeapTuple().
-		 * Extensions might still rely on this capability, and standbys may
-		 * need to process those invals.  We can't emit a commit record
-		 * without an xid, and we don't want to force assigning an xid,
-		 * because that'd be problematic for e.g. vacuum.  Hence we emit a
-		 * bespoke record for the invalidations.  We don't want to use that in
-		 * case a commit record is emitted, so they happen synchronously with
-		 * commits (besides not wanting to emit more WAL records).
-		 *
-		 * XXX Every known use of this capability is a defect.  Since an XID
-		 * isn't controlling visibility of the change that prompted invals,
-		 * other sessions need the inval even if this transactions aborts.
-		 *
-		 * ON COMMIT DELETE ROWS does a nontransactional index_build(), which
-		 * queues a relcache inval, including in transactions without an xid
-		 * that had read the (empty) table.  Standbys don't need any ON COMMIT
-		 * DELETE ROWS invals, but we've not done the work to withhold them.
+		 * messages (e.g. explicit relcache invalidations or catcache
+		 * invalidations for inplace updates); standbys need to process those.
+		 * We can't emit a commit record without an xid, and we don't want to
+		 * force assigning an xid, because that'd be problematic for e.g.
+		 * vacuum.  Hence we emit a bespoke record for the invalidations.  We
+		 * don't want to use that in case a commit record is emitted, so they
+		 * happen synchronously with commits (besides not wanting to emit more
+		 * WAL records).
 		 */
 		if (nmsgs != 0)
 		{
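
The rewritten comment belongs to RecordTransactionCommit(), in the path for transactions that never acquired an xid.  A hedged sketch of the branch that follows it, using the local names from that function and eliding the surrounding code:

	/*
	 * No xid, so no commit record is possible; emit a standalone WAL record
	 * carrying just the invalidation messages so standbys can process them.
	 */
	if (nmsgs != 0)
	{
		LogStandbyInvalidations(nmsgs, invalMessages,
								RelcacheInitFileInval);
		wrote_xlog = true;		/* not strictly necessary */
	}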

src/backend/catalog/index.c

Lines changed: 2 additions & 9 deletions

@@ -2881,19 +2881,12 @@ index_update_stats(Relation rel,
 	if (dirty)
 	{
 		systable_inplace_update_finish(state, tuple);
-		/* the above sends transactional and immediate cache inval messages */
+		/* the above sends a cache inval message */
 	}
 	else
 	{
 		systable_inplace_update_cancel(state);
-
-		/*
-		 * While we didn't change relhasindex, CREATE INDEX needs a
-		 * transactional inval for when the new index's catalog rows become
-		 * visible.  Other CREATE INDEX and REINDEX code happens to also queue
-		 * this inval, but keep this in case rare callers rely on this part of
-		 * our API contract.
-		 */
+		/* no need to change tuple, but force relcache inval anyway */
 		CacheInvalidateRelcacheByTuple(tuple);
 	}
 
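
For orientation, this hunk is the tail of the systable_inplace_update_*() protocol driven by index_update_stats().  A paraphrased sketch of the caller pattern follows; the begin() argument list is an approximation for illustration, not a quotation from the tree.

	/* lock and fetch the pg_class tuple to be updated in place */
	systable_inplace_update_begin(pg_class, ClassOidIndexId, true, NULL,
								  1, key, &tuple, &state);

	/* ... inspect the tuple; set dirty if any stats field changed ... */

	if (dirty)
		systable_inplace_update_finish(state, tuple);	/* write + inval */
	else
	{
		systable_inplace_update_cancel(state);
		CacheInvalidateRelcacheByTuple(tuple);	/* still force relcache inval */
	}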

src/backend/replication/logical/decode.c

Lines changed: 15 additions & 11 deletions

@@ -463,19 +463,23 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 
 			/*
 			 * Inplace updates are only ever performed on catalog tuples and
-			 * can, per definition, not change tuple visibility.  Inplace
-			 * updates don't affect storage or interpretation of table rows,
-			 * so they don't affect logicalrep_write_tuple() outcomes.  Hence,
-			 * we don't process invalidations from the original operation.  If
-			 * inplace updates did affect those things, invalidations wouldn't
-			 * make it work, since there are no snapshot-specific versions of
-			 * inplace-updated values.  Since we also don't decode catalog
-			 * tuples, we're not interested in the record's contents.
+			 * can, per definition, not change tuple visibility.  Since we
+			 * don't decode catalog tuples, we're not interested in the
+			 * record's contents.
 			 *
-			 * WAL contains likely-unnecessary commit-time invals from the
-			 * CacheInvalidateHeapTuple() call in heap_inplace_update().
-			 * Excess invalidation is safe.
+			 * In-place updates can be used either by XID-bearing transactions
+			 * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
+			 * transactions (e.g. VACUUM).  In the former case, the commit
+			 * record will include cache invalidations, so we mark the
+			 * transaction as catalog modifying here.  Currently that's
+			 * redundant because the commit will do that as well, but once we
+			 * support decoding in-progress relations, this will be important.
 			 */
+			if (!TransactionIdIsValid(xid))
+				break;
+
+			SnapBuildProcessChange(builder, xid, buf->origptr);
+			ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
 			break;
 
 		case XLOG_HEAP_CONFIRM:

src/backend/utils/cache/catcache.c

Lines changed: 3 additions & 4 deletions

@@ -2128,8 +2128,7 @@ void
 PrepareToInvalidateCacheTuple(Relation relation,
 							  HeapTuple tuple,
 							  HeapTuple newtuple,
-							  void (*function) (int, uint32, Oid, void *),
-							  void *context)
+							  void (*function) (int, uint32, Oid))
 {
 	slist_iter	iter;
 	Oid			reloid;
@@ -2170,7 +2169,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
 		hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
 		dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
 
-		(*function) (ccp->id, hashvalue, dbid, context);
+		(*function) (ccp->id, hashvalue, dbid);
 
 		if (newtuple)
 		{
@@ -2179,7 +2178,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
 			newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
 
 			if (newhashvalue != hashvalue)
-				(*function) (ccp->id, newhashvalue, dbid, context);
+				(*function) (ccp->id, newhashvalue, dbid);
 		}
 	}
 }
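
The restored three-argument callback is the shape inval.c supplies when CacheInvalidateHeapTuple() reaches PrepareToInvalidateCacheTuple().  A rough sketch of that pre-existing registration function (body abridged):

	static void
	RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId)
	{
		/* queue the catcache inval with the current command's messages */
		AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
									   cacheId, hashValue, dbId);
	}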
