Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 28dc2c2

Browse files
committed
Revert "Avoid spurious deadlocks when upgrading a tuple lock"
This reverts commits 3da73d6 and de87a08. This code has some tricky corner cases that I'm not sure are correct and not properly tested anyway, so I'm reverting the whole thing for next week's releases (reintroducing the deadlock bug that we set to fix). I'll try again afterwards. Discussion: https://postgr.es/m/E1hbXKQ-0003g1-0C@gemulon.postgresql.org
1 parent f5ee6a7 commit 28dc2c2

File tree

5 files changed

+21
-280
lines changed

5 files changed

+21
-280
lines changed

‎src/backend/access/heap/README.tuplock

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -36,16 +36,6 @@ do LockTuple as well, if there is any conflict, to ensure that they don't
3636
starve out waiting exclusive-lockers. However, if there is not any active
3737
conflict for a tuple, we don't incur any extra overhead.
3838

39-
We make an exception to the above rule for those lockers that already hold
40-
some lock on a tuple and attempt to acquire a stronger one on it. In that
41-
case, we skip the LockTuple() call even when there are conflicts, provided
42-
that the target tuple is being locked, updated or deleted by multiple sessions
43-
concurrently. Failing to skip the lock would risk a deadlock, e.g., between a
44-
session that was first to record its weaker lock in the tuple header and would
45-
be waiting on the LockTuple() call to upgrade to the stronger lock level, and
46-
another session that has already done LockTuple() and is waiting for the first
47-
session transaction to release its tuple header-level lock.
48-
4939
We provide four levels of tuple locking strength: SELECT FOR UPDATE obtains an
5040
exclusive lock which prevents any kind of modification of the tuple. This is
5141
the lock level that is implicitly taken by DELETE operations, and also by

‎src/backend/access/heap/heapam.c

Lines changed: 21 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
120120
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
121121
uint16 t_infomask);
122122
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
123-
LockTupleMode lockmode, bool *current_is_member);
123+
LockTupleMode lockmode);
124124
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
125125
Relation rel, ItemPointer ctid, XLTW_Oper oper,
126126
int *remaining);
@@ -3161,20 +3161,15 @@ heap_delete(Relation relation, ItemPointer tid,
31613161
*/
31623162
if (infomask&HEAP_XMAX_IS_MULTI)
31633163
{
3164-
boolcurrent_is_member= false;
3165-
3164+
/* wait for multixact */
31663165
if (DoesMultiXactIdConflict((MultiXactId)xwait,infomask,
3167-
LockTupleExclusive,&current_is_member))
3166+
LockTupleExclusive))
31683167
{
31693168
LockBuffer(buffer,BUFFER_LOCK_UNLOCK);
31703169

3171-
/*
3172-
* Acquire the lock, if necessary (but skip it when we're
3173-
* requesting a lock and already have one; avoids deadlock).
3174-
*/
3175-
if (!current_is_member)
3176-
heap_acquire_tuplock(relation,&(tp.t_self),LockTupleExclusive,
3177-
LockWaitBlock,&have_tuple_lock);
3170+
/* acquire tuple lock, if necessary */
3171+
heap_acquire_tuplock(relation,&(tp.t_self),LockTupleExclusive,
3172+
LockWaitBlock,&have_tuple_lock);
31783173

31793174
/* wait for multixact */
31803175
MultiXactIdWait((MultiXactId)xwait,MultiXactStatusUpdate,infomask,
@@ -3773,20 +3768,15 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
37733768
{
37743769
TransactionIdupdate_xact;
37753770
intremain;
3776-
boolcurrent_is_member= false;
37773771

37783772
if (DoesMultiXactIdConflict((MultiXactId)xwait,infomask,
3779-
*lockmode,&current_is_member))
3773+
*lockmode))
37803774
{
37813775
LockBuffer(buffer,BUFFER_LOCK_UNLOCK);
37823776

3783-
/*
3784-
* Acquire the lock, if necessary (but skip it when we're
3785-
* requesting a lock and already have one; avoids deadlock).
3786-
*/
3787-
if (!current_is_member)
3788-
heap_acquire_tuplock(relation,&(oldtup.t_self),*lockmode,
3789-
LockWaitBlock,&have_tuple_lock);
3777+
/* acquire tuple lock, if necessary */
3778+
heap_acquire_tuplock(relation,&(oldtup.t_self),*lockmode,
3779+
LockWaitBlock,&have_tuple_lock);
37903780

37913781
/* wait for multixact */
37923782
MultiXactIdWait((MultiXactId)xwait,mxact_status,infomask,
@@ -4756,7 +4746,6 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
47564746
uint16infomask;
47574747
uint16infomask2;
47584748
boolrequire_sleep;
4759-
boolskip_tuple_lock= false;
47604749
ItemPointerDatat_ctid;
47614750

47624751
/* must copy state data before unlocking buffer */
@@ -4810,21 +4799,6 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
48104799
result=HeapTupleMayBeUpdated;
48114800
gotoout_unlocked;
48124801
}
4813-
else
4814-
{
4815-
/*
4816-
* Disable acquisition of the heavyweight tuple lock.
4817-
* Otherwise, when promoting a weaker lock, we might
4818-
* deadlock with another locker that has acquired the
4819-
* heavyweight tuple lock and is waiting for our
4820-
* transaction to finish.
4821-
*
4822-
* Note that in this case we still need to wait for
4823-
* the multixact if required, to avoid acquiring
4824-
* conflicting locks.
4825-
*/
4826-
skip_tuple_lock= true;
4827-
}
48284802
}
48294803

48304804
if (members)
@@ -4979,7 +4953,7 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
49794953
if (infomask&HEAP_XMAX_IS_MULTI)
49804954
{
49814955
if (!DoesMultiXactIdConflict((MultiXactId)xwait,infomask,
4982-
mode,NULL))
4956+
mode))
49834957
{
49844958
/*
49854959
* No conflict, but if the xmax changed under us in the
@@ -5056,15 +5030,13 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
50565030
/*
50575031
* Acquire tuple lock to establish our priority for the tuple, or
50585032
* die trying. LockTuple will release us when we are next-in-line
5059-
* for the tuple. We must do this even if we are share-locking,
5060-
* but not if we already have a weaker lock on the tuple.
5033+
* for the tuple. We must do this even if we are share-locking.
50615034
*
50625035
* If we are forced to "start over" below, we keep the tuple lock;
50635036
* this arranges that we stay at the head of the line while
50645037
* rechecking tuple state.
50655038
*/
5066-
if (!skip_tuple_lock&&
5067-
!heap_acquire_tuplock(relation,tid,mode,wait_policy,
5039+
if (!heap_acquire_tuplock(relation,tid,mode,wait_policy,
50685040
&have_tuple_lock))
50695041
{
50705042
/*
@@ -7242,13 +7214,10 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
72427214
* tuple lock of the given strength?
72437215
*
72447216
* The passed infomask pairs up with the given multixact in the tuple header.
7245-
*
7246-
* If current_is_member is not NULL, it is set to 'true' if the current
7247-
* transaction is a member of the given multixact.
72487217
*/
72497218
static bool
72507219
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
7251-
LockTupleMode lockmode, bool *current_is_member)
7220+
LockTupleMode lockmode)
72527221
{
72537222
intnmembers;
72547223
MultiXactMember*members;
@@ -7269,26 +7238,17 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
72697238
TransactionIdmemxid;
72707239
LOCKMODEmemlockmode;
72717240

7272-
if (result&& (current_is_member==NULL||*current_is_member))
7273-
break;
7274-
72757241
memlockmode=LOCKMODE_from_mxstatus(members[i].status);
72767242

7277-
/* ignore members from current xact (but track their presence) */
7278-
memxid=members[i].xid;
7279-
if (TransactionIdIsCurrentTransactionId(memxid))
7280-
{
7281-
if (current_is_member!=NULL)
7282-
*current_is_member= true;
7283-
continue;
7284-
}
7285-
elseif (result)
7286-
continue;
7287-
72887243
/* ignore members that don't conflict with the lock we want */
72897244
if (!DoLockModesConflict(memlockmode,wanted))
72907245
continue;
72917246

7247+
/* ignore members from current xact */
7248+
memxid=members[i].xid;
7249+
if (TransactionIdIsCurrentTransactionId(memxid))
7250+
continue;
7251+
72927252
if (ISUPDATE_from_mxstatus(members[i].status))
72937253
{
72947254
/* ignore aborted updaters */
@@ -7305,11 +7265,10 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
73057265
/*
73067266
* Whatever remains are either live lockers that conflict with our
73077267
* wanted lock, and updaters that are not aborted. Those conflict
7308-
* with what we want. Set up to return true, but keep going to
7309-
* look for the current transaction among the multixact members,
7310-
* if needed.
7268+
* with what we want, so return true.
73117269
*/
73127270
result= true;
7271+
break;
73137272
}
73147273
pfree(members);
73157274
}

‎src/test/isolation/expected/tuplelock-upgrade-no-deadlock.out

Lines changed: 0 additions & 150 deletions
This file was deleted.

‎src/test/isolation/isolation_schedule

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@ test: update-locked-tuple
4646
test: propagate-lock-delete
4747
test: tuplelock-conflict
4848
test: tuplelock-update
49-
test: tuplelock-upgrade-no-deadlock
5049
test: freeze-the-dead
5150
test: nowait
5251
test: nowait-2

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp