Commit 0489123

Grab heavyweight tuple lock only before sleeping
We were trying to acquire the lock even when we were subsequently not
sleeping in some other transaction, which opens us up unnecessarily to
deadlocks.  In particular, this is troublesome if an update tries to
lock an updated version of a tuple and finds itself doing EvalPlanQual
update chain walking; more than two sessions doing this concurrently
will find themselves sleeping on each other because the HW tuple lock
acquisition in heap_lock_tuple called from EvalPlanQualFetch races with
the same tuple lock being acquired in heap_update -- one of these
sessions sleeps on the other one to finish while holding the tuple lock,
and the other one sleeps on the tuple lock.

Per trouble report from Andrew Sackville-West in
http://www.postgresql.org/message-id/20140731233051.GN17765@andrew-ThinkPad-X230

His scenario can be simplified down to a relatively simple
isolationtester spec file which I don't include in this commit; the
reason is that the current isolationtester is not able to deal with more
than one blocked session concurrently and it blocks instead of raising
the expected deadlock.  In the future, if we improve isolationtester, it
would be good to include the spec file in the isolation schedule.  I
posted it in
http://www.postgresql.org/message-id/20141212205254.GC1768@alvh.no-ip.org

Hat tip to Mark Kirkwood, who helped diagnose the trouble.
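For illustration only, a spec of the shape described above might look like the following. This is an editor's sketch in the isolationtester format, not the spec file posted to the list; the table, column, and step names are invented. Three sessions update the same row, and once s1 commits, s2 and s3 both go through EvalPlanQual update chain walking on the new tuple version, the interleaving that provoked the undetected deadlock:

setup
{
  CREATE TABLE accounts (id int PRIMARY KEY, balance int);
  INSERT INTO accounts VALUES (1, 0);
}

teardown
{
  DROP TABLE accounts;
}

session "s1"
step "s1b" { BEGIN; }
step "s1u" { UPDATE accounts SET balance = balance + 1 WHERE id = 1; }
step "s1c" { COMMIT; }

session "s2"
step "s2b" { BEGIN; }
step "s2u" { UPDATE accounts SET balance = balance + 10 WHERE id = 1; }
step "s2c" { COMMIT; }

session "s3"
step "s3b" { BEGIN; }
step "s3u" { UPDATE accounts SET balance = balance + 100 WHERE id = 1; }
step "s3c" { COMMIT; }

permutation "s1b" "s2b" "s3b" "s1u" "s2u" "s3u" "s1c" "s2c" "s3c"

As the message says, the isolationtester of this era cannot drive more than one blocked session at a time, so it stalls on such a permutation rather than reporting the deadlock, which is why no spec file accompanies the commit.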
1 parent 5bd5968 · commit 0489123

File tree

1 file changed: +144, -97 lines


src/backend/access/heap/heapam.c

Lines changed: 144 additions & 97 deletions
@@ -91,6 +91,8 @@ static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
 				  Bitmapset *hot_attrs, Bitmapset *key_attrs,
 				  bool *satisfies_hot, bool *satisfies_key,
 				  HeapTuple oldtup, HeapTuple newtup);
+static void heap_acquire_tuplock(Relation relation, ItemPointer tid,
+					 LockTupleMode mode, bool nowait, bool *have_tuple_lock);
 static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
 						  uint16 old_infomask2, TransactionId add_to_xmax,
 						  LockTupleMode mode, bool is_update,
@@ -103,6 +105,8 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
 					   uint16 *new_infomask2);
 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
 						uint16 t_infomask);
+static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
+						LockTupleMode lockmode);
 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
 				int *remaining, uint16 infomask);
 static bool ConditionalMultiXactIdWait(MultiXactId multi,
@@ -2629,11 +2633,8 @@ heap_delete(Relation relation, ItemPointer tid,
 		 * this arranges that we stay at the head of the line while rechecking
 		 * tuple state.
 		 */
-		if (!have_tuple_lock)
-		{
-			LockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
-			have_tuple_lock = true;
-		}
+		heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
+							 false, &have_tuple_lock);
 
 		/*
 		 * Sleep until concurrent transaction ends.  Note that we don't care
@@ -3113,21 +3114,6 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
-		/*
-		 * Acquire tuple lock to establish our priority for the tuple (see
-		 * heap_lock_tuple).  LockTuple will release us when we are
-		 * next-in-line for the tuple.
-		 *
-		 * If we are forced to "start over" below, we keep the tuple lock;
-		 * this arranges that we stay at the head of the line while rechecking
-		 * tuple state.
-		 */
-		if (!have_tuple_lock)
-		{
-			LockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
-			have_tuple_lock = true;
-		}
-
 		/*
 		 * Now we have to do something about the existing locker.  If it's a
 		 * multi, sleep on it; we might be awakened before it is completely
@@ -3138,12 +3124,30 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 		 * before actually going to sleep.  If the update doesn't conflict
 		 * with the locks, we just continue without sleeping (but making sure
 		 * it is preserved).
+		 *
+		 * Before sleeping, we need to acquire tuple lock to establish our
+		 * priority for the tuple (see heap_lock_tuple).  LockTuple will
+		 * release us when we are next-in-line for the tuple.  Note we must not
+		 * acquire the tuple lock until we're sure we're going to sleep;
+		 * otherwise we're open for race conditions with other transactions
+		 * holding the tuple lock which sleep on us.
+		 *
+		 * If we are forced to "start over" below, we keep the tuple lock;
+		 * this arranges that we stay at the head of the line while rechecking
+		 * tuple state.
 		 */
 		if (infomask & HEAP_XMAX_IS_MULTI)
 		{
 			TransactionId update_xact;
 			int			remain;
 
+			/* acquire tuple lock, if necessary */
+			if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask, *lockmode))
+			{
+				heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+									 false, &have_tuple_lock);
+			}
+
 			/* wait for multixact */
 			MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain,
 							infomask);
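At the SQL level, the conflict check added in this hunk means a non-conflicting update neither sleeps nor takes the heavyweight tuple lock. A hedged illustration (the table name is invented; the behavior follows from the row-level lock conflict table):

-- Sessions A and B each take only a KEY SHARE lock on the row,
-- so its xmax becomes a multixact with two locker-only members:
BEGIN;  -- session A
SELECT * FROM accounts WHERE id = 1 FOR KEY SHARE;

BEGIN;  -- session B
SELECT * FROM accounts WHERE id = 1 FOR KEY SHARE;

-- Session C updates a non-key column, which requests
-- LockTupleNoKeyExclusive.  That mode does not conflict with KEY SHARE,
-- so DoesMultiXactIdConflict() returns false, the multixact wait should
-- find no conflicting member to sleep on, and with this patch the
-- heavyweight tuple lock is never taken:
BEGIN;  -- session C
UPDATE accounts SET balance = balance + 1 WHERE id = 1;  -- does not block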
@@ -3219,7 +3223,12 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 		}
 		else
 		{
-			/* wait for regular transaction to end */
+			/*
+			 * Wait for regular transaction to end; but first, acquire
+			 * tuple lock.
+			 */
+			heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+								 false, &have_tuple_lock);
 			XactLockTableWait(xwait);
 			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
@@ -3888,7 +3897,6 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
 	return (MultiXactStatus) retval;
 }
 
-
 /*
  *	heap_lock_tuple - lock a tuple in shared or exclusive mode
 *
@@ -4016,30 +4024,6 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 			pfree(members);
 	}
 
-	/*
-	 * Acquire tuple lock to establish our priority for the tuple.
-	 * LockTuple will release us when we are next-in-line for the tuple.
-	 * We must do this even if we are share-locking.
-	 *
-	 * If we are forced to "start over" below, we keep the tuple lock;
-	 * this arranges that we stay at the head of the line while rechecking
-	 * tuple state.
-	 */
-	if (!have_tuple_lock)
-	{
-		if (nowait)
-		{
-			if (!ConditionalLockTupleTuplock(relation, tid, mode))
-				ereport(ERROR,
-						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-						 errmsg("could not obtain lock on row in relation \"%s\"",
-								RelationGetRelationName(relation))));
-		}
-		else
-			LockTupleTuplock(relation, tid, mode);
-		have_tuple_lock = true;
-	}
-
 	/*
 	 * Initially assume that we will have to wait for the locking
 	 * transaction(s) to finish.  We check various cases below in which
@@ -4146,64 +4130,26 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 	{
 		/*
 		 * If we're requesting NoKeyExclusive, we might also be able to
-		 * avoid sleeping; just ensure that there's no other lock type
-		 * than KeyShare.  Note that this is a bit more involved than just
-		 * checking hint bits -- we need to expand the multixact to figure
-		 * out lock modes for each one (unless there was only one such
-		 * locker).
+		 * avoid sleeping; just ensure that there's no conflicting lock
+		 * already acquired.
 		 */
 		if (infomask & HEAP_XMAX_IS_MULTI)
 		{
-			int			nmembers;
-			MultiXactMember *members;
-
-			/*
-			 * We don't need to allow old multixacts here; if that had
-			 * been the case, HeapTupleSatisfiesUpdate would have returned
-			 * MayBeUpdated and we wouldn't be here.
-			 */
-			nmembers = GetMultiXactIdMembers(xwait, &members, false);
-
-			if (nmembers <= 0)
+			if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
+										 mode))
 			{
 				/*
-				 * No need to keep the previous xmax here. This is
-				 * unlikely to happen.
+				 * No conflict, but if the xmax changed under us in the
+				 * meantime, start over.
 				 */
-				require_sleep = false;
-			}
-			else
-			{
-				int			i;
-				bool		allowed = true;
-
-				for (i = 0; i < nmembers; i++)
-				{
-					if (members[i].status != MultiXactStatusForKeyShare)
-					{
-						allowed = false;
-						break;
-					}
-				}
-				if (allowed)
-				{
-					/*
-					 * if the xmax changed under us in the meantime, start
-					 * over.
-					 */
-					LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
-					if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
-						!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
-											 xwait))
-					{
-						pfree(members);
-						goto l3;
-					}
-					/* otherwise, we're good */
-					require_sleep = false;
-				}
+				LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+				if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
+					!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+										 xwait))
+					goto l3;
 
-				pfree(members);
+				/* otherwise, we're good */
+				require_sleep = false;
 			}
 		}
 		else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
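The same avoid-sleeping case in heap_lock_tuple is reachable with an explicit row lock. Continuing the invented example above: FOR NO KEY UPDATE maps to LockTupleNoKeyExclusive, which does not conflict with the KEY SHARE members, so the request below should return at once without acquiring the heavyweight tuple lock; had a conflicting locker (say FOR SHARE) been present, heap_lock_tuple would now take the tuple lock only on its way to sleep:

BEGIN;  -- session C
SELECT * FROM accounts WHERE id = 1 FOR NO KEY UPDATE;  -- does not block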
@@ -4229,6 +4175,18 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 
 	if (require_sleep)
 	{
+		/*
+		 * Acquire tuple lock to establish our priority for the tuple.
+		 * LockTuple will release us when we are next-in-line for the tuple.
+		 * We must do this even if we are share-locking.
+		 *
+		 * If we are forced to "start over" below, we keep the tuple lock;
+		 * this arranges that we stay at the head of the line while rechecking
+		 * tuple state.
+		 */
+		heap_acquire_tuplock(relation, tid, mode, nowait,
+							 &have_tuple_lock);
+
 		if (infomask & HEAP_XMAX_IS_MULTI)
 		{
 			MultiXactStatus status = get_mxact_status_for_lock(mode, false);
@@ -4522,6 +4480,32 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 	return HeapTupleMayBeUpdated;
 }
 
+/*
+ * Acquire heavyweight lock on the given tuple, in preparation for acquiring
+ * its normal, Xmax-based tuple lock.
+ *
+ * have_tuple_lock is an input and output parameter: on input, it indicates
+ * whether the lock has previously been acquired (and this function does
+ * nothing in that case).  If this function returns success, have_tuple_lock
+ * has been flipped to true.
+ */
+static void
+heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
+					 bool nowait, bool *have_tuple_lock)
+{
+	if (*have_tuple_lock)
+		return;
+
+	if (!nowait)
+		LockTupleTuplock(relation, tid, mode);
+	else if (!ConditionalLockTupleTuplock(relation, tid, mode))
+		ereport(ERROR,
+				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+				 errmsg("could not obtain lock on row in relation \"%s\"",
+						RelationGetRelationName(relation))));
+
+	*have_tuple_lock = true;
+}
 
 /*
 * Given an original set of Xmax and infomask, and a transaction (identified by
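The nowait branch of the new helper is the one reached from NOWAIT row locking at the SQL level, and the error text matches what the pre-patch inline code raised. A sketch with the same invented table; note that after this commit the error can only occur once the requester has decided it must sleep:

BEGIN;  -- session A
SELECT * FROM accounts WHERE id = 1 FOR UPDATE;

BEGIN;  -- session B
SELECT * FROM accounts WHERE id = 1 FOR UPDATE NOWAIT;
-- ERROR:  could not obtain lock on row in relation "accounts"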
@@ -5926,6 +5910,69 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
 									 tuple->t_infomask);
 }
 
+/*
+ * Does the given multixact conflict with the current transaction grabbing a
+ * tuple lock of the given strength?
+ *
+ * The passed infomask pairs up with the given multixact in the tuple header.
+ */
+static bool
+DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
+						LockTupleMode lockmode)
+{
+	bool		allow_old;
+	int			nmembers;
+	MultiXactMember *members;
+	bool		result = false;
+
+	allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
+	nmembers = GetMultiXactIdMembers(multi, &members, allow_old);
+	if (nmembers >= 0)
+	{
+		int			i;
+
+		for (i = 0; i < nmembers; i++)
+		{
+			TransactionId memxid;
+			LockTupleMode memlockmode;
+
+			memlockmode = LOCKMODE_from_mxstatus(members[i].status);
+			/* ignore members that don't conflict with the lock we want */
+			if (!DoLockModesConflict(memlockmode, lockmode))
+				continue;
+
+			/* ignore members from current xact */
+			memxid = members[i].xid;
+			if (TransactionIdIsCurrentTransactionId(memxid))
+				continue;
+
+			if (ISUPDATE_from_mxstatus(members[i].status))
+			{
+				/* ignore aborted updaters */
+				if (TransactionIdDidAbort(memxid))
+					continue;
+			}
+			else
+			{
+				/* ignore lockers-only that are no longer in progress */
+				if (!TransactionIdIsInProgress(memxid))
+					continue;
+			}
+
+			/*
+			 * Whatever remains are either live lockers that conflict with
+			 * our wanted lock, or updaters that are not aborted.  Those
+			 * conflict with what we want, so return true.
+			 */
+			result = true;
+			break;
+		}
+		pfree(members);
+	}
+
+	return result;
+}
+
 /*
  * Do_MultiXactIdWait
  *		Actual implementation for the two functions below.
