@@ -91,6 +91,8 @@ static void HeapSatisfiesHOTandKeyUpdate(Relation relation,
 						 Bitmapset *hot_attrs, Bitmapset *key_attrs,
 						 bool *satisfies_hot, bool *satisfies_key,
 						 HeapTuple oldtup, HeapTuple newtup);
+static void heap_acquire_tuplock(Relation relation, ItemPointer tid,
+						 LockTupleMode mode, bool nowait, bool *have_tuple_lock);
 static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
 						 uint16 old_infomask2, TransactionId add_to_xmax,
 						 LockTupleMode mode, bool is_update,
@@ -103,6 +105,8 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
 						 uint16 *new_infomask2);
 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
 						 uint16 t_infomask);
+static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
+						 LockTupleMode lockmode);
 static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
 						 int *remaining, uint16 infomask);
 static bool ConditionalMultiXactIdWait(MultiXactId multi,
@@ -2629,11 +2633,8 @@ heap_delete(Relation relation, ItemPointer tid,
 	 * this arranges that we stay at the head of the line while rechecking
 	 * tuple state.
 	 */
-	if (!have_tuple_lock)
-	{
-		LockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
-		have_tuple_lock = true;
-	}
+	heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
+						 false, &have_tuple_lock);
 
 	/*
 	 * Sleep until concurrent transaction ends.  Note that we don't care
@@ -3113,21 +3114,6 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 
 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 
-	/*
-	 * Acquire tuple lock to establish our priority for the tuple (see
-	 * heap_lock_tuple).  LockTuple will release us when we are
-	 * next-in-line for the tuple.
-	 *
-	 * If we are forced to "start over" below, we keep the tuple lock;
-	 * this arranges that we stay at the head of the line while rechecking
-	 * tuple state.
-	 */
-	if (!have_tuple_lock)
-	{
-		LockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
-		have_tuple_lock = true;
-	}
-
 	/*
 	 * Now we have to do something about the existing locker.  If it's a
 	 * multi, sleep on it; we might be awakened before it is completely
@@ -3138,12 +3124,30 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	 * before actually going to sleep.  If the update doesn't conflict
 	 * with the locks, we just continue without sleeping (but making sure
 	 * it is preserved).
+	 *
+	 * Before sleeping, we need to acquire tuple lock to establish our
+	 * priority for the tuple (see heap_lock_tuple).  LockTuple will
+	 * release us when we are next-in-line for the tuple.  Note we must not
+	 * acquire the tuple lock until we're sure we're going to sleep;
+	 * otherwise we're open for race conditions with other transactions
+	 * holding the tuple lock which sleep on us.
+	 *
+	 * If we are forced to "start over" below, we keep the tuple lock;
+	 * this arranges that we stay at the head of the line while rechecking
+	 * tuple state.
 	 */
 	if (infomask & HEAP_XMAX_IS_MULTI)
 	{
 		TransactionId update_xact;
 		int			remain;
 
+		/* acquire tuple lock, if necessary */
+		if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask, *lockmode))
+		{
+			heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+								 false, &have_tuple_lock);
+		}
+
 		/* wait for multixact */
 		MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain,
 						infomask);
@@ -3219,7 +3223,12 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 		}
 		else
 		{
-			/* wait for regular transaction to end */
+			/*
+			 * Wait for regular transaction to end; but first, acquire
+			 * tuple lock.
+			 */
+			heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
+								 false, &have_tuple_lock);
 			XactLockTableWait(xwait);
 			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
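
Taken together, the three heap_update hunks above move the heavyweight tuple lock from an unconditional acquisition before the locker check to the branches that are actually about to wait. The following is a condensed sketch of the resulting flow, reusing the patch's own identifiers; the buffer re-locking and xmax recheck code is elided, so this is a fragment for illustration only, not compilable on its own:

	if (infomask & HEAP_XMAX_IS_MULTI)
	{
		/* queue up for the tuple only if some multixact member conflicts */
		if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask, *lockmode))
			heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
								 false, &have_tuple_lock);
		MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain, infomask);
		/* ... re-lock buffer and recheck tuple state ... */
	}
	else
	{
		/* single updater/locker: we are going to sleep, so queue up first */
		heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
							 false, &have_tuple_lock);
		XactLockTableWait(xwait);
		/* ... re-lock buffer and recheck tuple state ... */
	}
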
@@ -3888,7 +3897,6 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
 	return (MultiXactStatus) retval;
 }
 
-
 /*
  *	heap_lock_tuple - lock a tuple in shared or exclusive mode
  *
@@ -4016,30 +4024,6 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 		pfree(members);
 	}
 
-	/*
-	 * Acquire tuple lock to establish our priority for the tuple.
-	 * LockTuple will release us when we are next-in-line for the tuple.
-	 * We must do this even if we are share-locking.
-	 *
-	 * If we are forced to "start over" below, we keep the tuple lock;
-	 * this arranges that we stay at the head of the line while rechecking
-	 * tuple state.
-	 */
-	if (!have_tuple_lock)
-	{
-		if (nowait)
-		{
-			if (!ConditionalLockTupleTuplock(relation, tid, mode))
-				ereport(ERROR,
-						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-						 errmsg("could not obtain lock on row in relation \"%s\"",
-								RelationGetRelationName(relation))));
-		}
-		else
-			LockTupleTuplock(relation, tid, mode);
-		have_tuple_lock = true;
-	}
-
 	/*
 	 * Initially assume that we will have to wait for the locking
 	 * transaction(s) to finish.  We check various cases below in which
@@ -4146,64 +4130,26 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 		{
 			/*
 			 * If we're requesting NoKeyExclusive, we might also be able to
-			 * avoid sleeping; just ensure that there's no other lock type
-			 * than KeyShare.  Note that this is a bit more involved than just
-			 * checking hint bits -- we need to expand the multixact to figure
-			 * out lock modes for each one (unless there was only one such
-			 * locker).
+			 * avoid sleeping; just ensure that there is no conflicting lock
+			 * already acquired.
 			 */
 			if (infomask & HEAP_XMAX_IS_MULTI)
 			{
-				int			nmembers;
-				MultiXactMember *members;
-
-				/*
-				 * We don't need to allow old multixacts here; if that had
-				 * been the case, HeapTupleSatisfiesUpdate would have returned
-				 * MayBeUpdated and we wouldn't be here.
-				 */
-				nmembers = GetMultiXactIdMembers(xwait, &members, false);
-
-				if (nmembers <= 0)
+				if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
+											 mode))
 				{
 					/*
-					 * No need to keep the previous xmax here.  This is
-					 * unlikely to happen.
+					 * No conflict, but if the xmax changed under us in the
+					 * meantime, start over.
 					 */
-					require_sleep = false;
-				}
-				else
-				{
-					int			i;
-					bool		allowed = true;
-
-					for (i = 0; i < nmembers; i++)
-					{
-						if (members[i].status != MultiXactStatusForKeyShare)
-						{
-							allowed = false;
-							break;
-						}
-					}
-					if (allowed)
-					{
-						/*
-						 * if the xmax changed under us in the meantime, start
-						 * over.
-						 */
-						LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
-						if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
-							!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
-												 xwait))
-						{
-							pfree(members);
-							goto l3;
-						}
-						/* otherwise, we're good */
-						require_sleep = false;
-					}
+					LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+					if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
+						!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+											 xwait))
+						goto l3;
 
-					pfree(members);
+					/* otherwise, we're good */
+					require_sleep = false;
 				}
 			}
 			else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
@@ -4229,6 +4175,18 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 
 	if (require_sleep)
 	{
+		/*
+		 * Acquire tuple lock to establish our priority for the tuple.
+		 * LockTuple will release us when we are next-in-line for the tuple.
+		 * We must do this even if we are share-locking.
+		 *
+		 * If we are forced to "start over" below, we keep the tuple lock;
+		 * this arranges that we stay at the head of the line while rechecking
+		 * tuple state.
+		 */
+		heap_acquire_tuplock(relation, tid, mode, nowait,
+							 &have_tuple_lock);
+
 		if (infomask & HEAP_XMAX_IS_MULTI)
 		{
 			MultiXactStatus status = get_mxact_status_for_lock(mode, false);
@@ -4522,6 +4480,32 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 	return HeapTupleMayBeUpdated;
 }
 
+/*
+ * Acquire heavyweight lock on the given tuple, in preparation for acquiring
+ * its normal, Xmax-based tuple lock.
+ *
+ * have_tuple_lock is an input and output parameter: on input, it indicates
+ * whether the lock has previously been acquired (and this function does
+ * nothing in that case).  If this function returns success, have_tuple_lock
+ * has been flipped to true.
+ */
+static void
+heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
+					 bool nowait, bool *have_tuple_lock)
+{
+	if (*have_tuple_lock)
+		return;
+
+	if (!nowait)
+		LockTupleTuplock(relation, tid, mode);
+	else if (!ConditionalLockTupleTuplock(relation, tid, mode))
+		ereport(ERROR,
+				(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
+				 errmsg("could not obtain lock on row in relation \"%s\"",
+						RelationGetRelationName(relation))));
+
+	*have_tuple_lock = true;
+}
 
 /*
  * Given an original set of Xmax and infomask, and a transaction (identified by
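
For reference, the new helper is called in two shapes in the hunks above: heap_delete and heap_update always block, passing nowait = false, while heap_lock_tuple forwards its caller's nowait flag, so within this patch only that path can raise the "could not obtain lock on row" error. A minimal fragment showing both call shapes, copied from the call sites above (not compilable on its own):

	/* blocking caller, e.g. heap_delete(): sleep until the tuple lock is granted */
	heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
						 false, &have_tuple_lock);

	/* heap_lock_tuple(): honor the caller's nowait request */
	heap_acquire_tuplock(relation, tid, mode, nowait, &have_tuple_lock);
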
@@ -5926,6 +5910,69 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
 									 tuple->t_infomask);
 }
 
+/*
+ * Does the given multixact conflict with the current transaction grabbing a
+ * tuple lock of the given strength?
+ *
+ * The passed infomask pairs up with the given multixact in the tuple header.
+ */
+static bool
+DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
+						LockTupleMode lockmode)
+{
+	bool		allow_old;
+	int			nmembers;
+	MultiXactMember *members;
+	bool		result = false;
+
+	allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
+	nmembers = GetMultiXactIdMembers(multi, &members, allow_old);
+	if (nmembers >= 0)
+	{
+		int			i;
+
+		for (i = 0; i < nmembers; i++)
+		{
+			TransactionId memxid;
+			LOCKMODE	memlockmode;
+
+			memlockmode = LOCKMODE_from_mxstatus(members[i].status);
+			/* ignore members that don't conflict with the lock we want */
+			if (!DoLockModesConflict(memlockmode, tupleLockExtraInfo[lockmode].hwlock))
+				continue;
+
+			/* ignore members from current xact */
+			memxid = members[i].xid;
+			if (TransactionIdIsCurrentTransactionId(memxid))
+				continue;
+
+			if (ISUPDATE_from_mxstatus(members[i].status))
+			{
+				/* ignore aborted updaters */
+				if (TransactionIdDidAbort(memxid))
+					continue;
+			}
+			else
+			{
+				/* ignore lockers-only that are no longer in progress */
+				if (!TransactionIdIsInProgress(memxid))
+					continue;
+			}
+
+			/*
+			 * Whatever remains is either a live locker or a not-aborted
+			 * updater whose lock mode conflicts with the one we want, so
+			 * return true.
+			 */
+			result = true;
+			break;
+		}
+		pfree(members);
+	}
+
+	return result;
+}
+
 /*
  * Do_MultiXactIdWait
  *		Actual implementation for the two functions below.
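
As a concrete illustration of the filtering above, covering the case the removed heap_lock_tuple code handled by hand: if the tuple's xmax is a multixact whose only live member holds a FOR KEY SHARE lock, a request for LockTupleNoKeyExclusive does not conflict (as the removed code also assumed, key-share lockers do not block a no-key-exclusive request), so the caller skips both the heavyweight tuple lock and the sleep; a FOR UPDATE locker or a live updater in the multixact makes the function return true instead. In the heap_lock_tuple hunk above (@@ -4146,64) this reduces to:

	/* requesting NoKeyExclusive on a multixact-locked tuple */
	if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask, mode))
	{
		/* only non-conflicting lockers remain: recheck xmax, then skip sleeping */
		require_sleep = false;
	}
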