Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 07ab138

Browse files
committed
Fix two more bugs in fast-path relation locking.

First, the previous code failed to account for the fact that, during Hot Standby operation, the startup process takes AccessExclusiveLocks on relations without setting MyDatabaseId. This resulted in fast-path strong lock counts failing to be incremented when the startup process took locks, which in turn allowed conflicting lock requests to succeed when they should not have. Report by Erik Rijkers, diagnosis by Heikki Linnakangas.

Second, LockReleaseAll() failed to honor the allLocks and lockmethodid restrictions with respect to fast-path locks. It's not clear to me whether this produces any user-visible breakage at the moment, but it's certainly wrong. Rearrange order of operations in LockReleaseAll to fix. Noted by Tom Lane.
1 parent 932ded2 · commit 07ab138

File tree

1 file changed

+121
-114
lines changed
  • src/backend/storage/lmgr

1 file changed

+121
-114
lines changed

‎src/backend/storage/lmgr/lock.c

Lines changed: 121 additions & 114 deletions
Original file line numberDiff line numberDiff line change
@@ -192,14 +192,17 @@ static intFastPathLocalUseCount = 0;
192192
* self-conflicting, it can't use the fast-path mechanism; but it also does
193193
* not conflict with any of the locks that do, so we can ignore it completely.
194194
*/
195-
#defineFastPathTag(locktag) \
195+
#defineEligibleForRelationFastPath(locktag,mode) \
196196
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
197197
(locktag)->locktag_type == LOCKTAG_RELATION && \
198198
(locktag)->locktag_field1 == MyDatabaseId && \
199-
MyDatabaseId != InvalidOid)
200-
#defineFastPathWeakMode(mode)((mode) < ShareUpdateExclusiveLock)
201-
#defineFastPathStrongMode(mode)((mode) > ShareUpdateExclusiveLock)
202-
#defineFastPathRelevantMode(mode)((mode) != ShareUpdateExclusiveLock)
199+
MyDatabaseId != InvalidOid && \
200+
(mode) < ShareUpdateExclusiveLock)
201+
#defineConflictsWithRelationFastPath(locktag,mode) \
202+
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
203+
(locktag)->locktag_type == LOCKTAG_RELATION && \
204+
(locktag)->locktag_field1 != InvalidOid && \
205+
(mode) > ShareUpdateExclusiveLock)
203206

204207
staticboolFastPathGrantRelationLock(Oidrelid,LOCKMODElockmode);
205208
staticboolFastPathUnGrantRelationLock(Oidrelid,LOCKMODElockmode);
@@ -697,67 +700,71 @@ LockAcquireExtended(const LOCKTAG *locktag,
697700
log_lock= true;
698701
}
699702

700-
/* Locks that participate in the fast path require special handling. */
701-
if (FastPathTag(locktag)&&FastPathRelevantMode(lockmode))
703+
/*
704+
* Attempt to take lock via fast path, if eligible. But if we remember
705+
* having filled up the fast path array, we don't attempt to make any
706+
* further use of it until we release some locks. It's possible that some
707+
* other backend has transferred some of those locks to the shared hash
708+
* table, leaving space free, but it's not worth acquiring the LWLock just
709+
* to check. It's also possible that we're acquiring a second or third
710+
* lock type on a relation we have already locked using the fast-path, but
711+
* for now we don't worry about that case either.
712+
*/
713+
if (EligibleForRelationFastPath(locktag,lockmode)
714+
&&FastPathLocalUseCount<FP_LOCK_SLOTS_PER_BACKEND)
702715
{
703-
uint32fasthashcode;
704-
705-
fasthashcode=FastPathStrongLockHashPartition(hashcode);
716+
uint32fasthashcode=FastPathStrongLockHashPartition(hashcode);
717+
boolacquired;
706718

707719
/*
708-
* If we remember having filled up the fast path array, we don't
709-
* attempt to make any further use of it until we release some locks.
710-
* It's possible that some other backend has transferred some of those
711-
* locks to the shared hash table, leaving space free, but it's not
712-
* worth acquiring the LWLock just to check. It's also possible that
713-
* we're acquiring a second or third lock type on a relation we have
714-
* already locked using the fast-path, but for now we don't worry about
715-
* that case either.
720+
* LWLockAcquire acts as a memory sequencing point, so it's safe
721+
* to assume that any strong locker whose increment to
722+
* FastPathStrongRelationLocks->counts becomes visible after we test
723+
* it has yet to begin to transfer fast-path locks.
716724
*/
717-
if (FastPathWeakMode(lockmode)
718-
&&FastPathLocalUseCount<FP_LOCK_SLOTS_PER_BACKEND)
725+
LWLockAcquire(MyProc->backendLock,LW_EXCLUSIVE);
726+
if (FastPathStrongRelationLocks->count[fasthashcode]!=0)
727+
acquired= false;
728+
else
729+
acquired=FastPathGrantRelationLock(locktag->locktag_field2,
730+
lockmode);
731+
LWLockRelease(MyProc->backendLock);
732+
if (acquired)
719733
{
720-
boolacquired;
721-
722-
/*
723-
* LWLockAcquire acts as a memory sequencing point, so it's safe
724-
* to assume that any strong locker whose increment to
725-
* FastPathStrongRelationLocks->counts becomes visible after we test
726-
* it has yet to begin to transfer fast-path locks.
727-
*/
728-
LWLockAcquire(MyProc->backendLock,LW_EXCLUSIVE);
729-
if (FastPathStrongRelationLocks->count[fasthashcode]!=0)
730-
acquired= false;
731-
else
732-
acquired=FastPathGrantRelationLock(locktag->locktag_field2,
733-
lockmode);
734-
LWLockRelease(MyProc->backendLock);
735-
if (acquired)
736-
{
737-
GrantLockLocal(locallock,owner);
738-
returnLOCKACQUIRE_OK;
739-
}
734+
GrantLockLocal(locallock,owner);
735+
returnLOCKACQUIRE_OK;
740736
}
741-
elseif (FastPathStrongMode(lockmode))
737+
}
738+
739+
/*
740+
* If this lock could potentially have been taken via the fast-path by
741+
* some other backend, we must (temporarily) disable further use of the
742+
* fast-path for this lock tag, and migrate any locks already taken via
743+
* this method to the main lock table.
744+
*/
745+
if (ConflictsWithRelationFastPath(locktag,lockmode))
746+
{
747+
uint32fasthashcode=FastPathStrongLockHashPartition(hashcode);
748+
749+
BeginStrongLockAcquire(locallock,fasthashcode);
750+
if (!FastPathTransferRelationLocks(lockMethodTable,locktag,
751+
hashcode))
742752
{
743-
BeginStrongLockAcquire(locallock,fasthashcode);
744-
if (!FastPathTransferRelationLocks(lockMethodTable,locktag,
745-
hashcode))
746-
{
747-
AbortStrongLockAcquire();
748-
if (reportMemoryError)
749-
ereport(ERROR,
750-
(errcode(ERRCODE_OUT_OF_MEMORY),
751-
errmsg("out of shared memory"),
752-
errhint("You might need to increase max_locks_per_transaction.")));
753-
else
754-
returnLOCKACQUIRE_NOT_AVAIL;
755-
}
753+
AbortStrongLockAcquire();
754+
if (reportMemoryError)
755+
ereport(ERROR,
756+
(errcode(ERRCODE_OUT_OF_MEMORY),
757+
errmsg("out of shared memory"),
758+
errhint("You might need to increase max_locks_per_transaction.")));
759+
else
760+
returnLOCKACQUIRE_NOT_AVAIL;
756761
}
757762
}
758763

759764
/*
760-
* Otherwise we've got to mess with the shared lock table.
765+
* We didn't find the lock in our LOCALLOCK table, and we didn't manage
766+
* to take it via the fast-path, either, so we've got to mess with the
767+
* shared lock table.
761768
*/
762769
partitionLock=LockHashPartitionLock(hashcode);
763770

@@ -1688,8 +1695,8 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
16881695
if (locallock->nLocks>0)
16891696
return TRUE;
16901697

1691-
/*Locks that participate in the fast path require special handling. */
1692-
if (FastPathTag(locktag)&&FastPathWeakMode(lockmode)
1698+
/*Attempt fast release of any lock eligible for the fast path. */
1699+
if (EligibleForRelationFastPath(locktag,lockmode)
16931700
&&FastPathLocalUseCount>0)
16941701
{
16951702
boolreleased;
@@ -1729,7 +1736,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
17291736
PROCLOCKTAGproclocktag;
17301737
boolfound;
17311738

1732-
Assert(FastPathTag(locktag)&&FastPathWeakMode(lockmode));
1739+
Assert(EligibleForRelationFastPath(locktag,lockmode));
17331740
lock= (LOCK*)hash_search_with_hash_value(LockMethodLockHash,
17341741
(constvoid*)locktag,
17351742
locallock->hashcode,
@@ -1830,28 +1837,63 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
18301837

18311838
while ((locallock= (LOCALLOCK*)hash_seq_search(&status))!=NULL)
18321839
{
1833-
if (locallock->proclock==NULL||locallock->lock==NULL)
1840+
/*
1841+
* If the LOCALLOCK entry is unused, we must've run out of shared
1842+
* memory while trying to set up this lock. Just forget the local
1843+
* entry.
1844+
*/
1845+
if (locallock->nLocks==0)
18341846
{
1835-
LOCKMODElockmode=locallock->tag.mode;
1836-
Oidrelid;
1847+
RemoveLocalLock(locallock);
1848+
continue;
1849+
}
18371850

1838-
/*
1839-
* If the LOCALLOCK entry is unused, we must've run out of shared
1840-
* memory while trying to set up this lock. Just forget the local
1841-
* entry.
1842-
*/
1843-
if (locallock->nLocks==0)
1851+
/* Ignore items that are not of the lockmethod to be removed */
1852+
if (LOCALLOCK_LOCKMETHOD(*locallock)!=lockmethodid)
1853+
continue;
1854+
1855+
/*
1856+
* If we are asked to release all locks, we can just zap the entry.
1857+
* Otherwise, must scan to see if there are session locks. We assume
1858+
* there is at most one lockOwners entry for session locks.
1859+
*/
1860+
if (!allLocks)
1861+
{
1862+
LOCALLOCKOWNER*lockOwners=locallock->lockOwners;
1863+
1864+
/* If it's above array position 0, move it down to 0 */
1865+
for (i=locallock->numLockOwners-1;i>0;i--)
18441866
{
1845-
RemoveLocalLock(locallock);
1867+
if (lockOwners[i].owner==NULL)
1868+
{
1869+
lockOwners[0]=lockOwners[i];
1870+
break;
1871+
}
1872+
}
1873+
1874+
if (locallock->numLockOwners>0&&
1875+
lockOwners[0].owner==NULL&&
1876+
lockOwners[0].nLocks>0)
1877+
{
1878+
/* Fix the locallock to show just the session locks */
1879+
locallock->nLocks=lockOwners[0].nLocks;
1880+
locallock->numLockOwners=1;
1881+
/* We aren't deleting this locallock, so done */
18461882
continue;
18471883
}
1884+
}
18481885

1849-
/*
1850-
* Otherwise, we should be dealing with a lock acquired via the
1851-
* fast-path. If not, we've got trouble.
1852-
*/
1853-
if (!FastPathTag(&locallock->tag.lock)
1854-
|| !FastPathWeakMode(lockmode))
1886+
/*
1887+
* If the lock or proclock pointers are NULL, this lock was taken via
1888+
* the relation fast-path.
1889+
*/
1890+
if (locallock->proclock==NULL||locallock->lock==NULL)
1891+
{
1892+
LOCKMODElockmode=locallock->tag.mode;
1893+
Oidrelid;
1894+
1895+
/* Verify that a fast-path lock is what we've got. */
1896+
if (!EligibleForRelationFastPath(&locallock->tag.lock,lockmode))
18551897
elog(PANIC,"locallock table corrupted");
18561898

18571899
/*
@@ -1894,41 +1936,6 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
18941936
continue;
18951937
}
18961938

1897-
/* Ignore items that are not of the lockmethod to be removed */
1898-
if (LOCALLOCK_LOCKMETHOD(*locallock)!=lockmethodid)
1899-
continue;
1900-
1901-
/*
1902-
* If we are asked to release all locks, we can just zap the entry.
1903-
* Otherwise, must scan to see if there are session locks. We assume
1904-
* there is at most one lockOwners entry for session locks.
1905-
*/
1906-
if (!allLocks)
1907-
{
1908-
LOCALLOCKOWNER*lockOwners=locallock->lockOwners;
1909-
1910-
/* If it's above array position 0, move it down to 0 */
1911-
for (i=locallock->numLockOwners-1;i>0;i--)
1912-
{
1913-
if (lockOwners[i].owner==NULL)
1914-
{
1915-
lockOwners[0]=lockOwners[i];
1916-
break;
1917-
}
1918-
}
1919-
1920-
if (locallock->numLockOwners>0&&
1921-
lockOwners[0].owner==NULL&&
1922-
lockOwners[0].nLocks>0)
1923-
{
1924-
/* Fix the locallock to show just the session locks */
1925-
locallock->nLocks=lockOwners[0].nLocks;
1926-
locallock->numLockOwners=1;
1927-
/* We aren't deleting this locallock, so done */
1928-
continue;
1929-
}
1930-
}
1931-
19321939
/* Mark the proclock to show we need to release this lockmode */
19331940
if (locallock->nLocks>0)
19341941
locallock->proclock->releaseMask |=LOCKBIT_ON(locallock->tag.mode);
@@ -2481,10 +2488,10 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
24812488

24822489
/*
24832490
* Fast path locks might not have been entered in the primary lock table.
2484-
*But only strong locks canconflict withanything that might have been
2485-
*taken via the fast-pathmechanism.
2491+
*If the lock we're dealing with couldconflict withsuch a lock, we must
2492+
*examine each backend's fast-patharray for conflicts.
24862493
*/
2487-
if (FastPathTag(locktag)&&FastPathStrongMode(lockmode))
2494+
if (ConflictsWithRelationFastPath(locktag,lockmode))
24882495
{
24892496
inti;
24902497
Oidrelid=locktag->locktag_field2;
@@ -2722,7 +2729,7 @@ LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
27222729
* Decrement strong lock count. This logic is needed only for 2PC.
27232730
*/
27242731
if (decrement_strong_lock_count
2725-
&&FastPathTag(&lock->tag)&&FastPathStrongMode(lockmode))
2732+
&&ConflictsWithRelationFastPath(&lock->tag,lockmode))
27262733
{
27272734
uint32fasthashcode=FastPathStrongLockHashPartition(hashcode);
27282735
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
@@ -3604,7 +3611,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
36043611
* Bump strong lock count, to make sure any fast-path lock requests won't
36053612
* be granted without consulting the primary lock table.
36063613
*/
3607-
if (FastPathTag(&lock->tag)&&FastPathStrongMode(lockmode))
3614+
if (ConflictsWithRelationFastPath(&lock->tag,lockmode))
36083615
{
36093616
uint32fasthashcode=FastPathStrongLockHashPartition(hashcode);
36103617
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp