Commit 82e861f

Fix LWLockAssign() so that it can safely be executed after postmaster
initialization. Add spinlocking, fix EXEC_BACKEND unsafeness.
1 parent 77d1de3 · commit 82e861f
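The fix below guards the shared LWLockId counter with the ShmemLock spinlock, so that concurrent calls to LWLockAssign() after postmaster startup cannot hand out the same id twice or run past the end of the LWLock array. As a rough standalone illustration of that interlocked bounded-counter pattern (not PostgreSQL code: it substitutes a POSIX pthread spinlock for ShmemLock and plain ints for LWLockIds):

#include <pthread.h>
#include <stdio.h>

/* Standalone analogue of the patched LWLockAssign(): a bounded counter
 * protected by a spinlock.  counter[0] is the next free id, counter[1]
 * is one past the last assignable id. */
static pthread_spinlock_t lock;
static int counter[2] = {4, 8};     /* ids 4..7 are available */

static int
assign_id(void)
{
    int     result;

    pthread_spin_lock(&lock);
    if (counter[0] >= counter[1])
    {
        /* release before reporting failure, as the real patch does */
        pthread_spin_unlock(&lock);
        return -1;                  /* no more ids available */
    }
    result = counter[0]++;
    pthread_spin_unlock(&lock);
    return result;
}

int
main(void)
{
    pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
    for (int i = 0; i < 6; i++)
        printf("assigned: %d\n", assign_id());
    pthread_spin_destroy(&lock);
    return 0;
}

The essential points, mirrored in the real patch, are that the increment happens while the lock is held and that the failure path releases the lock before reporting the error.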

File tree

src/backend/storage/ipc/shmem.c
src/backend/storage/lmgr/lwlock.c

2 files changed: +31, -15 lines


src/backend/storage/ipc/shmem.c

Lines changed: 2 additions & 3 deletions
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.85 2005/08/20 23:26:20 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.86 2005/10/07 21:42:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -71,8 +71,7 @@ SHMEM_OFFSET ShmemBase;		/* start address of shared memory */
 
 static SHMEM_OFFSET ShmemEnd;	/* end+1 address of shared memory */
 
-NON_EXEC_STATIC slock_t *ShmemLock;		/* spinlock for shared memory
-										 * allocation */
+slock_t    *ShmemLock;			/* spinlock for shared memory and LWLock allocation */
 
 NON_EXEC_STATIC slock_t *ShmemIndexLock;		/* spinlock for ShmemIndex */
 

src/backend/storage/lmgr/lwlock.c

Lines changed: 29 additions & 12 deletions
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.31 2005/10/07 20:11:03 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.32 2005/10/07 21:42:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -27,6 +27,10 @@
 #include "storage/spin.h"
 
 
+/* We use the ShmemLock spinlock to protect LWLockAssign */
+extern slock_t *ShmemLock;
+
+
 typedef struct LWLock
 {
 	slock_t		mutex;			/* Protects LWLock and queue of PGPROCs */
@@ -65,9 +69,6 @@ typedef union LWLockPadded
  */
 NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
 
-/* shared counter for dynamic allocation of LWLockIds */
-static int *LWLockCounter;
-
 
 /*
  * We use this structure to keep track of locked LWLocks for release
@@ -159,7 +160,7 @@ LWLockShmemSize(void)
 	/* Space for the LWLock array. */
 	size = mul_size(numLocks, sizeof(LWLockPadded));
 
-	/* Space for shared allocation counter, plus room for alignment. */
+	/* Space for dynamic allocation counter, plus room for alignment. */
 	size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
 
 	return size;
@@ -175,12 +176,16 @@ CreateLWLocks(void)
 	int			numLocks = NumLWLocks();
 	Size		spaceLocks = LWLockShmemSize();
 	LWLockPadded *lock;
+	int		   *LWLockCounter;
 	char	   *ptr;
 	int			id;
 
 	/* Allocate space */
 	ptr = (char *) ShmemAlloc(spaceLocks);
 
+	/* Leave room for dynamic allocation counter */
+	ptr += 2 * sizeof(int);
+
 	/* Ensure desired alignment of LWLock array */
 	ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;
 
@@ -200,9 +205,10 @@ CreateLWLocks(void)
 	}
 
 	/*
-	 * Initialize the dynamic-allocation counter at the end of the array
+	 * Initialize the dynamic-allocation counter, which is stored just before
+	 * the first LWLock.
 	 */
-	LWLockCounter = (int *) lock;
+	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
 	LWLockCounter[0] = (int) NumFixedLWLocks;
 	LWLockCounter[1] = numLocks;
 }
@@ -211,16 +217,27 @@ CreateLWLocks(void)
 /*
  * LWLockAssign - assign a dynamically-allocated LWLock number
  *
- * NB: we do not currently try to interlock this.  Could perhaps use
- * ShmemLock spinlock if there were any need to assign LWLockIds after
- * shmem setup.
+ * We interlock this using the same spinlock that is used to protect
+ * ShmemAlloc().  Interlocking is not really necessary during postmaster
+ * startup, but it is needed if any user-defined code tries to allocate
+ * LWLocks after startup.
  */
 LWLockId
 LWLockAssign(void)
 {
+	LWLockId	result;
+	int		   *LWLockCounter;
+
+	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
+	SpinLockAcquire(ShmemLock);
 	if (LWLockCounter[0] >= LWLockCounter[1])
-		elog(FATAL, "no more LWLockIds available");
-	return (LWLockId) (LWLockCounter[0]++);
+	{
+		SpinLockRelease(ShmemLock);
+		elog(ERROR, "no more LWLockIds available");
+	}
+	result = (LWLockId) (LWLockCounter[0]++);
+	SpinLockRelease(ShmemLock);
+	return result;
 }
 
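With this change, code loaded into a running backend can obtain a lock number after postmaster startup. A minimal sketch of what such a caller might look like (hypothetical add-on code, not part of this commit; it assumes the era's API in which LWLockAssign() returns an LWLockId usable with LWLockAcquire()/LWLockRelease(), and it leaves sharing of the assigned id across backends to the caller):

/* Hypothetical add-on module, for illustration only. */
#include "postgres.h"
#include "storage/lwlock.h"

static LWLockId my_lock;        /* in practice this would live in shared memory */

void
my_module_init(void)
{
    /* Safe even after startup, now that LWLockAssign() takes ShmemLock */
    my_lock = LWLockAssign();
}

void
my_module_do_work(void)
{
    LWLockAcquire(my_lock, LW_EXCLUSIVE);
    /* ... touch shared state protected by my_lock ... */
    LWLockRelease(my_lock);
}

Note that LWLockAssign() can still run out of ids; NumLWLocks() must have reserved enough dynamic slots when shared memory was sized.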
