Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit dc06734

Browse files
committed
Force the size and alignment of LWLock array entries to be either 16 or 32
bytes. This shouldn't make any difference on x86 machines, where the size happened to be 16 bytes anyway, but on 64-bit machines and machines with slock_t int or wider, it will speed array indexing and hopefully reduce SMP cache contention effects. Per recent experimentation.
1 parent 1dd9b09 commit dc06734

File tree

1 file changed

+45
-18
lines changed

1 file changed

+45
-18
lines changed

‎src/backend/storage/lmgr/lwlock.c

Lines changed: 45 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
* Portions Copyright (c) 1994, Regents of the University of California
1616
*
1717
* IDENTIFICATION
18-
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.29 2005/08/20 23:26:24 tgl Exp $
18+
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.30 2005/09/16 00:30:05 tgl Exp $
1919
*
2020
*-------------------------------------------------------------------------
2121
*/
@@ -38,12 +38,32 @@ typedef struct LWLock
3838
/* tail is undefined when head is NULL */
3939
}LWLock;
4040

41+
/*
42+
* All the LWLock structs are allocated as an array in shared memory.
43+
* (LWLockIds are indexes into the array.) We force the array stride to
44+
* be a power of 2, which saves a few cycles in indexing, but more
45+
* importantly also ensures that individual LWLocks don't cross cache line
46+
* boundaries. This reduces cache contention problems, especially on AMD
47+
* Opterons. (Of course, we have to also ensure that the array start
48+
* address is suitably aligned.)
49+
*
50+
* LWLock is between 16 and 32 bytes on all known platforms, so these two
51+
* cases are sufficient.
52+
*/
53+
#defineLWLOCK_PADDED_SIZE(sizeof(LWLock) <= 16 ? 16 : 32)
54+
55+
typedefunionLWLockPadded
56+
{
57+
LWLocklock;
58+
charpad[LWLOCK_PADDED_SIZE];
59+
}LWLockPadded;
60+
4161
/*
4262
* This points to the array of LWLocks in shared memory. Backends inherit
43-
* the pointer by fork from the postmaster. LWLockIds are indexes into
44-
*the array.
63+
* the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
64+
*where we have special measures to pass it down).
4565
*/
46-
NON_EXEC_STATICLWLock*LWLockArray=NULL;
66+
NON_EXEC_STATICLWLockPadded*LWLockArray=NULL;
4767

4868
/* shared counter for dynamic allocation of LWLockIds */
4969
staticint*LWLockCounter;
@@ -135,10 +155,11 @@ LWLockShmemSize(void)
135155
Sizesize;
136156
intnumLocks=NumLWLocks();
137157

138-
/*Allocate the LWLocks plus spaceforshared allocation counter. */
139-
size=mul_size(numLocks,sizeof(LWLock));
158+
/*Spaceforthe LWLock array. */
159+
size=mul_size(numLocks,sizeof(LWLockPadded));
140160

141-
size=add_size(size,2*sizeof(int));
161+
/* Space for shared allocation counter, plus room for alignment. */
162+
size=add_size(size,2*sizeof(int)+LWLOCK_PADDED_SIZE);
142163

143164
returnsize;
144165
}
@@ -152,23 +173,29 @@ CreateLWLocks(void)
152173
{
153174
intnumLocks=NumLWLocks();
154175
SizespaceLocks=LWLockShmemSize();
155-
LWLock*lock;
176+
LWLockPadded*lock;
177+
char*ptr;
156178
intid;
157179

158180
/* Allocate space */
159-
LWLockArray= (LWLock*)ShmemAlloc(spaceLocks);
181+
ptr= (char*)ShmemAlloc(spaceLocks);
182+
183+
/* Ensure desired alignment of LWLock array */
184+
ptr+=LWLOCK_PADDED_SIZE- ((unsigned long)ptr) %LWLOCK_PADDED_SIZE;
185+
186+
LWLockArray= (LWLockPadded*)ptr;
160187

161188
/*
162189
* Initialize all LWLocks to "unlocked" state
163190
*/
164191
for (id=0,lock=LWLockArray;id<numLocks;id++,lock++)
165192
{
166-
SpinLockInit(&lock->mutex);
167-
lock->releaseOK= true;
168-
lock->exclusive=0;
169-
lock->shared=0;
170-
lock->head=NULL;
171-
lock->tail=NULL;
193+
SpinLockInit(&lock->lock.mutex);
194+
lock->lock.releaseOK= true;
195+
lock->lock.exclusive=0;
196+
lock->lock.shared=0;
197+
lock->lock.head=NULL;
198+
lock->lock.tail=NULL;
172199
}
173200

174201
/*
@@ -206,7 +233,7 @@ LWLockAssign(void)
206233
void
207234
LWLockAcquire(LWLockIdlockid,LWLockModemode)
208235
{
209-
volatileLWLock*lock=LWLockArray+lockid;
236+
volatileLWLock*lock=&(LWLockArray[lockid].lock);
210237
PGPROC*proc=MyProc;
211238
boolretry= false;
212239
intextraWaits=0;
@@ -358,7 +385,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
358385
bool
359386
LWLockConditionalAcquire(LWLockIdlockid,LWLockModemode)
360387
{
361-
volatileLWLock*lock=LWLockArray+lockid;
388+
volatileLWLock*lock=&(LWLockArray[lockid].lock);
362389
boolmustwait;
363390

364391
PRINT_LWDEBUG("LWLockConditionalAcquire",lockid,lock);
@@ -423,7 +450,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
423450
void
424451
LWLockRelease(LWLockIdlockid)
425452
{
426-
volatileLWLock*lock=LWLockArray+lockid;
453+
volatileLWLock*lock=&(LWLockArray[lockid].lock);
427454
PGPROC*head;
428455
PGPROC*proc;
429456
inti;

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp