Commit cf1234a

Fix deadlock danger when atomic ops are done under spinlock.

This was a danger only for --disable-spinlocks in combination with
atomic operations unsupported by the current platform.

While atomics.c was careful to signal that a separate semaphore ought
to be used when spinlock emulation is active, spin.c didn't actually
implement that mechanism. That's my (Andres') fault, it seems to have
gotten lost during the development of the atomic operations support.

Fix that issue and add test for nesting atomic operations inside a
spinlock.

Author: Andres Freund
Discussion: https://postgr.es/m/20200605023302.g6v3ydozy5txifji@alap3.anarazel.de
Backpatch: 9.5-
1 parent 3b37a6d · commit cf1234a
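
To make the hazard concrete, here is a small standalone sketch (an illustration, not PostgreSQL source) of the pre-fix assignment scheme in s_init_lock_sema(), which handed out semaphore indexes from the single range 1..NUM_SPINLOCK_SEMAPHORES regardless of the nested flag. The pool size of 128 is the default from pg_config_manual.h and is an assumption here; the function name and the allocation pattern in main() are hypothetical.

#include <stdio.h>

/* Default pool size from pg_config_manual.h (assumed here). */
#define NUM_SPINLOCK_SEMAPHORES 128

/*
 * Pre-fix assignment logic: every emulated lock, whether it backed a
 * "normal" spinlock or an emulated atomic, drew from the same range
 * 1..NUM_SPINLOCK_SEMAPHORES off one shared counter.
 */
static unsigned counter = 0;

static int
assign_sema_prefix_style(void)
{
	return ((++counter) % NUM_SPINLOCK_SEMAPHORES) + 1;
}

int
main(void)
{
	/* A spinlock and, 128 allocations later, an atomic's backing lock... */
	int			spinlock_sema = assign_sema_prefix_style();
	int			atomic_sema;

	for (int i = 0; i < NUM_SPINLOCK_SEMAPHORES - 1; i++)
		(void) assign_sema_prefix_style();
	atomic_sema = assign_sema_prefix_style();

	/*
	 * ...land on the same semaphore. An emulated atomic op executed while
	 * the spinlock is held would then block on a semaphore the process
	 * already holds: a self-deadlock.
	 */
	printf("spinlock sema = %d, atomic sema = %d (collision: %s)\n",
		   spinlock_sema, atomic_sema,
		   spinlock_sema == atomic_sema ? "yes" : "no");
	return 0;
}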

File tree

2 files changed: +119 -30 lines changed

src/backend/storage/lmgr/spin.c
Lines changed: 67 additions & 30 deletions

@@ -28,8 +28,24 @@
 
 
 #ifndef HAVE_SPINLOCKS
+
+/*
+ * No TAS, so spinlocks are implemented as PGSemaphores.
+ */
+
+#ifndef HAVE_ATOMICS
+#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
+#else
+#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
+#endif							/* DISABLE_ATOMICS */
+
 PGSemaphore *SpinlockSemaArray;
-#endif
+
+#else							/* !HAVE_SPINLOCKS */
+
+#define NUM_EMULATION_SEMAPHORES 0
+
+#endif							/* HAVE_SPINLOCKS */
 
 /*
  * Report the amount of shared memory needed to store semaphores for spinlock
@@ -38,34 +54,19 @@ PGSemaphore *SpinlockSemaArray;
 Size
 SpinlockSemaSize(void)
 {
-	return SpinlockSemas() * sizeof(PGSemaphore);
+	return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);
 }
 
-#ifdef HAVE_SPINLOCKS
-
 /*
  * Report number of semaphores needed to support spinlocks.
  */
 int
 SpinlockSemas(void)
 {
-	return 0;
+	return NUM_EMULATION_SEMAPHORES;
 }
-#else							/* !HAVE_SPINLOCKS */
 
-/*
- * No TAS, so spinlocks are implemented as PGSemaphores.
- */
-
-
-/*
- * Report number of semaphores needed to support spinlocks.
- */
-int
-SpinlockSemas(void)
-{
-	return NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES;
-}
+#ifndef HAVE_SPINLOCKS
 
 /*
  * Initialize spinlock emulation.
@@ -92,32 +93,68 @@ SpinlockSemaInit(void)
 /*
  * s_lock.h hardware-spinlock emulation using semaphores
  *
- * We map all spinlocks onto a set of NUM_SPINLOCK_SEMAPHORES semaphores.
- * It's okay to map multiple spinlocks onto one semaphore because no process
- * should ever hold more than one at a time. We just need enough semaphores
- * so that we aren't adding too much extra contention from that.
+ * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores. It's okay to
+ * map multiple spinlocks onto one semaphore because no process should ever
+ * hold more than one at a time. We just need enough semaphores so that we
+ * aren't adding too much extra contention from that.
+ *
+ * There is one exception to the restriction of only holding one spinlock at a
+ * time, which is that it's ok if emulated atomic operations are nested inside
+ * spinlocks. To avoid the danger of spinlocks and atomic using the same sema,
+ * we make sure "normal" spinlocks and atomics backed by spinlocks use
+ * distinct semaphores (see the nested argument to s_init_lock_sema).
  *
  * slock_t is just an int for this implementation; it holds the spinlock
- * number from 1..NUM_SPINLOCK_SEMAPHORES. We intentionally ensure that 0
+ * number from 1..NUM_EMULATION_SEMAPHORES. We intentionally ensure that 0
  * is not a valid value, so that testing with this code can help find
  * failures to initialize spinlocks.
  */
 
+static inline void
+s_check_valid(int lockndx)
+{
+	if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES))
+		elog(ERROR, "invalid spinlock number: %d", lockndx);
+}
+
 void
 s_init_lock_sema(volatile slock_t *lock, bool nested)
 {
 	static uint32 counter = 0;
-
-	*lock = ((++counter) % NUM_SPINLOCK_SEMAPHORES) + 1;
+	uint32		offset;
+	uint32		sema_total;
+	uint32		idx;
+
+	if (nested)
+	{
+		/*
+		 * To allow nesting atomics inside spinlocked sections, use a
+		 * different spinlock. See comment above.
+		 */
+		offset = 1 + NUM_SPINLOCK_SEMAPHORES;
+		sema_total = NUM_ATOMICS_SEMAPHORES;
+	}
+	else
+	{
+		offset = 1;
+		sema_total = NUM_SPINLOCK_SEMAPHORES;
+	}
+
+	idx = (counter++ % sema_total) + offset;
+
+	/* double check we did things correctly */
+	s_check_valid(idx);
+
+	*lock = idx;
 }
 
 void
 s_unlock_sema(volatile slock_t *lock)
 {
 	int			lockndx = *lock;
 
-	if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES)
-		elog(ERROR, "invalid spinlock number: %d", lockndx);
+	s_check_valid(lockndx);
+
 	PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
 }
 
@@ -134,8 +171,8 @@ tas_sema(volatile slock_t *lock)
 {
 	int			lockndx = *lock;
 
-	if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES)
-		elog(ERROR, "invalid spinlock number: %d", lockndx);
+	s_check_valid(lockndx);
+
 	/* Note that TAS macros return 0 if *success* */
 	return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
 }
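
For contrast, here is a minimal standalone sketch of the post-fix partitioning (again illustrative, assuming the default pool sizes of 128 spinlock and 64 atomics semaphores from pg_config_manual.h): regular spinlocks and nested, atomics-backing locks now draw indexes from disjoint ranges, so a nested acquisition can never hit the semaphore its caller already holds.

#include <stdio.h>
#include <stdbool.h>

/* Default pool sizes from pg_config_manual.h (assumed here). */
#define NUM_SPINLOCK_SEMAPHORES 128
#define NUM_ATOMICS_SEMAPHORES	64

/*
 * Mirror of the post-fix assignment in s_init_lock_sema(). The real code
 * shares one static counter for both cases; a caller-supplied counter is
 * used here only to keep the example self-contained.
 */
static int
assign_sema(unsigned *counter, bool nested)
{
	unsigned	offset = nested ? 1 + NUM_SPINLOCK_SEMAPHORES : 1;
	unsigned	sema_total = nested ? NUM_ATOMICS_SEMAPHORES : NUM_SPINLOCK_SEMAPHORES;

	return ((*counter)++ % sema_total) + offset;
}

int
main(void)
{
	unsigned	counter = 0;

	/* Regular spinlocks land in 1..128 ... */
	printf("spinlock sema = %d\n", assign_sema(&counter, false));	/* 1 */
	/* ... while atomics-backing locks land in 129..192. */
	printf("atomic sema   = %d\n", assign_sema(&counter, true));	/* 130 */
	return 0;
}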

src/test/regress/regress.c
Lines changed: 52 additions & 0 deletions

@@ -898,6 +898,56 @@ test_spinlock(void)
 #endif
 }
 
+/*
+ * Verify that performing atomic ops inside a spinlock isn't a
+ * problem. Realistically that's only going to be a problem when both
+ * --disable-spinlocks and --disable-atomics are used, but it's cheap enough
+ * to just always test.
+ *
+ * The test works by initializing enough atomics that we'd conflict if there
+ * were an overlap between a spinlock and an atomic by holding a spinlock
+ * while manipulating more than NUM_SPINLOCK_SEMAPHORES atomics.
+ *
+ * NUM_TEST_ATOMICS doesn't really need to be more than
+ * NUM_SPINLOCK_SEMAPHORES, but it seems better to test a bit more
+ * extensively.
+ */
+static void
+test_atomic_spin_nest(void)
+{
+	slock_t		lock;
+#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
+	pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS];
+	pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS];
+
+	SpinLockInit(&lock);
+
+	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+	{
+		pg_atomic_init_u32(&atomics32[i], 0);
+		pg_atomic_init_u64(&atomics64[i], 0);
+	}
+
+	/* just so it's not all zeroes */
+	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+	{
+		EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics32[i], i), 0);
+		EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&atomics64[i], i), 0);
+	}
+
+	/* test whether we can do atomic op with lock held */
+	SpinLockAcquire(&lock);
+	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+	{
+		EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics32[i], i), i);
+		EXPECT_EQ_U32(pg_atomic_read_u32(&atomics32[i]), 0);
+		EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&atomics64[i], i), i);
+		EXPECT_EQ_U64(pg_atomic_read_u64(&atomics64[i]), 0);
+	}
+	SpinLockRelease(&lock);
+}
+#undef NUM_TEST_ATOMICS
+
 
 PG_FUNCTION_INFO_V1(test_atomic_ops);
 Datum
 test_atomic_ops(PG_FUNCTION_ARGS)
@@ -914,6 +964,8 @@ test_atomic_ops(PG_FUNCTION_ARGS)
 	 */
 	test_spinlock();
 
+	test_atomic_spin_nest();
+
 	PG_RETURN_BOOL(true);
 }
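
The EXPECT_EQ_U32/EXPECT_EQ_U64 assertions used by the test are helper macros defined earlier in regress.c. Roughly, in paraphrase (not the verbatim PostgreSQL definition), each evaluates both expressions once and raises an ERROR on mismatch, which fails the regression run:

/*
 * Paraphrase of the EXPECT_EQ_U32 helper from regress.c; EXPECT_EQ_U64 is
 * the uint64 analogue. Not the verbatim definition.
 */
#define EXPECT_EQ_U32(result_expr, expected_expr) \
	do { \
		uint32		actual_ = (result_expr); \
		uint32		expected_ = (expected_expr); \
		if (actual_ != expected_) \
			elog(ERROR, \
				 "%s yielded %u, expected %s in file \"%s\" line %u", \
				 #result_expr, actual_, #expected_expr, \
				 __FILE__, __LINE__); \
	} while (0)

In the regression suite the whole battery, including the new test_atomic_spin_nest(), runs through the SQL-callable test_atomic_ops() extended in the second hunk.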
