Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit bfea925

Browse files
committed
Further marginal hacking on generic atomic ops.
In the generic atomic ops that rely on a loop around a CAS primitive, there's no need to force the initial read of the "old" value to be atomic. In the typically-rare case that we get a torn value, that simply means that the first CAS attempt will fail; but it will update "old" to the atomically-read value, so the next attempt has a chance of succeeding. It was already being done that way in pg_atomic_exchange_u64_impl(), but let's duplicate the approach in the rest. (Given the current coding of the pg_atomic_read functions, this change is a no-op anyway on popular platforms; it only makes a difference where pg_atomic_read_u64_impl() is implemented as a CAS.)

In passing, also remove unnecessary take-a-pointer-and-dereference-it coding in the pg_atomic_read functions. That seems to have been based on a misunderstanding of what the C standard requires. What actually matters is that the pointer be declared as pointing to volatile, which it is.

I don't believe this will change the assembly code at all on x86 platforms (even ignoring the likelihood that these implementations get overridden by others); but it may help on less-mainstream CPUs.

Discussion: https://postgr.es/m/13707.1504718238@sss.pgh.pa.us
1 parent f06588a commit bfea925

File tree

1 file changed

+14
-16
lines changed

1 file changed

+14
-16
lines changed

‎src/include/port/atomics/generic.h

Lines changed: 14 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ typedef pg_atomic_uint32 pg_atomic_flag;
4545
static inline uint32
4646
pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
4747
{
48-
	return *(&ptr->value);
48+
	return ptr->value;
4949
}
5050
#endif
5151

@@ -170,7 +170,7 @@ static inline uint32
170170
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
171171
{
172172
	uint32 old;
173-
	old = pg_atomic_read_u32_impl(ptr);
173+
	old = ptr->value;	/* ok if read is not atomic */
174174
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
175175
		/* skip */;
176176
	return old;
@@ -183,7 +183,7 @@ static inline uint32
183183
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
184184
{
185185
	uint32 old;
186-
	old = pg_atomic_read_u32_impl(ptr);
186+
	old = ptr->value;	/* ok if read is not atomic */
187187
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
188188
		/* skip */;
189189
	return old;
@@ -205,7 +205,7 @@ static inline uint32
205205
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
206206
{
207207
	uint32 old;
208-
	old = pg_atomic_read_u32_impl(ptr);
208+
	old = ptr->value;	/* ok if read is not atomic */
209209
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
210210
		/* skip */;
211211
	return old;
@@ -218,7 +218,7 @@ static inline uint32
218218
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
219219
{
220220
	uint32 old;
221-
	old = pg_atomic_read_u32_impl(ptr);
221+
	old = ptr->value;	/* ok if read is not atomic */
222222
	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
223223
		/* skip */;
224224
	return old;
@@ -249,7 +249,7 @@ static inline uint64
249249
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
250250
{
251251
	uint64 old;
252-
	old = ptr->value;
252+
	old = ptr->value;	/* ok if read is not atomic */
253253
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
254254
		/* skip */;
255255
	return old;
@@ -299,12 +299,10 @@ static inline uint64
299299
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
300300
{
301301
	/*
302-
	 * On this platform aligned 64bit reads are guaranteed to be atomic,
303-
	 * except if using the fallback implementation, where can't guarantee the
304-
	 * required alignment.
302+
	 * On this platform aligned 64-bit reads are guaranteed to be atomic.
305303
	 */
306304
	AssertPointerAlignment(ptr, 8);
307-
	return *(&ptr->value);
305+
	return ptr->value;
308306
}
309307

310308
#else
@@ -315,10 +313,10 @@ pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
315313
	uint64 old = 0;
316314

317315
	/*
318-
	 * 64bit reads aren't safe on all platforms. In the generic
316+
	 * 64-bit reads aren't atomic on all platforms. In the generic
319317
	 * implementation implement them as a compare/exchange with 0. That'll
320-
	 * fail or succeed, but always return the old value. Possible might store
321-
	 * a 0, but only if the prev. value also was a 0 - i.e. harmless.
318+
	 * fail or succeed, but always return the old value. Possibly might store
319+
	 * a 0, but only if the previous value also was a 0 - i.e. harmless.
322320
	 */
323321
	pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
324322

@@ -342,7 +340,7 @@ static inline uint64
342340
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
343341
{
344342
	uint64 old;
345-
	old = pg_atomic_read_u64_impl(ptr);
343+
	old = ptr->value;	/* ok if read is not atomic */
346344
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
347345
		/* skip */;
348346
	return old;
@@ -364,7 +362,7 @@ static inline uint64
364362
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
365363
{
366364
	uint64 old;
367-
	old = pg_atomic_read_u64_impl(ptr);
365+
	old = ptr->value;	/* ok if read is not atomic */
368366
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
369367
		/* skip */;
370368
	return old;
@@ -377,7 +375,7 @@ static inline uint64
377375
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
378376
{
379377
	uint64 old;
380-
	old = pg_atomic_read_u64_impl(ptr);
378+
	old = ptr->value;	/* ok if read is not atomic */
381379
	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
382380
		/* skip */;
383381
	return old;

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp