Commit 67fc4c9

Make parallel nbtree index scans use an LWLock.

Teach parallel nbtree index scans to use an LWLock (not a spinlock) to
protect the scan's shared descriptor state.

Preparation for an upcoming patch that will add skip scan optimizations
to nbtree.  That patch will create the need to occasionally allocate
memory while the scan descriptor is locked, while copying datums that
were serialized by another backend.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Matthias van de Meent <boekewurm+postgres@gmail.com>
Discussion: https://postgr.es/m/CAH2-Wz=PKR6rB7qbx+Vnd7eqeB5VTcrW=iJvAsTsKbdG+kW_UA@mail.gmail.com
1 parent 8021c77 · commit 67fc4c9
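Background on why the lock type matters: a PostgreSQL spinlock may only guard a few straight-line instructions and must never call code that can allocate memory or raise an error, whereas an LWLock-protected critical section may do both (and the LWLock is released automatically during error recovery). A minimal, hypothetical sketch of the pattern the upcoming skip scan patch will need — the function copy_serialized_datums and its parameters are illustrative assumptions, not part of this commit:

/*
 * Hypothetical sketch only (not part of this commit): copy datums that
 * another backend serialized into shared memory, while the scan's shared
 * descriptor stays locked.  palloc() can elog(ERROR) on allocation
 * failure, which is safe under an LWLock but would be forbidden while
 * holding a spinlock.
 */
static char *
copy_serialized_datums(BTParallelScanDesc btscan,
					   const char *shared_datums, Size len)
{
	char	   *copy;

	LWLockAcquire(&btscan->btps_lock, LW_EXCLUSIVE);
	copy = palloc(len);			/* allocation while btps_lock is held */
	memcpy(copy, shared_datums, len);
	LWLockRelease(&btscan->btps_lock);

	return copy;
}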

File tree

5 files changed: +18 -14 lines changed

src/backend/access/nbtree/nbtpreprocesskeys.c

Lines changed: 1 addition & 1 deletion
@@ -1565,7 +1565,7 @@ _bt_preprocess_array_keys_final(IndexScanDesc scan, int *keyDataMap)
 	 * Parallel index scans require space in shared memory to store the
 	 * current array elements (for arrays kept by preprocessing) to schedule
 	 * the next primitive index scan.  The underlying structure is protected
-	 * using a spinlock, so defensively limit its size.  In practice this can
+	 * using an LWLock, so defensively limit its size.  In practice this can
 	 * only affect parallel scans that use an incomplete opfamily.
 	 */
 	if (scan->parallel_scan && so->numArrayKeys > INDEX_MAX_KEYS)

src/backend/access/nbtree/nbtree.c

Lines changed: 14 additions & 13 deletions
@@ -70,7 +70,7 @@ typedef struct BTParallelScanDescData
 	BTPS_State	btps_pageStatus;	/* indicates whether next page is
 									 * available for scan. see above for
 									 * possible states of parallel scan. */
-	slock_t		btps_mutex;		/* protects above variables, btps_arrElems */
+	LWLock		btps_lock;		/* protects shared parallel state */
 	ConditionVariable btps_cv;	/* used to synchronize parallel scan */
 
 	/*
@@ -554,7 +554,8 @@ btinitparallelscan(void *target)
 {
 	BTParallelScanDesc bt_target = (BTParallelScanDesc) target;
 
-	SpinLockInit(&bt_target->btps_mutex);
+	LWLockInitialize(&bt_target->btps_lock,
+					 LWTRANCHE_PARALLEL_BTREE_SCAN);
 	bt_target->btps_nextScanPage = InvalidBlockNumber;
 	bt_target->btps_lastCurrPage = InvalidBlockNumber;
 	bt_target->btps_pageStatus = BTPARALLEL_NOT_INITIALIZED;
@@ -576,15 +577,15 @@ btparallelrescan(IndexScanDesc scan)
 												  parallel_scan->ps_offset);
 
 	/*
-	 * In theory, we don't need to acquire the spinlock here, because there
+	 * In theory, we don't need to acquire the LWLock here, because there
 	 * shouldn't be any other workers running at this point, but we do so for
 	 * consistency.
 	 */
-	SpinLockAcquire(&btscan->btps_mutex);
+	LWLockAcquire(&btscan->btps_lock, LW_EXCLUSIVE);
 	btscan->btps_nextScanPage = InvalidBlockNumber;
 	btscan->btps_lastCurrPage = InvalidBlockNumber;
 	btscan->btps_pageStatus = BTPARALLEL_NOT_INITIALIZED;
-	SpinLockRelease(&btscan->btps_mutex);
+	LWLockRelease(&btscan->btps_lock);
 }
 
 /*
@@ -655,7 +656,7 @@ _bt_parallel_seize(IndexScanDesc scan, BlockNumber *next_scan_page,
 
 	while (1)
 	{
-		SpinLockAcquire(&btscan->btps_mutex);
+		LWLockAcquire(&btscan->btps_lock, LW_EXCLUSIVE);
 
 		if (btscan->btps_pageStatus == BTPARALLEL_DONE)
 		{
@@ -717,7 +718,7 @@ _bt_parallel_seize(IndexScanDesc scan, BlockNumber *next_scan_page,
 			*last_curr_page = btscan->btps_lastCurrPage;
 			exit_loop = true;
 		}
-		SpinLockRelease(&btscan->btps_mutex);
+		LWLockRelease(&btscan->btps_lock);
 		if (exit_loop || !status)
 			break;
 		ConditionVariableSleep(&btscan->btps_cv, WAIT_EVENT_BTREE_PAGE);
@@ -761,11 +762,11 @@ _bt_parallel_release(IndexScanDesc scan, BlockNumber next_scan_page,
 	btscan = (BTParallelScanDesc) OffsetToPointer(parallel_scan,
 												  parallel_scan->ps_offset);
 
-	SpinLockAcquire(&btscan->btps_mutex);
+	LWLockAcquire(&btscan->btps_lock, LW_EXCLUSIVE);
 	btscan->btps_nextScanPage = next_scan_page;
 	btscan->btps_lastCurrPage = curr_page;
 	btscan->btps_pageStatus = BTPARALLEL_IDLE;
-	SpinLockRelease(&btscan->btps_mutex);
+	LWLockRelease(&btscan->btps_lock);
 	ConditionVariableSignal(&btscan->btps_cv);
 }
 
@@ -804,14 +805,14 @@ _bt_parallel_done(IndexScanDesc scan)
 	 * Mark the parallel scan as done, unless some other process did so
 	 * already
 	 */
-	SpinLockAcquire(&btscan->btps_mutex);
+	LWLockAcquire(&btscan->btps_lock, LW_EXCLUSIVE);
 	Assert(btscan->btps_pageStatus != BTPARALLEL_NEED_PRIMSCAN);
 	if (btscan->btps_pageStatus != BTPARALLEL_DONE)
 	{
 		btscan->btps_pageStatus = BTPARALLEL_DONE;
 		status_changed = true;
 	}
-	SpinLockRelease(&btscan->btps_mutex);
+	LWLockRelease(&btscan->btps_lock);
 
 	/* wake up all the workers associated with this parallel scan */
 	if (status_changed)
@@ -838,7 +839,7 @@ _bt_parallel_primscan_schedule(IndexScanDesc scan, BlockNumber curr_page)
 	btscan = (BTParallelScanDesc) OffsetToPointer(parallel_scan,
 												  parallel_scan->ps_offset);
 
-	SpinLockAcquire(&btscan->btps_mutex);
+	LWLockAcquire(&btscan->btps_lock, LW_EXCLUSIVE);
 	if (btscan->btps_lastCurrPage == curr_page &&
 		btscan->btps_pageStatus == BTPARALLEL_IDLE)
 	{
@@ -854,7 +855,7 @@ _bt_parallel_primscan_schedule(IndexScanDesc scan, BlockNumber curr_page)
 			btscan->btps_arrElems[i] = array->cur_elem;
 		}
 	}
-	SpinLockRelease(&btscan->btps_mutex);
+	LWLockRelease(&btscan->btps_lock);
 }
 
 /*
src/backend/storage/lmgr/lwlock.c

Lines changed: 1 addition & 0 deletions
@@ -153,6 +153,7 @@ static const char *const BuiltinTrancheNames[] = {
 	[LWTRANCHE_LOCK_MANAGER] = "LockManager",
 	[LWTRANCHE_PREDICATE_LOCK_MANAGER] = "PredicateLockManager",
 	[LWTRANCHE_PARALLEL_HASH_JOIN] = "ParallelHashJoin",
+	[LWTRANCHE_PARALLEL_BTREE_SCAN] = "ParallelBtreeScan",
 	[LWTRANCHE_PARALLEL_QUERY_DSA] = "ParallelQueryDSA",
 	[LWTRANCHE_PER_SESSION_DSA] = "PerSessionDSA",
 	[LWTRANCHE_PER_SESSION_RECORD_TYPE] = "PerSessionRecordType",

src/backend/utils/activity/wait_event_names.txt

Lines changed: 1 addition & 0 deletions
@@ -371,6 +371,7 @@ BufferMapping	"Waiting to associate a data block with a buffer in the buffer poo
 LockManager	"Waiting to read or update information about <quote>heavyweight</quote> locks."
 PredicateLockManager	"Waiting to access predicate lock information used by serializable transactions."
 ParallelHashJoin	"Waiting to synchronize workers during Parallel Hash Join plan execution."
+ParallelBtreeScan	"Waiting to synchronize workers during Parallel B-tree scan plan execution."
 ParallelQueryDSA	"Waiting for parallel query dynamic shared memory allocation."
 PerSessionDSA	"Waiting for parallel query dynamic shared memory allocation."
 PerSessionRecordType	"Waiting to access a parallel query's information about composite types."

src/include/storage/lwlock.h

Lines changed: 1 addition & 0 deletions
@@ -194,6 +194,7 @@ typedef enum BuiltinTrancheIds
 	LWTRANCHE_LOCK_MANAGER,
 	LWTRANCHE_PREDICATE_LOCK_MANAGER,
 	LWTRANCHE_PARALLEL_HASH_JOIN,
+	LWTRANCHE_PARALLEL_BTREE_SCAN,
 	LWTRANCHE_PARALLEL_QUERY_DSA,
 	LWTRANCHE_PER_SESSION_DSA,
 	LWTRANCHE_PER_SESSION_RECORD_TYPE,
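Taken together, the last three hunks wire the new lock into PostgreSQL's wait-event reporting: LWTRANCHE_PARALLEL_BTREE_SCAN (lwlock.h) is the tranche ID that btinitparallelscan() now passes to LWLockInitialize(), BuiltinTrancheNames (lwlock.c) maps that ID to the display name ParallelBtreeScan, and wait_event_names.txt supplies the documented description, so a backend blocked on this lock reports an LWLock wait event named ParallelBtreeScan in pg_stat_activity.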

0 commit comments
