Commit e5253fd

Add parallel_leader_participation GUC.

Sometimes, for testing, it's useful to have the leader do nothing but
read tuples from workers; and it's possible that could work out better
even in production.

Thomas Munro, reviewed by Amit Kapila and by me.  A few final tweaks
by me.

Discussion: http://postgr.es/m/CAEepm=2U++Lp3bNTv2Bv_kkr5NE2pOyHhxU=G0YTa4ZhSYhHiw@mail.gmail.com

1 parent 7518049 · commit e5253fd

File tree: 10 files changed, +205 −9 lines changed

doc/src/sgml/config.sgml
Lines changed: 26 additions & 0 deletions

@@ -4265,6 +4265,32 @@ SELECT * FROM parent WHERE key = 2400;
      </listitem>
     </varlistentry>
 
+    <varlistentry id="guc-parallel-leader-participation" xreflabel="parallel_leader_participation">
+     <term>
+      <varname>parallel_leader_participation</varname> (<type>boolean</type>)
+      <indexterm>
+       <primary>
+        <varname>parallel_leader_participation</varname> configuration
+        parameter
+       </primary>
+      </indexterm>
+     </term>
+     <listitem>
+      <para>
+        Allows the leader process to execute the query plan under
+        <literal>Gather</literal> and <literal>Gather Merge</literal> nodes
+        instead of waiting for worker processes.  The default is
+        <literal>on</literal>.  Setting this value to <literal>off</literal>
+        reduces the likelihood that workers will become blocked because the
+        leader is not reading tuples fast enough, but requires the leader
+        process to wait for worker processes to start up before the first
+        tuples can be produced.  The degree to which the leader can help or
+        hinder performance depends on the plan type, number of workers and
+        query duration.
+      </para>
+     </listitem>
+    </varlistentry>
+
     <varlistentry id="guc-force-parallel-mode" xreflabel="force_parallel_mode">
      <term><varname>force_parallel_mode</varname> (<type>enum</type>)
       <indexterm>
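The documented behavior is easy to try from psql. A minimal sketch (the table name big_table, and the assumption that it is large enough to get a parallel plan, are illustrative, not part of the commit):

SHOW parallel_leader_participation;        -- on is the default
SET parallel_leader_participation = off;   -- leader now only gathers tuples
EXPLAIN (ANALYZE, COSTS OFF)
  SELECT count(*) FROM big_table;          -- hypothetical large table
RESET parallel_leader_participation;

The plan tree usually looks the same either way; the setting changes which processes execute the subplan (visible in per-worker row counts under EXPLAIN ANALYZE) and, via the costsize.c change below, the estimated cost of parallel paths.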

src/backend/executor/nodeGather.c
Lines changed: 5 additions & 3 deletions

@@ -38,6 +38,7 @@
 #include "executor/nodeSubplan.h"
 #include "executor/tqueue.h"
 #include "miscadmin.h"
+#include "optimizer/planmain.h"
 #include "pgstat.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
@@ -73,7 +74,8 @@ ExecInitGather(Gather *node, EState *estate, int eflags)
 	gatherstate->ps.ExecProcNode = ExecGather;
 
 	gatherstate->initialized = false;
-	gatherstate->need_to_scan_locally = !node->single_copy;
+	gatherstate->need_to_scan_locally =
+		!node->single_copy && parallel_leader_participation;
 	gatherstate->tuples_needed = -1;
 
 	/*
@@ -193,9 +195,9 @@ ExecGather(PlanState *pstate)
 		node->nextreader = 0;
 	}
 
-	/* Run plan locally if no workers or not single-copy. */
+	/* Run plan locally if no workers or enabled and not single-copy. */
 	node->need_to_scan_locally = (node->nreaders == 0)
-		|| !gather->single_copy;
+		|| (!gather->single_copy && parallel_leader_participation);
 	node->initialized = true;
 }

src/backend/executor/nodeGatherMerge.c
Lines changed: 4 additions & 2 deletions

@@ -23,6 +23,7 @@
 #include "executor/tqueue.h"
 #include "lib/binaryheap.h"
 #include "miscadmin.h"
+#include "optimizer/planmain.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -233,8 +234,9 @@ ExecGatherMerge(PlanState *pstate)
 		}
 	}
 
-	/* always allow leader to participate */
-	node->need_to_scan_locally = true;
+	/* allow leader to participate if enabled or no choice */
+	if (parallel_leader_participation || node->nreaders == 0)
+		node->need_to_scan_locally = true;
 	node->initialized = true;
 }
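Taken together, the two executor changes implement a single rule for when the leader runs the subplan (a summary of the code above, not text from the commit):

Gather:        nreaders == 0  OR  (parallel_leader_participation AND NOT single_copy)
Gather Merge:  nreaders == 0  OR  parallel_leader_participation

Gather Merge has no single-copy mode, which is why its test is the simpler of the two.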

src/backend/optimizer/path/costsize.c
Lines changed: 8 additions & 4 deletions

@@ -5137,7 +5137,6 @@ static double
 get_parallel_divisor(Path *path)
 {
 	double		parallel_divisor = path->parallel_workers;
-	double		leader_contribution;
 
 	/*
 	 * Early experience with parallel query suggests that when there is only
@@ -5150,9 +5149,14 @@ get_parallel_divisor(Path *path)
 	 * its time servicing each worker, and the remainder executing the
 	 * parallel plan.
 	 */
-	leader_contribution = 1.0 - (0.3 * path->parallel_workers);
-	if (leader_contribution > 0)
-		parallel_divisor += leader_contribution;
+	if (parallel_leader_participation)
+	{
+		double		leader_contribution;
+
+		leader_contribution = 1.0 - (0.3 * path->parallel_workers);
+		if (leader_contribution > 0)
+			parallel_divisor += leader_contribution;
+	}
 
 	return parallel_divisor;
 }
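Worked numbers for get_parallel_divisor, derived from the code above (illustrative, not from the commit):

parallel_workers = 2, GUC on:   leader_contribution = 1.0 - 0.3 * 2 = 0.4   ->  divisor = 2.4
parallel_workers = 2, GUC off:  no leader term                              ->  divisor = 2.0
parallel_workers = 4, GUC on:   1.0 - 0.3 * 4 = -0.2, not added             ->  divisor = 4.0

Costs and row counts for a parallel path are divided by this value, so turning leader participation off shrinks the divisor and makes the same parallel path look more expensive, reflecting that the leader no longer does a share of the work.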

src/backend/optimizer/plan/planner.c
Lines changed: 1 addition & 0 deletions

@@ -61,6 +61,7 @@
 /* GUC parameters */
 double		cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
 int			force_parallel_mode = FORCE_PARALLEL_OFF;
+bool		parallel_leader_participation = true;
 
 /* Hook for plugins to get control in planner() */
 planner_hook_type planner_hook = NULL;

src/backend/utils/misc/guc.c
Lines changed: 10 additions & 0 deletions

@@ -1676,6 +1676,16 @@ static struct config_bool ConfigureNamesBool[] =
 		NULL, NULL, NULL
 	},
 
+	{
+		{"parallel_leader_participation", PGC_USERSET, RESOURCES_ASYNCHRONOUS,
+			gettext_noop("Controls whether Gather and Gather Merge also run subplans."),
+			gettext_noop("Should gather nodes also run subplans, or just gather tuples?")
+		},
+		&parallel_leader_participation,
+		true,
+		NULL, NULL, NULL
+	},
+
 	/* End-of-list marker */
 	{
 		{NULL, 0, 0, NULL, NULL}, NULL, false, NULL, NULL, NULL
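Because the entry is registered PGC_USERSET, any user can change the setting per session or per transaction without special privileges; a quick sketch:

SET parallel_leader_participation = off;        -- lasts for the session
BEGIN;
SET LOCAL parallel_leader_participation = on;   -- this transaction only
COMMIT;
SHOW parallel_leader_participation;             -- off again after commit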

src/backend/utils/misc/postgresql.conf.sample
Lines changed: 1 addition & 0 deletions

@@ -163,6 +163,7 @@
 #effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
 #max_worker_processes = 8		# (change requires restart)
 #max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
+#parallel_leader_participation = on
 #max_parallel_workers = 8		# maximum number of max_worker_processes that
 					# can be used in parallel queries
 #old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate

src/include/optimizer/planmain.h
Lines changed: 1 addition & 0 deletions

@@ -29,6 +29,7 @@ typedef enum
 #define DEFAULT_CURSOR_TUPLE_FRACTION 0.1
 extern double cursor_tuple_fraction;
 extern int	force_parallel_mode;
+extern bool parallel_leader_participation;
 
 /* query_planner callback to compute query_pathkeys */
 typedef void (*query_pathkeys_callback) (PlannerInfo *root, void *extra);

src/test/regress/expected/select_parallel.out
Lines changed: 113 additions & 0 deletions

@@ -34,6 +34,49 @@ select count(*) from a_star;
     50
 (1 row)
 
+-- test with leader participation disabled
+set parallel_leader_participation = off;
+explain (costs off)
+  select count(*) from tenk1 where stringu1 = 'GRAAAA';
+                       QUERY PLAN                        
+---------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Seq Scan on tenk1
+                     Filter: (stringu1 = 'GRAAAA'::name)
+(6 rows)
+
+select count(*) from tenk1 where stringu1 = 'GRAAAA';
+ count 
+-------
+    15
+(1 row)
+
+-- test with leader participation disabled, but no workers available (so
+-- the leader will have to run the plan despite the setting)
+set max_parallel_workers = 0;
+explain (costs off)
+  select count(*) from tenk1 where stringu1 = 'GRAAAA';
+                       QUERY PLAN                        
+---------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Seq Scan on tenk1
+                     Filter: (stringu1 = 'GRAAAA'::name)
+(6 rows)
+
+select count(*) from tenk1 where stringu1 = 'GRAAAA';
+ count 
+-------
+    15
+(1 row)
+
+reset max_parallel_workers;
+reset parallel_leader_participation;
 -- test that parallel_restricted function doesn't run in worker
 alter table tenk1 set (parallel_workers = 4);
 explain (verbose, costs off)
@@ -400,6 +443,49 @@ explain (costs off, verbose)
 (11 rows)
 
 drop function simple_func(integer);
+-- test gather merge with parallel leader participation disabled
+set parallel_leader_participation = off;
+explain (costs off)
+   select count(*) from tenk1 group by twenty;
+                     QUERY PLAN                     
+----------------------------------------------------
+ Finalize GroupAggregate
+   Group Key: twenty
+   ->  Gather Merge
+         Workers Planned: 4
+         ->  Partial GroupAggregate
+               Group Key: twenty
+               ->  Sort
+                     Sort Key: twenty
+                     ->  Parallel Seq Scan on tenk1
+(9 rows)
+
+select count(*) from tenk1 group by twenty;
+ count 
+-------
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+   500
+(20 rows)
+
+reset parallel_leader_participation;
 --test rescan behavior of gather merge
 set enable_material = false;
 explain (costs off)
@@ -508,6 +594,33 @@ select string4 from tenk1 order by string4 limit 5;
 AAAAxx
 (5 rows)
 
+-- gather merge test with 0 workers, with parallel leader
+-- participation disabled (the leader will have to run the plan
+-- despite the setting)
+set parallel_leader_participation = off;
+explain (costs off)
+  select string4 from tenk1 order by string4 limit 5;
+                  QUERY PLAN                  
+----------------------------------------------
+ Limit
+   ->  Gather Merge
+         Workers Planned: 4
+         ->  Sort
+               Sort Key: string4
+               ->  Parallel Seq Scan on tenk1
+(6 rows)
+
+select string4 from tenk1 order by string4 limit 5;
+ string4 
+---------
+ AAAAxx
+ AAAAxx
+ AAAAxx
+ AAAAxx
+ AAAAxx
+(5 rows)
+
+reset parallel_leader_participation;
 reset max_parallel_workers;
 SAVEPOINT settings;
 SET LOCAL force_parallel_mode = 1;
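A subtlety the zero-worker tests depend on: "Workers Planned" is fixed at plan time (here pinned by tenk1's parallel_workers reloption), while max_parallel_workers caps what can actually be launched at execution time. A sketch for observing the fallback directly (not part of the commit; exact output may vary):

set parallel_leader_participation = off;
set max_parallel_workers = 0;
explain (analyze, costs off, timing off)
  select count(*) from tenk1 where stringu1 = 'GRAAAA';
-- expect "Workers Planned: 4" but "Workers Launched: 0": the leader ran the scan
reset max_parallel_workers;
reset parallel_leader_participation;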

src/test/regress/sql/select_parallel.sql
Lines changed: 36 additions & 0 deletions

@@ -19,6 +19,22 @@ explain (costs off)
   select count(*) from a_star;
 select count(*) from a_star;
 
+-- test with leader participation disabled
+set parallel_leader_participation = off;
+explain (costs off)
+  select count(*) from tenk1 where stringu1 = 'GRAAAA';
+select count(*) from tenk1 where stringu1 = 'GRAAAA';
+
+-- test with leader participation disabled, but no workers available (so
+-- the leader will have to run the plan despite the setting)
+set max_parallel_workers = 0;
+explain (costs off)
+  select count(*) from tenk1 where stringu1 = 'GRAAAA';
+select count(*) from tenk1 where stringu1 = 'GRAAAA';
+
+reset max_parallel_workers;
+reset parallel_leader_participation;
+
 -- test that parallel_restricted function doesn't run in worker
 alter table tenk1 set (parallel_workers = 4);
 explain (verbose, costs off)
@@ -157,6 +173,16 @@ explain (costs off, verbose)
 
 drop function simple_func(integer);
 
+-- test gather merge with parallel leader participation disabled
+set parallel_leader_participation = off;
+
+explain (costs off)
+   select count(*) from tenk1 group by twenty;
+
+select count(*) from tenk1 group by twenty;
+
+reset parallel_leader_participation;
+
 --test rescan behavior of gather merge
 set enable_material = false;
 
@@ -192,6 +218,16 @@ set max_parallel_workers = 0;
 explain (costs off)
   select string4 from tenk1 order by string4 limit 5;
 select string4 from tenk1 order by string4 limit 5;
+
+-- gather merge test with 0 workers, with parallel leader
+-- participation disabled (the leader will have to run the plan
+-- despite the setting)
+set parallel_leader_participation = off;
+explain (costs off)
+  select string4 from tenk1 order by string4 limit 5;
+select string4 from tenk1 order by string4 limit 5;
+
+reset parallel_leader_participation;
 reset max_parallel_workers;
 
 SAVEPOINT settings;
