Commit b0b0d84

Allow a parallel context to relaunch workers.
This may allow some callers to avoid the overhead involved in tearing down a parallel context and then setting up a new one, which means releasing the DSM and then allocating and populating a new one. I suspect we'll want to revise the Gather node to make use of this new capability, but even if not it may be useful elsewhere and requires very little additional code.
1 parent afdfcd3 · commit b0b0d84

File tree

3 files changed: +55 -0 lines changed

src/backend/access/transam/README.parallel

Lines changed: 5 additions & 0 deletions

@@ -221,3 +221,8 @@ pattern looks like this:
 	DestroyParallelContext(pcxt);
 
 	ExitParallelMode();
+
+If desired, after WaitForParallelWorkersToFinish() has been called, another
+call to LaunchParallelWorkers() can be made using the same parallel context.
+Calls to these two functions can be alternated any number of times before
+destroying the parallel context.
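
To make the documented pattern concrete, here is a minimal caller-side sketch of the relaunch loop this commit enables. It is not part of the commit: the function name, npasses, and the setup/teardown calls outside the loop (EnterParallelMode(), CreateParallelContext(), InitializeParallelDSM(), ExitParallelMode()) are assumptions drawn from the surrounding parallel API rather than from this diff.

#include "postgres.h"
#include "access/parallel.h"
#include "access/xact.h"

/*
 * Hypothetical caller: run the same entrypoint in several passes while
 * reusing one parallel context, so the DSM segment is set up and torn
 * down only once.
 */
static void
run_in_passes(parallel_worker_main_type entrypoint, int nworkers, int npasses)
{
	ParallelContext *pcxt;
	int			pass;

	EnterParallelMode();
	pcxt = CreateParallelContext(entrypoint, nworkers);
	InitializeParallelDSM(pcxt);	/* DSM allocated and populated once */

	for (pass = 0; pass < npasses; pass++)
	{
		/* With this commit, LaunchParallelWorkers() may be called again... */
		LaunchParallelWorkers(pcxt);

		/* ...once the previous batch of workers has finished and exited. */
		WaitForParallelWorkersToFinish(pcxt);
	}

	DestroyParallelContext(pcxt);	/* DSM released only once, here */
	ExitParallelMode();
}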

src/backend/access/transam/parallel.c

Lines changed: 49 additions & 0 deletions

@@ -404,6 +404,52 @@ LaunchParallelWorkers(ParallelContext *pcxt)
 	/* We might be running in a short-lived memory context. */
 	oldcontext = MemoryContextSwitchTo(TopTransactionContext);
 
+	/*
+	 * This function can be called for a parallel context for which it has
+	 * already been called previously, but only if all of the old workers
+	 * have already exited.  When this case arises, we need to do some extra
+	 * reinitialization.
+	 */
+	if (pcxt->nworkers_launched > 0)
+	{
+		FixedParallelState *fps;
+		char	   *error_queue_space;
+
+		/* Clean out old worker handles. */
+		for (i = 0; i < pcxt->nworkers; ++i)
+		{
+			if (pcxt->worker[i].error_mqh != NULL)
+				elog(ERROR, "previously launched worker still alive");
+			if (pcxt->worker[i].bgwhandle != NULL)
+			{
+				pfree(pcxt->worker[i].bgwhandle);
+				pcxt->worker[i].bgwhandle = NULL;
+			}
+		}
+
+		/* Reset a few bits of fixed parallel state to a clean state. */
+		fps = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_FIXED);
+		fps->workers_attached = 0;
+		fps->last_xlog_end = 0;
+
+		/* Recreate error queues. */
+		error_queue_space =
+			shm_toc_lookup(pcxt->toc, PARALLEL_KEY_ERROR_QUEUE);
+		for (i = 0; i < pcxt->nworkers; ++i)
+		{
+			char	   *start;
+			shm_mq	   *mq;
+
+			start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
+			mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
+			shm_mq_set_receiver(mq, MyProc);
+			pcxt->worker[i].error_mqh = shm_mq_attach(mq, pcxt->seg, NULL);
+		}
+
+		/* Reset number of workers launched. */
+		pcxt->nworkers_launched = 0;
+	}
+
 	/* Configure a worker. */
 	snprintf(worker.bgw_name, BGW_MAXLEN, "parallel worker for PID %d",
 			 MyProcPid);
@@ -428,8 +474,11 @@ LaunchParallelWorkers(ParallelContext *pcxt)
 		if (!any_registrations_failed &&
 			RegisterDynamicBackgroundWorker(&worker,
 											&pcxt->worker[i].bgwhandle))
+		{
 			shm_mq_set_handle(pcxt->worker[i].error_mqh,
 							  pcxt->worker[i].bgwhandle);
+			pcxt->nworkers_launched++;
+		}
 		else
 		{
 			/*
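
The reinitialization block above treats a non-NULL error_mqh as a sign that a previously launched worker may still be running, and raises an error rather than relaunch over it; once a worker has shut down and its queue handle has been cleared, relaunching is allowed. Purely as an illustration of that invariant (not part of the commit; the helper name is invented), a caller-side check mirroring the sanity test might look like this:

#include "postgres.h"
#include "access/parallel.h"

/*
 * Hypothetical helper: returns true when every error-queue handle in the
 * context has been cleared, i.e. when the check in the reinitialization
 * block above would pass and LaunchParallelWorkers() may be called again.
 */
static bool
AllWorkersExited(ParallelContext *pcxt)
{
	int			i;

	for (i = 0; i < pcxt->nworkers; ++i)
	{
		if (pcxt->worker[i].error_mqh != NULL)
			return false;		/* this worker has not yet shut down */
	}

	return true;
}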

src/include/access/parallel.h

Lines changed: 1 addition & 0 deletions

@@ -35,6 +35,7 @@ typedef struct ParallelContext
 	dlist_node	node;
 	SubTransactionId subid;
 	int			nworkers;
+	int			nworkers_launched;
 	parallel_worker_main_type entrypoint;
 	char	   *library_name;
 	char	   *function_name;
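
Since nworkers_launched lives in the public ParallelContext struct and is now kept in step with successful registrations, a caller can also inspect it after LaunchParallelWorkers() returns. As a hypothetical usage (not in this commit; do_work_locally() and WorkState are invented placeholders), the leader might fall back to doing the work itself when no workers could be registered:

#include "postgres.h"
#include "access/parallel.h"

typedef struct WorkState WorkState;				/* placeholder for caller state */
extern void do_work_locally(WorkState *state);	/* hypothetical serial fallback */

/*
 * Hypothetical caller: launch workers, but run the work in the leader if
 * none of the requested workers could actually be registered.
 */
static void
launch_or_fallback(ParallelContext *pcxt, WorkState *state)
{
	LaunchParallelWorkers(pcxt);

	if (pcxt->nworkers_launched == 0)
		do_work_locally(state);		/* nothing was launched */
	else
		WaitForParallelWorkersToFinish(pcxt);
}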
