diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 8eea092..1b707d4 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -516,7 +516,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
          */
         CHECK_FOR_INTERRUPTS();

-        for (i = 0; i < pcxt->nworkers; ++i)
+        for (i = 0; i < pcxt->nworkers_launched; ++i)
         {
             if (pcxt->worker[i].error_mqh != NULL)
             {
@@ -556,7 +556,7 @@ WaitForParallelWorkersToExit(ParallelContext *pcxt)
     int         i;

     /* Wait until the workers actually die. */
-    for (i = 0; i < pcxt->nworkers; ++i)
+    for (i = 0; i < pcxt->nworkers_launched; ++i)
     {
         BgwHandleStatus status;

@@ -606,7 +606,7 @@ DestroyParallelContext(ParallelContext *pcxt)
     /* Kill each worker in turn, and forget their error queues. */
     if (pcxt->worker != NULL)
     {
-        for (i = 0; i < pcxt->nworkers; ++i)
+        for (i = 0; i < pcxt->nworkers_launched; ++i)
         {
             if (pcxt->worker[i].error_mqh != NULL)
             {
@@ -704,7 +704,7 @@ HandleParallelMessages(void)
         if (pcxt->worker == NULL)
             continue;

-        for (i = 0; i < pcxt->nworkers; ++i)
+        for (i = 0; i < pcxt->nworkers_launched; ++i)
         {
             /*
              * Read as many messages as we can from each worker, but stop when
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 4658e59..6a3aa17 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -492,7 +492,7 @@ ExecParallelFinish(ParallelExecutorInfo *pei)
     WaitForParallelWorkersToFinish(pei->pcxt);

     /* Next, accumulate buffer usage. */
-    for (i = 0; i < pei->pcxt->nworkers; ++i)
+    for (i = 0; i < pei->pcxt->nworkers_launched; ++i)
         InstrAccumParallelQuery(&pei->buffer_usage[i]);

     /* Finally, accumulate instrumentation, if any. */
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 046f156..dcbfda5 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -153,7 +153,6 @@ ExecGather(GatherState *node)
     if (gather->num_workers > 0 && IsInParallelMode())
     {
         ParallelContext *pcxt;
-        bool        got_any_worker = false;

         /* Initialize the workers required to execute Gather node. */
         if (!node->pei)
@@ -169,29 +168,26 @@ ExecGather(GatherState *node)
         LaunchParallelWorkers(pcxt);

         /* Set up tuple queue readers to read the results. */
-        if (pcxt->nworkers > 0)
+        if (pcxt->nworkers_launched > 0)
         {
             node->nreaders = 0;
             node->reader =
-                palloc(pcxt->nworkers * sizeof(TupleQueueReader *));
+                palloc(pcxt->nworkers_launched * sizeof(TupleQueueReader *));

-            for (i = 0; i < pcxt->nworkers; ++i)
+            for (i = 0; i < pcxt->nworkers_launched; ++i)
             {
-                if (pcxt->worker[i].bgwhandle == NULL)
-                    continue;
-
                 shm_mq_set_handle(node->pei->tqueue[i],
                                   pcxt->worker[i].bgwhandle);
                 node->reader[node->nreaders++] =
                     CreateTupleQueueReader(node->pei->tqueue[i],
                                            fslot->tts_tupleDescriptor);
-                got_any_worker = true;
             }
         }
-
-        /* No workers? Then never mind. */
-        if (!got_any_worker)
+        else
+        {
+            /* No workers? Then never mind. */
             ExecShutdownGatherWorkers(node);
+        }
     }

     /* Run plan locally if no workers or not single-copy. */
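
For reference, below is a minimal, self-contained sketch of the invariant this patch enforces (illustrative stand-ins only, not PostgreSQL source: WorkerInfo, drain_error_queues, and the mocked fields are simplified). Per-worker state exists only in the first nworkers_launched slots of the worker array, so every loop over that array must be bounded by nworkers_launched rather than by the requested nworkers.

/*
 * Mock-up, not PostgreSQL code: pcxt->worker has nworkers slots, but only
 * the first nworkers_launched of them were ever initialized by a launch.
 */
#include <stdio.h>

typedef struct WorkerInfo
{
    void       *error_mqh;      /* stand-in for a per-worker error queue */
} WorkerInfo;

typedef struct ParallelContext
{
    int         nworkers;           /* workers requested */
    int         nworkers_launched;  /* workers actually started */
    WorkerInfo *worker;             /* array with nworkers slots */
} ParallelContext;

static void
drain_error_queues(ParallelContext *pcxt)
{
    int         i;

    /* Bound by nworkers_launched: later slots hold no live state. */
    for (i = 0; i < pcxt->nworkers_launched; ++i)
    {
        if (pcxt->worker[i].error_mqh != NULL)
            printf("worker %d has a live error queue\n", i);
    }
}

int
main(void)
{
    int         dummy_queue;
    WorkerInfo  workers[4] = {{NULL}, {NULL}, {NULL}, {NULL}};
    ParallelContext pcxt;

    /* Four workers requested, only two launched. */
    pcxt.nworkers = 4;
    pcxt.nworkers_launched = 2;
    pcxt.worker = workers;
    workers[0].error_mqh = &dummy_queue;
    workers[1].error_mqh = &dummy_queue;

    drain_error_queues(&pcxt);  /* visits only slots 0 and 1 */
    return 0;
}

The same tightening is what lets the nodeGather.c hunk drop its bgwhandle == NULL guard and the got_any_worker flag: once the loop runs only up to nworkers_launched, every visited index is known to hold a launched worker, and the "no workers" case is simply nworkers_launched == 0.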