3 files changed: +12 −16 lines
src/backend/access/transam/parallel.c
@@ -520,7 +520,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
 	 */
 	CHECK_FOR_INTERRUPTS();

-	for (i = 0; i < pcxt->nworkers; ++i)
+	for (i = 0; i < pcxt->nworkers_launched; ++i)
 	{
 		if (pcxt->worker[i].error_mqh != NULL)
 		{
@@ -560,7 +560,7 @@ WaitForParallelWorkersToExit(ParallelContext *pcxt)
 	int			i;

 	/* Wait until the workers actually die. */
-	for (i = 0; i < pcxt->nworkers; ++i)
+	for (i = 0; i < pcxt->nworkers_launched; ++i)
 	{
 		BgwHandleStatus status;

@@ -610,7 +610,7 @@ DestroyParallelContext(ParallelContext *pcxt)
 	/* Kill each worker in turn, and forget their error queues. */
 	if (pcxt->worker != NULL)
 	{
-		for (i = 0; i < pcxt->nworkers; ++i)
+		for (i = 0; i < pcxt->nworkers_launched; ++i)
 		{
 			if (pcxt->worker[i].error_mqh != NULL)
 			{
@@ -708,7 +708,7 @@ HandleParallelMessages(void)
 		if (pcxt->worker == NULL)
 			continue;

-		for (i = 0; i < pcxt->nworkers; ++i)
+		for (i = 0; i < pcxt->nworkers_launched; ++i)
 		{
 			/*
 			 * Read as many messages as we can from each worker, but stop when
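
All four hunks above fix the same pattern: pcxt->nworkers is the number of workers requested when the ParallelContext is created, while nworkers_launched is filled in by LaunchParallelWorkers with the number that actually started (background-worker registration can fail, so nworkers_launched <= nworkers). Looping to nworkers therefore walks worker-array slots that were never launched. The toy program below is a minimal sketch of that invariant; ToyContext, ToyWorker, and the handle field are invented for illustration, not PostgreSQL code.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical, simplified stand-ins for ParallelContext and its worker
 * array; only the two counters and a per-slot handle matter here.
 */
typedef struct ToyWorker
{
	int		handle;				/* 0 means "slot never launched" */
} ToyWorker;

typedef struct ToyContext
{
	int			nworkers;			/* workers requested at creation */
	int			nworkers_launched;	/* workers that actually started */
	ToyWorker  *worker;				/* array of nworkers slots */
} ToyContext;

int
main(void)
{
	ToyContext	cxt;
	int			i;

	/* Ask for 4 workers, but suppose only 2 could be launched. */
	cxt.nworkers = 4;
	cxt.nworkers_launched = 2;
	cxt.worker = calloc(cxt.nworkers, sizeof(ToyWorker));
	for (i = 0; i < cxt.nworkers_launched; ++i)
		cxt.worker[i].handle = i + 1;

	/*
	 * Looping to nworkers would visit slots 2 and 3, which no worker ever
	 * initialized; looping to nworkers_launched, as the patch does,
	 * touches only live workers.
	 */
	for (i = 0; i < cxt.nworkers_launched; ++i)
		printf("worker %d: handle %d\n", i, cxt.worker[i].handle);

	free(cxt.worker);
	return 0;
}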
src/backend/executor/execParallel.c
@@ -522,7 +522,7 @@ ExecParallelFinish(ParallelExecutorInfo *pei)
 	WaitForParallelWorkersToFinish(pei->pcxt);

 	/* Next, accumulate buffer usage. */
-	for (i = 0; i < pei->pcxt->nworkers; ++i)
+	for (i = 0; i < pei->pcxt->nworkers_launched; ++i)
 		InstrAccumParallelQuery(&pei->buffer_usage[i]);

 	/* Finally, accumulate instrumentation, if any. */
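
The same loop-bound fix here protects statistics rather than cleanup: each launched worker reports its counters through one slot of pei->buffer_usage, and slots past nworkers_launched were never written, so summing nworkers of them could fold arbitrary bytes into the query totals. A hedged sketch of the accumulation, with ToyBufferUsage and toy_accum as invented stand-ins for BufferUsage and InstrAccumParallelQuery:

#include <stdio.h>

/* Invented stand-in for BufferUsage, reduced to a single counter. */
typedef struct ToyBufferUsage
{
	long	shared_blks_read;
} ToyBufferUsage;

/* Running total kept by the leader. */
static long total_shared_blks_read = 0;

/* Plays the role of InstrAccumParallelQuery: fold one worker's
 * counters into the leader's total. */
static void
toy_accum(const ToyBufferUsage *u)
{
	total_shared_blks_read += u->shared_blks_read;
}

int
main(void)
{
	/*
	 * Four slots were allocated, but only the first two workers launched
	 * and wrote their counters; the rest hold whatever bytes were there
	 * before (modeled as 999 here).
	 */
	ToyBufferUsage buffer_usage[4] = {{10}, {7}, {999}, {999}};
	int			nworkers_launched = 2;
	int			i;

	for (i = 0; i < nworkers_launched; ++i)
		toy_accum(&buffer_usage[i]);

	/* Prints 17; bounding the loop by nworkers (4) would print 2015. */
	printf("workers' shared blocks read: %ld\n", total_shared_blks_read);
	return 0;
}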
src/backend/executor/nodeGather.c
@@ -153,7 +153,6 @@ ExecGather(GatherState *node)
 	if (gather->num_workers > 0 && IsInParallelMode())
 	{
 		ParallelContext *pcxt;
-		bool		got_any_worker = false;

 		/* Initialize the workers required to execute Gather node. */
 		if (!node->pei)
@@ -169,29 +168,26 @@
 		LaunchParallelWorkers(pcxt);

 		/* Set up tuple queue readers to read the results. */
-		if (pcxt->nworkers > 0)
+		if (pcxt->nworkers_launched > 0)
 		{
 			node->nreaders = 0;
 			node->reader =
-				palloc(pcxt->nworkers * sizeof(TupleQueueReader *));
+				palloc(pcxt->nworkers_launched * sizeof(TupleQueueReader *));

-			for (i = 0; i < pcxt->nworkers; ++i)
+			for (i = 0; i < pcxt->nworkers_launched; ++i)
 			{
-				if (pcxt->worker[i].bgwhandle == NULL)
-					continue;
-
 				shm_mq_set_handle(node->pei->tqueue[i],
 								  pcxt->worker[i].bgwhandle);
 				node->reader[node->nreaders++] =
 					CreateTupleQueueReader(node->pei->tqueue[i],
 										   fslot->tts_tupleDescriptor);
-				got_any_worker = true;
 			}
 		}
-
-		/* No workers? Then never mind. */
-		if (!got_any_worker)
+		else
+		{
+			/* No workers? Then never mind. */
 			ExecShutdownGatherWorkers(node);
+		}
 	}

 	/* Run plan locally if no workers or not single-copy. */
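
A side effect of the tighter loop bound is visible in this last hunk: every index below nworkers_launched is guaranteed to have a live bgwhandle, so the per-slot NULL check and the got_any_worker flag both disappear, and "did any worker start?" reduces to the nworkers_launched > 0 test. A condensed, hypothetical sketch of the resulting control flow (ToyGatherState and toy_shutdown_workers are invented; tuple queues and slots are elided):

#include <stdio.h>
#include <stdlib.h>

/* Invented minimal model of the Gather setup path after the patch. */
typedef struct ToyGatherState
{
	int		nreaders;
	int	   *reader;			/* stands in for the TupleQueueReader array */
} ToyGatherState;

/* Plays the role of ExecShutdownGatherWorkers. */
static void
toy_shutdown_workers(ToyGatherState *node)
{
	(void) node;
	printf("no workers launched; tearing down and running the plan locally\n");
}

int
main(void)
{
	ToyGatherState node = {0, NULL};
	int			nworkers_launched = 3;	/* set to 0 to exercise the else arm */
	int			i;

	if (nworkers_launched > 0)
	{
		/*
		 * Every slot up to nworkers_launched has a live worker, so each
		 * iteration installs a reader; no skip-and-count flag is needed.
		 */
		node.reader = malloc(nworkers_launched * sizeof(int));
		for (i = 0; i < nworkers_launched; ++i)
			node.reader[node.nreaders++] = i;
	}
	else
		toy_shutdown_workers(&node);

	printf("nreaders = %d\n", node.nreaders);
	free(node.reader);
	return 0;
}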