 
 #include "pg_compat.h"
 
+#include "catalog/pg_proc.h"
+#include "foreign/fdwapi.h"
+#include "optimizer/clauses.h"
 #include "optimizer/pathnode.h"
 #include "port.h"
 #include "utils.h"
+#include "utils/lsyscache.h"
 
 #include <math.h>
 
@@ -111,4 +115,213 @@ make_result(List *tlist,
 
 	return node;
 }
+
+/*
+ * If this relation could possibly be scanned from within a worker, then set
+ * its consider_parallel flag.
+ */
+void
+set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
+						  RangeTblEntry *rte)
+{
+	/*
+	 * The flag has previously been initialized to false, so we can just
+	 * return if it becomes clear that we can't safely set it.
+	 */
+	Assert(!rel->consider_parallel);
+
+	/* Don't call this if parallelism is disallowed for the entire query. */
+	Assert(root->glob->parallelModeOK);
+
+	/* This should only be called for baserels and appendrel children. */
+	Assert(rel->reloptkind == RELOPT_BASEREL ||
+		   rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
+
+	/* Assorted checks based on rtekind. */
+	switch (rte->rtekind)
+	{
+		case RTE_RELATION:
+
+			/*
+			 * Currently, parallel workers can't access the leader's temporary
+			 * tables.  We could possibly relax this if the leader wrote all
+			 * of its local buffers at the start of the query and made no
+			 * changes thereafter (maybe we could allow hint bit changes), and
+			 * if we taught the workers to read them.  Writing a large number
+			 * of temporary buffers could be expensive, though, and we don't
+			 * have the rest of the necessary infrastructure right now anyway.
+			 * So for now, bail out if we see a temporary table.
+			 */
+			if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
+				return;
+
+			/*
+			 * Table sampling can be pushed down to workers if the sample
+			 * function and its arguments are safe.
+			 */
+			if (rte->tablesample != NULL)
+			{
+				Oid			proparallel = func_parallel(rte->tablesample->tsmhandler);
+
+				if (proparallel != PROPARALLEL_SAFE)
+					return;
+				if (has_parallel_hazard((Node *) rte->tablesample->args,
+										false))
+					return;
+			}
+
+			/*
+			 * Ask FDWs whether they can support performing a ForeignScan
+			 * within a worker.  Most often, the answer will be no.  For
+			 * example, if the nature of the FDW is such that it opens a TCP
+			 * connection with a remote server, each parallel worker would end
+			 * up with a separate connection, and these connections might not
+			 * be appropriately coordinated between workers and the leader.
+			 */
+			if (rte->relkind == RELKIND_FOREIGN_TABLE)
+			{
+				Assert(rel->fdwroutine);
+				if (!rel->fdwroutine->IsForeignScanParallelSafe)
+					return;
+				if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
+					return;
+			}
+
+			/*
+			 * There are additional considerations for appendrels, which we'll
+			 * deal with in set_append_rel_size and set_append_rel_pathlist.
+			 * For now, just set consider_parallel based on the rel's own
+			 * quals and targetlist.
+			 */
+			break;
+
+		case RTE_SUBQUERY:
+
+			/*
+			 * There's no intrinsic problem with scanning a subquery-in-FROM
+			 * (as distinct from a SubPlan or InitPlan) in a parallel worker.
+			 * If the subquery doesn't happen to have any parallel-safe paths,
+			 * then flagging it as consider_parallel won't change anything,
+			 * but that's true for plain tables, too.  We must set
+			 * consider_parallel based on the rel's own quals and targetlist,
+			 * so that if a subquery path is parallel-safe but the quals and
+			 * projection we're sticking onto it are not, we correctly mark
+			 * the SubqueryScanPath as not parallel-safe.  (Note that
+			 * set_subquery_pathlist() might push some of these quals down
+			 * into the subquery itself, but that doesn't change anything.)
+			 */
+			break;
+
+		case RTE_JOIN:
+			/* Shouldn't happen; we're only considering baserels here. */
+			Assert(false);
+			return;
+
+		case RTE_FUNCTION:
+			/* Check for parallel-restricted functions. */
+			if (has_parallel_hazard((Node *) rte->functions, false))
+				return;
+			break;
+
+		case RTE_VALUES:
+			/* Check for parallel-restricted functions. */
+			if (has_parallel_hazard((Node *) rte->values_lists, false))
+				return;
+			break;
+
+		case RTE_CTE:
+
+			/*
+			 * CTE tuplestores aren't shared among parallel workers, so we
+			 * force all CTE scans to happen in the leader.  Also, populating
+			 * the CTE would require executing a subplan that's not available
+			 * in the worker, might be parallel-restricted, and must get
+			 * executed only once.
+			 */
+			return;
+	}
+
+	/*
+	 * If there's anything in baserestrictinfo that's parallel-restricted, we
+	 * give up on parallelizing access to this relation.  We could consider
+	 * instead postponing application of the restricted quals until we're
+	 * above all the parallelism in the plan tree, but it's not clear that
+	 * that would be a win in very many cases, and it might be tricky to make
+	 * outer join clauses work correctly.  It would likely break equivalence
+	 * classes, too.
+	 */
+	if (has_parallel_hazard((Node *) rel->baserestrictinfo, false))
+		return;
+
+	/*
+	 * Likewise, if the relation's outputs are not parallel-safe, give up.
+	 * (Usually, they're just Vars, but sometimes they're not.)
+	 */
+	if (has_parallel_hazard((Node *) rel->reltarget->exprs, false))
+		return;
+
+	/* We have a winner. */
+	rel->consider_parallel = true;
+}
+
+/*
+ * create_plain_partial_paths
+ *	  Build partial access paths for parallel scan of a plain relation
+ */
+void
+create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+	int			parallel_workers;
+
+	/*
+	 * If the user has set the parallel_workers reloption, use that; otherwise
+	 * select a default number of workers.
+	 */
+	if (rel->rel_parallel_workers != -1)
+		parallel_workers = rel->rel_parallel_workers;
+	else
+	{
+		int			parallel_threshold;
+
+		/*
+		 * If this relation is too small to be worth a parallel scan, just
+		 * return without doing anything ... unless it's an inheritance child.
+		 * In that case, we want to generate a parallel path here anyway.  It
+		 * might not be worthwhile just for this relation, but when combined
+		 * with all of its inheritance siblings it may well pay off.
+		 */
+		if (rel->pages < (BlockNumber) min_parallel_relation_size &&
+			rel->reloptkind == RELOPT_BASEREL)
+			return;
+
+		/*
+		 * Select the number of workers based on the log of the size of the
+		 * relation.  This probably needs to be a good deal more
+		 * sophisticated, but we need something here for now.  Note that the
+		 * upper limit of the min_parallel_relation_size GUC is chosen to
+		 * prevent overflow here.
+		 */
+		parallel_workers = 1;
+		parallel_threshold = Max(min_parallel_relation_size, 1);
+		while (rel->pages >= (BlockNumber) (parallel_threshold * 3))
+		{
+			parallel_workers++;
+			parallel_threshold *= 3;
+			if (parallel_threshold > INT_MAX / 3)
+				break;			/* avoid overflow */
+		}
+	}
+
+	/*
+	 * In no case use more than max_parallel_workers_per_gather workers.
+	 */
+	parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather);
+
+	/* If any limit was set to zero, the user doesn't want a parallel scan. */
+	if (parallel_workers <= 0)
+		return;
+
+	/* Add an unordered partial path based on a parallel sequential scan. */
+	add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
+}
 #endif
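
For orientation, here is a minimal sketch of how an extension might drive the two compat helpers added above from PostgreSQL's set_rel_pathlist_hook on a 9.6-era server: first decide whether the relation may be scanned inside a worker, then add a partial sequential-scan path, then let the planner put a Gather on top of it. The hook type, generate_gather_paths(), and the planner fields are real 9.6 APIs, but the function name below and the assumption that pg_compat.h declares both helpers are illustrative, not part of the patch.

#include "postgres.h"

#include "catalog/pg_class.h"
#include "nodes/parsenodes.h"
#include "optimizer/paths.h"

#include "pg_compat.h"

/* Hypothetical hook body; install it via set_rel_pathlist_hook in _PG_init(). */
static void
example_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel,
						  Index rti, RangeTblEntry *rte)
{
	/* Only plain heap relations are considered in this sketch. */
	if (rte->rtekind != RTE_RELATION || rte->relkind != RELKIND_RELATION)
		return;

	/* Check the rel's quals and targetlist for parallel hazards. */
	if (root->glob->parallelModeOK && !rel->consider_parallel)
		set_rel_consider_parallel(root, rel, rte);

	/* If safe, build a partial seq-scan path and gather it in the leader. */
	if (rel->consider_parallel)
	{
		create_plain_partial_paths(root, rel);
		generate_gather_paths(root, rel);	/* two-argument form, pre-PG 11 */
	}
}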
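The worker-count loop in create_plain_partial_paths() grows its threshold by a factor of three per extra worker, so the default number of workers scales with the logarithm of the relation size. As a worked example, with min_parallel_relation_size at its 9.6 default of 1024 pages (8 MB), a plain baserel under 1024 pages gets no partial path at all, 1024-3071 pages yield 1 worker, 3072-9215 yield 2, 9216-27647 yield 3, and so on until the max_parallel_workers_per_gather cap is applied. The standalone program below simply replays that arithmetic outside the server; the function name and the hard-coded default are assumptions made for illustration.

#include <limits.h>
#include <stdio.h>

/* Re-implementation of the heuristic above, for illustration only. */
static int
workers_for(unsigned int pages, int min_parallel_relation_size)
{
	int			parallel_workers = 1;
	int			parallel_threshold = (min_parallel_relation_size > 1) ?
									 min_parallel_relation_size : 1;

	while (pages >= (unsigned int) (parallel_threshold * 3))
	{
		parallel_workers++;
		parallel_threshold *= 3;
		if (parallel_threshold > INT_MAX / 3)
			break;				/* avoid overflow */
	}
	return parallel_workers;
}

int
main(void)
{
	/* Assume the 9.6 default of 8 MB, i.e. 1024 pages of 8 kB. */
	unsigned int sizes[] = {1024, 3072, 9216, 27648, 82944};
	int			i;

	for (i = 0; i < 5; i++)
		printf("%u pages -> %d worker(s)\n",
			   sizes[i], workers_for(sizes[i], 1024));
	return 0;
}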