|
65 | 65 | #include "commands/dbcommands.h" |
66 | 66 | #include "commands/progress.h" |
67 | 67 | #include "commands/vacuum.h" |
| 68 | +#include "executor/instrument.h" |
68 | 69 | #include "miscadmin.h" |
69 | 70 | #include "optimizer/paths.h" |
70 | 71 | #include "pgstat.h" |
|
137 | 138 | #define PARALLEL_VACUUM_KEY_SHARED 1 |
138 | 139 | #define PARALLEL_VACUUM_KEY_DEAD_TUPLES 2 |
139 | 140 | #define PARALLEL_VACUUM_KEY_QUERY_TEXT 3 |
| 141 | +#define PARALLEL_VACUUM_KEY_BUFFER_USAGE 4 |
140 | 142 |
|
141 | 143 | /* |
142 | 144 | * Macro to check if we are in a parallel vacuum. If true, we are in the |
@@ -270,6 +272,9 @@ typedef struct LVParallelState |
270 | 272 | /* Shared information among parallel vacuum workers */ |
271 | 273 | LVShared *lvshared; |
272 | 274 |
|
| 275 | +/* Points to buffer usage area in DSM */ |
| 276 | +BufferUsage *buffer_usage; |
| 277 | + |
273 | 278 | /* |
274 | 279 | * The number of indexes that support parallel index bulk-deletion and |
275 | 280 | * parallel index cleanup respectively. |
@@ -2137,8 +2142,20 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats, |
2137 | 2142 | parallel_vacuum_index(Irel, stats, lps->lvshared, |
2138 | 2143 | vacrelstats->dead_tuples, nindexes, vacrelstats); |
2139 | 2144 |
|
2140 | | -/* Wait for all vacuum workers to finish */ |
2141 | | -WaitForParallelWorkersToFinish(lps->pcxt); |
| 2145 | +/* |
| 2146 | + * Next, accumulate buffer usage. (This must wait for the workers to |
| 2147 | + * finish, or we might get incomplete data.) |
| 2148 | + */ |
| 2149 | +if (nworkers > 0) |
| 2150 | +{ |
| 2151 | +int i; |
| 2152 | + |
| 2153 | +/* Wait for all vacuum workers to finish */ |
| 2154 | +WaitForParallelWorkersToFinish(lps->pcxt); |
| 2155 | + |
| 2156 | +for (i = 0; i < lps->pcxt->nworkers_launched; i++) |
| 2157 | +InstrAccumParallelQuery(&lps->buffer_usage[i]); |
| 2158 | +} |
2142 | 2159 |
|
2143 | 2160 | /* |
2144 | 2161 | * Carry the shared balance value to heap scan and disable shared costing |
@@ -3153,6 +3170,7 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats, |
3153 | 3170 | ParallelContext *pcxt; |
3154 | 3171 | LVShared *shared; |
3155 | 3172 | LVDeadTuples *dead_tuples; |
| 3173 | +BufferUsage *buffer_usage; |
3156 | 3174 | bool *can_parallel_vacuum; |
3157 | 3175 | long maxtuples; |
3158 | 3176 | char *sharedquery; |
@@ -3236,6 +3254,17 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats, |
3236 | 3254 | shm_toc_estimate_chunk(&pcxt->estimator, est_deadtuples); |
3237 | 3255 | shm_toc_estimate_keys(&pcxt->estimator, 1); |
3238 | 3256 |
|
| 3257 | +/* |
| 3258 | + * Estimate space for BufferUsage -- PARALLEL_VACUUM_KEY_BUFFER_USAGE. |
| 3259 | + * |
| 3260 | + * If there are no extensions loaded that care, we could skip this. We |
| 3261 | + * have no way of knowing whether anyone's looking at pgBufferUsage, so do |
| 3262 | + * it unconditionally. |
| 3263 | + */ |
| 3264 | +shm_toc_estimate_chunk(&pcxt->estimator, |
| 3265 | +mul_size(sizeof(BufferUsage), pcxt->nworkers)); |
| 3266 | +shm_toc_estimate_keys(&pcxt->estimator, 1); |
| 3267 | + |
3239 | 3268 | /* Finally, estimate PARALLEL_VACUUM_KEY_QUERY_TEXT space */ |
3240 | 3269 | querylen = strlen(debug_query_string); |
3241 | 3270 | shm_toc_estimate_chunk(&pcxt->estimator, querylen + 1); |
@@ -3270,6 +3299,12 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats, |
3270 | 3299 | shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples); |
3271 | 3300 | vacrelstats->dead_tuples = dead_tuples; |
3272 | 3301 |
|
| 3302 | +/* Allocate space for each worker's BufferUsage; no need to initialize */ |
| 3303 | +buffer_usage = shm_toc_allocate(pcxt->toc, |
| 3304 | +mul_size(sizeof(BufferUsage), pcxt->nworkers)); |
| 3305 | +shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, buffer_usage); |
| 3306 | +lps->buffer_usage = buffer_usage; |
| 3307 | + |
3273 | 3308 | /* Store query string for workers */ |
3274 | 3309 | sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1); |
3275 | 3310 | memcpy(sharedquery, debug_query_string, querylen + 1); |
@@ -3399,6 +3434,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) |
3399 | 3434 | Relation *indrels; |
3400 | 3435 | LVShared *lvshared; |
3401 | 3436 | LVDeadTuples *dead_tuples; |
| 3437 | +BufferUsage *buffer_usage; |
3402 | 3438 | int nindexes; |
3403 | 3439 | char *sharedquery; |
3404 | 3440 | IndexBulkDeleteResult **stats; |
@@ -3468,10 +3504,17 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc) |
3468 | 3504 | errcallback.previous = error_context_stack; |
3469 | 3505 | error_context_stack = &errcallback; |
3470 | 3506 |
|
| 3507 | +/* Prepare to track buffer usage during parallel execution */ |
| 3508 | +InstrStartParallelQuery(); |
| 3509 | + |
3471 | 3510 | /* Process indexes to perform vacuum/cleanup */ |
3472 | 3511 | parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes, |
3473 | 3512 | &vacrelstats); |
3474 | 3513 |
|
| 3514 | +/* Report buffer usage during parallel execution */ |
| 3515 | +buffer_usage = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_BUFFER_USAGE, false); |
| 3516 | +InstrEndParallelQuery(&buffer_usage[ParallelWorkerNumber]); |
| 3517 | + |
3475 | 3518 | /* Pop the error context stack */ |
3476 | 3519 | error_context_stack=errcallback.previous; |
3477 | 3520 |
|
|