Commit 73f6ec3

vacuumlazy.c: document vistest and OldestXmin.

Explain the relationship between vacuumlazy.c's vistest and OldestXmin cutoffs. These closely related cutoffs are different in subtle but important ways. Also document a closely related rule: we must establish rel_pages _after_ OldestXmin to ensure that no XID < OldestXmin can be missed by lazy_scan_heap().

It's easier to explain these issues by initializing everything together, so consolidate initialization of vacrel state. Now almost every vacrel field is initialized by heap_vacuum_rel(). The only remaining exception is the dead_items array, which is still managed by lazy_scan_heap() due to interactions with how we initialize parallel VACUUM.

Also move the process that updates pg_class entries for each index into heap_vacuum_rel(), and adjust related assertions. All pg_class updates now take place after lazy_scan_heap() returns, which seems clearer.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/20211211045710.ljtuu4gfloh754rs@alap3.anarazel.de
Discussion: https://postgr.es/m/CAH2-WznYsUxVT156rCQ+q=YD4S4=1M37hWvvHLz-H1pwSM8-Ew@mail.gmail.com
1 parent 5b68f75 · commit 73f6ec3
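
The ordering rule described above is easiest to see in the consolidated initialization that heap_vacuum_rel() now performs (the full change appears in the diff below). A condensed sketch of just the cutoff-related ordering, with the counter initialization and most comments elided:

    /* OldestXmin, FreezeLimit and MultiXactCutoff were computed earlier in heap_vacuum_rel() */
    vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);   /* read only after OldestXmin is established */
    vacrel->OldestXmin = OldestXmin;
    vacrel->vistest = GlobalVisTestFor(rel);    /* pruning cutoff; expected to make heap_page_prune remove deleted tuples with xmax < OldestXmin */
    vacrel->FreezeLimit = FreezeLimit;          /* XID freezing cutoff, always <= OldestXmin */
    vacrel->MultiXactCutoff = MultiXactCutoff;  /* MXID freezing cutoff */

Because rel_pages is determined only after OldestXmin has been established, lazy_scan_heap()'s scan of pages < rel_pages cannot miss a tuple with an XID < OldestXmin; such XIDs are at least considered for freezing.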

File tree

1 file changed: +92 -81 lines changed

src/backend/access/heap/vacuumlazy.c

Lines changed: 92 additions & 81 deletions
@@ -167,9 +167,10 @@ typedef struct LVRelState
     MultiXactId relminmxid;
     double      old_live_tuples;    /* previous value of pg_class.reltuples */
 
-    /* VACUUM operation's cutoff for pruning */
+    /* VACUUM operation's cutoffs for freezing and pruning */
     TransactionId OldestXmin;
-    /* VACUUM operation's cutoff for freezing XIDs and MultiXactIds */
+    GlobalVisState *vistest;
+    /* VACUUM operation's target cutoffs for freezing XIDs and MultiXactIds */
     TransactionId FreezeLimit;
     MultiXactId MultiXactCutoff;
     /* Are FreezeLimit/MultiXactCutoff still valid? */
@@ -185,8 +186,6 @@ typedef struct LVRelState
     bool        verbose;        /* VACUUM VERBOSE? */
 
     /*
-     * State managed by lazy_scan_heap() follows.
-     *
     * dead_items stores TIDs whose index tuples are deleted by index
     * vacuuming. Each TID points to an LP_DEAD line pointer from a heap page
     * that has been processed by lazy_scan_prune. Also needed by
@@ -252,7 +251,6 @@ static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
                                    bool sharelock, Buffer vmbuffer);
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
                             BlockNumber blkno, Page page,
-                            GlobalVisState *vistest,
                             LVPagePruneState *prunestate);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
                               BlockNumber blkno, Page page,
@@ -281,7 +279,7 @@ static void dead_items_alloc(LVRelState *vacrel, int nworkers);
 static void dead_items_cleanup(LVRelState *vacrel);
 static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
                                      TransactionId *visibility_cutoff_xid, bool *all_frozen);
-static void update_index_statistics(LVRelState *vacrel);
+static void update_relstats_all_indexes(LVRelState *vacrel);
 static void vacuum_error_callback(void *arg);
 static void update_vacuum_error_info(LVRelState *vacrel,
                                      LVSavedErrInfo *saved_vacrel,
@@ -296,7 +294,8 @@ static void restore_vacuum_error_info(LVRelState *vacrel,
  *
  *      This routine sets things up for and then calls lazy_scan_heap, where
  *      almost all work actually takes place.  Finalizes everything after call
- *      returns by managing rel truncation and updating pg_class statistics.
+ *      returns by managing relation truncation and updating rel's pg_class
+ *      entry.  (Also updates pg_class entries for any indexes that need it.)
  *
  *      At entry, we have already established a transaction and opened
  *      and locked the relation.
@@ -468,9 +467,51 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
     vacrel->relminmxid = rel->rd_rel->relminmxid;
     vacrel->old_live_tuples = rel->rd_rel->reltuples;
 
-    /* Set cutoffs for entire VACUUM */
+    /* Initialize page counters explicitly (be tidy) */
+    vacrel->scanned_pages = 0;
+    vacrel->frozenskipped_pages = 0;
+    vacrel->removed_pages = 0;
+    vacrel->lpdead_item_pages = 0;
+    vacrel->missed_dead_pages = 0;
+    vacrel->nonempty_pages = 0;
+    /* dead_items_alloc allocates vacrel->dead_items later on */
+
+    /* Allocate/initialize output statistics state */
+    vacrel->new_rel_tuples = 0;
+    vacrel->new_live_tuples = 0;
+    vacrel->indstats = (IndexBulkDeleteResult **)
+        palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
+
+    /* Initialize remaining counters (be tidy) */
+    vacrel->num_index_scans = 0;
+    vacrel->tuples_deleted = 0;
+    vacrel->lpdead_items = 0;
+    vacrel->live_tuples = 0;
+    vacrel->recently_dead_tuples = 0;
+    vacrel->missed_dead_tuples = 0;
+
+    /*
+     * Determine the extent of the blocks that we'll scan in lazy_scan_heap,
+     * and finalize cutoffs used for freezing and pruning in lazy_scan_prune.
+     *
+     * We expect vistest will always make heap_page_prune remove any deleted
+     * tuple whose xmax is < OldestXmin.  lazy_scan_prune must never become
+     * confused about whether a tuple should be frozen or removed.  (In the
+     * future we might want to teach lazy_scan_prune to recompute vistest from
+     * time to time, to increase the number of dead tuples it can prune away.)
+     *
+     * We must determine rel_pages _after_ OldestXmin has been established.
+     * lazy_scan_heap's physical heap scan (scan of pages < rel_pages) is
+     * thereby guaranteed to not miss any tuples with XIDs < OldestXmin. These
+     * XIDs must at least be considered for freezing (though not necessarily
+     * frozen) during its scan.
+     */
+    vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
     vacrel->OldestXmin = OldestXmin;
+    vacrel->vistest = GlobalVisTestFor(rel);
+    /* FreezeLimit controls XID freezing (always <= OldestXmin) */
     vacrel->FreezeLimit = FreezeLimit;
+    /* MultiXactCutoff controls MXID freezing */
     vacrel->MultiXactCutoff = MultiXactCutoff;
     /* Track if cutoffs became invalid (possible in !aggressive case only) */
     vacrel->freeze_cutoffs_valid = true;
@@ -481,21 +522,21 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
     */
    lazy_scan_heap(vacrel, params->nworkers);
 
-    /* Done with indexes */
-    vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
-
     /*
-     * Optionally truncate the relation.  But remember the relation size used
-     * by lazy_scan_heap for later first.
+     * Update pg_class entries for each of rel's indexes where appropriate.
+     *
+     * Unlike the later update to rel's pg_class entry, this is not critical.
+     * Maintains relpages/reltuples statistics used by the planner only.
     */
-    orig_rel_pages = vacrel->rel_pages;
+    if (vacrel->do_index_cleanup)
+        update_relstats_all_indexes(vacrel);
+
+    /* Done with rel's indexes */
+    vac_close_indexes(vacrel->nindexes, vacrel->indrels, NoLock);
+
+    /* Optionally truncate rel */
     if (should_attempt_truncation(vacrel))
-    {
-        update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
-                                 vacrel->nonempty_pages,
-                                 InvalidOffsetNumber);
         lazy_truncate_heap(vacrel);
-    }
 
     /* Pop the error context stack */
     error_context_stack = errcallback.previous;
@@ -505,7 +546,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
                                  PROGRESS_VACUUM_PHASE_FINAL_CLEANUP);
 
     /*
-     * Update statistics in pg_class.
+     * Prepare to update rel's pg_class entry.
     *
     * In principle new_live_tuples could be -1 indicating that we (still)
     * don't know the tuple count.  In practice that probably can't happen,
@@ -517,22 +558,19 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
     */
    new_rel_pages = vacrel->rel_pages;      /* After possible rel truncation */
    new_live_tuples = vacrel->new_live_tuples;
-
    visibilitymap_count(rel, &new_rel_allvisible, NULL);
    if (new_rel_allvisible > new_rel_pages)
        new_rel_allvisible = new_rel_pages;
 
    /*
+     * Now actually update rel's pg_class entry.
+     *
     * Aggressive VACUUM must reliably advance relfrozenxid (and relminmxid).
     * We are able to advance relfrozenxid in a non-aggressive VACUUM too,
     * provided we didn't skip any all-visible (not all-frozen) pages using
     * the visibility map, and assuming that we didn't fail to get a cleanup
     * lock that made it unsafe with respect to FreezeLimit (or perhaps our
     * MultiXactCutoff) established for VACUUM operation.
-     *
-     * NB: We must use orig_rel_pages, not vacrel->rel_pages, since we want
-     * the rel_pages used by lazy_scan_heap, which won't match when we
-     * happened to truncate the relation afterwards.
     */
    if (vacrel->scanned_pages + vacrel->frozenskipped_pages < orig_rel_pages ||
        !vacrel->freeze_cutoffs_valid)
@@ -787,7 +825,7 @@ static void
 lazy_scan_heap(LVRelState *vacrel, int nworkers)
 {
     VacDeadItems *dead_items;
-    BlockNumber nblocks,
+    BlockNumber nblocks = vacrel->rel_pages,
                 blkno,
                 next_unskippable_block,
                 next_failsafe_block,
@@ -800,29 +838,6 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
         PROGRESS_VACUUM_MAX_DEAD_TUPLES
     };
     int64       initprog_val[3];
-    GlobalVisState *vistest;
-
-    nblocks = RelationGetNumberOfBlocks(vacrel->rel);
-    vacrel->rel_pages = nblocks;
-    vacrel->scanned_pages = 0;
-    vacrel->frozenskipped_pages = 0;
-    vacrel->removed_pages = 0;
-    vacrel->lpdead_item_pages = 0;
-    vacrel->missed_dead_pages = 0;
-    vacrel->nonempty_pages = 0;
-
-    /* Initialize instrumentation counters */
-    vacrel->num_index_scans = 0;
-    vacrel->tuples_deleted = 0;
-    vacrel->lpdead_items = 0;
-    vacrel->live_tuples = 0;
-    vacrel->recently_dead_tuples = 0;
-    vacrel->missed_dead_tuples = 0;
-
-    vistest = GlobalVisTestFor(vacrel->rel);
-
-    vacrel->indstats = (IndexBulkDeleteResult **)
-        palloc0(vacrel->nindexes * sizeof(IndexBulkDeleteResult *));
 
     /*
     * Do failsafe precheck before calling dead_items_alloc.  This ensures
@@ -880,9 +895,9 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
     * might leave some dead tuples lying around, but the next vacuum will
     * find them.  But even when aggressive *is* set, it's still OK if we miss
     * a page whose all-frozen marking has just been cleared.  Any new XIDs
-     * just added to that page are necessarily newer than the GlobalXmin we
-     * computed, so they'll have no effect on the value to which we can safely
-     * set relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
+     * just added to that page are necessarily >= vacrel->OldestXmin, and so
+     * they'll have no effect on the value to which we can safely set
+     * relfrozenxid.  A similar argument applies for MXIDs and relminmxid.
     */
    next_unskippable_block = 0;
    if (vacrel->skipwithvm)
@@ -1153,7 +1168,7 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
         * were pruned some time earlier.  Also considers freezing XIDs in the
         * tuple headers of remaining items with storage.
         */
-        lazy_scan_prune(vacrel, buf, blkno, page, vistest, &prunestate);
+        lazy_scan_prune(vacrel, buf, blkno, page, &prunestate);
 
         Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);
 
@@ -1392,15 +1407,11 @@ lazy_scan_heap(LVRelState *vacrel, int nworkers)
        lazy_cleanup_all_indexes(vacrel);
 
    /*
-     * Free resources managed by dead_items_alloc.  This will end parallel
-     * mode when needed (it must end before updating index statistics as we
-     * can't write in parallel mode).
+     * Free resources managed by dead_items_alloc.  This ends parallel mode in
+     * passing when necessary.
     */
    dead_items_cleanup(vacrel);
-
-    /* Update index statistics */
-    if (vacrel->nindexes > 0 && vacrel->do_index_cleanup)
-        update_index_statistics(vacrel);
+    Assert(!IsInParallelMode());
 }
 
 /*
@@ -1559,7 +1570,6 @@ lazy_scan_prune(LVRelState *vacrel,
                 Buffer buf,
                 BlockNumber blkno,
                 Page page,
-                GlobalVisState *vistest,
                 LVPagePruneState *prunestate)
 {
     Relation    rel = vacrel->rel;
@@ -1598,7 +1608,7 @@ lazy_scan_prune(LVRelState *vacrel,
     * lpdead_items's final value can be thought of as the number of tuples
     * that were deleted from indexes.
     */
-    tuples_deleted = heap_page_prune(rel, buf, vistest,
+    tuples_deleted = heap_page_prune(rel, buf, vacrel->vistest,
                                      InvalidTransactionId, 0, &nnewlpdead,
                                      &vacrel->offnum);
 
@@ -2292,8 +2302,6 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
     Assert(vacrel->nindexes > 0);
     Assert(vacrel->do_index_vacuuming);
     Assert(vacrel->do_index_cleanup);
-    Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
-    Assert(MultiXactIdIsValid(vacrel->relminmxid));
 
     /* Precheck for XID wraparound emergencies */
     if (lazy_check_wraparound_failsafe(vacrel))
@@ -2604,6 +2612,9 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 static bool
 lazy_check_wraparound_failsafe(LVRelState *vacrel)
 {
+    Assert(TransactionIdIsNormal(vacrel->relfrozenxid));
+    Assert(MultiXactIdIsValid(vacrel->relminmxid));
+
     /* Don't warn more than once per VACUUM */
     if (vacrel->failsafe_active)
         return true;
@@ -2644,6 +2655,10 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
 static void
 lazy_cleanup_all_indexes(LVRelState *vacrel)
 {
+    double      reltuples = vacrel->new_rel_tuples;
+    bool        estimated_count = vacrel->scanned_pages < vacrel->rel_pages;
+
+    Assert(vacrel->do_index_cleanup);
     Assert(vacrel->nindexes > 0);
 
     /* Report that we are now cleaning up indexes */
@@ -2652,10 +2667,6 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 
     if (!ParallelVacuumIsActive(vacrel))
     {
-        double      reltuples = vacrel->new_rel_tuples;
-        bool        estimated_count =
-        vacrel->scanned_pages < vacrel->rel_pages;
-
         for (int idx = 0; idx < vacrel->nindexes; idx++)
         {
             Relation    indrel = vacrel->indrels[idx];
@@ -2669,9 +2680,9 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
     else
     {
         /* Outsource everything to parallel variant */
-        parallel_vacuum_cleanup_all_indexes(vacrel->pvs, vacrel->new_rel_tuples,
+        parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
                                             vacrel->num_index_scans,
-                                            (vacrel->scanned_pages < vacrel->rel_pages));
+                                            estimated_count);
     }
 }
 
@@ -2797,27 +2808,23 @@ lazy_cleanup_one_index(Relation indrel, IndexBulkDeleteResult *istat,
  * Also don't attempt it if we are doing early pruning/vacuuming, because a
  * scan which cannot find a truncated heap page cannot determine that the
  * snapshot is too old to read that page.
- *
- * This is split out so that we can test whether truncation is going to be
- * called for before we actually do it.  If you change the logic here, be
- * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
  */
 static bool
 should_attempt_truncation(LVRelState *vacrel)
 {
     BlockNumber possibly_freeable;
 
-    if (!vacrel->do_rel_truncate || vacrel->failsafe_active)
+    if (!vacrel->do_rel_truncate || vacrel->failsafe_active ||
+        old_snapshot_threshold >= 0)
         return false;
 
     possibly_freeable = vacrel->rel_pages - vacrel->nonempty_pages;
     if (possibly_freeable > 0 &&
         (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
-         possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION) &&
-        old_snapshot_threshold < 0)
+         possibly_freeable >= vacrel->rel_pages / REL_TRUNCATE_FRACTION))
         return true;
-    else
-        return false;
+
+    return false;
 }
 
 /*
@@ -2835,6 +2842,10 @@ lazy_truncate_heap(LVRelState *vacrel)
     pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
                                  PROGRESS_VACUUM_PHASE_TRUNCATE);
 
+    /* Update error traceback information one last time */
+    update_vacuum_error_info(vacrel, NULL, VACUUM_ERRCB_PHASE_TRUNCATE,
+                             vacrel->nonempty_pages, InvalidOffsetNumber);
+
     /*
     * Loop until no more truncating can be done.
     */
@@ -3328,13 +3339,13 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
  * Update index statistics in pg_class if the statistics are accurate.
  */
 static void
-update_index_statistics(LVRelState *vacrel)
+update_relstats_all_indexes(LVRelState *vacrel)
 {
     Relation   *indrels = vacrel->indrels;
     int         nindexes = vacrel->nindexes;
     IndexBulkDeleteResult **indstats = vacrel->indstats;
 
-    Assert(!IsInParallelMode());
+    Assert(vacrel->do_index_cleanup);
 
     for (int idx = 0; idx < nindexes; idx++)
     {
