Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commitbbb6e55

Browse files
committed
Make VACUUM avoid waiting for a cleanup lock, where possible.
In a regular VACUUM, it's OK to skip pages for which a cleanup lock isn't immediately available; the next VACUUM will deal with them. If we're scanning the entire relation to advance relfrozenxid, we might need to wait, but only if there are tuples on the page that actually require freezing. These changes should greatly reduce the incidence of vacuum processes getting "stuck". Simon Riggs and Robert Haas
1 parentbd23969 commitbbb6e55

File tree

3 files changed

+122
-4
lines changed

3 files changed

+122
-4
lines changed

‎src/backend/access/heap/heapam.c

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3841,6 +3841,44 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
38413841
returnchanged;
38423842
}
38433843

3844+
/*
3845+
* heap_tuple_needs_freeze
3846+
*
3847+
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
3848+
* are older than the specified cutoff XID. If so, return TRUE.
3849+
*
3850+
* It doesn't matter whether the tuple is alive or dead, we are checking
3851+
* to see if a tuple needs to be removed or frozen to avoid wraparound.
3852+
*/
3853+
bool
3854+
heap_tuple_needs_freeze(HeapTupleHeadertuple,TransactionIdcutoff_xid,
3855+
Bufferbuf)
3856+
{
3857+
TransactionIdxid;
3858+
3859+
xid=HeapTupleHeaderGetXmin(tuple);
3860+
if (TransactionIdIsNormal(xid)&&
3861+
TransactionIdPrecedes(xid,cutoff_xid))
3862+
return true;
3863+
3864+
if (!(tuple->t_infomask&HEAP_XMAX_IS_MULTI))
3865+
{
3866+
xid=HeapTupleHeaderGetXmax(tuple);
3867+
if (TransactionIdIsNormal(xid)&&
3868+
TransactionIdPrecedes(xid,cutoff_xid))
3869+
return true;
3870+
}
3871+
3872+
if (tuple->t_infomask&HEAP_MOVED)
3873+
{
3874+
xid=HeapTupleHeaderGetXvac(tuple);
3875+
if (TransactionIdIsNormal(xid)&&
3876+
TransactionIdPrecedes(xid,cutoff_xid))
3877+
return true;
3878+
}
3879+
3880+
return false;
3881+
}
38443882

38453883
/* ----------------
38463884
*heap_markpos- mark scan position

‎src/backend/commands/vacuumlazy.c

Lines changed: 82 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,7 @@ static BufferAccessStrategy vac_strategy;
117117
staticvoidlazy_scan_heap(Relationonerel,LVRelStats*vacrelstats,
118118
Relation*Irel,intnindexes,boolscan_all);
119119
staticvoidlazy_vacuum_heap(Relationonerel,LVRelStats*vacrelstats);
120+
staticboollazy_check_needs_freeze(Bufferbuf);
120121
staticvoidlazy_vacuum_index(Relationindrel,
121122
IndexBulkDeleteResult**stats,
122123
LVRelStats*vacrelstats);
@@ -453,8 +454,6 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
453454

454455
vacuum_delay_point();
455456

456-
vacrelstats->scanned_pages++;
457-
458457
/*
459458
* If we are close to overrunning the available space for dead-tuple
460459
* TIDs, pause and do a cycle of vacuuming before we tackle this page.
@@ -486,7 +485,41 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
486485
RBM_NORMAL,vac_strategy);
487486

488487
/* We need buffer cleanup lock so that we can prune HOT chains. */
489-
LockBufferForCleanup(buf);
488+
if (!ConditionalLockBufferForCleanup(buf))
489+
{
490+
/*
491+
* It's OK to skip vacuuming a page, as long as it's not got data
492+
* that needs to be cleaned for wraparound avoidance.
493+
*/
494+
if (!scan_all)
495+
{
496+
ReleaseBuffer(buf);
497+
continue;
498+
}
499+
500+
/*
501+
* If this is a wraparound checking vacuum, then we read the page
502+
* with share lock to see if any xids need to be frozen. If the
503+
* page doesn't need attention we just skip and continue. If it
504+
* does, we wait for cleanup lock.
505+
*
506+
* We could defer the lock request further by remembering the page
507+
* and coming back to it later, or we could even register
508+
* ourselves for multiple buffers and then service whichever one
509+
* is received first. For now, this seems good enough.
510+
*/
511+
LockBuffer(buf,BUFFER_LOCK_SHARE);
512+
if (!lazy_check_needs_freeze(buf))
513+
{
514+
UnlockReleaseBuffer(buf);
515+
continue;
516+
}
517+
LockBuffer(buf,BUFFER_LOCK_UNLOCK);
518+
LockBufferForCleanup(buf);
519+
/* drop through to normal processing */
520+
}
521+
522+
vacrelstats->scanned_pages++;
490523

491524
page=BufferGetPage(buf);
492525

@@ -932,7 +965,8 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
932965
tblk=ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
933966
buf=ReadBufferExtended(onerel,MAIN_FORKNUM,tblk,RBM_NORMAL,
934967
vac_strategy);
935-
LockBufferForCleanup(buf);
968+
if (!ConditionalLockBufferForCleanup(buf))
969+
continue;
936970
tupindex=lazy_vacuum_page(onerel,tblk,buf,tupindex,vacrelstats);
937971

938972
/* Now that we've compacted the page, record its available space */
@@ -1009,6 +1043,50 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
10091043
returntupindex;
10101044
}
10111045

1046+
/*
1047+
*lazy_check_needs_freeze() -- scan page to see if any tuples
1048+
* need to be cleaned to avoid wraparound
1049+
*
1050+
* Returns true if the page needs to be vacuumed using cleanup lock.
1051+
*/
1052+
staticbool
1053+
lazy_check_needs_freeze(Bufferbuf)
1054+
{
1055+
Pagepage;
1056+
OffsetNumberoffnum,
1057+
maxoff;
1058+
HeapTupleHeadertupleheader;
1059+
1060+
page=BufferGetPage(buf);
1061+
1062+
if (PageIsNew(page)||PageIsEmpty(page))
1063+
{
1064+
/* PageIsNew probably shouldn't happen... */
1065+
return false;
1066+
}
1067+
1068+
maxoff=PageGetMaxOffsetNumber(page);
1069+
for (offnum=FirstOffsetNumber;
1070+
offnum <=maxoff;
1071+
offnum=OffsetNumberNext(offnum))
1072+
{
1073+
ItemIditemid;
1074+
1075+
itemid=PageGetItemId(page,offnum);
1076+
1077+
if (!ItemIdIsNormal(itemid))
1078+
continue;
1079+
1080+
tupleheader= (HeapTupleHeader)PageGetItem(page,itemid);
1081+
1082+
if (heap_tuple_needs_freeze(tupleheader,FreezeLimit,buf))
1083+
return true;
1084+
}/* scan along page */
1085+
1086+
return false;
1087+
}
1088+
1089+
10121090
/*
10131091
*lazy_vacuum_index() -- vacuum one index relation.
10141092
*

‎src/include/access/heapam.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,8 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
111111
externvoidheap_inplace_update(Relationrelation,HeapTupletuple);
112112
externboolheap_freeze_tuple(HeapTupleHeadertuple,TransactionIdcutoff_xid,
113113
Bufferbuf);
114+
externboolheap_tuple_needs_freeze(HeapTupleHeadertuple,TransactionIdcutoff_xid,
115+
Bufferbuf);
114116

115117
externOidsimple_heap_insert(Relationrelation,HeapTupletup);
116118
externvoidsimple_heap_delete(Relationrelation,ItemPointertid);

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp