Commit 7ab9b2f

Rearrange lazy_scan_heap to avoid visibility map race conditions.
We must set the visibility map bit before releasing our exclusive lock on the heap page; otherwise, someone might clear the heap page bit before we set the visibility map bit, leading to a situation where the visibility map thinks the page is all-visible but it's really not. This problem has existed since 8.4, but it wasn't critical before we had index-only scans, since the worst case scenario was that the page wouldn't get vacuumed until the next scan_all vacuum.

Along the way, a couple of minor, related improvements: (1) if we pause the heap scan to do an index vac cycle, release any visibility map page we're holding, since really long-running pins are not good for a variety of reasons; and (2) warn if we see a page that's marked all-visible in the visibility map but not on the page level, since that should never happen any more (it was allowed in previous releases, but not in 9.2).
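To make the race concrete, here is a minimal sketch of the ordering this commit establishes, using the same backend calls that appear in the diff below. It is illustrative only, not a compilable excerpt of vacuumlazy.c: declarations, error handling, WAL logging, and the surrounding per-block loop are omitted, and a plain exclusive lock stands in for the cleanup lock VACUUM actually takes.

/* Illustrative sketch only -- not an excerpt of vacuumlazy.c. */
visibilitymap_pin(onerel, blkno, &vmbuffer);    /* pin the VM page up front */
buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL, vac_strategy);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);         /* VACUUM really takes a cleanup lock */
page = BufferGetPage(buf);

/* ... scan the page, computing all_visible and all_visible_according_to_vm ... */

if (all_visible && !all_visible_according_to_vm)
{
    if (!PageIsAllVisible(page))
    {
        PageSetAllVisible(page);
        MarkBufferDirty(buf);
    }

    /*
     * Set the visibility map bit while still holding the heap-page lock, so
     * nobody can clear PD_ALL_VISIBLE between the two updates and leave the
     * map claiming all-visible for a page that is not.
     */
    visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer);
}

UnlockReleaseBuffer(buf);    /* only now drop the heap-page lock and pin */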
1 parent 85efd5f · commit 7ab9b2f

File tree

1 file changed: +46 -39 lines changed

src/backend/commands/vacuumlazy.c

Lines changed: 46 additions & 39 deletions
@@ -490,6 +490,18 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
         if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage &&
             vacrelstats->num_dead_tuples > 0)
         {
+            /*
+             * Before beginning index vacuuming, we release any pin we may hold
+             * on the visibility map page. This isn't necessary for correctness,
+             * but we do it anyway to avoid holding the pin across a lengthy,
+             * unrelated operation.
+             */
+            if (BufferIsValid(vmbuffer))
+            {
+                ReleaseBuffer(vmbuffer);
+                vmbuffer = InvalidBuffer;
+            }
+
             /* Log cleanup info before we touch indexes */
             vacuum_log_cleanup_info(onerel, vacrelstats);

@@ -510,6 +522,16 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             vacrelstats->num_index_scans++;
         }

+        /*
+         * Pin the visibility map page in case we need to mark the page
+         * all-visible. In most cases this will be very cheap, because we'll
+         * already have the correct page pinned anyway. However, it's possible
+         * that (a) next_not_all_visible_block is covered by a different VM page
+         * than the current block or (b) we released our pin and did a cycle of
+         * index vacuuming.
+         */
+        visibilitymap_pin(onerel, blkno, &vmbuffer);
+
         buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
                                  RBM_NORMAL, vac_strategy);

@@ -600,26 +622,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             empty_pages++;
             freespace = PageGetHeapFreeSpace(page);

+            /* empty pages are always all-visible */
             if (!PageIsAllVisible(page))
             {
                 PageSetAllVisible(page);
                 MarkBufferDirty(buf);
+                visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer);
             }

-            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
-            /* Update the visibility map */
-            if (!all_visible_according_to_vm)
-            {
-                visibilitymap_pin(onerel, blkno, &vmbuffer);
-                LockBuffer(buf, BUFFER_LOCK_SHARE);
-                if (PageIsAllVisible(page))
-                    visibilitymap_set(onerel, blkno, InvalidXLogRecPtr,
-                                      vmbuffer);
-                LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-            }
-
-            ReleaseBuffer(buf);
+            UnlockReleaseBuffer(buf);
             RecordPageWithFreeSpace(onerel, blkno, freespace);
             continue;
         }
@@ -834,11 +845,26 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,

         freespace = PageGetHeapFreeSpace(page);

-        /* Update the all-visible flag on the page */
-        if (!PageIsAllVisible(page) && all_visible)
+        /* mark page all-visible, if appropriate */
+        if (all_visible && !all_visible_according_to_vm)
         {
-            PageSetAllVisible(page);
-            MarkBufferDirty(buf);
+            if (!PageIsAllVisible(page))
+            {
+                PageSetAllVisible(page);
+                MarkBufferDirty(buf);
+            }
+            visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer);
+        }
+
+        /*
+         * As of PostgreSQL 9.2, the visibility map bit should never be set if
+         * the page-level bit is clear.
+         */
+        else if (all_visible_according_to_vm && !PageIsAllVisible(page))
+        {
+            elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
+                 relname, blkno);
+            visibilitymap_clear(onerel, blkno, vmbuffer);
         }

         /*
@@ -859,30 +885,11 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u",
                  relname, blkno);
             PageClearAllVisible(page);
-            SetBufferCommitInfoNeedsSave(buf);
-
-            /*
-             * Normally, we would drop the lock on the heap page before
-             * updating the visibility map, but since this case shouldn't
-             * happen anyway, don't worry about that.
-             */
-            visibilitymap_pin(onerel, blkno, &vmbuffer);
+            MarkBufferDirty(buf);
             visibilitymap_clear(onerel, blkno, vmbuffer);
         }

-        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
-        /* Update the visibility map */
-        if (!all_visible_according_to_vm && all_visible)
-        {
-            visibilitymap_pin(onerel, blkno, &vmbuffer);
-            LockBuffer(buf, BUFFER_LOCK_SHARE);
-            if (PageIsAllVisible(page))
-                visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer);
-            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-        }
-
-        ReleaseBuffer(buf);
+        UnlockReleaseBuffer(buf);

         /* Remember the location of the last page with nonremovable tuples */
         if (hastup)
