@@ -16,6 +16,10 @@
  * perform a pass of index cleanup and page compaction, then resume the heap
  * scan with an empty TID array.
  *
+ * As a special exception, if we're processing a table with no indexes we can
+ * vacuum each page as we go, so we don't need to allocate more space than
+ * enough to hold as many heap tuples as fit on one page.
+ *
  * We can limit the storage for page free space to MaxFSMPages entries,
  * since that's the most the free space map will be willing to remember
  * anyway.  If the relation has fewer than that many pages with free space,
@@ -31,7 +35,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.76 2006/07/31 20:09:00 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.77 2006/09/04 21:40:23 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -106,7 +110,7 @@ static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
                    TransactionId OldestXmin);
 static BlockNumber count_nondeletable_pages(Relation onerel,
                          LVRelStats *vacrelstats, TransactionId OldestXmin);
-static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
+static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks, unsigned nindexes);
 static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
                        ItemPointer itemptr);
 static void lazy_record_free_space(LVRelStats *vacrelstats,
@@ -206,7 +210,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
  *      This routine sets commit status bits, builds lists of dead tuples
  *      and pages with free space, and calculates statistics on the number
  *      of live tuples in the heap.  When done, or when we run low on space
- *      for dead-tuple TIDs, invoke vacuuming of indexes and heap.
+ *      for dead-tuple TIDs, or after every page if the table has no
+ *      indexes, invoke vacuuming of indexes and heap.
  *
  *      It also updates the minimum Xid found anywhere on the table in
  *      vacrelstats->minxid, for later storing it in pg_class.relminxid.
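In other words, the main loop now has two triggers for reclaiming dead tuples: the TID array filling up (the existing two-pass path) and, new here, the end of each page when there are no indexes. A rough paraphrase of the resulting control flow, not the literal code — details are elided and the found_dead_tuples flag is a stand-in for the real bookkeeping, not a variable in the patch:

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        /* Two-pass path: flush the TID array when it is nearly full. */
        if (vacrelstats->num_dead_tuples > 0 &&
            vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples <
            MaxHeapTuplesPerPage)
        {
            /* lazy_vacuum_index() on each index, then lazy_vacuum_heap() */
            vacrelstats->num_dead_tuples = 0;
        }

        /* ... read and scan the page, recording dead tuples ... */

        /* No-index path: reclaim the page before moving on. */
        if (found_dead_tuples && nindexes == 0)
        {
            lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
            vacrelstats->num_dead_tuples = 0;
        }
    }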
@@ -247,7 +252,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
     vacrelstats->rel_pages = nblocks;
     vacrelstats->nonempty_pages = 0;
 
-    lazy_space_alloc(vacrelstats, nblocks);
+    lazy_space_alloc(vacrelstats, nblocks, nindexes);
 
     for (blkno = 0; blkno < nblocks; blkno++)
     {
@@ -282,8 +287,14 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 
         buf = ReadBuffer(onerel, blkno);
 
-        /* In this phase we only need shared access to the buffer */
-        LockBuffer(buf, BUFFER_LOCK_SHARE);
+        /*
+         * In this phase we only need shared access to the buffer, unless we're
+         * going to do the vacuuming now, which we do if there are no indexes.
+         */
+        if (nindexes)
+            LockBuffer(buf, BUFFER_LOCK_SHARE);
+        else
+            LockBufferForCleanup(buf);
 
         page = BufferGetPage(buf);
 
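One subtlety in this hunk: collecting dead-tuple TIDs only reads the page, so a shared content lock suffices, but physically removing tuples is safe only once no other backend still has the buffer pinned. The annotated form below spells out the buffer-manager semantics being relied on; the comments are explanatory additions, not part of the patch:

    if (nindexes)
    {
        /* Read-only pass: concurrent pins on the buffer are harmless. */
        LockBuffer(buf, BUFFER_LOCK_SHARE);
    }
    else
    {
        /*
         * We will delete tuples from this page right away, so take the
         * cleanup lock: an exclusive content lock that also waits until
         * ours is the only pin on the buffer.  lazy_vacuum_heap() takes
         * the same lock in the two-pass case.
         */
        LockBufferForCleanup(buf);
    }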
@@ -450,6 +461,12 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
         {
             lazy_record_free_space(vacrelstats, blkno,
                                    PageGetFreeSpace(page));
+        } else if (!nindexes) {
+            /* If there are no indexes, we can vacuum the page right now
+             * instead of doing a second scan. */
+            lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
+            lazy_record_free_space(vacrelstats, blkno, PageGetFreeSpace(BufferGetPage(buf)));
+            vacrelstats->num_dead_tuples = 0;
         }
 
         /* Remember the location of the last page with nonremovable tuples */
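Note the reset of num_dead_tuples after each reclaimed page: in the no-index case the dead-tuple array is consumed from slot 0 and emptied before the scan advances, so it never holds more than one page's worth of TIDs. That is the invariant the smaller allocation in lazy_space_alloc (next hunk) depends on. Stated as an illustrative assertion, not code from the patch:

    /* Holds at every loop iteration when the table has no indexes. */
    Assert(vacrelstats->num_dead_tuples <= MaxHeapTuplesPerPage);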
@@ -891,16 +908,20 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats,
  * See the comments at the head of this file for rationale.
  */
 static void
-lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
+lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks, unsigned nindexes)
 {
     long        maxtuples;
     int         maxpages;
 
-    maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
-    maxtuples = Min(maxtuples, INT_MAX);
-    maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
-    /* stay sane if small maintenance_work_mem */
-    maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
+    if (nindexes) {
+        maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
+        maxtuples = Min(maxtuples, INT_MAX);
+        maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
+        /* stay sane if small maintenance_work_mem */
+        maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
+    } else {
+        maxtuples = MaxHeapTuplesPerPage;
+    }
 
     vacrelstats->num_dead_tuples = 0;
     vacrelstats->max_dead_tuples = (int) maxtuples;
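The payoff of the new else branch is easy to quantify. Here is a standalone back-of-envelope check, with every constant an assumption for illustration (a 16384 KB maintenance_work_mem, the usual 6-byte ItemPointerData, and roughly 291 for MaxHeapTuplesPerPage at the default 8K block size; none of these values come from the patch itself):

    #include <stdio.h>

    int
    main(void)
    {
        long    maintenance_work_mem = 16384;  /* KB; assumed default */
        long    itemptr_size = 6;              /* assumed sizeof(ItemPointerData) */
        long    tuples_per_page = 291;         /* assumed MaxHeapTuplesPerPage */

        /* Mirrors the two branches of lazy_space_alloc() above. */
        long    with_indexes = (maintenance_work_mem * 1024L) / itemptr_size;
        long    without_indexes = tuples_per_page;

        printf("with indexes:    %ld TID slots (~%ld KB)\n",
               with_indexes, with_indexes * itemptr_size / 1024);
        printf("without indexes: %ld TID slots (%ld bytes)\n",
               without_indexes, without_indexes * itemptr_size);
        return 0;
    }

With those numbers the indexed path reserves the full 16 MB budget for dead-tuple TIDs, while an indexless table now needs well under two kilobytes.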