Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit d088ba5

Browse files
nbtree: Allocate new pages in separate function.
Split nbtree's _bt_getbuf function in two: code that read locks or write
locks existing pages remains in _bt_getbuf, while code that deals with
allocating new pages is moved to a new, dedicated function called
_bt_allocbuf. This simplifies most _bt_getbuf callers, since it is no
longer necessary for them to pass a heaprel argument. Many of the
changes to nbtree from commit 61b313e can be reverted. This minimizes
the divergence between HEAD/PostgreSQL 16 and earlier release branches.

_bt_allocbuf replaces the previous nbtree idiom of passing P_NEW to
_bt_getbuf. There are only 3 affected call sites, all of which continue
to pass a heaprel for recovery conflict purposes. Note that nbtree's
use of P_NEW was superficial; nbtree never actually relied on the P_NEW
code paths in bufmgr.c, so this change is strictly mechanical.

GiST already took the same approach; it has a dedicated function for
allocating new pages called gistNewBuffer(). That factor allowed commit
61b313e to make much more targeted changes to GiST.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Heikki Linnakangas <hlinnaka@iki.fi>
Discussion: https://postgr.es/m/CAH2-Wz=8Z9qY58bjm_7TAHgtW6RzZ5Ke62q5emdCEy9BAzwhmg@mail.gmail.com
1 parent fe879ae commit d088ba5

File tree

12 files changed

+269
-255
lines changed

12 files changed

+269
-255
lines changed

‎contrib/amcheck/verify_nbtree.c

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,6 @@ static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
183183
OffsetNumberupperbound);
184184
staticPagepalloc_btree_page(BtreeCheckState*state,BlockNumberblocknum);
185185
staticinlineBTScanInsertbt_mkscankey_pivotsearch(Relationrel,
186-
Relationheaprel,
187186
IndexTupleitup);
188187
staticItemIdPageGetItemIdCareful(BtreeCheckState*state,BlockNumberblock,
189188
Pagepage,OffsetNumberoffset);
@@ -332,7 +331,7 @@ bt_index_check_internal(Oid indrelid, bool parentcheck, bool heapallindexed,
332331
RelationGetRelationName(indrel))));
333332

334333
/* Extract metadata from metapage, and sanitize it in passing */
335-
_bt_metaversion(indrel,heaprel,&heapkeyspace,&allequalimage);
334+
_bt_metaversion(indrel,&heapkeyspace,&allequalimage);
336335
if (allequalimage&& !heapkeyspace)
337336
ereport(ERROR,
338337
(errcode(ERRCODE_INDEX_CORRUPTED),
@@ -1259,7 +1258,7 @@ bt_target_page_check(BtreeCheckState *state)
12591258
}
12601259

12611260
/* Build insertion scankey for current page offset */
1262-
skey=bt_mkscankey_pivotsearch(state->rel,state->heaprel,itup);
1261+
skey=bt_mkscankey_pivotsearch(state->rel,itup);
12631262

12641263
/*
12651264
* Make sure tuple size does not exceed the relevant BTREE_VERSION
@@ -1769,7 +1768,7 @@ bt_right_page_check_scankey(BtreeCheckState *state)
17691768
* memory remaining allocated.
17701769
*/
17711770
firstitup= (IndexTuple)PageGetItem(rightpage,rightitem);
1772-
returnbt_mkscankey_pivotsearch(state->rel,state->heaprel,firstitup);
1771+
returnbt_mkscankey_pivotsearch(state->rel,firstitup);
17731772
}
17741773

17751774
/*
@@ -2682,7 +2681,7 @@ bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
26822681
Bufferlbuf;
26832682
boolexists;
26842683

2685-
key=_bt_mkscankey(state->rel,state->heaprel,itup);
2684+
key=_bt_mkscankey(state->rel,itup);
26862685
Assert(key->heapkeyspace&&key->scantid!=NULL);
26872686

26882687
/*
@@ -2695,7 +2694,7 @@ bt_rootdescend(BtreeCheckState *state, IndexTuple itup)
26952694
*/
26962695
Assert(state->readonly&&state->rootdescend);
26972696
exists= false;
2698-
stack=_bt_search(state->rel,state->heaprel,key,&lbuf,BT_READ,NULL);
2697+
stack=_bt_search(state->rel,NULL,key,&lbuf,BT_READ,NULL);
26992698

27002699
if (BufferIsValid(lbuf))
27012700
{
@@ -3134,11 +3133,11 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum)
31343133
* the scankey is greater.
31353134
*/
31363135
staticinlineBTScanInsert
3137-
bt_mkscankey_pivotsearch(Relationrel,Relationheaprel,IndexTupleitup)
3136+
bt_mkscankey_pivotsearch(Relationrel,IndexTupleitup)
31383137
{
31393138
BTScanInsertskey;
31403139

3141-
skey=_bt_mkscankey(rel,heaprel,itup);
3140+
skey=_bt_mkscankey(rel,itup);
31423141
skey->pivotsearch= true;
31433142

31443143
returnskey;

‎src/backend/access/heap/heapam_handler.c

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -731,14 +731,9 @@ heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap,
731731
*multi_cutoff);
732732

733733

734-
/*
735-
* Set up sorting if wanted. NewHeap is being passed to
736-
* tuplesort_begin_cluster(), it could have been OldHeap too. It does not
737-
* really matter, as the goal is to have a heap relation being passed to
738-
* _bt_log_reuse_page() (which should not be called from this code path).
739-
*/
734+
/* Set up sorting if wanted */
740735
if (use_sort)
741-
tuplesort=tuplesort_begin_cluster(oldTupDesc,OldIndex,NewHeap,
736+
tuplesort=tuplesort_begin_cluster(oldTupDesc,OldIndex,
742737
maintenance_work_mem,
743738
NULL,TUPLESORT_NONE);
744739
else

‎src/backend/access/nbtree/nbtinsert.c

Lines changed: 28 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ static Buffer _bt_split(Relation rel, Relation heaprel, BTScanInsert itup_key,
5959
IndexTuplenposting,uint16postingoff);
6060
staticvoid_bt_insert_parent(Relationrel,Relationheaprel,Bufferbuf,
6161
Bufferrbuf,BTStackstack,boolisroot,boolisonly);
62-
staticBuffer_bt_newroot(Relationrel,Relationheaprel,Bufferlbuf,Bufferrbuf);
62+
staticBuffer_bt_newlevel(Relationrel,Relationheaprel,Bufferlbuf,Bufferrbuf);
6363
staticinlinebool_bt_pgaddtup(Pagepage,Sizeitemsize,IndexTupleitup,
6464
OffsetNumberitup_off,boolnewfirstdataitem);
6565
staticvoid_bt_delete_or_dedup_one_page(Relationrel,RelationheapRel,
@@ -110,7 +110,7 @@ _bt_doinsert(Relation rel, IndexTuple itup,
110110
boolcheckingunique= (checkUnique!=UNIQUE_CHECK_NO);
111111

112112
/* we need an insertion scan key to do our search, so build one */
113-
itup_key=_bt_mkscankey(rel,heapRel,itup);
113+
itup_key=_bt_mkscankey(rel,itup);
114114

115115
if (checkingunique)
116116
{
@@ -1024,13 +1024,15 @@ _bt_findinsertloc(Relation rel,
10241024
* indexes.
10251025
*/
10261026
staticvoid
1027-
_bt_stepright(Relationrel,Relationheaprel,BTInsertStateinsertstate,BTStackstack)
1027+
_bt_stepright(Relationrel,Relationheaprel,BTInsertStateinsertstate,
1028+
BTStackstack)
10281029
{
10291030
Pagepage;
10301031
BTPageOpaqueopaque;
10311032
Bufferrbuf;
10321033
BlockNumberrblkno;
10331034

1035+
Assert(heaprel!=NULL);
10341036
page=BufferGetPage(insertstate->buf);
10351037
opaque=BTPageGetOpaque(page);
10361038

@@ -1145,7 +1147,7 @@ _bt_insertonpg(Relation rel,
11451147

11461148
/*
11471149
* Every internal page should have exactly one negative infinity item at
1148-
* all times. Only _bt_split() and_bt_newroot() should add items that
1150+
* all times. Only _bt_split() and_bt_newlevel() should add items that
11491151
* become negative infinity items through truncation, since they're the
11501152
* only routines that allocate new internal pages.
11511153
*/
@@ -1250,14 +1252,14 @@ _bt_insertonpg(Relation rel,
12501252
* only one on its tree level, but was not the root, it may have been
12511253
* the "fast root". We need to ensure that the fast root link points
12521254
* at or above the current page. We can safely acquire a lock on the
1253-
* metapage here --- see comments for_bt_newroot().
1255+
* metapage here --- see comments for_bt_newlevel().
12541256
*/
12551257
if (unlikely(split_only_page))
12561258
{
12571259
Assert(!isleaf);
12581260
Assert(BufferIsValid(cbuf));
12591261

1260-
metabuf=_bt_getbuf(rel,heaprel,BTREE_METAPAGE,BT_WRITE);
1262+
metabuf=_bt_getbuf(rel,BTREE_METAPAGE,BT_WRITE);
12611263
metapg=BufferGetPage(metabuf);
12621264
metad=BTPageGetMeta(metapg);
12631265

@@ -1421,7 +1423,7 @@ _bt_insertonpg(Relation rel,
14211423
* call _bt_getrootheight while holding a buffer lock.
14221424
*/
14231425
if (BlockNumberIsValid(blockcache)&&
1424-
_bt_getrootheight(rel,heaprel) >=BTREE_FASTPATH_MIN_LEVEL)
1426+
_bt_getrootheight(rel) >=BTREE_FASTPATH_MIN_LEVEL)
14251427
RelationSetTargetBlock(rel,blockcache);
14261428
}
14271429

@@ -1715,7 +1717,7 @@ _bt_split(Relation rel, Relation heaprel, BTScanInsert itup_key, Buffer buf,
17151717
* way because it avoids an unnecessary PANIC when either origpage or its
17161718
* existing sibling page are corrupt.
17171719
*/
1718-
rbuf=_bt_getbuf(rel,heaprel,P_NEW,BT_WRITE);
1720+
rbuf=_bt_allocbuf(rel,heaprel);
17191721
rightpage=BufferGetPage(rbuf);
17201722
rightpagenumber=BufferGetBlockNumber(rbuf);
17211723
/* rightpage was initialized by _bt_getbuf */
@@ -1888,7 +1890,7 @@ _bt_split(Relation rel, Relation heaprel, BTScanInsert itup_key, Buffer buf,
18881890
*/
18891891
if (!isrightmost)
18901892
{
1891-
sbuf=_bt_getbuf(rel,heaprel,oopaque->btpo_next,BT_WRITE);
1893+
sbuf=_bt_getbuf(rel,oopaque->btpo_next,BT_WRITE);
18921894
spage=BufferGetPage(sbuf);
18931895
sopaque=BTPageGetOpaque(spage);
18941896
if (sopaque->btpo_prev!=origpagenumber)
@@ -2102,6 +2104,8 @@ _bt_insert_parent(Relation rel,
21022104
boolisroot,
21032105
boolisonly)
21042106
{
2107+
Assert(heaprel!=NULL);
2108+
21052109
/*
21062110
* Here we have to do something Lehman and Yao don't talk about: deal with
21072111
* a root split and construction of a new root. If our stack is empty
@@ -2121,8 +2125,8 @@ _bt_insert_parent(Relation rel,
21212125

21222126
Assert(stack==NULL);
21232127
Assert(isonly);
2124-
/* create a new root node and update the metapage */
2125-
rootbuf=_bt_newroot(rel,heaprel,buf,rbuf);
2128+
/* create a new root nodeone level upand update the metapage */
2129+
rootbuf=_bt_newlevel(rel,heaprel,buf,rbuf);
21262130
/* release the split buffers */
21272131
_bt_relbuf(rel,rootbuf);
21282132
_bt_relbuf(rel,rbuf);
@@ -2161,8 +2165,7 @@ _bt_insert_parent(Relation rel,
21612165
BlockNumberIsValid(RelationGetTargetBlock(rel))));
21622166

21632167
/* Find the leftmost page at the next level up */
2164-
pbuf=_bt_get_endpoint(rel,heaprel,opaque->btpo_level+1, false,
2165-
NULL);
2168+
pbuf=_bt_get_endpoint(rel,opaque->btpo_level+1, false,NULL);
21662169
/* Set up a phony stack entry pointing there */
21672170
stack=&fakestack;
21682171
stack->bts_blkno=BufferGetBlockNumber(pbuf);
@@ -2230,6 +2233,9 @@ _bt_insert_parent(Relation rel,
22302233
*
22312234
* On entry, 'lbuf' must be locked in write-mode. On exit, it is unlocked
22322235
* and unpinned.
2236+
*
2237+
* Caller must provide a valid heaprel, since finishing a page split requires
2238+
* allocating a new page if and when the parent page splits in turn.
22332239
*/
22342240
void
22352241
_bt_finish_split(Relationrel,Relationheaprel,Bufferlbuf,BTStackstack)
@@ -2243,9 +2249,10 @@ _bt_finish_split(Relation rel, Relation heaprel, Buffer lbuf, BTStack stack)
22432249
boolwasonly;
22442250

22452251
Assert(P_INCOMPLETE_SPLIT(lpageop));
2252+
Assert(heaprel!=NULL);
22462253

22472254
/* Lock right sibling, the one missing the downlink */
2248-
rbuf=_bt_getbuf(rel,heaprel,lpageop->btpo_next,BT_WRITE);
2255+
rbuf=_bt_getbuf(rel,lpageop->btpo_next,BT_WRITE);
22492256
rpage=BufferGetPage(rbuf);
22502257
rpageop=BTPageGetOpaque(rpage);
22512258

@@ -2257,7 +2264,7 @@ _bt_finish_split(Relation rel, Relation heaprel, Buffer lbuf, BTStack stack)
22572264
BTMetaPageData*metad;
22582265

22592266
/* acquire lock on the metapage */
2260-
metabuf=_bt_getbuf(rel,heaprel,BTREE_METAPAGE,BT_WRITE);
2267+
metabuf=_bt_getbuf(rel,BTREE_METAPAGE,BT_WRITE);
22612268
metapg=BufferGetPage(metabuf);
22622269
metad=BTPageGetMeta(metapg);
22632270

@@ -2323,10 +2330,11 @@ _bt_getstackbuf(Relation rel, Relation heaprel, BTStack stack, BlockNumber child
23232330
Pagepage;
23242331
BTPageOpaqueopaque;
23252332

2326-
buf=_bt_getbuf(rel,heaprel,blkno,BT_WRITE);
2333+
buf=_bt_getbuf(rel,blkno,BT_WRITE);
23272334
page=BufferGetPage(buf);
23282335
opaque=BTPageGetOpaque(page);
23292336

2337+
Assert(heaprel!=NULL);
23302338
if (P_INCOMPLETE_SPLIT(opaque))
23312339
{
23322340
_bt_finish_split(rel,heaprel,buf,stack->bts_parent);
@@ -2415,7 +2423,7 @@ _bt_getstackbuf(Relation rel, Relation heaprel, BTStack stack, BlockNumber child
24152423
}
24162424

24172425
/*
2418-
*_bt_newroot() -- Create a newroot page for the index.
2426+
*_bt_newlevel() -- Create a newlevel above root page.
24192427
*
24202428
*We've just split the old root page and need to create a new one.
24212429
*In order to do this, we add a new root page to the file, then lock
@@ -2433,7 +2441,7 @@ _bt_getstackbuf(Relation rel, Relation heaprel, BTStack stack, BlockNumber child
24332441
*lbuf, rbuf & rootbuf.
24342442
*/
24352443
staticBuffer
2436-
_bt_newroot(Relationrel,Relationheaprel,Bufferlbuf,Bufferrbuf)
2444+
_bt_newlevel(Relationrel,Relationheaprel,Bufferlbuf,Bufferrbuf)
24372445
{
24382446
Bufferrootbuf;
24392447
Pagelpage,
@@ -2459,12 +2467,12 @@ _bt_newroot(Relation rel, Relation heaprel, Buffer lbuf, Buffer rbuf)
24592467
lopaque=BTPageGetOpaque(lpage);
24602468

24612469
/* get a new root page */
2462-
rootbuf=_bt_getbuf(rel,heaprel,P_NEW,BT_WRITE);
2470+
rootbuf=_bt_allocbuf(rel,heaprel);
24632471
rootpage=BufferGetPage(rootbuf);
24642472
rootblknum=BufferGetBlockNumber(rootbuf);
24652473

24662474
/* acquire lock on the metapage */
2467-
metabuf=_bt_getbuf(rel,heaprel,BTREE_METAPAGE,BT_WRITE);
2475+
metabuf=_bt_getbuf(rel,BTREE_METAPAGE,BT_WRITE);
24682476
metapg=BufferGetPage(metabuf);
24692477
metad=BTPageGetMeta(metapg);
24702478

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp