Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 041e8b9

Browse files
committed
Get rid of our dependency on type "long" for memory size calculations.
Consistently use "Size" (or size_t, or in some places int64 or double) as the type for variables holding memory allocation sizes. In most places variables' data types were fine already, but we had an ancient habit of computing bytes from kilobytes-units GUCs with code like "work_mem * 1024L". That risks overflow on Win64, where they did not make "long" as wide as "size_t". We worked around that by restricting such GUCs' ranges, so you couldn't set work_mem et al higher than 2GB on Win64. This patch removes that restriction, after replacing such calculations with "work_mem * (Size) 1024" or variants of that.

It should be noted that this patch was constructed by searching outwards from the GUCs that have MAX_KILOBYTES as upper limit. So I can't positively guarantee there are no other places doing memory-size arithmetic in int or long variables. I do however feel pretty confident that increasing MAX_KILOBYTES on Win64 is safe now. Also, nothing in our code should be dealing in multiple-gigabyte allocations without authorization from a relevant GUC, so it seems pretty likely that this search caught everything that could be at risk of overflow.

Author: Vladlen Popolitov <v.popolitov@postgrespro.ru>
Co-authored-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://postgr.es/m/1a01f0-66ec2d80-3b-68487680@27595217
1 parent f8d8581 · commit 041e8b9

File tree

20 files changed

+50
-46
lines changed

20 files changed

+50
-46
lines changed

‎src/backend/access/gin/ginfast.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
intgin_pending_list_limit=0;
4040

4141
#defineGIN_PAGE_FREESIZE \
42-
( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )
42+
((Size)BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )
4343

4444
typedefstructKeyArray
4545
{
@@ -456,7 +456,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
456456
* ginInsertCleanup() should not be called inside our CRIT_SECTION.
457457
*/
458458
cleanupSize=GinGetPendingListCleanupSize(index);
459-
if (metadata->nPendingPages*GIN_PAGE_FREESIZE>cleanupSize*1024L)
459+
if (metadata->nPendingPages*GIN_PAGE_FREESIZE>cleanupSize*(Size)1024)
460460
needCleanup= true;
461461

462462
UnlockReleaseBuffer(metabuffer);
@@ -795,7 +795,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
795795
blknoFinish;
796796
boolcleanupFinish= false;
797797
boolfsm_vac= false;
798-
SizeworkMemory;
798+
intworkMemory;
799799

800800
/*
801801
* We would like to prevent concurrent cleanup process. For that we will
@@ -901,7 +901,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
901901
*/
902902
if (GinPageGetOpaque(page)->rightlink==InvalidBlockNumber||
903903
(GinPageHasFullRow(page)&&
904-
(accum.allocatedMemory >=workMemory*1024L)))
904+
accum.allocatedMemory >=workMemory*(Size)1024))
905905
{
906906
ItemPointerData*list;
907907
uint32nlist;

‎src/backend/access/gin/ginget.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
125125
CompactAttribute*attr;
126126

127127
/* Initialize empty bitmap result */
128-
scanEntry->matchBitmap=tbm_create(work_mem*1024L,NULL);
128+
scanEntry->matchBitmap=tbm_create(work_mem*(Size)1024,NULL);
129129

130130
/* Null query cannot partial-match anything */
131131
if (scanEntry->isPartialMatch&&

‎src/backend/access/gin/gininsert.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -288,7 +288,7 @@ ginBuildCallback(Relation index, ItemPointer tid, Datum *values,
288288
values[i],isnull[i],tid);
289289

290290
/* If we've maxed out our available memory, dump everything to the index */
291-
if (buildstate->accum.allocatedMemory >=(Size)maintenance_work_mem*1024L)
291+
if (buildstate->accum.allocatedMemory >=maintenance_work_mem*(Size)1024)
292292
{
293293
ItemPointerData*list;
294294
Datumkey;

‎src/backend/access/hash/hash.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
120120
doublereltuples;
121121
doubleallvisfrac;
122122
uint32num_buckets;
123-
longsort_threshold;
123+
Sizesort_threshold;
124124
HashBuildStatebuildstate;
125125

126126
/*
@@ -155,13 +155,13 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
155155
* one page. Also, "initial index size" accounting does not include the
156156
* metapage, nor the first bitmap page.
157157
*/
158-
sort_threshold= (maintenance_work_mem*1024L) /BLCKSZ;
158+
sort_threshold= (maintenance_work_mem*(Size)1024) /BLCKSZ;
159159
if (index->rd_rel->relpersistence!=RELPERSISTENCE_TEMP)
160160
sort_threshold=Min(sort_threshold,NBuffers);
161161
else
162162
sort_threshold=Min(sort_threshold,NLocBuffer);
163163

164-
if (num_buckets >=(uint32)sort_threshold)
164+
if (num_buckets >=sort_threshold)
165165
buildstate.spool=_h_spoolinit(heap,index,num_buckets);
166166
else
167167
buildstate.spool=NULL;

‎src/backend/access/heap/vacuumlazy.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2070,7 +2070,7 @@ lazy_vacuum(LVRelState *vacrel)
20702070
*/
20712071
threshold= (double)vacrel->rel_pages*BYPASS_THRESHOLD_PAGES;
20722072
bypass= (vacrel->lpdead_item_pages<threshold&&
2073-
(TidStoreMemoryUsage(vacrel->dead_items)<(32L*1024L*1024L)));
2073+
TidStoreMemoryUsage(vacrel->dead_items)<32*1024*1024);
20742074
}
20752075

20762076
if (bypass)
@@ -3037,7 +3037,7 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
30373037
*/
30383038

30393039
dead_items_info= (VacDeadItemsInfo*)palloc(sizeof(VacDeadItemsInfo));
3040-
dead_items_info->max_bytes=vac_work_mem*1024L;
3040+
dead_items_info->max_bytes=vac_work_mem*(Size)1024;
30413041
dead_items_info->num_items=0;
30423042
vacrel->dead_items_info=dead_items_info;
30433043

‎src/backend/access/nbtree/nbtpage.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2953,7 +2953,7 @@ _bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child,
29532953
void
29542954
_bt_pendingfsm_init(Relationrel,BTVacState*vstate,boolcleanuponly)
29552955
{
2956-
int64maxbufsize;
2956+
Sizemaxbufsize;
29572957

29582958
/*
29592959
* Don't bother with optimization in cleanup-only case -- we don't expect
@@ -2969,12 +2969,13 @@ _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
29692969
* int overflow here.
29702970
*/
29712971
vstate->bufsize=256;
2972-
maxbufsize= (work_mem*1024L) /sizeof(BTPendingFSM);
2973-
maxbufsize=Min(maxbufsize,INT_MAX);
2972+
maxbufsize= (work_mem* (Size)1024) /sizeof(BTPendingFSM);
29742973
maxbufsize=Min(maxbufsize,MaxAllocSize /sizeof(BTPendingFSM));
2974+
/* BTVacState.maxbufsize has type int */
2975+
maxbufsize=Min(maxbufsize,INT_MAX);
29752976
/* Stay sane with small work_mem */
29762977
maxbufsize=Max(maxbufsize,vstate->bufsize);
2977-
vstate->maxbufsize=maxbufsize;
2978+
vstate->maxbufsize=(int)maxbufsize;
29782979

29792980
/* Allocate buffer, indicate that there are currently 0 pending pages */
29802981
vstate->pendingpages=palloc(sizeof(BTPendingFSM)*vstate->bufsize);

‎src/backend/commands/vacuumparallel.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -375,7 +375,7 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
375375
(nindexes_mwm>0) ?
376376
maintenance_work_mem /Min(parallel_workers,nindexes_mwm) :
377377
maintenance_work_mem;
378-
shared->dead_items_info.max_bytes=vac_work_mem*1024L;
378+
shared->dead_items_info.max_bytes=vac_work_mem*(size_t)1024;
379379

380380
/* Prepare DSA space for dead items */
381381
dead_items=TidStoreCreateShared(shared->dead_items_info.max_bytes,

‎src/backend/executor/execUtils.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -326,7 +326,7 @@ CreateWorkExprContext(EState *estate)
326326
SizemaxBlockSize=ALLOCSET_DEFAULT_MAXSIZE;
327327

328328
/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
329-
while (16*maxBlockSize>work_mem*1024L)
329+
while (maxBlockSize>work_mem*(Size)1024 /16)
330330
maxBlockSize >>=1;
331331

332332
if (maxBlockSize<ALLOCSET_DEFAULT_INITSIZE)

‎src/backend/executor/nodeBitmapIndexscan.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ MultiExecBitmapIndexScan(BitmapIndexScanState *node)
9191
else
9292
{
9393
/* XXX should we use less than work_mem for this? */
94-
tbm=tbm_create(work_mem*1024L,
94+
tbm=tbm_create(work_mem*(Size)1024,
9595
((BitmapIndexScan*)node->ss.ps.plan)->isshared ?
9696
node->ss.ps.state->es_query_dsa :NULL);
9797
}

‎src/backend/executor/nodeBitmapOr.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ MultiExecBitmapOr(BitmapOrState *node)
143143
if (result==NULL)/* first subplan */
144144
{
145145
/* XXX should we use less than work_mem for this? */
146-
result=tbm_create(work_mem*1024L,
146+
result=tbm_create(work_mem*(Size)1024,
147147
((BitmapOr*)node->ps.plan)->isshared ?
148148
node->ps.state->es_query_dsa :NULL);
149149
}

‎src/backend/nodes/tidbitmap.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -263,7 +263,7 @@ static inttbm_shared_comparator(const void *left, const void *right,
263263
* be allocated from the DSA.
264264
*/
265265
TIDBitmap*
266-
tbm_create(longmaxbytes,dsa_area*dsa)
266+
tbm_create(Sizemaxbytes,dsa_area*dsa)
267267
{
268268
TIDBitmap*tbm;
269269

@@ -273,7 +273,7 @@ tbm_create(long maxbytes, dsa_area *dsa)
273273
tbm->mcxt=CurrentMemoryContext;
274274
tbm->status=TBM_EMPTY;
275275

276-
tbm->maxentries=(int)tbm_calculate_entries(maxbytes);
276+
tbm->maxentries=tbm_calculate_entries(maxbytes);
277277
tbm->lossify_start=0;
278278
tbm->dsa=dsa;
279279
tbm->dsapagetable=InvalidDsaPointer;
@@ -1539,10 +1539,10 @@ pagetable_free(pagetable_hash *pagetable, void *pointer)
15391539
*
15401540
* Estimate number of hashtable entries we can have within maxbytes.
15411541
*/
1542-
long
1543-
tbm_calculate_entries(doublemaxbytes)
1542+
int
1543+
tbm_calculate_entries(Sizemaxbytes)
15441544
{
1545-
longnbuckets;
1545+
Sizenbuckets;
15461546

15471547
/*
15481548
* Estimate number of hashtable entries we can have within maxbytes. This
@@ -1555,7 +1555,7 @@ tbm_calculate_entries(double maxbytes)
15551555
nbuckets=Min(nbuckets,INT_MAX-1);/* safety limit */
15561556
nbuckets=Max(nbuckets,16);/* sanity limit */
15571557

1558-
returnnbuckets;
1558+
return(int)nbuckets;
15591559
}
15601560

15611561
/*

‎src/backend/optimizer/path/costsize.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1903,7 +1903,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
19031903
doubleinput_bytes=relation_byte_size(tuples,width);
19041904
doubleoutput_bytes;
19051905
doubleoutput_tuples;
1906-
longsort_mem_bytes=sort_mem*1024L;
1906+
int64sort_mem_bytes=sort_mem*(int64)1024;
19071907

19081908
/*
19091909
* We want to be sure the cost of a sort is never estimated as zero, even
@@ -2488,7 +2488,7 @@ cost_material(Path *path,
24882488
Coststartup_cost=input_startup_cost;
24892489
Costrun_cost=input_total_cost-input_startup_cost;
24902490
doublenbytes=relation_byte_size(tuples,width);
2491-
longwork_mem_bytes=work_mem*1024L;
2491+
doublework_mem_bytes=work_mem*(Size)1024;
24922492

24932493
path->rows=tuples;
24942494

@@ -4028,7 +4028,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
40284028
elseif (enable_material&&innersortkeys!=NIL&&
40294029
relation_byte_size(inner_path_rows,
40304030
inner_path->pathtarget->width)>
4031-
(work_mem*1024L))
4031+
work_mem*(Size)1024)
40324032
path->materialize_inner= true;
40334033
else
40344034
path->materialize_inner= false;
@@ -4663,7 +4663,7 @@ cost_rescan(PlannerInfo *root, Path *path,
46634663
Costrun_cost=cpu_tuple_cost*path->rows;
46644664
doublenbytes=relation_byte_size(path->rows,
46654665
path->pathtarget->width);
4666-
longwork_mem_bytes=work_mem*1024L;
4666+
doublework_mem_bytes=work_mem*(Size)1024;
46674667

46684668
if (nbytes>work_mem_bytes)
46694669
{
@@ -4690,7 +4690,7 @@ cost_rescan(PlannerInfo *root, Path *path,
46904690
Costrun_cost=cpu_operator_cost*path->rows;
46914691
doublenbytes=relation_byte_size(path->rows,
46924692
path->pathtarget->width);
4693-
longwork_mem_bytes=work_mem*1024L;
4693+
doublework_mem_bytes=work_mem*(Size)1024;
46944694

46954695
if (nbytes>work_mem_bytes)
46964696
{
@@ -6496,7 +6496,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
64966496
doublepages_fetched;
64976497
doubletuples_fetched;
64986498
doubleheap_pages;
6499-
longmaxentries;
6499+
doublemaxentries;
65006500

65016501
/*
65026502
* Fetch total cost of obtaining the bitmap, as well as its total
@@ -6527,7 +6527,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
65276527
* the bitmap at one time.)
65286528
*/
65296529
heap_pages=Min(pages_fetched,baserel->pages);
6530-
maxentries=tbm_calculate_entries(work_mem*1024L);
6530+
maxentries=tbm_calculate_entries(work_mem*(Size)1024);
65316531

65326532
if (loop_count>1)
65336533
{

‎src/backend/optimizer/plan/planner.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6887,7 +6887,7 @@ plan_create_index_workers(Oid tableOid, Oid indexOid)
68876887
* parallel worker to sort.
68886888
*/
68896889
while (parallel_workers>0&&
6890-
maintenance_work_mem / (parallel_workers+1)<32768L)
6890+
maintenance_work_mem / (parallel_workers+1)<32*1024)
68916891
parallel_workers--;
68926892

68936893
done:

‎src/backend/replication/logical/reorderbuffer.c

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3643,7 +3643,7 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
36433643
* haven't exceeded the memory limit.
36443644
*/
36453645
if (debug_logical_replication_streaming==DEBUG_LOGICAL_REP_STREAMING_BUFFERED&&
3646-
rb->size<logical_decoding_work_mem*1024L)
3646+
rb->size<logical_decoding_work_mem*(Size)1024)
36473647
return;
36483648

36493649
/*
@@ -3656,7 +3656,7 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
36563656
* because a user can reduce the logical_decoding_work_mem to a smaller
36573657
* value before the most recent change.
36583658
*/
3659-
while (rb->size >=logical_decoding_work_mem*1024L||
3659+
while (rb->size >=logical_decoding_work_mem*(Size)1024||
36603660
(debug_logical_replication_streaming==DEBUG_LOGICAL_REP_STREAMING_IMMEDIATE&&
36613661
rb->size>0))
36623662
{
@@ -3699,8 +3699,7 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
36993699
}
37003700

37013701
/* We must be under the memory limit now. */
3702-
Assert(rb->size<logical_decoding_work_mem*1024L);
3703-
3702+
Assert(rb->size<logical_decoding_work_mem* (Size)1024);
37043703
}
37053704

37063705
/*

‎src/backend/utils/sort/tuplestore.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -265,7 +265,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
265265
state->truncated= false;
266266
state->usedDisk= false;
267267
state->maxSpace=0;
268-
state->allowedMem=maxKBytes*1024L;
268+
state->allowedMem=maxKBytes*(int64)1024;
269269
state->availMem=state->allowedMem;
270270
state->myfile=NULL;
271271

‎src/include/executor/hashjoin.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ typedef struct HashMemoryChunkData
147147

148148
typedefstructHashMemoryChunkData*HashMemoryChunk;
149149

150-
#defineHASH_CHUNK_SIZE(32 *1024L)
150+
#defineHASH_CHUNK_SIZE((Size) (32 *1024))
151151
#defineHASH_CHUNK_HEADER_SIZEMAXALIGN(sizeof(HashMemoryChunkData))
152152
#defineHASH_CHUNK_DATA(hc)(((char *) (hc)) + HASH_CHUNK_HEADER_SIZE)
153153
/* tuples exceeding HASH_CHUNK_THRESHOLD bytes are put in their own chunk */

‎src/include/nodes/tidbitmap.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ typedef struct TBMIterateResult
6262

6363
/* function prototypes in nodes/tidbitmap.c */
6464

65-
externTIDBitmap*tbm_create(longmaxbytes,dsa_area*dsa);
65+
externTIDBitmap*tbm_create(Sizemaxbytes,dsa_area*dsa);
6666
externvoidtbm_free(TIDBitmap*tbm);
6767
externvoidtbm_free_shared_area(dsa_area*dsa,dsa_pointerdp);
6868

@@ -84,7 +84,7 @@ extern void tbm_end_private_iterate(TBMPrivateIterator *iterator);
8484
externvoidtbm_end_shared_iterate(TBMSharedIterator*iterator);
8585
externTBMSharedIterator*tbm_attach_shared_iterate(dsa_area*dsa,
8686
dsa_pointerdp);
87-
externlongtbm_calculate_entries(doublemaxbytes);
87+
externinttbm_calculate_entries(Sizemaxbytes);
8888

8989
externTBMIteratortbm_begin_iterate(TIDBitmap*tbm,
9090
dsa_area*dsa,dsa_pointerdsp);

‎src/include/utils/dsa.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ typedef pg_atomic_uint64 dsa_pointer_atomic;
9797
#defineDSA_DEFAULT_INIT_SEGMENT_SIZE ((size_t) (1 * 1024 * 1024))
9898

9999
/* The minimum size of a DSM segment. */
100-
#defineDSA_MIN_SEGMENT_SIZE((size_t) (256 *1024L))
100+
#defineDSA_MIN_SEGMENT_SIZE((size_t) (256 *1024))
101101

102102
/* The maximum size of a DSM segment. */
103103
#defineDSA_MAX_SEGMENT_SIZE ((size_t) 1 << DSA_OFFSET_WIDTH)

‎src/include/utils/guc.h

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,13 @@
1717
#include"utils/array.h"
1818

1919

20-
/* upper limit for GUC variables measured in kilobytes of memory */
21-
/* note that various places assume the byte size fits in a "long" variable */
22-
#ifSIZEOF_SIZE_T>4&&SIZEOF_LONG>4
20+
/*
21+
* Maximum for integer GUC variables that are measured in kilobytes of memory.
22+
* This value is chosen to ensure that the corresponding number of bytes fits
23+
* into a variable of type size_t or ssize_t. Be sure to compute the number
24+
* of bytes like "guc_var * (Size) 1024" to avoid int-width overflow.
25+
*/
26+
#ifSIZEOF_SIZE_T>4
2327
#defineMAX_KILOBYTESINT_MAX
2428
#else
2529
#defineMAX_KILOBYTES(INT_MAX / 1024)

‎src/test/modules/test_bloomfilter/test_bloomfilter.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ create_and_test_bloom(int power, int64 nelements, int callerseed)
7676
int64nfalsepos;
7777
bloom_filter*filter;
7878

79-
bloom_work_mem= (1L<<power) /8L /1024L;
79+
bloom_work_mem= ((int64)1<<power) /(8*1024);
8080

8181
elog(DEBUG1,"bloom_work_mem (KB): %d",bloom_work_mem);
8282

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp