Commit 7181530

In standby mode, respect checkpoint_segments in addition to
checkpoint_timeout to trigger restartpoints. We used to deliberately only
do time-based restartpoints, because if checkpoint_segments is small we
would spend time doing restartpoints more often than really necessary.
But now that restartpoints are done in bgwriter, they're not as
disruptive as they used to be. Secondly, because streaming replication
stores the streamed WAL files in pg_xlog, we want to clean it up more
often to avoid running out of disk space when checkpoint_timeout is large
and checkpoint_segments small.

Patch by Fujii Masao, with some minor changes by me.
1 parent 8c873bb · commit 7181530
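
The gist of the change: whenever the startup process finishes replaying a WAL segment, it now measures how many segments have gone by since the last restartpoint's redo pointer and, once that distance exceeds checkpoint_segments, signals bgwriter to start a restartpoint. A minimal standalone sketch of that trigger test, using illustrative names (restartpoint_needed, redo_segno) rather than the actual PostgreSQL symbols:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the GUC and the last restartpoint's position. */
static uint32_t CheckPointSegments = 3; /* checkpoint_segments */
static uint64_t redo_segno;             /* segment of last restartpoint's redo ptr */

/*
 * True when enough WAL segments have been replayed since the last
 * restartpoint that a new one should be requested from bgwriter.
 */
static bool
restartpoint_needed(uint64_t replayed_segno)
{
    return replayed_segno >= redo_segno + (CheckPointSegments - 1);
}

The diff below wires exactly this kind of check into XLogPageRead(), reusing the existing XLogCheckpointNeeded() logic by parameterizing it.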

File tree

2 files changed: +43, -18 lines

src/backend/access/transam/xlog.c

Lines changed: 42 additions & 10 deletions
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.418 2010/06/09 10:54:45 mha Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.419 2010/06/09 15:04:06 heikki Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -508,6 +508,9 @@ static bool reachedMinRecoveryPoint = false;
 
 static bool InRedo = false;
 
+/* Have we launched bgwriter during recovery? */
+static bool bgwriterLaunched = false;
+
 /*
  * Information logged when we detect a change in one of the parameters
  * important for Hot Standby.
@@ -550,6 +553,7 @@ static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags);
 static bool XLogCheckBuffer(XLogRecData *rdata, bool doPageWrites,
                 XLogRecPtr *lsn, BkpBlock *bkpb);
 static bool AdvanceXLInsertBuffer(bool new_segment);
+static bool XLogCheckpointNeeded(uint32 logid, uint32 logseg);
 static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch);
 static bool InstallXLogFileSegment(uint32 *log, uint32 *seg, char *tmppath,
                        bool find_free, int *max_advance,
@@ -1554,14 +1558,14 @@ AdvanceXLInsertBuffer(bool new_segment)
 /*
  * Check whether we've consumed enough xlog space that a checkpoint is needed.
  *
- * Caller must have just finished filling the open log file (so that
- * openLogId/openLogSeg are valid).  We measure the distance from RedoRecPtr
- * to the open log file and see if that exceeds CheckPointSegments.
+ * logid/logseg indicate a log file that has just been filled up (or read
+ * during recovery). We measure the distance from RedoRecPtr to logid/logseg
+ * and see if that exceeds CheckPointSegments.
  *
  * Note: it is caller's responsibility that RedoRecPtr is up-to-date.
  */
 static bool
-XLogCheckpointNeeded(void)
+XLogCheckpointNeeded(uint32 logid, uint32 logseg)
 {
 	/*
 	 * A straight computation of segment number could overflow 32 bits. Rather

@@ -1577,8 +1581,8 @@ XLogCheckpointNeeded(void)
 	old_segno = (RedoRecPtr.xlogid % XLogSegSize) * XLogSegsPerFile +
 		(RedoRecPtr.xrecoff / XLogSegSize);
 	old_highbits = RedoRecPtr.xlogid / XLogSegSize;
-	new_segno = (openLogId % XLogSegSize) * XLogSegsPerFile + openLogSeg;
-	new_highbits = openLogId / XLogSegSize;
+	new_segno = (logid % XLogSegSize) * XLogSegsPerFile + logseg;
+	new_highbits = logid / XLogSegSize;
 	if (new_highbits != old_highbits ||
 		new_segno >= old_segno + (uint32) (CheckPointSegments - 1))
 		return true;
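
The (high bits, segment number) pair exists because a flat segment count of logid * XLogSegsPerFile + logseg could overflow 32 bits, as the comment in the hunk above notes. A hedged illustration of the same shape with made-up constants (DIVISOR and SEGS_PER_FILE are placeholders, not the real XLogSegSize/XLogSegsPerFile values):

#include <stdbool.h>
#include <stdint.h>

#define DIVISOR       4096u  /* placeholder divisor, like XLogSegSize above  */
#define SEGS_PER_FILE  255u  /* placeholder, like XLogSegsPerFile above      */

/*
 * Compare two WAL positions by splitting each into high bits plus a
 * 32-bit segment count, so the multiplication stays within 32 bits.
 * If the high bits differ at all, the positions are enormously far
 * apart and a checkpoint is certainly due.
 */
static bool
segments_exceeded(uint32_t old_id, uint32_t old_seg,
                  uint32_t new_id, uint32_t new_seg,
                  uint32_t ckpt_segments)
{
    uint32_t old_high  = old_id / DIVISOR;
    uint32_t new_high  = new_id / DIVISOR;
    uint32_t old_segno = (old_id % DIVISOR) * SEGS_PER_FILE + old_seg;
    uint32_t new_segno = (new_id % DIVISOR) * SEGS_PER_FILE + new_seg;

    return new_high != old_high ||
           new_segno >= old_segno + (ckpt_segments - 1);
}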
@@ -1782,10 +1786,10 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
 			 * update RedoRecPtr and recheck.
 			 */
 			if (IsUnderPostmaster &&
-				XLogCheckpointNeeded())
+				XLogCheckpointNeeded(openLogId, openLogSeg))
 			{
 				(void) GetRedoRecPtr();
-				if (XLogCheckpointNeeded())
+				if (XLogCheckpointNeeded(openLogId, openLogSeg))
 					RequestCheckpoint(CHECKPOINT_CAUSE_XLOG);
 			}
 		}
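
Note the check / refresh / recheck idiom here: the first XLogCheckpointNeeded() call tests against a possibly stale local RedoRecPtr, and only if it fires does the caller pay for GetRedoRecPtr() (a shared-memory read) and confirm before requesting a checkpoint. The XLogPageRead() hunk further down reuses the same pattern for restartpoints. A self-contained sketch of the idiom, with hypothetical names standing in for the real symbols:

#include <stdbool.h>
#include <stdint.h>

static uint64_t shared_redo_segno;  /* stand-in for the shared RedoRecPtr  */
static uint64_t cached_redo_segno;  /* process-local, possibly stale, copy */
static uint32_t segment_limit = 3;  /* stand-in for CheckPointSegments     */

static void request_checkpoint(void) { /* e.g. signal bgwriter */ }

static bool
limit_exceeded(uint64_t segno)
{
    return segno >= cached_redo_segno + (segment_limit - 1);
}

static void
maybe_request_checkpoint(uint64_t filled_segno)
{
    if (limit_exceeded(filled_segno))           /* cheap test on the stale copy */
    {
        cached_redo_segno = shared_redo_segno;  /* refresh (locking elided)     */
        if (limit_exceeded(filled_segno))       /* confirm before signalling    */
            request_checkpoint();
    }
}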
@@ -5653,7 +5657,6 @@ StartupXLOG(void)
 	XLogRecord *record;
 	uint32		freespace;
 	TransactionId oldestActiveXID;
-	bool		bgwriterLaunched = false;
 
 	/*
 	 * Read control file and check XLOG status looks valid.
@@ -7576,6 +7579,21 @@ CreateRestartPoint(int flags)
 		return false;
 	}
 
+	/*
+	 * Update the shared RedoRecPtr so that the startup process can
+	 * calculate the number of segments replayed since last restartpoint,
+	 * and request a restartpoint if it exceeds checkpoint_segments.
+	 *
+	 * You need to hold WALInsertLock and info_lck to update it, although
+	 * during recovery acquiring WALInsertLock is just pro forma, because
+	 * there is no other processes updating Insert.RedoRecPtr.
+	 */
+	LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
+	SpinLockAcquire(&xlogctl->info_lck);
+	xlogctl->Insert.RedoRecPtr = lastCheckPoint.redo;
+	SpinLockRelease(&xlogctl->info_lck);
+	LWLockRelease(WALInsertLock);
+
 	if (log_checkpoints)
 	{
 		/*
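
The update above takes both WALInsertLock (protecting writers of Insert.RedoRecPtr) and the info_lck spinlock (which readers of the shared copy take); during recovery the LWLock is, per the comment, only pro forma. A rough pthreads analogue of this two-level pattern, a heavyweight lock to serialize writers plus a cheap spinlock around the actual store, purely for illustration and not PostgreSQL's actual locking API:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t    insert_lock = PTHREAD_MUTEX_INITIALIZER; /* ~WALInsertLock */
static pthread_spinlock_t info_lck;                                /* ~info_lck      */
static uint64_t           shared_redo;                             /* ~RedoRecPtr    */

/* Call once at startup; spinlocks need explicit initialization. */
static void
locks_init(void)
{
    pthread_spin_init(&info_lck, PTHREAD_PROCESS_PRIVATE);
}

/* Writers serialize on the mutex, then publish under the spinlock so
 * that readers holding only the spinlock never see a torn update. */
static void
update_shared_redo(uint64_t new_redo)
{
    pthread_mutex_lock(&insert_lock);
    pthread_spin_lock(&info_lck);
    shared_redo = new_redo;
    pthread_spin_unlock(&info_lck);
    pthread_mutex_unlock(&insert_lock);
}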
@@ -9209,6 +9227,20 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 	 */
 	if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
 	{
+		/*
+		 * Signal bgwriter to start a restartpoint if we've replayed too
+		 * much xlog since the last one.
+		 */
+		if (StandbyMode && bgwriterLaunched)
+		{
+			if (XLogCheckpointNeeded(readId, readSeg))
+			{
+				(void) GetRedoRecPtr();
+				if (XLogCheckpointNeeded(readId, readSeg))
+					RequestCheckpoint(CHECKPOINT_CAUSE_XLOG);
+			}
+		}
+
 		close(readFile);
 		readFile = -1;
 		readSource = 0;

src/backend/replication/walreceiver.c

Lines changed: 1 addition & 8 deletions
@@ -29,7 +29,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.13 2010/06/09 00:54:39 ishii Exp $
+ *	  $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.14 2010/06/09 15:04:07 heikki Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -506,13 +506,6 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
 		buf += byteswritten;
 
 		LogstreamResult.Write = recptr;
-
-		/*
-		 * XXX: Should we signal bgwriter to start a restartpoint if we've
-		 * consumed too much xlog since the last one, like in normal
-		 * processing? But this is not worth doing unless a restartpoint can
-		 * be created independently from a checkpoint record.
-		 */
 	}
 }

