Commit ee28665

tests: speedup semaphore tests
Increase tests parallelization to 20.
Free up some disk space to have enough space for parallel test dirs.
Increase SysV ipc semaphores to handle a big number of parallel postgres instances.
1 parent 4b45d27, commit ee28665

File tree

4 files changed: +61 -4 lines

cmd/sentinel/sentinel.go

Lines changed: 1 addition & 1 deletion
@@ -374,7 +374,7 @@ func (s *Sentinel) isLagBelowMax(cd *cluster.ClusterData, curMasterDB, db *clust
 	if !*cd.Cluster.DefSpec().SynchronousReplication {
 		log.Debug(fmt.Sprintf("curMasterDB.Status.XLogPos: %d, db.Status.XLogPos: %d, lag: %d", curMasterDB.Status.XLogPos, db.Status.XLogPos, int64(curMasterDB.Status.XLogPos-db.Status.XLogPos)))
 		if int64(curMasterDB.Status.XLogPos-db.Status.XLogPos) > int64(*cd.Cluster.DefSpec().MaxStandbyLag) {
-			log.Debug("ignoring keeper since its behind that maximum xlog position", zap.String("db", db.UID), zap.Uint64("dbXLogPos", db.Status.XLogPos), zap.Uint64("masterXLogPos", curMasterDB.Status.XLogPos))
+			log.Info("ignoring keeper since its behind that maximum xlog position", zap.String("db", db.UID), zap.Uint64("dbXLogPos", db.Status.XLogPos), zap.Uint64("masterXLogPos", curMasterDB.Status.XLogPos))
 			return false
 		}
 	}

scripts/semaphore.sh

Lines changed: 12 additions & 3 deletions
@@ -39,14 +39,23 @@ export CONSUL_BIN="${PWD}/consul/consul"
 
 OLDPATH=$PATH
 
+# Increase sysv ipc semaphores to accommodate a big number of parallel postgres instances
+sudo /bin/sh -c 'echo "32000 1024000000 500 32000" > /proc/sys/kernel/sem'
+
+# Free up some disk space
+rm -rf ~/.rbenv
+
+export INTEGRATION=1
+export PARALLEL=20
+
 # Test with postgresql 9.5
 echo "===== Testing with postgreSQL 9.5 ====="
-export PATH=/usr/lib/postgresql/9.5/bin/:$OLDPATH; INTEGRATION=1 ./test
+export PATH=/usr/lib/postgresql/9.5/bin/:$OLDPATH; ./test
 
 # Test with postgresql 9.6
 echo "===== Testing with postgreSQL 9.6 ====="
-export PATH=/usr/lib/postgresql/9.6/bin/:$OLDPATH; INTEGRATION=1 ./test
+export PATH=/usr/lib/postgresql/9.6/bin/:$OLDPATH; ./test
 
 # Test with postgresql 10
 echo "===== Testing with postgreSQL 10 ====="
-export PATH=/usr/lib/postgresql/10/bin/:$OLDPATH; INTEGRATION=1 ./test
+export PATH=/usr/lib/postgresql/10/bin/:$OLDPATH; ./test
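For context on the kernel.sem bump above: the four numbers written to /proc/sys/kernel/sem are, in order, SEMMSL (max semaphores per set), SEMMNS (max semaphores system-wide), SEMOPM (max operations per semop() call) and SEMMNI (max number of semaphore sets). As the commit message notes, running many postgres instances in parallel can exhaust the distribution defaults. A minimal sketch for checking and raising these limits before running the suite on a local machine; the sysctl form is an assumed equivalent of the script's direct write to /proc, and the INTEGRATION/PARALLEL/./test invocation simply mirrors scripts/semaphore.sh above:

 # Show the current SysV semaphore limits (SEMMSL SEMMNS SEMOPM SEMMNI)
 cat /proc/sys/kernel/sem
 ipcs -ls
 
 # Raise them for the current boot, mirroring scripts/semaphore.sh
 sudo sysctl -w kernel.sem="32000 1024000000 500 32000"
 
 # Run the integration tests with the same settings, assuming a local postgres in PATH
 export INTEGRATION=1
 export PARALLEL=20
 export PATH=/usr/lib/postgresql/9.6/bin/:$PATH
 ./test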

tests/integration/ha_test.go

Lines changed: 10 additions & 0 deletions
@@ -342,6 +342,7 @@ func testFailover(t *testing.T, syncRepl bool) {
 
 	// wait for the keepers to have reported their state (needed to know the instance XLogPos)
 	time.Sleep(5*time.Second)
+	WaitClusterSyncedXLogPos([]string{master.uid, standby.uid}, sm, 20*time.Second)
 
 	// Stop the keeper process on master, should also stop the database
 	t.Logf("Stopping current master keeper: %s", master.uid)
@@ -405,6 +406,7 @@ func testFailoverFailed(t *testing.T, syncRepl bool) {
 
 	// wait for the keepers to have reported their state (needed to know the instance XLogPos)
 	time.Sleep(5*time.Second)
+	WaitClusterSyncedXLogPos([]string{master.uid, standby.uid}, sm, 20*time.Second)
 
 	// Stop the keeper process on master, should also stop the database
 	t.Logf("Stopping current master keeper: %s", master.uid)
@@ -474,6 +476,10 @@ func TestFailoverTooMuchLag(t *testing.T) {
 	// stop the standby and write more than MaxStandbyLag data to the master
 	t.Logf("Stopping current standby keeper: %s", standby.uid)
 	standby.Stop()
+	if err := standby.WaitDBDown(30*time.Second); err != nil {
+		t.Fatalf("unexpected err: %v", err)
+	}
+
 	for i := 1; i < 1000; i++ {
 		if err := write(t, master, i, i); err != nil {
 			t.Fatalf("unexpected err: %v", err)
@@ -529,6 +535,7 @@ func testOldMasterRestart(t *testing.T, syncRepl, usePgrewind bool) {
 
 	// wait for the keepers to have reported their state (needed to know the instance XLogPos)
 	time.Sleep(5*time.Second)
+	WaitClusterSyncedXLogPos([]string{master.uid, standbys[0].uid}, sm, 20*time.Second)
 
 	// Stop the keeper process on master, should also stop the database
 	t.Logf("Stopping current master keeper: %s", master.uid)
@@ -637,6 +644,7 @@ func testPartition1(t *testing.T, syncRepl, usePgrewind bool) {
 
 	// wait for the keepers to have reported their state (needed to know the instance XLogPos)
 	time.Sleep(5*time.Second)
+	WaitClusterSyncedXLogPos([]string{master.uid, standbys[0].uid}, sm, 20*time.Second)
 
 	// Freeze the keeper and postgres processes on the master
 	t.Logf("SIGSTOPping current master keeper: %s", master.uid)
@@ -754,6 +762,7 @@ func testTimelineFork(t *testing.T, syncRepl, usePgrewind bool) {
 
 	// wait for the keepers to have reported their state (needed to know the instance XLogPos)
 	time.Sleep(5*time.Second)
+	WaitClusterSyncedXLogPos([]string{master.uid, standbys[0].uid}, sm, 20*time.Second)
 
 	// Wait replicated data to standby
 	if err := waitLines(t, standbys[0], 1, 10*time.Second); err != nil {
@@ -897,6 +906,7 @@ func TestMasterChangedAddress(t *testing.T) {
 
 	// wait for the keepers to have reported their state (needed to know the instance XLogPos)
 	time.Sleep(5*time.Second)
+	WaitClusterSyncedXLogPos([]string{master.uid, standbys[0].uid}, sm, 20*time.Second)
 
 	// Wait standby synced with master
 	if err := waitLines(t, master, 1, 60*time.Second); err != nil {

tests/integration/utils.go

Lines changed: 38 additions & 0 deletions
@@ -888,6 +888,44 @@ func WaitClusterDataKeepers(keepersUIDs []string, e *store.StoreManager, timeout
 	return fmt.Errorf("timeout")
 }
 
+// WaitClusterSyncedXLogPos waits for all the specified keepers to have the same
+// reported XLogPos
+func WaitClusterSyncedXLogPos(keepersUIDs []string, e *store.StoreManager, timeout time.Duration) error {
+	start := time.Now()
+	for time.Now().Add(-timeout).Before(start) {
+		c := 0
+		curXLogPos := uint64(0)
+		cd, _, err := e.GetClusterData()
+		if err != nil || cd == nil {
+			goto end
+		}
+		// Check for db on keeper to be initialized
+		for _, keeper := range cd.Keepers {
+			if !util.StringInSlice(keepersUIDs, keeper.UID) {
+				continue
+			}
+			for _, db := range cd.DBs {
+				if db.Spec.KeeperUID == keeper.UID {
+					if c == 0 {
+						curXLogPos = db.Status.XLogPos
+					} else {
+						if db.Status.XLogPos != curXLogPos {
+							goto end
+						}
+					}
+				}
+			}
+			c++
+		}
+		if c == len(keepersUIDs) {
+			return nil
+		}
+	end:
+		time.Sleep(sleepInterval)
+	}
+	return fmt.Errorf("timeout")
+}
+
 func testFreeTCPPort(port int) error {
 	ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", curPort))
 	if err != nil {

0 commit comments

