@@ -46,6 +46,7 @@ def test_pgpro434_1(self):
         self.add_instance(backup_dir, 'node', node)

         # Make backup
+        sleep(5)
         self.backup_node(backup_dir, 'node', node)
         node.cleanup()

@@ -59,8 +60,9 @@ def test_pgpro434_1(self):
         self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
+    @unittest.expectedFailure
     def test_pgpro434_2(self):
-        """Check that timelines are correct"""
+        """Check that timelines are correct. WAITING PGPRO-1053 for --immediate. replace time"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
@@ -81,15 +83,19 @@ def test_pgpro434_2(self):
         recovery_time = self.show_pb(backup_dir, 'node', backup_id)["recovery-time"]
         node.safe_psql(
             "postgres",
-            "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+            "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,1) i")

         # SECOND TIMELINE
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Second timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
+        self.assertFalse(node.execute("postgres", "select exists(select 1 from t_heap where id = 100501)")[0][0],
+            'data after restore not equal to original data')
         node.safe_psql(
             "postgres",
             "insert into t_heap select 2 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(100,200) i")
@@ -103,6 +109,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('third timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -120,6 +128,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Fourth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -128,6 +138,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Fifth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -136,6 +148,8 @@ def test_pgpro434_2(self):
         node.cleanup()
         self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
         node.start()
+        while node.safe_psql("postgres", "select pg_is_in_recovery()") == 't\n':
+            sleep(1)
         if self.verbose:
             print('Sixth timeline')
             print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
@@ -147,7 +161,7 @@ def test_pgpro434_2(self):
             'data after restore not equal to original data')

         # Clean after yourself
-        # self.del_test_dir(module_name, fname)
+        self.del_test_dir(module_name, fname)

     # @unittest.skip("skip")
     def test_pgpro434_3(self):
@@ -219,7 +233,7 @@ def test_arhive_push_file_exists(self):

         os.remove(file)
         sleep(5)
-        node.safe_psql('postgres', 'select pg_switch_xlog()')
+        node.safe_psql('postgres', 'select pg_switch_wal()')

         with open(log_file, 'r') as f:
             log_content = f.read()
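The hunk above swaps pg_switch_xlog() for pg_switch_wal(), matching the rename of that function in PostgreSQL 10. If the suite ever has to run against both pre-10 and 10+ servers, a version-aware switch could look roughly like the sketch below; switch_wal is a hypothetical helper, not something this patch introduces:

    # Sketch only: choose the WAL-switch function by server version.
    # 'switch_wal' is a hypothetical helper, not part of this patch.
    def switch_wal(node):
        version = int(node.safe_psql('postgres', 'show server_version_num').strip())
        func = 'pg_switch_wal' if version >= 100000 else 'pg_switch_xlog'
        node.safe_psql('postgres', 'select {0}()'.format(func))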
@@ -229,9 +243,10 @@ def test_arhive_push_file_exists(self):
         # Clean after yourself
         self.del_test_dir(module_name, fname)

-    #@unittest.expectedFailure
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
     def test_replica_archive(self):
-        """make node withput archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
+        """make node without archiving, take stream backup and turn it into replica, set replica with archiving, make archive backup from replica"""
         fname = self.id().split('.')[3]
         backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
         master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
@@ -240,6 +255,7 @@ def test_replica_archive(self):
             pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
             )
         self.init_pb(backup_dir)
+        # ADD INSTANCE 'MASTER'
         self.add_instance(backup_dir, 'master', master)
         # force more frequent wal switch
         master.start()
@@ -258,7 +274,7 @@ def test_replica_archive(self):
         self.restore_node(backup_dir, 'master', replica)
         self.set_replica(master, replica, synchronous=True)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)
-        replica.start({"-t": "600"})
+        replica.start()

         # Check data correctness on replica
         after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
@@ -269,6 +285,7 @@ def test_replica_archive(self):
             "postgres",
             "insert into t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(256,512) i")
         before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+        # ADD INSTANCE 'REPLICA'
         self.add_instance(backup_dir, 'replica', replica)
         backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=30',
             '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
@@ -306,3 +323,62 @@ def test_replica_archive(self):

         # Clean after yourself
         self.del_test_dir(module_name, fname)
+
+    # @unittest.expectedFailure
+    # @unittest.skip("skip")
+    def test_master_and_replica_concurrent_archiving(self):
+        """make node 'master' with archiving, take archive backup and turn it into replica, set replica with archiving, make archive backup from replica, make archive backup from master"""
+        fname = self.id().split('.')[3]
+        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
+        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
+            set_replication=True,
+            initdb_params=['--data-checksums'],
+            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
+            )
+        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
+        replica.cleanup()
+
+        self.init_pb(backup_dir)
+        # ADD INSTANCE 'MASTER'
+        self.add_instance(backup_dir, 'master', master)
+        self.set_archiving(backup_dir, 'master', master)
+        master.start()
+
+        master.psql(
+            "postgres",
+            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        self.backup_node(backup_dir, 'master', master)
+        # GET LOGICAL CONTENT FROM MASTER
+        before = master.safe_psql("postgres", "SELECT * FROM t_heap")
+        # GET PHYSICAL CONTENT FROM MASTER
+        pgdata_master = self.pgdata_content(master.data_dir)
+
+        # Settings for Replica
+        self.restore_node(backup_dir, 'master', replica)
+        # CHECK PHYSICAL CORRECTNESS on REPLICA
+        pgdata_replica = self.pgdata_content(replica.data_dir)
+        self.compare_pgdata(pgdata_master, pgdata_replica)
+
+        self.set_replica(master, replica, synchronous=True)
+        # ADD INSTANCE REPLICA
+        self.add_instance(backup_dir, 'replica', replica)
+        # SET ARCHIVING FOR REPLICA
+        self.set_archiving(backup_dir, 'replica', replica, replica=True)
+        replica.start()
+
+        # CHECK LOGICAL CORRECTNESS on REPLICA
+        after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
+        self.assertEqual(before, after)
+
+        # TAKE FULL ARCHIVE BACKUP FROM REPLICA
+        backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=30',
+            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
+        self.validate_pb(backup_dir, 'replica')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])
+
+        # TAKE FULL ARCHIVE BACKUP FROM MASTER
+        backup_id = self.backup_node(backup_dir, 'master', master)
+        self.validate_pb(backup_dir, 'master')
+        self.assertEqual('OK', self.show_pb(backup_dir, 'master', backup_id)['status'])
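The new test compares master and replica physically with self.pgdata_content() and self.compare_pgdata(). As a rough illustration of the idea only, not the framework's actual implementation, such a check can be reduced to hashing the regular files under each data directory and asserting the two maps are equal:

    # Sketch only: a naive physical comparison of two data directories.
    # The real pgdata_content/compare_pgdata helpers may differ; files such as
    # postmaster.pid are skipped because they legitimately diverge between nodes.
    import hashlib
    import os

    def pgdata_content(data_dir, ignore=('postmaster.pid', 'postmaster.opts')):
        content = {}
        for root, _, files in os.walk(data_dir):
            for name in files:
                if name in ignore:
                    continue
                path = os.path.join(root, name)
                with open(path, 'rb') as f:
                    content[os.path.relpath(path, data_dir)] = hashlib.md5(f.read()).hexdigest()
        return content

    def compare_pgdata(expected, actual):
        assert expected == actual, 'data directories differ'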