@@ -593,7 +593,7 @@ def test_archive_push_partial_file_exists(self):
593593self .del_test_dir (module_name ,fname )
594594
595595# @unittest.skip("skip")
596- def test_archive_push_partial_file_exists_not_stale (self ):
596+ def test_archive_push_part_file_exists_not_stale (self ):
597597"""Archive-push if .part file exists and it is not stale"""
598598fname = self .id ().split ('.' )[3 ]
599599backup_dir = os .path .join (self .tmp_path ,module_name ,fname ,'backup' )
@@ -896,8 +896,8 @@ def test_basic_master_and_replica_concurrent_archiving(self):
896896"""
897897 make node 'master 'with archiving,
898898 take archive backup and turn it into replica,
899- set replica with archiving, make archive backup from replica,
900- makearchive backup from master
899+ set replica with archiving,
900+ make sure that archiving on both nodes is working.
901901 """
902902fname = self .id ().split ('.' )[3 ]
903903backup_dir = os .path .join (self .tmp_path ,module_name ,fname ,'backup' )
@@ -959,13 +959,7 @@ def test_basic_master_and_replica_concurrent_archiving(self):
959959"from generate_series(0,10000) i" )
960960
961961# TAKE FULL ARCHIVE BACKUP FROM REPLICA
962- backup_id = self .backup_node (
963- backup_dir ,'master' ,replica ,
964- options = [
965- '--archive-timeout=30' ,
966- '--master-host=localhost' ,
967- '--master-db=postgres' ,
968- '--master-port={0}' .format (master .port )])
962+ backup_id = self .backup_node (backup_dir ,'master' ,replica )
969963
970964self .validate_pb (backup_dir ,'master' )
971965self .assertEqual (
@@ -977,7 +971,7 @@ def test_basic_master_and_replica_concurrent_archiving(self):
977971self .assertEqual (
978972'OK' ,self .show_pb (backup_dir ,'master' ,backup_id )['status' ])
979973
980- master .pgbench_init (scale = 50 )
974+ master .pgbench_init (scale = 10 )
981975
982976sleep (10 )
983977
@@ -986,8 +980,8 @@ def test_basic_master_and_replica_concurrent_archiving(self):
986980master .pgbench_init (scale = 10 )
987981replica .pgbench_init (scale = 10 )
988982
989-
990- exit ( 1 )
983+ self . backup_node ( backup_dir , 'master' , master )
984+ self . backup_node ( backup_dir , 'master' , replica )
991985
992986# Clean after yourself
993987self .del_test_dir (module_name ,fname )
@@ -1472,6 +1466,10 @@ def test_archive_catalog_1(self):
14721466"""
14731467 double segment - compressed and not
14741468 """
1469+ if not self .archive_compress :
1470+ return self .fail (
1471+ 'You need to enable ARCHIVE_COMPRESSION for this test to run' )
1472+
14751473fname = self .id ().split ('.' )[3 ]
14761474backup_dir = os .path .join (self .tmp_path ,module_name ,fname ,'backup' )
14771475node = self .make_simple_node (
@@ -1524,6 +1522,10 @@ def test_archive_catalog_2(self):
15241522"""
15251523 double segment - compressed and not
15261524 """
1525+ if not self .archive_compress :
1526+ return self .fail (
1527+ 'You need to enable ARCHIVE_COMPRESSION for this test to run' )
1528+
15271529fname = self .id ().split ('.' )[3 ]
15281530backup_dir = os .path .join (self .tmp_path ,module_name ,fname ,'backup' )
15291531node = self .make_simple_node (
@@ -2335,6 +2337,129 @@ def test_archive_get_prefetch_corruption(self):
23352337# Clean after yourself
23362338self .del_test_dir (module_name ,fname )
23372339
2340+ # @unittest.skip("skip")
2341+ def test_archive_show_partial_files_handling (self ):
2342+ """
2343+ check that files with '.part', '.gz.part', '.partial' and '.gz.partial'
2344+ suffixes are handled correctly
2345+ """
2346+ fname = self .id ().split ('.' )[3 ]
2347+ backup_dir = os .path .join (self .tmp_path ,module_name ,fname ,'backup' )
2348+ node = self .make_simple_node (
2349+ base_dir = os .path .join (module_name ,fname ,'node' ),
2350+ set_replication = True ,
2351+ initdb_params = ['--data-checksums' ])
2352+
2353+ self .init_pb (backup_dir )
2354+ self .add_instance (backup_dir ,'node' ,node )
2355+ self .set_archiving (backup_dir ,'node' ,node ,compress = False )
2356+
2357+ node .slow_start ()
2358+
2359+ self .backup_node (backup_dir ,'node' ,node )
2360+
2361+ wals_dir = os .path .join (backup_dir ,'wal' ,'node' )
2362+
2363+ # .part file
2364+ node .safe_psql (
2365+ "postgres" ,
2366+ "create table t1()" )
2367+
2368+ if self .get_version (node )< 100000 :
2369+ filename = node .safe_psql (
2370+ "postgres" ,
2371+ "SELECT file_name "
2372+ "FROM pg_xlogfile_name_offset(pg_current_xlog_location())" ).rstrip ()
2373+ else :
2374+ filename = node .safe_psql (
2375+ "postgres" ,
2376+ "SELECT file_name "
2377+ "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())" ).rstrip ()
2378+
2379+ self .switch_wal_segment (node )
2380+
2381+ os .rename (
2382+ os .path .join (wals_dir ,filename ),
2383+ os .path .join (wals_dir ,'{0}.part' .format (filename )))
2384+
2385+ # .gz.part file
2386+ node .safe_psql (
2387+ "postgres" ,
2388+ "create table t2()" )
2389+
2390+ if self .get_version (node )< 100000 :
2391+ filename = node .safe_psql (
2392+ "postgres" ,
2393+ "SELECT file_name "
2394+ "FROM pg_xlogfile_name_offset(pg_current_xlog_location())" ).rstrip ()
2395+ else :
2396+ filename = node .safe_psql (
2397+ "postgres" ,
2398+ "SELECT file_name "
2399+ "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())" ).rstrip ()
2400+
2401+ self .switch_wal_segment (node )
2402+
2403+ os .rename (
2404+ os .path .join (wals_dir ,filename ),
2405+ os .path .join (wals_dir ,'{0}.gz.part' .format (filename )))
2406+
2407+ # .partial file
2408+ node .safe_psql (
2409+ "postgres" ,
2410+ "create table t3()" )
2411+
2412+ if self .get_version (node )< 100000 :
2413+ filename = node .safe_psql (
2414+ "postgres" ,
2415+ "SELECT file_name "
2416+ "FROM pg_xlogfile_name_offset(pg_current_xlog_location())" ).rstrip ()
2417+ else :
2418+ filename = node .safe_psql (
2419+ "postgres" ,
2420+ "SELECT file_name "
2421+ "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())" ).rstrip ()
2422+
2423+ self .switch_wal_segment (node )
2424+
2425+ os .rename (
2426+ os .path .join (wals_dir ,filename ),
2427+ os .path .join (wals_dir ,'{0}.partial' .format (filename )))
2428+
2429+ # .gz.partial file
2430+ node .safe_psql (
2431+ "postgres" ,
2432+ "create table t4()" )
2433+
2434+ if self .get_version (node )< 100000 :
2435+ filename = node .safe_psql (
2436+ "postgres" ,
2437+ "SELECT file_name "
2438+ "FROM pg_xlogfile_name_offset(pg_current_xlog_location())" ).rstrip ()
2439+ else :
2440+ filename = node .safe_psql (
2441+ "postgres" ,
2442+ "SELECT file_name "
2443+ "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())" ).rstrip ()
2444+
2445+ self .switch_wal_segment (node )
2446+
2447+ os .rename (
2448+ os .path .join (wals_dir ,filename ),
2449+ os .path .join (wals_dir ,'{0}.gz.partial' .format (filename )))
2450+
2451+ self .show_archive (backup_dir ,'node' ,options = ['--log-level-file=VERBOSE' ])
2452+
2453+ with open (os .path .join (backup_dir ,'log' ,'pg_probackup.log' ),'r' )as f :
2454+ log_content = f .read ()
2455+
2456+ self .assertNotIn (
2457+ 'WARNING' ,
2458+ log_content )
2459+
2460+ # Clean after yourself
2461+ self .del_test_dir (module_name ,fname )
2462+
23382463# TODO test with multiple not archived segments.
23392464# TODO corrupted file in archive.
23402465