import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest, ProbackupException, archive_script
from datetime import datetime, timedelta
import subprocess
from sys import exit
from time import sleep


module_name = 'archive'


class ArchiveTest(ProbackupTest, unittest.TestCase):

    # @unittest.expectedFailure
    # @unittest.skip("skip")
    def test_pgpro434_1(self):
        """Description in jira issue PGPRO-434"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        # force more frequent wal switch
        node.append_conf('postgresql.auto.conf', 'archive_timeout = 30')
        node.start()

        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100) i")

        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.backup_node(backup_dir, 'node', node)
        node.cleanup()

        self.restore_node(backup_dir, 'node', node)
        node.start()

        # Recreate backup catalogue
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)

        # Make backup
        self.backup_node(backup_dir, 'node', node)
        node.cleanup()

        # Restore Database
        self.restore_node(backup_dir, 'node', node)
        node.start()

        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
            'data after restore not equal to original data')
        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_pgpro434_2(self):
        """Check that timelines are correct"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)
        node.start()

        # FIRST TIMELINE
        node.safe_psql(
            "postgres",
            "create table t_heap as select 1 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100) i")
        backup_id = self.backup_node(backup_dir, 'node', node)
        recovery_time = self.show_pb(backup_dir, 'node', backup_id)["recovery-time"]
        node.safe_psql(
            "postgres",
            "insert into t_heap select 100501 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")

        # SECOND TIMELINE
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
        node.start()
        if self.verbose:
            print('Second timeline')
            print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
        node.safe_psql(
            "postgres",
            "insert into t_heap select 2 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(100,200) i")
        backup_id = self.backup_node(backup_dir, 'node', node)
        recovery_time = self.show_pb(backup_dir, 'node', backup_id)["recovery-time"]
        node.safe_psql(
            "postgres",
            "insert into t_heap select 100502 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")

        # THIRD TIMELINE
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
        node.start()
        if self.verbose:
            print('Third timeline')
            print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))
        node.safe_psql(
            "postgres",
            "insert into t_heap select 3 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(200,300) i")
        backup_id = self.backup_node(backup_dir, 'node', node)
        recovery_time = self.show_pb(backup_dir, 'node', backup_id)["recovery-time"]
        result = node.safe_psql("postgres", "SELECT * FROM t_heap")
        node.safe_psql(
            "postgres",
            "insert into t_heap select 100503 as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")

        # FOURTH TIMELINE
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
        node.start()
        if self.verbose:
            print('Fourth timeline')
            print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))

        # FIFTH TIMELINE
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
        node.start()
        if self.verbose:
            print('Fifth timeline')
            print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))

        # SIXTH TIMELINE
        node.cleanup()
        self.restore_node(backup_dir, 'node', node, options=["--time={0}".format(recovery_time)])
        node.start()
        if self.verbose:
            print('Sixth timeline')
            print(node.safe_psql("postgres", "select redo_wal_file from pg_control_checkpoint()"))

        self.assertFalse(node.execute("postgres", "select exists(select 1 from t_heap where id > 100500)")[0][0],
            'data inserted after the recovery target is present after restore')

        self.assertEqual(result, node.safe_psql("postgres", "SELECT * FROM t_heap"),
            'data after restore not equal to original data')

        # Clean after yourself
        # self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_pgpro434_3(self):
        """Check pg_stop_backup_timeout"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        # Do not shadow the imported archive_script template with the path to the
        # generated script, otherwise format() is called on the path string itself.
        archive_script_path = os.path.join(backup_dir, 'archive_script.sh')
        with open(archive_script_path, 'w+') as f:
            f.write(archive_script.format(backup_dir=backup_dir, node_name='node', count_limit=2))

        st = os.stat(archive_script_path)
        os.chmod(archive_script_path, st.st_mode | 0o111)
        node.append_conf('postgresql.auto.conf', "archive_command = '{0} %p %f'".format(archive_script_path))
        node.start()
        try:
            self.backup_node(backup_dir, 'node', node, options=["--stream"])
            # we should die here because exception is what we expect to happen
            self.assertEqual(1, 0, "Expecting Error because pg_stop_backup failed to answer.\n Output: {0}\n CMD: {1}".format(
                repr(self.output), self.cmd))
        except ProbackupException as e:
            self.assertTrue("ERROR: pg_stop_backup doesn't answer" in e.message
                and "cancel it" in e.message,
                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    # @unittest.skip("skip")
    def test_archive_push_file_exists(self):
        """Archive-push if file exists"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s', 'archive_timeout': '1'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'node', node)
        self.set_archiving(backup_dir, 'node', node)

        wals_dir = os.path.join(backup_dir, 'wal', 'node')
        file = os.path.join(wals_dir, '000000010000000000000001')
        with open(file, 'a') as f:
            pass
        node.start()
        node.safe_psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,100500) i")
        log_file = os.path.join(node.logs_dir, 'postgresql.log')
        with open(log_file, 'r') as f:
            log_content = f.read()
        self.assertTrue('LOG: archive command failed with exit code 1' in log_content
            and 'DETAIL: The failed archive command was:' in log_content
            and 'INFO: pg_probackup archive-push from' in log_content
            and "ERROR: file '{0}', already exists.".format(file) in log_content,
            'Expecting error messages about failed archive_command'
            )
        self.assertFalse('pg_probackup archive-push completed successfully' in log_content)

        # Remove the stale segment; the next WAL switch should be archived successfully
        os.remove(file)
        sleep(5)
        node.safe_psql('postgres', 'select pg_switch_xlog()')

        with open(log_file, 'r') as f:
            log_content = f.read()
        self.assertTrue('pg_probackup archive-push completed successfully' in log_content,
            'Expecting messages about successful execution of archive_command')

        # Clean after yourself
        self.del_test_dir(module_name, fname)

    @unittest.expectedFailure
    def test_replica_archive(self):
        """make node without archiving, take stream backup and turn it into replica, set up archiving on the replica, take archive backups from the replica"""
        fname = self.id().split('.')[3]
        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
        master = self.make_simple_node(base_dir="{0}/{1}/master".format(module_name, fname),
            set_replication=True,
            initdb_params=['--data-checksums'],
            pg_options={'wal_level': 'replica', 'max_wal_senders': '2', 'checkpoint_timeout': '30s'}
            )
        self.init_pb(backup_dir)
        self.add_instance(backup_dir, 'master', master)
        # force more frequent wal switch
        master.start()

        replica = self.make_simple_node(base_dir="{0}/{1}/replica".format(module_name, fname))
        replica.cleanup()

        master.psql(
            "postgres",
            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")

        self.backup_node(backup_dir, 'master', master, options=['--stream'])
        before = master.safe_psql("postgres", "SELECT * FROM t_heap")

        # Settings for Replica
        self.restore_node(backup_dir, 'master', replica)
        self.set_replica(master, replica)
        self.set_archiving(backup_dir, 'replica', replica, replica=True)
        replica.start({"-t": "600"})

        # Check data correctness on replica
        after = replica.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(before, after)

        # Change data on master, take FULL backup from replica,
        # restore taken backup and check that restored data equal to original data
        master.psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(256,512) i")
        before = master.safe_psql("postgres", "SELECT * FROM t_heap")
        self.add_instance(backup_dir, 'replica', replica)
        backup_id = self.backup_node(backup_dir, 'replica', replica, options=['--archive-timeout=30',
            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
        self.validate_pb(backup_dir, 'replica')
        self.assertEqual('OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])

        # RESTORE FULL BACKUP TAKEN FROM replica
        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname))
        node.cleanup()
        self.restore_node(backup_dir, 'replica', data_dir=node.data_dir)
        node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
        node.start({"-t": "600"})
        # CHECK DATA CORRECTNESS
        after = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(before, after)

        # Change data on master, make PAGE backup from replica,
        # restore taken backup and check that restored data equal to original data
        master.psql(
            "postgres",
            "insert into t_heap select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(512,768) i")
        before = master.safe_psql("postgres", "SELECT * FROM t_heap")
        backup_id = self.backup_node(backup_dir, 'replica', replica, backup_type='page', options=['--archive-timeout=30',
            '--master-host=localhost', '--master-db=postgres', '--master-port={0}'.format(master.port)])
        self.validate_pb(backup_dir, 'replica')
        self.assertEqual('OK', self.show_pb(backup_dir, 'replica', backup_id)['status'])

        # RESTORE PAGE BACKUP TAKEN FROM replica
        node.cleanup()
        self.restore_node(backup_dir, 'replica', data_dir=node.data_dir, backup_id=backup_id)
        node.append_conf('postgresql.auto.conf', 'port = {0}'.format(node.port))
        node.start({"-t": "600"})
        # CHECK DATA CORRECTNESS
        after = node.safe_psql("postgres", "SELECT * FROM t_heap")
        self.assertEqual(before, after)

        # Clean after yourself
        self.del_test_dir(module_name, fname)