10 | 10 |
11 | 11 | class FalsePositive(ProbackupTest, unittest.TestCase):
12 | 12 |
13 | | -    # @unittest.skip("skip")
14 | | -    # @unittest.expectedFailure
15 | | -    def test_pgpro561(self):
16 | | -        """
17 | | -        make node with archiving, make stream backup, restore it to node1,
18 | | -        check that archiving is not successful on node1
19 | | -        """
20 | | -        fname = self.id().split('.')[3]
21 | | -        node1 = self.make_simple_node(base_dir="{0}/{1}/node1".format(module_name, fname),
22 | | -            set_replication=True,
23 | | -            initdb_params=['--data-checksums'],
24 | | -            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
25 | | -            )
26 | | -        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
27 | | -        self.init_pb(backup_dir)
28 | | -        self.add_instance(backup_dir, 'node1', node1)
29 | | -        self.set_archiving(backup_dir, 'node1', node1)
30 | | -        node1.start()
31 | | -
32 | | -        backup_id = self.backup_node(backup_dir, 'node1', node1, options=["--stream"])
33 | | -
34 | | -        node2 = self.make_simple_node(base_dir="{0}/{1}/node2".format(module_name, fname))
35 | | -        node2.cleanup()
36 | | -
37 | | -        node1.psql(
38 | | -            "postgres",
39 | | -            "create table t_heap as select i as id, md5(i::text) as text, md5(repeat(i::text,10))::tsvector as tsvector from generate_series(0,256) i")
40 | | -
41 | | -        self.backup_node(backup_dir, 'node1', node1, backup_type='page', options=["--stream"])
42 | | -        self.restore_node(backup_dir, 'node1', data_dir=node2.data_dir)
43 | | -        node2.append_conf('postgresql.auto.conf', 'port = {0}'.format(node2.port))
44 | | -        node2.start({"-t": "600"})
45 | | -
46 | | -        timeline_node1 = node1.get_control_data()["Latest checkpoint's TimeLineID"]
47 | | -        timeline_node2 = node2.get_control_data()["Latest checkpoint's TimeLineID"]
48 | | -        self.assertEqual(timeline_node1, timeline_node2, "Timelines on Master and Node1 should be equal. This is unexpected")
49 | | -
50 | | -        archive_command_node1 = node1.safe_psql("postgres", "show archive_command")
51 | | -        archive_command_node2 = node2.safe_psql("postgres", "show archive_command")
52 | | -        self.assertEqual(archive_command_node1, archive_command_node2, "Archive command on Master and Node should be equal. This is unexpected")
53 | | -
54 | | -        result = node2.safe_psql("postgres", "select last_failed_wal from pg_stat_get_archiver() where last_failed_wal is not NULL")
55 | | -        # self.assertEqual(res, six.b(""), 'Restored Node1 failed to archive segment {0} due to having the same archive command as Master'.format(res.rstrip()))
56 | | -        if result == "":
57 | | -            self.assertEqual(1, 0, 'Error is expected due to Master and Node1 having the common archive and archive_command')
58 | | -
59 | | -        # Clean after yourself
60 | | -        self.del_test_dir(module_name, fname)
61 | | -
62 | | -    # @unittest.skip("skip")
63 | | -    def pgpro688(self):
64 | | -        """make node with archiving, make backup, get Recovery Time, validate to Recovery Time. Waiting PGPRO-688. RESOLVED"""
65 | | -        fname = self.id().split('.')[3]
66 | | -        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
67 | | -            set_replication=True,
68 | | -            initdb_params=['--data-checksums'],
69 | | -            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
70 | | -            )
71 | | -        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
72 | | -        self.init_pb(backup_dir)
73 | | -        self.add_instance(backup_dir, 'node', node)
74 | | -        self.set_archiving(backup_dir, 'node', node)
75 | | -        node.start()
76 | | -
77 | | -        backup_id = self.backup_node(backup_dir, 'node', node)
78 | | -        recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
79 | | -
80 | | -        # Uncommenting this section will make this test True Positive
81 | | -        #node.safe_psql("postgres", "select pg_create_restore_point('123')")
82 | | -        #node.safe_psql("postgres", "select txid_current()")
83 | | -        #node.safe_psql("postgres", "select pg_switch_xlog()")
84 | | -        ####
85 | | -
86 | | -        #try:
87 | | -        self.validate_pb(backup_dir, 'node', options=["--time='{0}'".format(recovery_time)])
88 | | -        # we should die here because exception is what we expect to happen
89 | | -        # self.assertEqual(1, 0, "Expecting Error because it should not be possible safely validate 'Recovery Time' without wal record with timestamp.\n Output: {0} \n CMD: {1}".format(
90 | | -        #     repr(self.output), self.cmd))
91 | | -        # except ProbackupException as e:
92 | | -        #     self.assertTrue('WARNING: recovery can be done up to time {0}'.format(recovery_time) in e.message,
93 | | -        #         '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd))
94 | | -
95 | | -        # Clean after yourself
96 | | -        self.del_test_dir(module_name, fname)
97 | | -
98 | | -    # @unittest.skip("skip")
99 | | -    def pgpro702_688(self):
100 | | -        """make node without archiving, make stream backup, get Recovery Time, validate to Recovery Time"""
101 | | -        fname = self.id().split('.')[3]
102 | | -        node = self.make_simple_node(base_dir="{0}/{1}/node".format(module_name, fname),
103 | | -            set_replication=True,
104 | | -            initdb_params=['--data-checksums'],
105 | | -            pg_options={'wal_level': 'replica', 'max_wal_senders': '2'}
106 | | -            )
107 | | -        backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
108 | | -        self.init_pb(backup_dir)
109 | | -        self.add_instance(backup_dir, 'node', node)
110 | | -        node.start()
111 | | -
112 | | -        backup_id = self.backup_node(backup_dir, 'node', node, options=["--stream"])
113 | | -        recovery_time = self.show_pb(backup_dir, 'node', backup_id)['recovery-time']
114 | | -
115 | | -        self.assertIn(six.b("INFO: backup validation completed successfully on"),
116 | | -            self.validate_pb(backup_dir, 'node', node, options=["--time='{0}'".format(recovery_time)]))
117 | | -
118 | | -        # Clean after yourself
119 | | -        self.del_test_dir(module_name, fname)
120 | | -
121 | 13 |     # @unittest.skip("skip")
122 | 14 |     @unittest.expectedFailure
123 | 15 |     def test_validate_wal_lost_segment(self):