@@ -675,6 +675,34 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid)
675
675
/*
676
676
* Mark modified block in ptrack_map.
677
677
*/
678
/*
 * Exchange the contents of two map-slot indexes.
 *
 * Implemented with a plain temporary instead of the XOR trick: XOR
 * swapping silently zeroes *both* values whenever slot1 and slot2 point
 * to the same object, and a temporary is clearer and no slower on any
 * modern compiler.
 */
static void
swap_slots(size_t *slot1, size_t *slot2)
{
	size_t		tmp = *slot1;

	*slot1 = *slot2;
	*slot2 = tmp;
}
683
+
684
+ static void
685
+ ptrack_mark_map_pair (size_t slot1 ,size_t slot2 ,XLogRecPtr new_lsn )
686
+ {
687
+ /*
688
+ * We use pg_atomic_uint64 here only for alignment purposes, because
689
+ * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build.
690
+ */
691
+ pg_atomic_uint64 old_lsn ;
692
+
693
+ /* Atomically assign new LSN value to the first slot */
694
+ old_lsn .value = pg_atomic_read_u64 (& ptrack_map -> entries [slot1 ]);
695
+ elog (DEBUG3 ,"ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT ,slot1 ,old_lsn .value ,new_lsn );
696
+ while (old_lsn .value < new_lsn &&
697
+ !pg_atomic_compare_exchange_u64 (& ptrack_map -> entries [slot1 ], (uint64 * )& old_lsn .value ,new_lsn ));
698
+
699
+ /* And to the second */
700
+ old_lsn .value = pg_atomic_read_u64 (& ptrack_map -> entries [slot2 ]);
701
+ elog (DEBUG3 ,"ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT ,slot2 ,old_lsn .value ,new_lsn );
702
+ while (old_lsn .value < new_lsn &&
703
+ !pg_atomic_compare_exchange_u64 (& ptrack_map -> entries [slot2 ], (uint64 * )& old_lsn .value ,new_lsn ));
704
+ }
705
+
678
706
void
679
707
ptrack_mark_block (RelFileNodeBackend smgr_rnode ,
680
708
ForkNumber forknum ,BlockNumber blocknum )
@@ -683,12 +711,13 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
683
711
uint64 hash ;
684
712
size_t slot1 ;
685
713
size_t slot2 ;
714
+ size_t max_lsn_slot1 ;
715
+ size_t max_lsn_slot2 ;
686
716
XLogRecPtr new_lsn ;
687
717
/*
688
718
* We use pg_atomic_uint64 here only for alignment purposes, because
689
719
* pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build.
690
720
*/
691
- pg_atomic_uint64 old_lsn ;
692
721
pg_atomic_uint64 old_init_lsn ;
693
722
694
723
if (ptrack_map_size == 0
@@ -705,6 +734,14 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
705
734
slot1 = (size_t )(hash %PtrackContentNblocks );
706
735
slot2 = (size_t )(((hash <<32 ) | (hash >>32 )) %PtrackContentNblocks );
707
736
737
+ bid .blocknum = InvalidBlockNumber ;
738
+ hash = BID_HASH_FUNC (bid );
739
+ max_lsn_slot1 = (size_t )(hash %PtrackContentNblocks );
740
+ max_lsn_slot2 = max_lsn_slot1 + 1 ;
741
+
742
+ if (max_lsn_slot2 < max_lsn_slot1 )
743
+ swap_slots (& max_lsn_slot1 ,& max_lsn_slot2 );
744
+
708
745
if (RecoveryInProgress ())
709
746
new_lsn = GetXLogReplayRecPtr (NULL );
710
747
else
@@ -720,15 +757,35 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
720
757
!pg_atomic_compare_exchange_u64 (& ptrack_map -> init_lsn , (uint64 * )& old_init_lsn .value ,new_lsn ));
721
758
}
722
759
723
- /* Atomically assign new LSN value to thefirst slot */
724
- old_lsn . value = pg_atomic_read_u64 ( & ptrack_map -> entries [ slot1 ] );
725
- elog ( DEBUG3 , "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT , slot1 , old_lsn . value , new_lsn );
726
- while ( old_lsn . value < new_lsn &&
727
- ! pg_atomic_compare_exchange_u64 ( & ptrack_map -> entries [ slot1 ], ( uint64 * ) & old_lsn . value , new_lsn ));
760
+ // mark thepage
761
+ ptrack_mark_map_pair ( slot1 , slot2 , new_lsn );
762
+ // mark the file (new LSN is always valid maximum LSN)
763
+ ptrack_mark_map_pair ( max_lsn_slot1 , max_lsn_slot2 , new_lsn );
764
+ }
728
765
729
- /* And to the second */
730
- old_lsn .value = pg_atomic_read_u64 (& ptrack_map -> entries [slot2 ]);
731
- elog (DEBUG3 ,"ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT ,slot2 ,old_lsn .value ,new_lsn );
732
- while (old_lsn .value < new_lsn &&
733
- !pg_atomic_compare_exchange_u64 (& ptrack_map -> entries [slot2 ], (uint64 * )& old_lsn .value ,new_lsn ));
766
+ XLogRecPtr ptrack_read_file_maxlsn (RelFileNode rnode ,ForkNumber forknum )
767
+ {
768
+ PtBlockId bid ;
769
+ uint64 hash ;
770
+ size_t slot1 ;
771
+ size_t slot2 ;
772
+ XLogRecPtr update_lsn1 ;
773
+ XLogRecPtr update_lsn2 ;
774
+
775
+ bid .relnode = rnode ;
776
+ bid .forknum = forknum ;
777
+ bid .blocknum = InvalidBlockNumber ;
778
+
779
+ hash = BID_HASH_FUNC (bid );
780
+
781
+ slot1 = (size_t )(hash %PtrackContentNblocks );
782
+ slot2 = slot1 + 1 ;
783
+
784
+ if (slot2 < slot1 )
785
+ swap_slots (& slot1 ,& slot2 );
786
+
787
+ update_lsn1 = pg_atomic_read_u64 (& ptrack_map -> entries [slot1 ]);
788
+ update_lsn2 = pg_atomic_read_u64 (& ptrack_map -> entries [slot2 ]);
789
+
790
+ return update_lsn1 == update_lsn2 ?update_lsn1 :InvalidXLogRecPtr ;
734
791
}