@@ -686,6 +686,35 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid)
 	FreeDir(dir);				/* we ignore any error here */
 }
 
+/*
+ * Get a second position within the ptrack map so that it falls
+ * within the same cache line.
+ */
+size_t
+get_slot2(size_t slot1, uint64 hash) {
+	size_t		cache_line_ep;			/* ending point of the cache line */
+	size_t		cache_line_sp;			/* starting point of the cache line */
+	size_t		cache_line_interval;
+	size_t		slot2;
+
+	/* Get the ending point of the cache line within entries[]. */
+	cache_line_ep = (CACHE_LINE_ALIGN(offsetof(PtrackMapHdr, entries) + slot1 * sizeof(XLogRecPtr))
+					 - offsetof(PtrackMapHdr, entries)) / sizeof(XLogRecPtr);
+	/* Handle overflow beyond the entries[] boundary. */
+	cache_line_ep = cache_line_ep > PtrackContentNblocks ? PtrackContentNblocks : cache_line_ep;
+
+	/* Get the starting point of the cache line within entries[]. */
+	cache_line_sp = cache_line_ep - ENTRIES_PER_LINE;
+
+	/* Handle underflow below zero (sp would then wrap and exceed ep). */
+	cache_line_sp = cache_line_sp > cache_line_ep ? 0 : cache_line_sp;
+
+	cache_line_interval = cache_line_ep - cache_line_sp;
+	slot2 = (size_t) (cache_line_sp + (((hash << 32) | (hash >> 32)) % cache_line_interval));
+	slot2 = (slot1 == slot2) ? ((slot1 + 1) % cache_line_interval) : slot2;
+	return slot2;
+}
+
 /*
  * Mark modified block in ptrack_map.
  */
@@ -738,7 +767,6 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 	 * We use pg_atomic_uint64 here only for alignment purposes, because
 	 * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build.
 	 */
-	pg_atomic_uint32	old_lsn;
 	pg_atomic_uint32	old_init_lsn;
 
 	if (ptrack_map_size == 0
@@ -753,7 +781,7 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 
 	hash = BID_HASH_FUNC(bid);
 	slot1 = (size_t)(hash % PtrackContentNblocks);
-	slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks);
+	slot2 = get_slot2(slot1, hash);
 
 	bid.blocknum = InvalidBlockNumber;
 	hash = BID_HASH_FUNC(bid);
@@ -807,8 +835,8 @@ XLogRecPtr ptrack_read_file_maxlsn(RelFileNode rnode, ForkNumber forknum)
 	if (slot2 < slot1)
 		swap_slots(&slot1, &slot2);
 
-	update_lsn1 = pg_atomic_read_u64(&ptrack_map->entries[slot1]);
-	update_lsn2 = pg_atomic_read_u64(&ptrack_map->entries[slot2]);
+	update_lsn1 = pg_atomic_read_u32(&ptrack_map->entries[slot1]);
+	update_lsn2 = pg_atomic_read_u32(&ptrack_map->entries[slot2]);
 
 	return update_lsn1 == update_lsn2 ? update_lsn1 : InvalidXLogRecPtr;
 }
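
Note: below is a minimal standalone sketch of the arithmetic get_slot2() performs, written for this review with hypothetical stand-in constants (a 64-byte cache line, 8-byte XLogRecPtr-sized entries, a 16-byte header before entries[], and a map of 1000 entries). The real CACHE_LINE_ALIGN, ENTRIES_PER_LINE, PtrackMapHdr, and PtrackContentNblocks definitions come from the ptrack headers and may differ; the sketch only illustrates how slot2 is confined to the cache-line interval [sp, ep) that covers slot1.

/* sketch.c -- standalone illustration of the get_slot2() arithmetic above.
 * All constants here are assumptions for the example, not ptrack's values. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE_SIZE   64						/* assumed line size */
#define ENTRY_SIZE        sizeof(uint64_t)			/* stands in for sizeof(XLogRecPtr) */
#define ENTRIES_PER_LINE  (CACHE_LINE_SIZE / ENTRY_SIZE)
#define NBLOCKS           ((size_t) 1000)			/* stands in for PtrackContentNblocks */
#define HDR_OFFSET        ((size_t) 16)				/* stands in for offsetof(PtrackMapHdr, entries) */
#define LINE_ALIGN(x)     (((x) + CACHE_LINE_SIZE - 1) & ~((size_t) (CACHE_LINE_SIZE - 1)))

static size_t
get_slot2_sketch(size_t slot1, uint64_t hash)
{
	/* Index just past the cache line that covers entries[slot1]. */
	size_t		ep = (LINE_ALIGN(HDR_OFFSET + slot1 * ENTRY_SIZE) - HDR_OFFSET) / ENTRY_SIZE;
	size_t		sp;
	size_t		interval;
	size_t		slot2;

	ep = ep > NBLOCKS ? NBLOCKS : ep;	/* clamp to the end of entries[] */
	sp = ep - ENTRIES_PER_LINE;
	sp = sp > ep ? 0 : sp;				/* underflow guard, same as the patch */

	interval = ep - sp;
	slot2 = sp + (((hash << 32) | (hash >> 32)) % interval);
	slot2 = (slot1 == slot2) ? ((slot1 + 1) % interval) : slot2;
	return slot2;
}

int
main(void)
{
	size_t		slot1 = 123;
	size_t		slot2 = get_slot2_sketch(slot1, UINT64_C(0x1122334455667788));

	printf("slot1 = %zu, slot2 = %zu\n", slot1, slot2);
	return 0;
}

With these assumed constants, slot1 = 123 yields the interval [118, 126) and slot2 = 122, i.e. both slots land in the same 64-byte span of entries[].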