@@ -245,8 +245,8 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
245245if (!RelationUsesLocalBuffers (scan -> rs_base .rs_rd )&&
246246scan -> rs_nblocks > NBuffers /4 )
247247{
248- allow_strat = scan -> rs_base .rs_allow_strat ;
249- allow_sync = scan -> rs_base .rs_allow_sync ;
248+ allow_strat = ( scan -> rs_base .rs_flags & SO_ALLOW_STRAT ) != 0 ;
249+ allow_sync = ( scan -> rs_base .rs_flags & SO_ALLOW_SYNC ) != 0 ;
250250}
251251else
252252allow_strat = allow_sync = false;
@@ -267,7 +267,10 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
267267if (scan -> rs_base .rs_parallel != NULL )
268268{
269269/* For parallel scan, believe whatever ParallelTableScanDesc says. */
270- scan -> rs_base .rs_syncscan = scan -> rs_base .rs_parallel -> phs_syncscan ;
270+ if (scan -> rs_base .rs_parallel -> phs_syncscan )
271+ scan -> rs_base .rs_flags |=SO_ALLOW_SYNC ;
272+ else
273+ scan -> rs_base .rs_flags &= ~SO_ALLOW_SYNC ;
271274}
272275else if (keep_startblock )
273276{
@@ -276,16 +279,19 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
276279 * so that rewinding a cursor doesn't generate surprising results.
277280 * Reset the active syncscan setting, though.
278281 */
279- scan -> rs_base .rs_syncscan = (allow_sync && synchronize_seqscans );
282+ if (allow_sync && synchronize_seqscans )
283+ scan -> rs_base .rs_flags |=SO_ALLOW_SYNC ;
284+ else
285+ scan -> rs_base .rs_flags &= ~SO_ALLOW_SYNC ;
280286}
281287else if (allow_sync && synchronize_seqscans )
282288{
283- scan -> rs_base .rs_syncscan = true ;
289+ scan -> rs_base .rs_flags |= SO_ALLOW_SYNC ;
284290scan -> rs_startblock = ss_get_location (scan -> rs_base .rs_rd ,scan -> rs_nblocks );
285291}
286292else
287293{
288- scan -> rs_base .rs_syncscan = false ;
294+ scan -> rs_base .rs_flags &= ~ SO_ALLOW_SYNC ;
289295scan -> rs_startblock = 0 ;
290296}
291297
@@ -305,11 +311,11 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
305311memcpy (scan -> rs_base .rs_key ,key ,scan -> rs_base .rs_nkeys * sizeof (ScanKeyData ));
306312
307313/*
308- * Currently, we don't have a stats counter for bitmap heap scans (but the
309- * underlying bitmap index scans will be counted) or sample scans (we only
310- * update stats for tuple fetches there)
314+ * Currently, we only have a stats counter for sequential heap scans (but
315+ * e.g. for bitmap scans the underlying bitmap index scans will be counted,
316+ * and for sample scans we update stats for tuple fetches).
311317 */
312- if (! scan -> rs_base .rs_bitmapscan && ! scan -> rs_base . rs_samplescan )
318+ if (scan -> rs_base .rs_flags & SO_TYPE_SEQSCAN )
313319pgstat_count_heap_scan (scan -> rs_base .rs_rd );
314320}
315321
@@ -325,7 +331,8 @@ heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlk
325331HeapScanDesc scan = (HeapScanDesc )sscan ;
326332
327333Assert (!scan -> rs_inited );/* else too late to change */
328- Assert (!scan -> rs_base .rs_syncscan );/* else rs_startblock is significant */
334+ /* else rs_startblock is significant */
335+ Assert (!(scan -> rs_base .rs_flags & SO_ALLOW_SYNC ));
329336
330337/* Check startBlk is valid (but allow case of zero blocks...) */
331338Assert (startBlk == 0 || startBlk < scan -> rs_nblocks );
@@ -375,7 +382,7 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
375382RBM_NORMAL ,scan -> rs_strategy );
376383scan -> rs_cblock = page ;
377384
378- if (!scan -> rs_base .rs_pageatatime )
385+ if (!( scan -> rs_base .rs_flags & SO_ALLOW_PAGEMODE ) )
379386return ;
380387
381388buffer = scan -> rs_cbuf ;
@@ -574,7 +581,7 @@ heapgettup(HeapScanDesc scan,
574581 * time, and much more likely that we'll just bollix things for
575582 * forward scanners.
576583 */
577- scan -> rs_base .rs_syncscan = false ;
584+ scan -> rs_base .rs_flags &= ~ SO_ALLOW_SYNC ;
578585/* start from last page of the scan */
579586if (scan -> rs_startblock > 0 )
580587page = scan -> rs_startblock - 1 ;
@@ -738,7 +745,7 @@ heapgettup(HeapScanDesc scan,
738745 * a little bit backwards on every invocation, which is confusing.
739746 * We don't guarantee any specific ordering in general, though.
740747 */
741- if (scan -> rs_base .rs_syncscan )
748+ if (scan -> rs_base .rs_flags & SO_ALLOW_SYNC )
742749ss_report_location (scan -> rs_base .rs_rd ,page );
743750}
744751
@@ -885,7 +892,7 @@ heapgettup_pagemode(HeapScanDesc scan,
885892 * time, and much more likely that we'll just bollix things for
886893 * forward scanners.
887894 */
888- scan -> rs_base .rs_syncscan = false ;
895+ scan -> rs_base .rs_flags &= ~ SO_ALLOW_SYNC ;
889896/* start from last page of the scan */
890897if (scan -> rs_startblock > 0 )
891898page = scan -> rs_startblock - 1 ;
@@ -1037,7 +1044,7 @@ heapgettup_pagemode(HeapScanDesc scan,
10371044 * a little bit backwards on every invocation, which is confusing.
10381045 * We don't guarantee any specific ordering in general, though.
10391046 */
1040- if (scan -> rs_base .rs_syncscan )
1047+ if (scan -> rs_base .rs_flags & SO_ALLOW_SYNC )
10411048ss_report_location (scan -> rs_base .rs_rd ,page );
10421049}
10431050
@@ -1125,12 +1132,7 @@ TableScanDesc
11251132heap_beginscan (Relation relation ,Snapshot snapshot ,
11261133int nkeys ,ScanKey key ,
11271134ParallelTableScanDesc parallel_scan ,
1128- bool allow_strat ,
1129- bool allow_sync ,
1130- bool allow_pagemode ,
1131- bool is_bitmapscan ,
1132- bool is_samplescan ,
1133- bool temp_snap )
1135+ uint32 flags )
11341136{
11351137HeapScanDesc scan ;
11361138
@@ -1151,33 +1153,39 @@ heap_beginscan(Relation relation, Snapshot snapshot,
11511153scan -> rs_base .rs_rd = relation ;
11521154scan -> rs_base .rs_snapshot = snapshot ;
11531155scan -> rs_base .rs_nkeys = nkeys ;
1154- scan -> rs_base .rs_bitmapscan = is_bitmapscan ;
1155- scan -> rs_base .rs_samplescan = is_samplescan ;
1156- scan -> rs_strategy = NULL ;/* set in initscan */
1157- scan -> rs_base .rs_allow_strat = allow_strat ;
1158- scan -> rs_base .rs_allow_sync = allow_sync ;
1159- scan -> rs_base .rs_temp_snap = temp_snap ;
1156+ scan -> rs_base .rs_flags = flags ;
11601157scan -> rs_base .rs_parallel = parallel_scan ;
1158+ scan -> rs_strategy = NULL ;/* set in initscan */
11611159
11621160/*
1163- * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1161+ * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
11641162 */
1165- scan -> rs_base . rs_pageatatime =
1166- allow_pagemode && snapshot && IsMVCCSnapshot ( snapshot ) ;
1163+ if (!( snapshot && IsMVCCSnapshot ( snapshot )))
1164+ scan -> rs_base . rs_flags &= ~ SO_ALLOW_PAGEMODE ;
11671165
11681166/*
1169- * For a seqscan in a serializable transaction, acquire a predicate lock
1170- * on the entire relation. This is required not only to lock all the
1171- * matching tuples, but also to conflict with new insertions into the
1172- * table. In an indexscan, we take page locks on the index pages covering
1173- * the range specified in the scan qual, but in a heap scan there is
1174- * nothing more fine-grained to lock. A bitmap scan is a different story,
1175- * there we have already scanned the index and locked the index pages
1176- * covering the predicate. But in that case we still have to lock any
1177- * matching heap tuples.
1167+ * For seqscan and sample scans in a serializable transaction, acquire a
1168+ * predicate lock on the entire relation. This is required not only to
1169+ * lock all the matching tuples, but also to conflict with new insertions
1170+ * into the table. In an indexscan, we take page locks on the index pages
1171+ * covering the range specified in the scan qual, but in a heap scan there
1172+ * is nothing more fine-grained to lock. A bitmap scan is a different
1173+ * story, there we have already scanned the index and locked the index
1174+ * pages covering the predicate. But in that case we still have to lock
1175+ * any matching heap tuples. For sample scan we could optimize the locking
1176+ * to be at least page-level granularity, but we'd need to add per-tuple
1177+ * locking for that.
11781178 */
1179- if (!is_bitmapscan )
1179+ if (scan -> rs_base .rs_flags & (SO_TYPE_SEQSCAN |SO_TYPE_SAMPLESCAN ))
1180+ {
1181+ /*
1182+ * Ensure a missing snapshot is noticed reliably, even if the
1183+ * isolation mode means predicate locking isn't performed (and
1184+ * therefore the snapshot isn't used here).
1185+ */
1186+ Assert (snapshot );
11801187PredicateLockRelation (relation ,snapshot );
1188+ }
11811189
11821190/* we only need to set this up once */
11831191scan -> rs_ctup .t_tableOid = RelationGetRelid (relation );
@@ -1204,10 +1212,21 @@ heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
12041212
12051213if (set_params )
12061214{
1207- scan -> rs_base .rs_allow_strat = allow_strat ;
1208- scan -> rs_base .rs_allow_sync = allow_sync ;
1209- scan -> rs_base .rs_pageatatime =
1210- allow_pagemode && IsMVCCSnapshot (scan -> rs_base .rs_snapshot );
1215+ if (allow_strat )
1216+ scan -> rs_base .rs_flags |=SO_ALLOW_STRAT ;
1217+ else
1218+ scan -> rs_base .rs_flags &= ~SO_ALLOW_STRAT ;
1219+
1220+ if (allow_sync )
1221+ scan -> rs_base .rs_flags |=SO_ALLOW_SYNC ;
1222+ else
1223+ scan -> rs_base .rs_flags &= ~SO_ALLOW_SYNC ;
1224+
1225+ if (allow_pagemode && scan -> rs_base .rs_snapshot &&
1226+ IsMVCCSnapshot (scan -> rs_base .rs_snapshot ))
1227+ scan -> rs_base .rs_flags |=SO_ALLOW_PAGEMODE ;
1228+ else
1229+ scan -> rs_base .rs_flags &= ~SO_ALLOW_PAGEMODE ;
12111230}
12121231
12131232/*
@@ -1246,7 +1265,7 @@ heap_endscan(TableScanDesc sscan)
12461265if (scan -> rs_strategy != NULL )
12471266FreeAccessStrategy (scan -> rs_strategy );
12481267
1249- if (scan -> rs_base .rs_temp_snap )
1268+ if (scan -> rs_base .rs_flags & SO_TEMP_SNAPSHOT )
12501269UnregisterSnapshot (scan -> rs_base .rs_snapshot );
12511270
12521271pfree (scan );
@@ -1288,7 +1307,7 @@ heap_getnext(TableScanDesc sscan, ScanDirection direction)
12881307
12891308HEAPDEBUG_1 ;/* heap_getnext( info ) */
12901309
1291- if (scan -> rs_base .rs_pageatatime )
1310+ if (scan -> rs_base .rs_flags & SO_ALLOW_PAGEMODE )
12921311heapgettup_pagemode (scan ,direction ,
12931312scan -> rs_base .rs_nkeys ,scan -> rs_base .rs_key );
12941313else
@@ -1335,11 +1354,10 @@ heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *s
13351354
13361355HEAPAMSLOTDEBUG_1 ;/* heap_getnextslot( info ) */
13371356
1338- if (scan -> rs_base .rs_pageatatime )
1339- heapgettup_pagemode (scan ,direction ,
1340- scan -> rs_base .rs_nkeys ,scan -> rs_base .rs_key );
1357+ if (sscan -> rs_flags & SO_ALLOW_PAGEMODE )
1358+ heapgettup_pagemode (scan ,direction ,sscan -> rs_nkeys ,sscan -> rs_key );
13411359else
1342- heapgettup (scan ,direction ,scan -> rs_base . rs_nkeys ,scan -> rs_base . rs_key );
1360+ heapgettup (scan ,direction ,sscan -> rs_nkeys ,sscan -> rs_key );
13431361
13441362if (scan -> rs_ctup .t_data == NULL )
13451363{