@@ -1369,34 +1369,34 @@ SearchCatCacheMiss(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	ct = NULL;
-	stale = false;
+		ct = NULL;
+		stale = false;
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-									 hashValue, hashIndex);
-		/* upon failure, we must start the scan over */
-		if (ct == NULL)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			stale = true;
-			break;
+			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+										 hashValue, hashIndex);
+			/* upon failure, we must start the scan over */
+			if (ct == NULL)
+			{
+				stale = true;
+				break;
+			}
+			/* immediately set the refcount to 1 */
+			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+			ct->refcount++;
+			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+			break;				/* assume only one match */
 		}
-		/* immediately set the refcount to 1 */
-		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-		ct->refcount++;
-		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-		break;					/* assume only one match */
-	}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
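
This hunk is pure re-indentation (plausibly from a pgindent pass): SearchCatCacheMiss had gained a do { ... } while (stale); retry loop, visible here as the unchanged "} while (stale);" context line, and the loop body now moves one tab to the right. The retry exists because CatalogCacheCreateEntry can return NULL when the tuple it was handed went stale mid-lookup, in which case the whole index scan must be restarted. Here is a minimal standalone sketch of that control flow; begin_scan, next_tuple, make_entry, and end_scan are hypothetical stand-ins for the systable_* and catcache calls, not real PostgreSQL APIs.

#include <stdbool.h>
#include <stddef.h>

typedef struct Entry Entry;

extern void *begin_scan(void);			/* hypothetical: start a catalog scan */
extern void *next_tuple(void *scan);	/* hypothetical: NULL when exhausted */
extern Entry *make_entry(void *tuple);	/* hypothetical: NULL if tuple went stale */
extern void end_scan(void *scan);		/* hypothetical: close the scan */

static Entry *
lookup_with_retry(void)
{
	Entry	   *entry;
	bool		stale;

	do
	{
		void	   *scan = begin_scan();
		void	   *tuple;

		entry = NULL;
		stale = false;

		while ((tuple = next_tuple(scan)) != NULL)
		{
			entry = make_entry(tuple);
			/* upon failure, we must start the scan over */
			if (entry == NULL)
			{
				stale = true;
				break;
			}
			break;				/* assume only one match */
		}

		end_scan(scan);
	} while (stale);

	return entry;
}
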
@@ -1656,95 +1656,95 @@ SearchCatCacheList(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
-
-	/* The list will be ordered iff we are doing an index scan */
-	ordered = (scandesc->irel != NULL);
-
-	stale = false;
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		uint32		hashValue;
-		Index		hashIndex;
-		bool		found = false;
-		dlist_head *bucket;
+		/* The list will be ordered iff we are doing an index scan */
+		ordered = (scandesc->irel != NULL);
 
-		/*
-		 * See if there's an entry for this tuple already.
-		 */
-		ct = NULL;
-		hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-		hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+		stale = false;
 
-		bucket = &cache->cc_bucket[hashIndex];
-		dlist_foreach(iter, bucket)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			ct = dlist_container(CatCTup, cache_elem, iter.cur);
+			uint32		hashValue;
+			Index		hashIndex;
+			bool		found = false;
+			dlist_head *bucket;
 
-			if (ct->dead || ct->negative)
-				continue;		/* ignore dead and negative entries */
+			/*
+			 * See if there's an entry for this tuple already.
+			 */
+			ct = NULL;
+			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
-			if (ct->hash_value != hashValue)
-				continue;		/* quickly skip entry if wrong hash val */
+			bucket = &cache->cc_bucket[hashIndex];
+			dlist_foreach(iter, bucket)
+			{
+				ct = dlist_container(CatCTup, cache_elem, iter.cur);
 
-			if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-				continue;		/* not same tuple */
+				if (ct->dead || ct->negative)
+					continue;	/* ignore dead and negative entries */
 
-			/*
-			 * Found a match, but can't use it if it belongs to another
-			 * list already
-			 */
-			if (ct->c_list)
-				continue;
+				if (ct->hash_value != hashValue)
+					continue;	/* quickly skip entry if wrong hash val */
 
-			found = true;
-			break;				/* A-OK */
-		}
+				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+					continue;	/* not same tuple */
 
-		if (!found)
-		{
-			/* We didn't find a usable entry, so make a new one */
-			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-										 hashValue, hashIndex);
-			/* upon failure, we must start the scan over */
-			if (ct == NULL)
-			{
 				/*
-				 * Release refcounts on any items we already had.  We dare
-				 * not try to free them if they're now unreferenced, since
-				 * an error while doing that would result in the PG_CATCH
-				 * below doing extra refcount decrements.  Besides, we'll
-				 * likely re-adopt those items in the next iteration, so
-				 * it's not worth complicating matters to try to get rid
-				 * of them.
+				 * Found a match, but can't use it if it belongs to
+				 * another list already
 				 */
-				foreach(ctlist_item, ctlist)
+				if (ct->c_list)
+					continue;
+
+				found = true;
+				break;			/* A-OK */
+			}
+
+			if (!found)
+			{
+				/* We didn't find a usable entry, so make a new one */
+				ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+											 hashValue, hashIndex);
+				/* upon failure, we must start the scan over */
+				if (ct == NULL)
 				{
-					ct = (CatCTup *) lfirst(ctlist_item);
-					Assert(ct->c_list == NULL);
-					Assert(ct->refcount > 0);
-					ct->refcount--;
+					/*
+					 * Release refcounts on any items we already had.  We
+					 * dare not try to free them if they're now
+					 * unreferenced, since an error while doing that would
+					 * result in the PG_CATCH below doing extra refcount
+					 * decrements.  Besides, we'll likely re-adopt those
+					 * items in the next iteration, so it's not worth
+					 * complicating matters to try to get rid of them.
+					 */
+					foreach(ctlist_item, ctlist)
+					{
+						ct = (CatCTup *) lfirst(ctlist_item);
+						Assert(ct->c_list == NULL);
+						Assert(ct->refcount > 0);
+						ct->refcount--;
+					}
+					/* Reset ctlist in preparation for new try */
+					ctlist = NIL;
+					stale = true;
+					break;
 				}
-				/* Reset ctlist in preparation for new try */
-				ctlist = NIL;
-				stale = true;
-				break;
 			}
-		}
 
-		/* Careful here: add entry to ctlist, then bump its refcount */
-		/* This way leaves state correct if lappend runs out of memory */
-		ctlist = lappend(ctlist, ct);
-		ct->refcount++;
-	}
+			/* Careful here: add entry to ctlist, then bump its refcount */
+			/* This way leaves state correct if lappend runs out of memory */
+			ctlist = lappend(ctlist, ct);
+			ct->refcount++;
+		}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
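
SearchCatCacheList gets the same one-tab shift for its own do { ... } while (stale); loop, but with an extra unwind step: since it accumulates many entries before building the list, a stale tuple forces it to drop the refcount taken on each entry collected so far and reset ctlist to NIL before the outer loop retries the scan; per the comment in the hunk, the entries themselves stay put and will likely be re-adopted on the next pass. A sketch of that unwind-and-retry shape, under the same hypothetical scan API as above, with a singly linked Entry list standing in for the real ctlist List:

#include <stdbool.h>
#include <stddef.h>
#include <assert.h>

typedef struct Entry
{
	int			refcount;
	struct Entry *next;			/* stand-in for list membership */
} Entry;

extern void *begin_scan(void);			/* hypothetical scan API, as above */
extern void *next_tuple(void *scan);	/* NULL when the scan is exhausted */
extern Entry *make_entry(void *tuple);	/* NULL if the tuple went stale */
extern void end_scan(void *scan);

static Entry *
build_list_with_retry(void)
{
	Entry	   *list;
	bool		stale;

	do
	{
		void	   *scan = begin_scan();
		void	   *tuple;

		list = NULL;
		stale = false;

		while ((tuple = next_tuple(scan)) != NULL)
		{
			Entry	   *entry = make_entry(tuple);

			if (entry == NULL)
			{
				/*
				 * Unwind: drop the refcounts taken so far.  The entries
				 * themselves stay cached; the retry will likely re-adopt
				 * them.
				 */
				for (Entry *e = list; e != NULL; e = e->next)
				{
					assert(e->refcount > 0);
					e->refcount--;
				}
				list = NULL;	/* reset in preparation for new try */
				stale = true;
				break;
			}

			/* link the entry in first, then bump its refcount */
			entry->next = list;
			list = entry;
			entry->refcount++;
		}

		end_scan(scan);
	} while (stale);

	return list;
}
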