@@ -1367,34 +1367,34 @@ SearchCatCacheMiss(CatCache *cache,
         cur_skey[2].sk_argument = v3;
         cur_skey[3].sk_argument = v4;
 
-    scandesc = systable_beginscan(relation,
-                                  cache->cc_indexoid,
-                                  IndexScanOK(cache, cur_skey),
-                                  NULL,
-                                  nkeys,
-                                  cur_skey);
+        scandesc = systable_beginscan(relation,
+                                      cache->cc_indexoid,
+                                      IndexScanOK(cache, cur_skey),
+                                      NULL,
+                                      nkeys,
+                                      cur_skey);
 
-    ct = NULL;
-    stale = false;
+        ct = NULL;
+        stale = false;
 
-    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-    {
-        ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-                                     hashValue, hashIndex);
-        /* upon failure, we must start the scan over */
-        if (ct == NULL)
+        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
         {
-            stale = true;
-            break;
+            ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+                                         hashValue, hashIndex);
+            /* upon failure, we must start the scan over */
+            if (ct == NULL)
+            {
+                stale = true;
+                break;
+            }
+            /* immediately set the refcount to 1 */
+            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+            ct->refcount++;
+            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+            break;              /* assume only one match */
         }
-        /* immediately set the refcount to 1 */
-        ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-        ct->refcount++;
-        ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-        break;                  /* assume only one match */
-    }
 
-    systable_endscan(scandesc);
+        systable_endscan(scandesc);
     } while (stale);
 
     table_close(relation, AccessShareLock);
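
This hunk is indentation-only: the scan body already sits inside a do { ... } while (stale); retry loop (note the unchanged "} while (stale);" context line), and the change re-indents the body one tab to match. The protocol being reindented is the retry on stale entries in SearchCatCacheMiss(): CatalogCacheCreateEntry() returning NULL means the tuple went stale while the entry was being built, so the scan is closed and restarted from the top, and the refcount is taken immediately once an entry is successfully built. A minimal standalone sketch of that retry shape follows; the names (Entry, mock_create_entry) are illustrative stand-ins, not PostgreSQL APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Entry
{
    int         value;
    int         refcount;
} Entry;

static int  attempts = 0;

/*
 * Stand-in for CatalogCacheCreateEntry(): fails (returns NULL) on the
 * first call, mimicking a catalog row that went stale mid-build.
 */
static Entry *
mock_create_entry(int value)
{
    Entry      *e;

    if (++attempts == 1)
        return NULL;            /* stale: caller must restart the scan */
    e = malloc(sizeof(Entry));
    e->value = value;
    e->refcount = 0;
    return e;
}

int
main(void)
{
    Entry      *e = NULL;
    bool        stale;

    do
    {
        /* "begin scan" would go here (cf. systable_beginscan) */
        stale = false;
        e = mock_create_entry(42);
        if (e == NULL)
            stale = true;       /* start the scan over */
        else
            e->refcount++;      /* pin the entry immediately */
        /* "end scan" always runs before retrying (cf. systable_endscan) */
    } while (stale);

    printf("built entry %d after %d attempt(s)\n", e->value, attempts);
    free(e);
    return 0;
}

As in the hunk, the teardown step runs on every pass, including the failed one, so no scan is left open across a retry.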
@@ -1654,95 +1654,95 @@ SearchCatCacheList(CatCache *cache,
         cur_skey[2].sk_argument = v3;
         cur_skey[3].sk_argument = v4;
 
-    scandesc = systable_beginscan(relation,
-                                  cache->cc_indexoid,
-                                  IndexScanOK(cache, cur_skey),
-                                  NULL,
-                                  nkeys,
-                                  cur_skey);
-
-    /* The list will be ordered iff we are doing an index scan */
-    ordered = (scandesc->irel != NULL);
-
-    stale = false;
+        scandesc = systable_beginscan(relation,
+                                      cache->cc_indexoid,
+                                      IndexScanOK(cache, cur_skey),
+                                      NULL,
+                                      nkeys,
+                                      cur_skey);
 
-    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-    {
-        uint32      hashValue;
-        Index       hashIndex;
-        bool        found = false;
-        dlist_head *bucket;
+        /* The list will be ordered iff we are doing an index scan */
+        ordered = (scandesc->irel != NULL);
 
-        /*
-         * See if there's an entry for this tuple already.
-         */
-        ct = NULL;
-        hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-        hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+        stale = false;
 
-        bucket = &cache->cc_bucket[hashIndex];
-        dlist_foreach(iter, bucket)
+        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
         {
-            ct = dlist_container(CatCTup, cache_elem, iter.cur);
+            uint32      hashValue;
+            Index       hashIndex;
+            bool        found = false;
+            dlist_head *bucket;
 
-            if (ct->dead || ct->negative)
-                continue;       /* ignore dead and negative entries */
+            /*
+             * See if there's an entry for this tuple already.
+             */
+            ct = NULL;
+            hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+            hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
-            if (ct->hash_value != hashValue)
-                continue;       /* quickly skip entry if wrong hash val */
+            bucket = &cache->cc_bucket[hashIndex];
+            dlist_foreach(iter, bucket)
+            {
+                ct = dlist_container(CatCTup, cache_elem, iter.cur);
 
-            if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-                continue;       /* not same tuple */
+                if (ct->dead || ct->negative)
+                    continue;   /* ignore dead and negative entries */
 
-            /*
-             * Found a match, but can't use it if it belongs to another
-             * list already
-             */
-            if (ct->c_list)
-                continue;
+                if (ct->hash_value != hashValue)
+                    continue;   /* quickly skip entry if wrong hash val */
 
-            found = true;
-            break;              /* A-OK */
-        }
+                if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+                    continue;   /* not same tuple */
 
-        if (!found)
-        {
-            /* We didn't find a usable entry, so make a new one */
-            ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-                                         hashValue, hashIndex);
-            /* upon failure, we must start the scan over */
-            if (ct == NULL)
-            {
                 /*
-                 * Release refcounts on any items we already had.  We dare
-                 * not try to free them if they're now unreferenced, since
-                 * an error while doing that would result in the PG_CATCH
-                 * below doing extra refcount decrements.  Besides, we'll
-                 * likely re-adopt those items in the next iteration, so
-                 * it's not worth complicating matters to try to get rid
-                 * of them.
+                 * Found a match, but can't use it if it belongs to
+                 * another list already
                  */
-                foreach(ctlist_item, ctlist)
+                if (ct->c_list)
+                    continue;
+
+                found = true;
+                break;          /* A-OK */
+            }
+
+            if (!found)
+            {
+                /* We didn't find a usable entry, so make a new one */
+                ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+                                             hashValue, hashIndex);
+                /* upon failure, we must start the scan over */
+                if (ct == NULL)
                 {
-                    ct = (CatCTup *) lfirst(ctlist_item);
-                    Assert(ct->c_list == NULL);
-                    Assert(ct->refcount > 0);
-                    ct->refcount--;
+                    /*
+                     * Release refcounts on any items we already had.  We
+                     * dare not try to free them if they're now
+                     * unreferenced, since an error while doing that would
+                     * result in the PG_CATCH below doing extra refcount
+                     * decrements.  Besides, we'll likely re-adopt those
+                     * items in the next iteration, so it's not worth
+                     * complicating matters to try to get rid of them.
+                     */
+                    foreach(ctlist_item, ctlist)
+                    {
+                        ct = (CatCTup *) lfirst(ctlist_item);
+                        Assert(ct->c_list == NULL);
+                        Assert(ct->refcount > 0);
+                        ct->refcount--;
+                    }
+                    /* Reset ctlist in preparation for new try */
+                    ctlist = NIL;
+                    stale = true;
+                    break;
                 }
-                /* Reset ctlist in preparation for new try */
-                ctlist = NIL;
-                stale = true;
-                break;
             }
-        }
 
-        /* Careful here: add entry to ctlist, then bump its refcount */
-        /* This way leaves state correct if lappend runs out of memory */
-        ctlist = lappend(ctlist, ct);
-        ct->refcount++;
-    }
+            /* Careful here: add entry to ctlist, then bump its refcount */
+            /* This way leaves state correct if lappend runs out of memory */
+            ctlist = lappend(ctlist, ct);
+            ct->refcount++;
+        }
 
-    systable_endscan(scandesc);
+        systable_endscan(scandesc);
     } while (stale);
 
     table_close(relation, AccessShareLock);
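
This hunk is the same mechanical re-indent applied to SearchCatCacheList(), plus re-wrapping of two comments to the narrower width. The list case adds a cleanup obligation the single-tuple case lacks: on a stale failure partway through, the refcounts already taken must be dropped and the partial ctlist reset before rescanning, while the entries themselves are deliberately left alone, since they will likely be re-adopted on the next pass. A standalone sketch of that release-and-retry shape, again with illustrative names (Entry, mock_create_entry, pool) rather than PostgreSQL APIs:

#include <stdbool.h>
#include <stdio.h>

#define NROWS 3

typedef struct Entry
{
    int         value;
    int         refcount;
} Entry;

static Entry pool[NROWS];       /* pretend cache entries, one per row */
static int  fail_at = 1;        /* fail once, on the second row */

/* Stand-in for CatalogCacheCreateEntry(); NULL means "stale, restart" */
static Entry *
mock_create_entry(int row)
{
    if (fail_at == row)
    {
        fail_at = -1;           /* only fail once */
        return NULL;
    }
    pool[row].value = row * 10;
    return &pool[row];
}

int
main(void)
{
    Entry      *list[NROWS];
    int         nlist;
    bool        stale;

    do
    {
        nlist = 0;
        stale = false;
        for (int row = 0; row < NROWS; row++)   /* the "scan" */
        {
            Entry      *e = mock_create_entry(row);

            if (e == NULL)
            {
                /* release pins taken so far; keep the entries themselves */
                for (int i = 0; i < nlist; i++)
                    list[i]->refcount--;
                nlist = 0;      /* reset list in preparation for new try */
                stale = true;
                break;
            }
            list[nlist++] = e;  /* add to list first, ... */
            e->refcount++;      /* ... then bump its refcount */
        }
    } while (stale);

    for (int i = 0; i < nlist; i++)
        printf("entry %d: value=%d refcount=%d\n", i, list[i]->value,
               list[i]->refcount);
    return 0;
}

Mirroring the hunk's "Careful here" comment, the sketch appends before pinning, so an allocation failure in the append step cannot leave a refcount with no list slot pointing at it.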