@@ -1364,34 +1364,34 @@ SearchCatCacheMiss(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	ct = NULL;
-	stale = false;
+		ct = NULL;
+		stale = false;
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-									 hashValue, hashIndex);
-		/* upon failure, we must start the scan over */
-		if (ct == NULL)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			stale = true;
-			break;
+			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+										 hashValue, hashIndex);
+			/* upon failure, we must start the scan over */
+			if (ct == NULL)
+			{
+				stale = true;
+				break;
+			}
+			/* immediately set the refcount to 1 */
+			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
+			ct->refcount++;
+			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
+			break;			/* assume only one match */
 		}
-		/* immediately set the refcount to 1 */
-		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
-		ct->refcount++;
-		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
-		break;				/* assume only one match */
-	}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
@@ -1651,95 +1651,95 @@ SearchCatCacheList(CatCache *cache,
 		cur_skey[2].sk_argument = v3;
 		cur_skey[3].sk_argument = v4;
 
-	scandesc = systable_beginscan(relation,
-								  cache->cc_indexoid,
-								  IndexScanOK(cache, cur_skey),
-								  NULL,
-								  nkeys,
-								  cur_skey);
-
-	/* The list will be ordered iff we are doing an index scan */
-	ordered = (scandesc->irel != NULL);
-
-	stale = false;
+		scandesc = systable_beginscan(relation,
+									  cache->cc_indexoid,
+									  IndexScanOK(cache, cur_skey),
+									  NULL,
+									  nkeys,
+									  cur_skey);
 
-	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
-	{
-		uint32		hashValue;
-		Index		hashIndex;
-		bool		found = false;
-		dlist_head *bucket;
+		/* The list will be ordered iff we are doing an index scan */
+		ordered = (scandesc->irel != NULL);
 
-		/*
-		 * See if there's an entry for this tuple already.
-		 */
-		ct = NULL;
-		hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
-		hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
+		stale = false;
 
-		bucket = &cache->cc_bucket[hashIndex];
-		dlist_foreach(iter, bucket)
+		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 		{
-			ct = dlist_container(CatCTup, cache_elem, iter.cur);
+			uint32		hashValue;
+			Index		hashIndex;
+			bool		found = false;
+			dlist_head *bucket;
 
-			if (ct->dead || ct->negative)
-				continue;		/* ignore dead and negative entries */
+			/*
+			 * See if there's an entry for this tuple already.
+			 */
+			ct = NULL;
+			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
+			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
 
-			if (ct->hash_value != hashValue)
-				continue;		/* quickly skip entry if wrong hash val */
+			bucket = &cache->cc_bucket[hashIndex];
+			dlist_foreach(iter, bucket)
+			{
+				ct = dlist_container(CatCTup, cache_elem, iter.cur);
 
-			if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
-				continue;		/* not same tuple */
+				if (ct->dead || ct->negative)
+					continue;	/* ignore dead and negative entries */
 
-			/*
-			 * Found a match, but can't use it if it belongs to another
-			 * list already
-			 */
-			if (ct->c_list)
-				continue;
+				if (ct->hash_value != hashValue)
+					continue;	/* quickly skip entry if wrong hash val */
 
-			found = true;
-			break;				/* A-OK */
-		}
+				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
+					continue;	/* not same tuple */
 
-		if (!found)
-		{
-			/* We didn't find a usable entry, so make a new one */
-			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
-										 hashValue, hashIndex);
-			/* upon failure, we must start the scan over */
-			if (ct == NULL)
-			{
 				/*
-				 * Release refcounts on any items we already had.  We dare
-				 * not try to free them if they're now unreferenced, since
-				 * an error while doing that would result in the PG_CATCH
-				 * below doing extra refcount decrements.  Besides, we'll
-				 * likely re-adopt those items in the next iteration, so
-				 * it's not worth complicating matters to try to get rid
-				 * of them.
+				 * Found a match, but can't use it if it belongs to
+				 * another list already
 				 */
-				foreach(ctlist_item, ctlist)
+				if (ct->c_list)
+					continue;
+
+				found = true;
+				break;			/* A-OK */
+			}
+
+			if (!found)
+			{
+				/* We didn't find a usable entry, so make a new one */
+				ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
+											 hashValue, hashIndex);
+				/* upon failure, we must start the scan over */
+				if (ct == NULL)
 				{
-					ct = (CatCTup *) lfirst(ctlist_item);
-					Assert(ct->c_list == NULL);
-					Assert(ct->refcount > 0);
-					ct->refcount--;
+					/*
+					 * Release refcounts on any items we already had.  We
+					 * dare not try to free them if they're now
+					 * unreferenced, since an error while doing that would
+					 * result in the PG_CATCH below doing extra refcount
+					 * decrements.  Besides, we'll likely re-adopt those
+					 * items in the next iteration, so it's not worth
+					 * complicating matters to try to get rid of them.
+					 */
+					foreach(ctlist_item, ctlist)
+					{
+						ct = (CatCTup *) lfirst(ctlist_item);
+						Assert(ct->c_list == NULL);
+						Assert(ct->refcount > 0);
+						ct->refcount--;
+					}
+					/* Reset ctlist in preparation for new try */
+					ctlist = NIL;
+					stale = true;
+					break;
 				}
-				/* Reset ctlist in preparation for new try */
-				ctlist = NIL;
-				stale = true;
-				break;
 			}
-		}
 
-		/* Careful here: add entry to ctlist, then bump its refcount */
-		/* This way leaves state correct if lappend runs out of memory */
-		ctlist = lappend(ctlist, ct);
-		ct->refcount++;
-	}
+			/* Careful here: add entry to ctlist, then bump its refcount */
+			/* This way leaves state correct if lappend runs out of memory */
+			ctlist = lappend(ctlist, ct);
+			ct->refcount++;
+		}
 
-	systable_endscan(scandesc);
+		systable_endscan(scandesc);
 	} while (stale);
 
 	table_close(relation, AccessShareLock);
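
Both hunks are whitespace-only: each re-indents the body of a catalog scan into the do { ... } while (stale) retry loop that already encloses it (the loop's "} while (stale);" line shows as unchanged context at the bottom of each hunk). The logic being re-indented is a retry-on-stale pattern: CatalogCacheCreateEntry() returns NULL when the tuple it was handed went stale while the entry was being built, and in that case the scan is closed and restarted from the top. Below is a minimal, self-contained C sketch of that shape; the Scan type and the scan_begin/scan_next/scan_end/create_entry helpers are hypothetical stand-ins for the systable_beginscan/systable_getnext/systable_endscan/CatalogCacheCreateEntry calls above, not real PostgreSQL APIs.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the systable_* scan API used in the hunks. */
typedef struct Scan Scan;
extern Scan *scan_begin(void);
extern void *scan_next(Scan *scan);	/* returns NULL when the scan is done */
extern void scan_end(Scan *scan);

/*
 * Hypothetical stand-in for CatalogCacheCreateEntry: returns NULL if the
 * tuple changed underneath us while the entry was being built.
 */
extern void *create_entry(void *tuple);

void *
lookup_with_retry(void)
{
	void	   *entry = NULL;
	bool		stale;

	do
	{
		Scan	   *scan = scan_begin();
		void	   *tuple;

		entry = NULL;
		stale = false;

		while ((tuple = scan_next(scan)) != NULL)
		{
			entry = create_entry(tuple);
			if (entry == NULL)
			{
				stale = true;	/* entry went stale: redo the whole scan */
				break;
			}
			break;				/* assume only one match */
		}

		/* always close the scan before deciding whether to retry */
		scan_end(scan);
	} while (stale);

	return entry;
}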