  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.128 2006/03/05 15:58:45 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.129 2006/06/15 02:08:09 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
 
 /* #define CACHEDEBUG */	/* turns DEBUG elogs on */
 
-/*
- *	Constants related to size of the catcache.
- *
- *	NCCBUCKETS must be a power of two and must be less than 64K (because
- *	SharedInvalCatcacheMsg crams hash indexes into a uint16 field).  In
- *	practice it should be a lot less, anyway, to avoid chewing up too much
- *	space on hash bucket headers.
- *
- *	MAXCCTUPLES could be as small as a few hundred, if per-backend memory
- *	consumption is at a premium.
- */
-#define NCCBUCKETS 256			/* Hash buckets per CatCache */
-#define MAXCCTUPLES 5000		/* Maximum # of tuples in all caches */
-
 /*
  *	Given a hash value and the size of the hash table, find the bucket
  *	in which the hash value belongs. Since the hash table must contain
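
For reference, the comment above describes the hash-to-bucket mapping that the rest of this patch relies on staying cheap: with a power-of-two table size, the modulo reduces to a single bitmask. A minimal standalone sketch of that computation, assuming a power-of-two nbuckets (hash_to_bucket is a hypothetical name, not the macro actually used in catcache.c):

#include <stdint.h>

/* Sketch: when nbuckets is a power of two, hash % nbuckets is equivalent to
 * hash & (nbuckets - 1), which avoids an integer division on every lookup. */
static inline uint32_t
hash_to_bucket(uint32_t hash_value, uint32_t nbuckets)
{
	return hash_value & (nbuckets - 1);
}
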
@@ -89,15 +75,14 @@ static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
 								  HeapTuple tuple);
 
 #ifdef CATCACHE_STATS
-static void CatCachePrintStats(void);
+static void CatCachePrintStats(int code, Datum arg);
 #endif
 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
 static void CatalogCacheInitializeCache(CatCache *cache);
 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 						uint32 hashValue, Index hashIndex,
 						bool negative);
-static void CatalogCacheCleanup(CatCTup *savect);
 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
 
 
@@ -281,26 +266,22 @@ CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
 #ifdef CATCACHE_STATS
 
 static void
-CatCachePrintStats(void)
+CatCachePrintStats(int code, Datum arg)
 {
 	CatCache   *cache;
 	long		cc_searches = 0;
 	long		cc_hits = 0;
 	long		cc_neg_hits = 0;
 	long		cc_newloads = 0;
 	long		cc_invals = 0;
-	long		cc_discards = 0;
 	long		cc_lsearches = 0;
 	long		cc_lhits = 0;
 
-	elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
-		 CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
-
 	for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
 	{
 		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
 			continue;			/* don't print unused caches */
-		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
 			 cache->cc_relname,
 			 cache->cc_indexoid,
 			 cache->cc_ntup,
@@ -312,19 +293,17 @@ CatCachePrintStats(void)
 			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
 			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
 			 cache->cc_invals,
-			 cache->cc_discards,
 			 cache->cc_lsearches,
 			 cache->cc_lhits);
 		cc_searches += cache->cc_searches;
 		cc_hits += cache->cc_hits;
 		cc_neg_hits += cache->cc_neg_hits;
 		cc_newloads += cache->cc_newloads;
 		cc_invals += cache->cc_invals;
-		cc_discards += cache->cc_discards;
 		cc_lsearches += cache->cc_lsearches;
 		cc_lhits += cache->cc_lhits;
 	}
-	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
 		 CacheHdr->ch_ntup,
 		 cc_searches,
 		 cc_hits,
@@ -334,7 +313,6 @@ CatCachePrintStats(void)
 		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
 		 cc_searches - cc_hits - cc_neg_hits,
 		 cc_invals,
-		 cc_discards,
 		 cc_lsearches,
 		 cc_lhits);
 }
@@ -367,8 +345,7 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
 		return;					/* nothing left to do */
 	}
 
-	/* delink from linked lists */
-	DLRemove(&ct->lrulist_elem);
+	/* delink from linked list */
 	DLRemove(&ct->cache_elem);
 
 	/* free associated tuple data */
@@ -568,11 +545,13 @@ AtEOXact_CatCache(bool isCommit)
 	if (assert_enabled)
 	{
 		CatCache   *ccp;
-		Dlelem	   *elt;
 
-		/* Check CatCLists */
 		for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
 		{
+			Dlelem	   *elt;
+			int			i;
+
+			/* Check CatCLists */
 			for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
 			{
 				CatCList   *cl = (CatCList *) DLE_VAL(elt);
@@ -581,16 +560,21 @@ AtEOXact_CatCache(bool isCommit)
 				Assert(cl->refcount == 0);
 				Assert(!cl->dead);
 			}
-		}
 
-		/* Check individual tuples */
-		for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = DLGetSucc(elt))
-		{
-			CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
+			/* Check individual tuples */
+			for (i = 0; i < ccp->cc_nbuckets; i++)
+			{
+				for (elt = DLGetHead(&ccp->cc_bucket[i]);
+					 elt;
+					 elt = DLGetSucc(elt))
+				{
+					CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
 
-			Assert(ct->ct_magic == CT_MAGIC);
-			Assert(ct->refcount == 0);
-			Assert(!ct->dead);
+					Assert(ct->ct_magic == CT_MAGIC);
+					Assert(ct->refcount == 0);
+					Assert(!ct->dead);
+				}
+			}
 		}
 	}
 #endif
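
With the global LRU list gone, the assert-time check above reaches every entry by walking each cache's buckets directly. The same traversal works for any whole-cache scan; a hedged sketch under the assumption of the headers below, with walk_catcache and its visitor callback being hypothetical names rather than anything in the patch:

#include "postgres.h"
#include "utils/catcache.h"

/* Sketch: visit every CatCTup in one catcache by walking its hash buckets,
 * mirroring the per-bucket loops in the revised AtEOXact_CatCache check. */
static void
walk_catcache(CatCache *cache, void (*visit) (CatCTup *ct))
{
	int			i;

	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		Dlelem	   *elt;

		for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = DLGetSucc(elt))
			visit((CatCTup *) DLE_VAL(elt));
	}
}
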
@@ -796,12 +780,27 @@ InitCatCache(int id,
 			 Oid indexoid,
 			 int reloidattr,
 			 int nkeys,
-			 const int *key)
+			 const int *key,
+			 int nbuckets)
 {
 	CatCache   *cp;
 	MemoryContext oldcxt;
 	int			i;
 
+	/*
+	 * nbuckets is the number of hash buckets to use in this catcache.
+	 * Currently we just use a hard-wired estimate of an appropriate size
+	 * for each cache; maybe later make them dynamically resizable?
+	 *
+	 * nbuckets must be a power of two.  We check this via Assert rather than
+	 * a full runtime check because the values will be coming from constant
+	 * tables.
+	 *
+	 * If you're confused by the power-of-two check, see comments in
+	 * bitmapset.c for an explanation.
+	 */
+	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
+
 	/*
 	 * first switch to the cache context so our allocations do not vanish at
 	 * the end of a transaction
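
The new Assert relies on the two's-complement identity also documented in bitmapset.c: n & -n isolates the lowest set bit of n, so for positive n the expression equals n exactly when a single bit is set. A standalone sketch of the same test (is_power_of_two is a hypothetical helper, not part of the patch):

#include <stdbool.h>

/* Sketch: (n & -n) keeps only the lowest set bit of n, so for n > 0 the
 * result equals n iff n is a power of two.
 * Example: 256 (one bit set) passes; 5000 (binary 1001110001000) does not. */
static bool
is_power_of_two(int n)
{
	return n > 0 && (n & -n) == n;
}
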
@@ -812,17 +811,15 @@ InitCatCache(int id,
 	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 
 	/*
-	 * if first time through, initialize the cache group header, including
-	 * global LRU list header
+	 * if first time through, initialize the cache group header
 	 */
 	if (CacheHdr == NULL)
 	{
 		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
 		CacheHdr->ch_caches = NULL;
 		CacheHdr->ch_ntup = 0;
-		CacheHdr->ch_maxtup = MAXCCTUPLES;
-		DLInitList(&CacheHdr->ch_lrulist);
 #ifdef CATCACHE_STATS
+		/* set up to dump stats at backend exit */
 		on_proc_exit(CatCachePrintStats, 0);
 #endif
 	}
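
CatCachePrintStats now takes (int code, Datum arg) because that is the callback shape on_proc_exit expects: registered functions run during backend shutdown with the exit code and the Datum supplied at registration. A hedged sketch of the same registration pattern, with report_at_exit and register_exit_report as hypothetical names:

#include "postgres.h"
#include "storage/ipc.h"

/* Sketch: an on_proc_exit callback receives the process exit code plus the
 * Datum that was passed when it was registered. */
static void
report_at_exit(int code, Datum arg)
{
	elog(DEBUG2, "backend exiting with code %d (arg %d)",
		 code, DatumGetInt32(arg));
}

static void
register_exit_report(void)
{
	on_proc_exit(report_at_exit, 0);
}
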
@@ -832,7 +829,7 @@ InitCatCache(int id,
 	 *
 	 * Note: we assume zeroing initializes the Dllist headers correctly
 	 */
-	cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
+	cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
 
 	/*
 	 * initialize the cache's relation information for the relation
@@ -847,7 +844,7 @@ InitCatCache(int id,
 	cp->cc_tupdesc = (TupleDesc) NULL;
 	cp->cc_reloidattr = reloidattr;
 	cp->cc_ntup = 0;
-	cp->cc_nbuckets = NCCBUCKETS;
+	cp->cc_nbuckets = nbuckets;
 	cp->cc_nkeys = nkeys;
 	for (i = 0; i < nkeys; ++i)
 		cp->cc_key[i] = key[i];
@@ -1162,13 +1159,11 @@ SearchCatCache(CatCache *cache,
 			continue;
 
 		/*
-		 * we found a match in the cache: move it to the front of the global
-		 * LRU list. We also move it to the front of the list for its
-		 * hashbucket, in order to speed subsequent searches. (The most
-		 * frequently accessed elements in any hashbucket will tend to be near
-		 * the front of the hashbucket's list.)
+		 * We found a match in the cache.  Move it to the front of the list
+		 * for its hashbucket, in order to speed subsequent searches.  (The
+		 * most frequently accessed elements in any hashbucket will tend to be
+		 * near the front of the hashbucket's list.)
 		 */
-		DLMoveToFront(&ct->lrulist_elem);
 		DLMoveToFront(&ct->cache_elem);
 
 		/*
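
The rewritten comment keeps the within-bucket move-to-front heuristic: a successful lookup already holds the entry's Dlelem, so promoting it costs only a few pointer updates and keeps hot entries near the head of their chain. A sketch of that lookup-and-promote shape, assuming the headers below and with bucket_lookup_mtf and its match callback being hypothetical:

#include "postgres.h"
#include "utils/catcache.h"

/* Sketch: scan one bucket and move the first matching entry to the front
 * of that bucket's Dllist, so later searches for it terminate sooner. */
static CatCTup *
bucket_lookup_mtf(CatCache *cache, Index hashIndex,
				  bool (*matches) (CatCTup *ct))
{
	Dlelem	   *elt;

	for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
		 elt;
		 elt = DLGetSucc(elt))
	{
		CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

		if (matches(ct))
		{
			DLMoveToFront(&ct->cache_elem);
			return ct;
		}
	}
	return NULL;
}
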
@@ -1414,14 +1409,12 @@ SearchCatCacheList(CatCache *cache,
 			continue;
 
 		/*
-		 * We found a matching list: mark it as touched since the last
-		 * CatalogCacheCleanup() sweep.  Also move the list to the front of
-		 * the cache's list-of-lists, to speed subsequent searches. (We do not
+		 * We found a matching list.  Move the list to the front of the
+		 * cache's list-of-lists, to speed subsequent searches.  (We do not
 		 * move the members to the fronts of their hashbucket lists, however,
 		 * since there's no point in that unless they are searched for
 		 * individually.)
 		 */
-		cl->touched = true;
 		DLMoveToFront(&cl->cache_elem);
 
 		/* Bump the list's refcount and return it */
@@ -1504,10 +1497,7 @@ SearchCatCacheList(CatCache *cache,
 			if (ct->c_list)
 				continue;
 
-			/* Found a match, so move it to front */
-			DLMoveToFront(&ct->lrulist_elem);
-
-			break;
+			break;				/* A-OK */
 		}
 
 		if (elt == NULL)
@@ -1577,7 +1567,6 @@ SearchCatCacheList(CatCache *cache,
 	cl->refcount = 0;			/* for the moment */
 	cl->dead = false;
 	cl->ordered = ordered;
-	cl->touched = false;		/* we already moved members to front */
 	cl->nkeys = nkeys;
 	cl->hash_value = lHashValue;
 	cl->n_members = nmembers;
@@ -1654,109 +1643,25 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
 
 	/*
 	 * Finish initializing the CatCTup header, and add it to the cache's
-	 * linked lists and counts.
+	 * linked list and counts.
 	 */
 	ct->ct_magic = CT_MAGIC;
 	ct->my_cache = cache;
-	DLInitElem(&ct->lrulist_elem, (void *) ct);
 	DLInitElem(&ct->cache_elem, (void *) ct);
 	ct->c_list = NULL;
 	ct->refcount = 0;			/* for the moment */
 	ct->dead = false;
 	ct->negative = negative;
 	ct->hash_value = hashValue;
 
-	DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
 	DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
 
 	cache->cc_ntup++;
 	CacheHdr->ch_ntup++;
 
-	/*
-	 * If we've exceeded the desired size of the caches, try to throw away the
-	 * least recently used entry(s).  NB: be careful not to throw away the
-	 * newly-built entry...
-	 */
-	if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
-		CatalogCacheCleanup(ct);
-
 	return ct;
 }
 
-/*
- *	CatalogCacheCleanup
- *		Try to reduce the size of the catcaches when they get too big
- *
- * savect can be NULL, or a specific CatCTup not to remove even if it
- * has zero refcount.
- */
-static void
-CatalogCacheCleanup(CatCTup *savect)
-{
-	int			tup_target;
-	CatCache   *ccp;
-	Dlelem	   *elt,
-			   *prevelt;
-
-	/*
-	 * Each time we have to do this, try to cut the cache size down to about
-	 * 90% of the maximum.
-	 */
-	tup_target = (CacheHdr->ch_maxtup * 9) / 10;
-
-	/*
-	 * Our strategy for managing CatCLists is that, each time we have to throw
-	 * away some cache entries, we first move-to-front all the members of
-	 * CatCLists that have been touched since the last cleanup sweep.  Then we
-	 * do strict LRU elimination by individual tuples, zapping a list if any
-	 * of its members gets zapped.  Before PostgreSQL 8.1, we moved members to
-	 * front each time their owning list was touched, which was arguably more
-	 * fair in balancing list members against standalone tuples --- but the
-	 * overhead for large lists was horrendous.  This scheme is more heavily
-	 * biased towards preserving lists, but that is not necessarily bad
-	 * either.
-	 */
-	for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
-	{
-		for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
-		{
-			CatCList   *cl = (CatCList *) DLE_VAL(elt);
-
-			Assert(cl->cl_magic == CL_MAGIC);
-			if (cl->touched && !cl->dead)
-			{
-				int			i;
-
-				for (i = 0; i < cl->n_members; i++)
-					DLMoveToFront(&cl->members[i]->lrulist_elem);
-			}
-			cl->touched = false;
-		}
-	}
-
-	/* Now get rid of unreferenced tuples in reverse global LRU order */
-	for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
-	{
-		CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
-
-		prevelt = DLGetPred(elt);
-
-		if (ct->refcount == 0 &&
-			(ct->c_list == NULL || ct->c_list->refcount == 0) &&
-			ct != savect)
-		{
-#ifdef CATCACHE_STATS
-			ct->my_cache->cc_discards++;
-#endif
-			CatCacheRemoveCTup(ct->my_cache, ct);
-
-			/* Quit when we've removed enough tuples */
-			if (CacheHdr->ch_ntup <= tup_target)
-				break;
-		}
-	}
-}
-
 /*
  *	build_dummy_tuple
  *		Generate a palloc'd HeapTuple that contains the specified key