diff -ruN linux-2.2.5/arch/i386/mm/init.c linux/arch/i386/mm/init.c
--- linux-2.2.5/arch/i386/mm/init.c	Thu Jan 21 14:28:40 1999
+++ linux/arch/i386/mm/init.c	Fri Apr  9 21:48:40 1999
@@ -17,6 +17,7 @@
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/pagemap.h>
 #ifdef CONFIG_BLK_DEV_INITRD
 #include <linux/blk.h>
 #endif
@@ -144,6 +145,9 @@
 	return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
 }
 
+extern void show_dentry_cache(void);
+extern void show_inode_cache(void);
+
 void show_mem(void)
 {
 	int i,free = 0,total = 0,reserved = 0;
@@ -169,6 +173,9 @@
 	printk("%d pages shared\n",shared);
 	printk("%d pages swap cached\n",cached);
 	printk("%ld pages in page table cache\n",pgtable_cache_size);
+	show_page_hash();
+	show_dentry_cache();
+	show_inode_cache();
 	show_buffers();
 #ifdef CONFIG_NET
 	show_net_buffers();
diff -ruN linux-2.2.5/fs/buffer.c linux/fs/buffer.c
--- linux-2.2.5/fs/buffer.c	Tue Apr 13 15:07:04 1999
+++ linux/fs/buffer.c	Tue Apr 27 21:36:45 1999
@@ -115,6 +115,28 @@
 int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   1*HZ,   1*HZ, 1, 1};
 int bdflush_max[N_PARAM] = {100,5000, 2000, 2000,100, 600*HZ, 600*HZ, 2047, 5};
 
+/* Buffer-cache instrumentation counters, gathered in one struct so the
+ * hot ones share cache lines.  Use an alignment attribute rather than a
+ * bare asm ".align", which need not bind to the object that follows. */
+struct totals {
+	unsigned long lookups;		/* hash lookups in find_buffer() */
+	unsigned long hits;		/* lookups that found the buffer */
+	unsigned long fb_loops;		/* hash chain entries stepped past */
+	unsigned long ndirty_written;	/* dirty buffers queued for write */
+	unsigned long bdflush;		/* bdflush() main-loop iterations */
+	unsigned long bdflush_wait;	/* callers that waited on bdflush */
+	unsigned long wait_on_buffer;	/* sleeps on a locked buffer */
+	unsigned long grow_buffers;	/* pages added to the buffer cache */
+	unsigned long free_buffers;	/* buffer pages torn down */
+	unsigned long forgotten;	/* buffers dropped by __bforget() */
+	unsigned long freed_pages;	/* pages handed back to the VM */
+	unsigned long breads;		/* bread() calls */
+	unsigned long breadas;		/* breada() calls */
+	unsigned long readpage;		/* generic_readpage() calls */
+	unsigned long brw_page;		/* brw_page() calls */
+	unsigned long brw_read;		/* ...of which READ */
+} __attribute__ ((__aligned__ (16))) buffer_counters = {0, };
+
 void wakeup_bdflush(int);
 
 /*
@@ -131,6 +153,7 @@
 	struct task_struct *tsk = current;
 	struct wait_queue wait;
 
+	buffer_counters.wait_on_buffer++;
 	bh->b_count++;
 	wait.task = tsk;
 	add_wait_queue(&bh->b_wait, &wait);
@@ -220,6 +243,7 @@
 			bh->b_count++;
 			next->b_count++;
 			bh->b_flushtime = 0;
+			buffer_counters.ndirty_written++;
 			ll_rw_block(WRITE, 1, &bh);
 			bh->b_count--;
 			next->b_count--;
@@ -578,17 +602,21 @@
 {		
 	struct buffer_head * next;
 
+	buffer_counters.lookups++;
 	next = hash(dev,block);
 	for (;;) {
 		struct buffer_head *tmp = next;
 		if (!next)
-			break;
+			return next;
 		next = tmp->b_next;
-		if (tmp->b_blocknr != block || tmp->b_size != size || tmp->b_dev != dev)
+		if (tmp->b_blocknr != block || tmp->b_size != size || tmp->b_dev != dev) {
+			buffer_counters.fb_loops++;
 			continue;
+		}
 		next = tmp;
 		break;
 	}
+	buffer_counters.hits++;
 	return next;
 }
 
@@ -725,11 +753,8 @@
 repeat:
 	bh = get_hash_table(dev, block, size);
 	if (bh) {
-		if (!buffer_dirty(bh)) {
-			if (buffer_uptodate(bh))
-				 put_last_lru(bh);
+		if (!buffer_dirty(bh))
 			bh->b_flushtime = 0;
-		}
 		return bh;
 	}
 
@@ -853,6 +878,10 @@
 		__brelse(buf);
 		return;
 	}
+
+	buffer_counters.forgotten++;
+
+	buf->b_state = 0;
 	buf->b_count = 0;
 	remove_from_queues(buf);
 	put_last_free(buf);
@@ -866,6 +895,7 @@
 {
 	struct buffer_head * bh;
 
+	buffer_counters.breads++;
 	bh = getblk(dev, block, size);
 	touch_buffer(bh);
 	if (buffer_uptodate(bh))
@@ -901,6 +931,7 @@
 	if (block < 0)
 		return NULL;
 
+	buffer_counters.breadas++;
 	bh = getblk(dev, block, bufsize);
 	index = BUFSIZE_INDEX(bh->b_size);
 
@@ -1243,6 +1274,10 @@
 	struct buffer_head *bh, *prev, *next, *arr[MAX_BUF_PER_PAGE];
 	int block, nr;
 
+	buffer_counters.brw_page++;
+	if (rw == READ)
+		buffer_counters.brw_read++;
+
 	if (!PageLocked(page))
 		panic("brw_page: page not locked for I/O");
 	clear_bit(PG_uptodate, &page->flags);
@@ -1359,6 +1394,7 @@
 	int *p, nr[PAGE_SIZE/512];
 	int i;
 
+	buffer_counters.readpage++;
 	atomic_inc(&page->count);
 	set_bit(PG_locked, &page->flags);
 	set_bit(PG_free_after, &page->flags);
@@ -1396,6 +1432,7 @@
 
 	if (!(page = __get_free_page(GFP_BUFFER)))
 		return 0;
+	buffer_counters.grow_buffers++;
 	bh = create_buffers(page, size, 0);
 	if (!bh) {
 		free_page(page);
@@ -1459,6 +1496,7 @@
 		return 0;
 	} while (tmp != bh);
 
+	buffer_counters.free_buffers++;
 	tmp = bh;
 	do {
 		struct buffer_head * p = tmp;
@@ -1472,6 +1510,7 @@
 	wake_up(&buffer_wait);
 
 	/* And free the page */
+	buffer_counters.freed_pages++;
 	buffermem -= PAGE_SIZE;
 	page_map->buffers = NULL;
 	__free_page(page_map);
@@ -1480,12 +1519,15 @@
 
 /* ================== Debugging =================== */
 
+#define HISTOGRAM_MAX 16
+
 void show_buffers(void)
 {
 	struct buffer_head * bh;
-	int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
-	int protected = 0;
-	int nlist;
+	unsigned found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;
+	unsigned protected = 0;
+	unsigned nlist, nsize, max, maxsum, sofar;
+	unsigned hist[HISTOGRAM_MAX+1];
 	static char *buf_types[NR_LIST] = {"CLEAN","LOCKED","DIRTY"};
 
 	printk("Buffer memory:   %6dkB\n",buffermem>>10);
@@ -1493,6 +1535,74 @@
 	printk("Buffer blocks:   %6d\n",nr_buffers);
 	printk("Buffer hashed:   %6d\n",nr_hashed_buffers);
 
+	printk("Buffers flushed: %lu  forgotten: %lu\n",
+		buffer_counters.ndirty_written, buffer_counters.forgotten);
+	printk("bdflush() runs: %lu  times someone waited for it: %lu\n",
+		buffer_counters.bdflush, buffer_counters.bdflush_wait);
+	printk("total waits for a buffer: %lu\n",
+		buffer_counters.wait_on_buffer);
+	printk("requests to grow/shrink buffer cache: %lu/%lu\n",
+		buffer_counters.grow_buffers, buffer_counters.free_buffers);
+	printk("pages freed during shrink requests: %lu\n",
+		buffer_counters.freed_pages);
+
+	printk("bread() calls: %lu  breada() calls: %lu\n",
+		buffer_counters.breads, buffer_counters.breadas);
+	printk("generic_readpage calls: %lu\n",
+		buffer_counters.readpage);
+	printk("brw_page() calls: %lu  read/write: %lu/%lu\n",
+		buffer_counters.brw_page, buffer_counters.brw_read,
+		(buffer_counters.brw_page - buffer_counters.brw_read));
+
+	/* Histogram of hash chain lengths; chains >= HISTOGRAM_MAX share a slot. */
+	for (nlist = 0; nlist <= HISTOGRAM_MAX; nlist++)
+		hist[nlist] = 0;
+	found = 0;
+	max = 0;
+	maxsum = 0;
+	for (nlist = 0; nlist <= bh_hash_mask; nlist++) {
+		bh = hash_table[nlist];
+		if (!bh) {
+			hist[0]++;
+			continue;
+		}
+		used = 0;
+		do {
+			found++;
+			used++;
+			bh = bh->b_next;
+		} while (bh);
+		if (max < used) max = used;
+		if (used < HISTOGRAM_MAX)
+			hist[used]++;
+		else {
+			maxsum += used;
+			hist[HISTOGRAM_MAX]++;
+		}
+	}
+	printk("================================================\n");
+	printk("Buffer cache total lookups: %lu  (hit rate: %u%%)\n",
+		buffer_counters.lookups,
+		(unsigned) (buffer_counters.lookups ? (buffer_counters.hits *
+			100UL) / buffer_counters.lookups : 0));
+	printk(" hash table size is %lu buckets\n", (unsigned long) bh_hash_mask + 1);
+	printk(" hash table contains %u objects\n", found);
+	printk(" largest bucket contains %u buffers\n", max);
+	printk(" find_buffer() iterations/lookup: %lu/1000\n", buffer_counters.lookups >= 1000 ?
+		buffer_counters.fb_loops / (buffer_counters.lookups / 1000UL) : 0UL);
+	printk(" hash table histogram:\n");
+	sofar = 0;
+	printk("  size  buckets  buffers  sum-pct\n");
+	for (nlist = 0; nlist < HISTOGRAM_MAX; nlist++) {
+		sofar += nlist * hist[nlist];
+		printk("   %2u   %6u   %6u     %3u\n",
+			nlist, hist[nlist], nlist * hist[nlist],
+			found ? (sofar * 100) / found : 0);
+	}
+	printk("  >%2u   %6u   %6u     %3u\n",
+		HISTOGRAM_MAX - 1, hist[HISTOGRAM_MAX], maxsum,
+		found ? ((sofar + maxsum) * 100) / found : 0);
+
 	for(nlist = 0; nlist < NR_LIST; nlist++) {
 	  found = locked = dirty = used = lastused = protected = 0;
 	  bh = lru_list[nlist];
@@ -1514,7 +1624,19 @@
 		 "%d locked, %d protected, %d dirty\n",
 		 buf_types[nlist], found, used, lastused,
 		 locked, protected, dirty);
-	};
+	}
+
+	for(nsize = 0; nsize < NR_SIZES; nsize++) {
+	  found = 0;
+	  bh = free_list[nsize];
+	  if(!bh) continue;
+
+	  do {
+		found++;
+		bh = bh->b_next_free;
+	  } while (bh != free_list[nsize]);
+	  printk("    FREE: %d %d-byte buffers\n", found, (nsize+1)<<9);
+	}
 }
 
 
@@ -1576,6 +1698,7 @@
 {
 	if (current == bdflush_tsk)
 		return;
+	if (wait) buffer_counters.bdflush_wait++;
 	wake_up(&bdflush_wait);
 	if (wait) {
 		run_task_queue(&tq_disk);
@@ -1649,6 +1772,7 @@
 #ifdef DEBUG
 				 if(nlist != BUF_DIRTY) ncount++;
 #endif
+				 buffer_counters.ndirty_written++;
 				 ll_rw_block(WRITE, 1, &bh);
 				 bh->b_count--;
 				 next->b_count--;
@@ -1753,6 +1877,7 @@
 #ifdef DEBUG
 		printk("bdflush() activated...");
 #endif
+		buffer_counters.bdflush++;
 
 		CHECK_EMERGENCY_SYNC
 
@@ -1798,6 +1923,7 @@
 					  next->b_count++;
 					  bh->b_count++;
 					  ndirty++;
+					  buffer_counters.ndirty_written++;
 					  bh->b_flushtime = 0;
 					  if (major == LOOP_MAJOR) {
 						  ll_rw_block(wrta_cmd,1, &bh);
diff -ruN linux-2.2.5/fs/dcache.c linux/fs/dcache.c
--- linux-2.2.5/fs/dcache.c	Tue Apr 13 15:06:51 1999
+++ linux/fs/dcache.c	Tue Apr 27 21:27:33 1999
@@ -33,6 +33,11 @@
 
 kmem_cache_t *dentry_cache; 
 
+unsigned long dentry_lookups = 0;
+unsigned long dentry_loops = 0;
+unsigned long dentry_hits = 0;
+unsigned long dentry_pruneone = 0;
+
 /*
  * This is the single most critical data structure when it comes
  * to the dcache: the hashtable for lookups. Somebody should try
@@ -269,6 +274,7 @@
 {
 	struct dentry * parent;
 
+	dentry_pruneone++;
 	list_del(&dentry->d_hash);
 	list_del(&dentry->d_child);
 	dentry_iput(dentry);
@@ -472,7 +478,7 @@
 void shrink_dcache_memory(int priority, unsigned int gfp_mask)
 {
 	if (gfp_mask & __GFP_IO)
-		prune_dcache(0);
+		prune_dcache(dentry_stat.nr_unused / (priority+1));
 }
 
 #define NAME_ALLOC_LEN(len)	((len+16) & ~15)
@@ -564,6 +570,59 @@
 	return dentry_hashtable + (hash & D_HASHMASK);
 }
 
+#define HIST_SIZE 16
+
+void show_dentry_cache(void)
+{
+	unsigned count, index, oversized, max, total, sofar;
+	unsigned long bucket;
+	unsigned hist[HIST_SIZE+1];
+	struct list_head * l, * h;
+
+	/* Histogram of hash chain lengths; chains >= HIST_SIZE share one slot. */
+	max = total = oversized = 0;
+	for (index = 0; index <= HIST_SIZE; index++)
+		hist[index] = 0;
+	for (bucket = 0; bucket < D_HASHSIZE; bucket++) {
+		count = 0;
+		h = dentry_hashtable + bucket;
+		for (l = h->next; l != h; l = l->next) {
+			count++;
+			total++;
+		}
+		if (count > max)
+			max = count;
+		if (count >= HIST_SIZE) {
+			oversized += count;
+			count = HIST_SIZE;
+		}
+		hist[count]++;
+	}
+
+	printk("Dentry prune operations: %lu\n", dentry_pruneone);
+	printk("================================================\n");
+	printk("Dentry cache total lookups: %lu  (hit rate: %u%%)\n",
+		dentry_lookups, (unsigned) (dentry_lookups ?
+			(dentry_hits * 100UL) / dentry_lookups : 0));
+	printk(" hash table size is %lu buckets\n", (unsigned long) D_HASHSIZE);
+	printk(" hash table contains %u objects\n", total);
+	printk(" largest bucket contains %u dentries\n", max);
+	printk(" d_lookup() iterations/lookup: %lu/1000\n", dentry_lookups >= 1000 ?
+		dentry_loops / (dentry_lookups / 1000UL) : 0UL);
+	printk(" hash table histogram:\n");
+	sofar = 0;
+	printk("  size  buckets    dentries sum-pct\n");
+	for (index = 0; index < HIST_SIZE; index++) {
+		sofar += index * hist[index];
+		printk("   %2u   %6u   %6u      %3u\n",
+			index, hist[index], index * hist[index],
+			total ? (sofar * 100) / total : 0);
+	}
+	printk("  >%2u   %6u   %6u      %3u\n",
+		HIST_SIZE - 1, hist[HIST_SIZE], oversized,
+		total ? ((sofar + oversized) * 100) / total : 0);
+}
+
 struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
 {
 	unsigned int len = name->len;
@@ -571,9 +630,14 @@
 	const unsigned char *str = name->name;
 	struct list_head *head = d_hash(parent,hash);
 	struct list_head *tmp = head->next;
+	struct dentry * dentry;
 
+	dentry_lookups++;
+	goto repeat;
 	for (;;) {
-		struct dentry * dentry = list_entry(tmp, struct dentry, d_hash);
+		dentry_loops++;
+repeat:
+		dentry = list_entry(tmp, struct dentry, d_hash);
 		if (tmp == head)
 			break;
 		tmp = tmp->next;
@@ -590,6 +654,7 @@
 			if (memcmp(dentry->d_name.name, str, len))
 				continue;
 		}
+		dentry_hits++;
 		return dget(dentry);
 	}
 	return NULL;
diff -ruN linux-2.2.5/fs/inode.c linux/fs/inode.c
--- linux-2.2.5/fs/inode.c	Tue Apr 13 15:06:52 1999
+++ linux/fs/inode.c	Tue Apr 27 21:27:09 1999
@@ -484,6 +484,62 @@
 	return NULL;
 }
 
+unsigned long inode_lookups = 0;
+unsigned long inode_loops = 0;
+unsigned long inode_hits = 0;
+
+#define HIST_SIZE 16
+
+void show_inode_cache(void)
+{
+	unsigned count, index, oversized, max, total, sofar;
+	unsigned long bucket;
+	unsigned hist[HIST_SIZE+1];
+	struct list_head * l, * h;
+
+	/* Histogram of hash chain lengths; chains >= HIST_SIZE share one slot. */
+	max = total = oversized = 0;
+	for (index = 0; index <= HIST_SIZE; index++)
+		hist[index] = 0;
+	for (bucket = 0; bucket < HASH_SIZE; bucket++) {
+		count = 0;
+		h = inode_hashtable + bucket;
+		for (l = h->next; l != h; l = l->next) {
+			count++;
+			total++;
+		}
+		if (count > max)
+			max = count;
+		if (count >= HIST_SIZE) {
+			oversized += count;
+			count = HIST_SIZE;
+		}
+		hist[count]++;
+	}
+
+	printk("================================================\n");
+	printk("Inode cache total lookups: %lu  (hit rate: %u%%)\n",
+		inode_lookups, (unsigned) (inode_lookups ?
+			(inode_hits * 100UL) / inode_lookups : 0));
+	printk(" hash table size is %lu buckets\n", (unsigned long) HASH_SIZE);
+	printk(" hash table contains %u objects\n", total);
+	printk(" largest bucket contains %u inodes\n", max);
+	printk(" find_inode() iterations/lookup: %lu/1000\n", inode_lookups >= 1000 ?
+		inode_loops / (inode_lookups / 1000UL) : 0UL);
+	printk(" hash table histogram:\n");
+	sofar = 0;
+	printk("  size  buckets    inodes sum-pct\n");
+	for (index = 0; index < HIST_SIZE; index++) {
+		sofar += index * hist[index];
+		printk("   %2u   %6u   %6u     %3u\n",
+			index, hist[index], index * hist[index],
+			total ? (sofar * 100) / total : 0);
+	}
+	printk("  >%2u   %6u   %6u     %3u\n",
+		HIST_SIZE - 1, hist[HIST_SIZE], oversized,
+		total ? ((sofar + oversized) * 100) / total : 0);
+}
+
 /*
  * Called with the inode lock held.
  */
@@ -492,20 +548,21 @@
 	struct list_head *tmp;
 	struct inode * inode;
 
+	inode_lookups++;
 	tmp = head;
 	for (;;) {
 		tmp = tmp->next;
-		inode = NULL;
 		if (tmp == head)
-			break;
+			return NULL;
 		inode = list_entry(tmp, struct inode, i_hash);
-		if (inode->i_sb != sb)
-			continue;
-		if (inode->i_ino != ino)
+		if ((inode->i_sb != sb) || (inode->i_ino != ino)) {
+			inode_loops++;
 			continue;
+		}
 		inode->i_count++;
 		break;
 	}
+	inode_hits++;
 	return inode;
 }
 
diff -ruN linux-2.2.5/include/linux/pagemap.h linux/include/linux/pagemap.h
--- linux-2.2.5/include/linux/pagemap.h	Tue Apr 13 16:55:30 1999
+++ linux/include/linux/pagemap.h	Tue Apr 27 21:26:05 1999
@@ -24,6 +24,11 @@
 
 extern unsigned long page_cache_size; /* # of pages currently in the hash table */
 extern struct page * page_hash_table[PAGE_HASH_SIZE];
+extern unsigned long page_cache_lookups;
+extern unsigned long page_cache_loops;
+extern unsigned long page_cache_hits;
+
+extern void show_page_hash(void);
 
 /*
  * We use a power-of-two hash table to avoid a modulus,
@@ -46,9 +51,11 @@
 
 static inline struct page * __find_page(struct inode * inode, unsigned long offset, struct page *page)
 {
+	page_cache_lookups++;
 	goto inside;
 	for (;;) {
 		page = page->next_hash;
+		page_cache_loops++;
 inside:
 		if (!page)
 			goto not_found;
@@ -58,6 +65,7 @@
 			break;
 	}
 	/* Found the page. */
+	page_cache_hits++;
 	atomic_inc(&page->count);
 	set_bit(PG_referenced, &page->flags);
 not_found:
diff -ruN linux-2.2.5/mm/filemap.c linux/mm/filemap.c
--- linux-2.2.5/mm/filemap.c	Tue Apr 13 15:06:55 1999
+++ linux/mm/filemap.c	Sat Apr 10 00:05:21 1999
@@ -33,6 +33,62 @@
 
 unsigned long page_cache_size = 0;
 struct page * page_hash_table[PAGE_HASH_SIZE];
+unsigned long page_cache_lookups = 0;
+unsigned long page_cache_loops = 0;
+unsigned long page_cache_hits = 0;
+
+#define HIST_SIZE 16
+
+void show_page_hash(void)
+{
+	unsigned bucket, index, count, oversized, max, total, sofar;
+	unsigned hist[HIST_SIZE+1];
+	struct page * p;
+
+	/* Histogram of hash chain lengths; chains >= HIST_SIZE share one slot. */
+	max = 0;
+	total = 0;
+	oversized = 0;
+	for (index = 0; index <= HIST_SIZE; index++)
+		hist[index] = 0;
+	for (bucket = 0; bucket < PAGE_HASH_SIZE; bucket++) {
+		count = 0;
+		for (p = page_hash_table[bucket]; p; p = p->next_hash) {
+			count++;
+			total++;
+		}
+		if (count > max)
+			max = count;
+		if (count >= HIST_SIZE) {
+			oversized += count;
+			count = HIST_SIZE;
+		}
+		hist[count]++;
+	}
+
+	printk("================================================\n");
+	printk("Page cache total lookups: %lu  (hit rate: %u%%)\n",
+		page_cache_lookups, (unsigned) (page_cache_lookups ?
+			(page_cache_hits * 100UL) / page_cache_lookups : 0));
+	printk(" hash table size is %d buckets\n", PAGE_HASH_SIZE);
+	printk(" hash table contains %u objects\n", total);
+	printk(" largest bucket contains %u pages\n", max);
+	printk(" find_page() iterations/lookup: %lu/1000\n",
+		page_cache_lookups >= 1000 ?
+		page_cache_loops / (page_cache_lookups / 1000UL) : 0UL);
+	printk(" hash table histogram:\n");
+	sofar = 0;
+	printk("  size  buckets    pages  sum-pct\n");
+	for (index = 0; index < HIST_SIZE; index++) {
+		sofar += index * hist[index];
+		printk("   %2u   %6u   %6u     %3u\n",
+			index, hist[index], index * hist[index],
+			total ? (sofar * 100) / total : 0);
+	}
+	printk("  >%2u   %6u   %6u     %3u\n",
+		HIST_SIZE - 1, hist[HIST_SIZE], oversized,
+		total ? ((sofar + oversized) * 100) / total : 0);
+}
 
 /*
  * Simple routines for both non-shared and shared mappings.
diff -ruN linux-2.2.5/mm/vmscan.c linux/mm/vmscan.c
--- linux-2.2.5/mm/vmscan.c	Tue Apr 13 15:06:18 1999
+++ linux/mm/vmscan.c	Tue Apr 27 21:38:26 1999
@@ -406,8 +406,8 @@
 			if (!--count)
 				goto done;
 		}
-
 		shrink_dcache_memory(priority, gfp_mask);
+
 	} while (--priority >= 0);
 done:
 	unlock_kernel();
