diff -pruN Linux-2.4.4/fs/nfs/file.c linux/fs/nfs/file.c
--- Linux-2.4.4/fs/nfs/file.c	Fri Jul 20 10:29:24 2001
+++ linux/fs/nfs/file.c	Tue Jul 24 17:26:12 2001
@@ -133,13 +133,11 @@ nfs_fsync(struct file *file, struct dent
 
 	dfprintk(VFS, "nfs: fsync(%x/%ld)\n", inode->i_dev, inode->i_ino);
 
-	lock_kernel();
 	status = nfs_wb_file(inode, file);
 	if (!status) {
 		status = file->f_error;
 		file->f_error = 0;
 	}
-	unlock_kernel();
 	return status;
 }
 
@@ -157,6 +155,7 @@ static int nfs_prepare_write(struct file
 	kmap(page);
 	return nfs_flush_incompatible(file, page);
 }
+
 static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
 {
 	long status;
@@ -164,9 +163,7 @@ static int nfs_commit_write(struct file 
 	struct inode *inode = page->mapping->host;
 
 	kunmap(page);
-	lock_kernel();
 	status = nfs_updatepage(file, page, offset, to-offset);
-	unlock_kernel();
 	/* most likely it's already done. CHECKME */
 	if (pos > inode->i_size)
 		inode->i_size = pos;
diff -pruN Linux-2.4.4/fs/nfs/flushd.c linux/fs/nfs/flushd.c
--- Linux-2.4.4/fs/nfs/flushd.c	Fri Jul 20 10:29:36 2001
+++ linux/fs/nfs/flushd.c	Tue Jul 24 17:48:59 2001
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
+#include <linux/list.h>
 
 #include <linux/sched.h>
 
@@ -53,12 +54,20 @@
 static struct rpc_wait_queue    flushd_queue = RPC_INIT_WAITQ("nfs_flushd");
 
 /*
+ * This lock protects the flusher's inode list, as well as each
+ * inode's NFS_INO_FLUSH bit and nfs_i.flush list entry.
+ * NB: the BKL is now needed only around calls into the RPC layer.
+ */
+static spinlock_t nfs_flushd_lock = SPIN_LOCK_UNLOCKED;
+
+/*
  * Local function declarations.
  */
 static void	nfs_flushd(struct rpc_task *);
 static void	nfs_flushd_exit(struct rpc_task *);
+static int	nfs_reqlist_init(struct nfs_server *server);
 
-
+static
 int nfs_reqlist_init(struct nfs_server *server)
 {
 	struct nfs_reqlist	*cache;
@@ -104,24 +113,23 @@ void nfs_reqlist_exit(struct nfs_server 
 {
 	struct nfs_reqlist      *cache;
 
-	lock_kernel();
 	cache = server->rw_requests;
 	if (!cache)
-		goto out;
+		return;
 
 	dprintk("NFS: reqlist_exit (ptr %p rpc %p)\n", cache, cache->task);
 
-	while (cache->task || cache->inodes) {
+	while (cache->task || !list_empty(&cache->inodes)) {
 		if (!cache->task) {
 			nfs_reqlist_init(server);
 		} else {
 			cache->task->tk_status = -ENOMEM;
+			lock_kernel();
 			rpc_wake_up_task(cache->task);
+			unlock_kernel();
 		}
 		interruptible_sleep_on_timeout(&cache->request_wait, 1 * HZ);
 	}
- out:
-	unlock_kernel();
 }
 
 int nfs_reqlist_alloc(struct nfs_server *server)
@@ -137,6 +145,7 @@ int nfs_reqlist_alloc(struct nfs_server 
 	memset(cache, 0, sizeof(*cache));
 	atomic_set(&cache->nr_requests, 0);
 	init_waitqueue_head(&cache->request_wait);
+	INIT_LIST_HEAD(&cache->inodes);
 	server->rw_requests = cache;
 
 	return 0;
@@ -150,59 +159,63 @@ void nfs_reqlist_free(struct nfs_server 
 	}
 }
 
-void nfs_wake_flushd()
-{
-	rpc_wake_up_status(&flushd_queue, -ENOMEM);
-}
-
+/*
+ * Push this inode onto the back of the flush list
+ * for its file system.
+ */
 static void inode_append_flushd(struct inode *inode)
 {
 	struct nfs_reqlist	*cache = NFS_REQUESTLIST(inode);
-	struct inode		**q;
 
+	spin_lock(&nfs_flushd_lock);
 	if (NFS_FLAGS(inode) & NFS_INO_FLUSH)
 		goto out;
-	inode->u.nfs_i.hash_next = NULL;
 
-	q = &cache->inodes;
-	while (*q)
-		q = &(*q)->u.nfs_i.hash_next;
-	*q = inode;
+	NFS_FLAGS(inode) |= NFS_INO_FLUSH;
+	list_add_tail(&inode->u.nfs_i.flush, &cache->inodes);
 
-	/* Note: we increase the inode i_count in order to prevent
+	/*
+	 * Note: we increase the inode i_count in order to prevent
 	 *	 it from disappearing when on the flush list
 	 */
-	NFS_FLAGS(inode) |= NFS_INO_FLUSH;
 	atomic_inc(&inode->i_count);
- out:
+
+out:
+	spin_unlock(&nfs_flushd_lock);
 }
 
-/* Protect me using the BKL */
+/*
+ * Remove an inode from its file system's flush list.
+ */
 void inode_remove_flushd(struct inode *inode)
 {
-	struct nfs_reqlist	*cache = NFS_REQUESTLIST(inode);
-	struct inode		**q;
-
-	if (!(NFS_FLAGS(inode) & NFS_INO_FLUSH))
+	spin_lock(&nfs_flushd_lock);
+	if (!(NFS_FLAGS(inode) & NFS_INO_FLUSH)) {
+		spin_unlock(&nfs_flushd_lock);
 		return;
-
-	q = &cache->inodes;
-	while (*q && *q != inode)
-		q = &(*q)->u.nfs_i.hash_next;
-	if (*q) {
-		*q = inode->u.nfs_i.hash_next;
-		NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
-		iput(inode);
 	}
+
+	NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
+	list_del_init(&inode->u.nfs_i.flush);
+	spin_unlock(&nfs_flushd_lock);
+
+	/*
+	 * iput() can call back into the NFS client (e.g. nfs_dentry_iput),
+	 * so we release the spinlock before calling it.
+	 */
+	iput(inode);
 }
 
+/*
+ * Push this inode onto its file system's flush list, and
+ * schedule the flusher to start soon.
+ */
 void inode_schedule_scan(struct inode *inode, unsigned long time)
 {
 	struct nfs_reqlist	*cache = NFS_REQUESTLIST(inode);
 	struct rpc_task		*task;
 	unsigned long		mintimeout;
 
-	lock_kernel();
 	if (time_after(NFS_NEXTSCAN(inode), time))
 		NFS_NEXTSCAN(inode) = time;
 	mintimeout = jiffies + 1 * HZ;
@@ -214,53 +227,79 @@ void inode_schedule_scan(struct inode *i
 	if (!task) {
 		nfs_reqlist_init(NFS_SERVER(inode));
 	} else {
-		if (time_after(cache->runat, mintimeout))
+		if (time_after(cache->runat, mintimeout)) {
+			lock_kernel();
 			rpc_wake_up_task(task);
+			unlock_kernel();
+		}
 	}
-	unlock_kernel();
 }
 
+static void
+flush_one_inode(struct inode *inode, int flush)
+{
+	if (flush) {
+		nfs_pagein_inode(inode, 0, 0);
+		nfs_sync_file(inode, NULL, 0, 0, FLUSH_AGING);
+	} else if (time_after(jiffies, NFS_NEXTSCAN(inode))) {
+		NFS_NEXTSCAN(inode) = jiffies + NFS_WRITEBACK_LOCKDELAY;
+		nfs_pagein_timeout(inode);
+		nfs_flush_timeout(inode, FLUSH_AGING);
+#ifdef CONFIG_NFS_V3
+		nfs_commit_timeout(inode, FLUSH_AGING);
+#endif
+	}
+}
 
 static void
 nfs_flushd(struct rpc_task *task)
 {
 	struct nfs_server	*server;
 	struct nfs_reqlist	*cache;
-	struct inode		*inode, *next;
+	struct list_head	head, *next;
 	unsigned long		delay = jiffies + NFS_WRITEBACK_LOCKDELAY;
 	int			flush = (task->tk_status == -ENOMEM);
 
+	unlock_kernel();
+
         dprintk("NFS: %4d flushd starting\n", task->tk_pid);
 	server = (struct nfs_server *) task->tk_calldata;
         cache = server->rw_requests;
 
-	next = cache->inodes;
-	cache->inodes = NULL;
+	/*
+	 * While holding the flushd lock, splice the whole list onto
+	 * a local list head, leaving the cache's list empty.  Writers
+	 * can then safely keep adding dirty inodes to the cache's
+	 * flush list while we walk the detached list below.
+	 */
+	spin_lock(&nfs_flushd_lock);
+	list_add(&head, &cache->inodes);
+	list_del_init(&cache->inodes);
+
+	next = head.next;
+	while (next != &head) {
+		struct inode *inode =
+			list_entry(next, struct inode, u.nfs_i.flush);
 
-	while ((inode = next) != NULL) {
-		next = next->u.nfs_i.hash_next;
-		inode->u.nfs_i.hash_next = NULL;
+		next = next->next;
+		list_del_init(&inode->u.nfs_i.flush);
 		NFS_FLAGS(inode) &= ~NFS_INO_FLUSH;
+		spin_unlock(&nfs_flushd_lock);
 
-		if (flush) {
-			nfs_pagein_inode(inode, 0, 0);
-			nfs_sync_file(inode, NULL, 0, 0, FLUSH_AGING);
-		} else if (time_after(jiffies, NFS_NEXTSCAN(inode))) {
-			NFS_NEXTSCAN(inode) = jiffies + NFS_WRITEBACK_LOCKDELAY;
-			nfs_pagein_timeout(inode);
-			nfs_flush_timeout(inode, FLUSH_AGING);
-#ifdef CONFIG_NFS_V3
-			nfs_commit_timeout(inode, FLUSH_AGING);
-#endif
-		}
+		flush_one_inode(inode, flush);
 
 		if (nfs_have_writebacks(inode) || nfs_have_read(inode)) {
 			inode_append_flushd(inode);
 			if (time_after(delay, NFS_NEXTSCAN(inode)))
 				delay = NFS_NEXTSCAN(inode);
 		}
+
+		/* drop the flush list's ref; a re-append above took its own */
 		iput(inode);
+
+		spin_lock(&nfs_flushd_lock);
 	}
+	spin_unlock(&nfs_flushd_lock);
 
 	dprintk("NFS: %4d flushd back to sleep\n", task->tk_pid);
 	if (time_after(jiffies + 1 * HZ, delay))
@@ -272,7 +311,9 @@ nfs_flushd(struct rpc_task *task)
 	task->tk_timeout = delay;
 	cache->runat = jiffies + task->tk_timeout;
 
-	if (!atomic_read(&cache->nr_requests) && !cache->inodes) {
+	lock_kernel();
+
+	if (!atomic_read(&cache->nr_requests) && list_empty(&cache->inodes)) {
 		cache->task = NULL;
 		task->tk_action = NULL;
 	} else
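
A note on the list handling in nfs_flushd() above: list_add(&head, &cache->inodes) links a local head into the ring, and list_del_init(&cache->inodes) then removes the cache's own head, so the entire flush list is spliced onto the local head in O(1) and cache->inodes reads as empty to concurrent producers. Below is a self-contained userland model of that idiom; the list primitives are re-typed here so the sketch compiles on its own, and the item struct and all other names are illustrative, not taken from the patch.

	/* splice_demo.c -- models the list takeover in nfs_flushd() */
	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };
	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->next = head;
		new->prev = head->prev;
		head->prev->next = new;
		head->prev = new;
	}

	static void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = entry->prev = entry;
	}

	struct item { int id; struct list_head link; };

	int main(void)
	{
		struct list_head cache = LIST_HEAD_INIT(cache);
		struct list_head head;	/* set up entirely by list_add() */
		struct item a = { 1 }, b = { 2 };
		struct list_head *p;

		/* producers queue at the tail, as inode_append_flushd() does */
		list_add_tail(&a.link, &cache);
		list_add_tail(&b.link, &cache);

		/* the nfs_flushd() splice: take over the whole list in O(1),
		 * leaving 'cache' empty for concurrent producers */
		list_add(&head, &cache);
		list_del_init(&cache);

		for (p = head.next; p != &head; p = p->next) {
			struct item *it = (struct item *)
				((char *)p - offsetof(struct item, link));
			printf("flushing item %d\n", it->id);
		}
		return 0;
	}

Because the splice preserves order and inode_append_flushd() adds at the tail, the flusher still visits inodes in arrival order; the pointer arithmetic in the loop is what the kernel's list_entry() macro expands to.
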
diff -pruN Linux-2.4.4/fs/nfs/inode.c linux/fs/nfs/inode.c
--- Linux-2.4.4/fs/nfs/inode.c	Fri Jul 20 10:29:36 2001
+++ linux/fs/nfs/inode.c	Tue Jul 24 17:42:15 2001
@@ -106,6 +106,7 @@ nfs_read_inode(struct inode * inode)
 	INIT_LIST_HEAD(&inode->u.nfs_i.dirty);
 	INIT_LIST_HEAD(&inode->u.nfs_i.commit);
 	INIT_LIST_HEAD(&inode->u.nfs_i.writeback);
+	INIT_LIST_HEAD(&inode->u.nfs_i.flush);
 	inode->u.nfs_i.nread = 0;
 	inode->u.nfs_i.ndirty = 0;
 	inode->u.nfs_i.ncommit = 0;
diff -pruN Linux-2.4.4/fs/nfs/write.c linux/fs/nfs/write.c
--- Linux-2.4.4/fs/nfs/write.c	Wed Jul 25 15:33:20 2001
+++ linux/fs/nfs/write.c	Wed Jul 25 15:02:08 2001
@@ -113,6 +113,7 @@ static __inline__ struct nfs_page *nfs_p
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->wb_hash);
+		INIT_LIST_HEAD(&p->wb_cache);
 		INIT_LIST_HEAD(&p->wb_list);
 		init_waitqueue_head(&p->wb_wait);
 	}
@@ -196,8 +197,10 @@ nfs_writepage_sync(struct file *file, st
 		if (count < wsize && !IS_SWAPFILE(inode))
 			wsize = count;
 
+		lock_kernel();
 		result = NFS_PROTO(inode)->write(inode, cred, &fattr, flags,
 						 base, wsize, buffer, &verf);
+		unlock_kernel();
 		nfs_write_attributes(inode, &fattr);
 
 		if (result < 0) {
@@ -280,7 +283,6 @@ nfs_writepage(struct page *page)
 	if (page->index >= end_index+1 || !offset)
 		goto out;
 do_it:
-	lock_kernel();
 	if (NFS_SERVER(inode)->rsize >= PAGE_CACHE_SIZE) {
 		err = nfs_writepage_async(NULL, inode, page, 0, offset);
 		if (err >= 0)
@@ -290,7 +292,6 @@ do_it:
 		if (err == offset)
 			err = 0;
 	}
-	unlock_kernel();
 out:
 	UnlockPage(page);
 	return err; 
@@ -324,11 +325,91 @@ region_locked(struct inode *inode, struc
 }
 
 /*
+ * Unfortunately there is no clean way to obtain the system's physical
+ * page count here, since this runs at *module* initialization time,
+ * not during system initialization.  The system page count really
+ * ought to be available via a proper call interface, don't you think?
+ */
+extern unsigned long num_physpages;
+
+static unsigned long nfs_wbreq_shift, nfs_wbreq_mask;
+static struct list_head *nfs_wbreq_hash;
+
+static int
+nfs_init_wbcache(void)
+{
+	unsigned i;
+	unsigned int nr_hash;
+	long order;	/* signed: "--order >= 0" below must be able to fail */
+	unsigned long mempages = num_physpages;
+	struct list_head *d;
+
+	nfs_page_cachep = kmem_cache_create("nfs_page",
+					    sizeof(struct nfs_page),
+					    0, SLAB_HWCACHE_ALIGN,
+					    NULL, NULL);
+	if (nfs_page_cachep == NULL)
+		return -ENOMEM;
+
+	mempages >>= (16 - PAGE_SHIFT);
+	mempages *= sizeof(struct list_head);
+	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
+		;
+
+	do {
+		unsigned long tmp;
+
+		nr_hash = (1UL << order) * PAGE_SIZE /
+			sizeof(struct list_head);
+		nfs_wbreq_mask = (nr_hash - 1);
+
+		tmp = nr_hash;
+		nfs_wbreq_shift = 0;
+		while ((tmp >>= 1UL) != 0UL)
+			nfs_wbreq_shift++;
+
+		nfs_wbreq_hash = (struct list_head *)
+			__get_free_pages(GFP_ATOMIC, order);
+	} while (nfs_wbreq_hash == NULL && --order >= 0);
+
+	printk(KERN_INFO
+		"NFS writeback hash table entries: %d (order: %ld, %ld bytes)\n",
+			nr_hash, order, (PAGE_SIZE << order));
+
+	if (!nfs_wbreq_hash) {
+		printk(KERN_ERR __FUNCTION__
+			": failed to allocate NFS writeback hash table.\n");
+		return -ENOMEM;
+	}
+
+	d = nfs_wbreq_hash;
+	i = nr_hash;
+	do {
+		INIT_LIST_HEAD(d);
+		d++;
+		i--;
+	} while (i);
+
+	return 0;
+}
+
+static __inline__ struct list_head *nfs_wb_hashfn(struct inode *inode,
+	struct page *page)
+{
+	unsigned long index = (unsigned long) inode / L1_CACHE_BYTES;
+	index += (unsigned long) page / L1_CACHE_BYTES;
+	index += index >> nfs_wbreq_shift;
+	return nfs_wbreq_hash + (index & nfs_wbreq_mask);
+}
+
+/*
  * Insert a write request into an inode
  */
 static inline void
 nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 {
+	struct list_head *head;
+
 	if (!list_empty(&req->wb_hash))
 		return;
 	if (!NFS_WBACK_BUSY(req))
@@ -337,11 +418,13 @@ nfs_inode_add_request(struct inode *inod
 		atomic_inc(&inode->i_count);
 	inode->u.nfs_i.npages++;
 	list_add(&req->wb_hash, &inode->u.nfs_i.writeback);
+	head = nfs_wb_hashfn(inode, req->wb_page);
+	list_add(&req->wb_cache, head);
 	req->wb_count++;
 }
 
 /*
- * Insert a write request into an inode
+ * Remove a write request from an inode
  */
 static inline void
 nfs_inode_remove_request(struct nfs_page *req)
@@ -355,8 +438,8 @@ nfs_inode_remove_request(struct nfs_page
 	if (!NFS_WBACK_BUSY(req))
 		printk(KERN_ERR "NFS: unlocked request attempted unhashed!\n");
 	inode = req->wb_inode;
-	list_del(&req->wb_hash);
-	INIT_LIST_HEAD(&req->wb_hash);
+	list_del_init(&req->wb_hash);
+	list_del_init(&req->wb_cache);
 	inode->u.nfs_i.npages--;
 	if ((inode->u.nfs_i.npages == 0) != list_empty(&inode->u.nfs_i.writeback))
 		printk(KERN_ERR "NFS: desynchronized value of nfs_i.npages.\n");
@@ -376,10 +459,10 @@ _nfs_find_request(struct inode *inode, s
 {
 	struct list_head	*head, *next;
 
-	head = &inode->u.nfs_i.writeback;
+	head = nfs_wb_hashfn(inode, page);
 	next = head->next;
 	while (next != head) {
-		struct nfs_page *req = nfs_inode_wb_entry(next);
+		struct nfs_page *req = list_entry(next, struct nfs_page, wb_cache);
 		next = next->next;
 		if (page_index(req->wb_page) != page_index(page))
 			continue;
@@ -425,7 +508,7 @@ void nfs_list_add_request(struct nfs_pag
 }
 
 /*
- * Insert a write request into an inode
+ * Remove a write request from a sorted list
  */
 void nfs_list_remove_request(struct nfs_page *req)
 {
@@ -504,45 +587,9 @@ struct nfs_page *nfs_create_request(stru
 				    unsigned int offset, unsigned int count)
 {
 	struct nfs_reqlist	*cache = NFS_REQUESTLIST(inode);
-	struct nfs_page		*req = NULL;
-	long			timeout;
-
-	/* Deal with hard/soft limits.
-	 */
-	do {
-		/* If we're over the global soft limit, wake up all requests */
-		if (atomic_read(&nfs_nr_requests) >= MAX_REQUEST_SOFT) {
-			dprintk("NFS:      hit soft limit (%d requests)\n",
-				atomic_read(&nfs_nr_requests));
-			if (!cache->task)
-				nfs_reqlist_init(NFS_SERVER(inode));
-			nfs_wake_flushd();
-		}
-
-		/* If we haven't reached the local hard limit yet,
-		 * try to allocate the request struct */
-		if (atomic_read(&cache->nr_requests) < MAX_REQUEST_HARD) {
-			req = nfs_page_alloc();
-			if (req != NULL)
-				break;
-		}
-
-		/* We're over the hard limit. Wait for better times */
-		dprintk("NFS:      create_request sleeping (total %d pid %d)\n",
-			atomic_read(&cache->nr_requests), current->pid);
-
-		timeout = 1 * HZ;
-		if (NFS_SERVER(inode)->flags & NFS_MOUNT_INTR) {
-			interruptible_sleep_on_timeout(&cache->request_wait,
-						       timeout);
-			if (signalled())
-				break;
-		} else
-			sleep_on_timeout(&cache->request_wait, timeout);
+	struct nfs_page		*req;
 
-		dprintk("NFS:      create_request waking up (tot %d pid %d)\n",
-			atomic_read(&cache->nr_requests), current->pid);
-	} while (!req);
+	req = nfs_page_alloc();
 	if (!req)
 		return NULL;
 
@@ -596,6 +643,10 @@ nfs_release_request(struct nfs_page *req
 		printk(KERN_ERR "NFS: Request released while still on a list!\n");
 		nfs_list_remove_request(req);
 	}
+	if (!list_empty(&req->wb_cache)) {
+		printk(KERN_ERR "NFS: Request released while still cached!\n");
+		nfs_inode_remove_request(req);
+	}
 	if (!list_empty(&req->wb_hash)) {
 		printk(KERN_ERR "NFS: Request released while still hashed!\n");
 		nfs_inode_remove_request(req);
@@ -610,10 +661,8 @@ nfs_release_request(struct nfs_page *req
 		rpcauth_releasecred(NFS_CLIENT(inode)->cl_auth, req->wb_cred);
 	page_cache_release(page);
 	nfs_page_free(req);
-	/* wake up anyone waiting to allocate a request */
 	atomic_dec(&cache->nr_requests);
 	atomic_dec(&nfs_nr_requests);
-	wake_up(&cache->request_wait);
 #ifdef NFS_PARANOIA
 	if (atomic_read(&cache->nr_requests) < 0)
 		BUG();
@@ -882,11 +931,6 @@ nfs_update_request(struct file* file, st
 		}
 		spin_unlock(&nfs_wreq_lock);
 
-		/*
-		 * If we're over the soft limit, flush out old requests
-		 */
-		if (inode->u.nfs_i.npages >= MAX_REQUEST_SOFT)
-			nfs_wb_file(inode, file);
 		new = nfs_create_request(file, inode, page, offset, bytes);
 		if (!new)
 			return ERR_PTR(-ENOMEM);
@@ -962,20 +1006,13 @@ nfs_strategy(struct inode *inode)
 	} else {
 		if (dirty >= wpages)
 			nfs_flush_file(inode, NULL, 0, 0, 0);
-		if (inode->u.nfs_i.ncommit > NFS_STRATEGY_PAGES * wpages &&
-		    atomic_read(&nfs_nr_requests) > MAX_REQUEST_SOFT)
+		if (inode->u.nfs_i.ncommit > NFS_STRATEGY_PAGES * wpages)
 			nfs_commit_file(inode, NULL, 0, 0, 0);
 	}
 #else
 	if (dirty >= NFS_STRATEGY_PAGES * wpages)
 		nfs_flush_file(inode, NULL, 0, 0, 0);
 #endif
-	/*
-	 * If we're running out of free requests, flush out everything
-	 * in order to reduce memory useage...
-	 */
-	if (inode->u.nfs_i.npages > MAX_REQUEST_SOFT)
-		nfs_wb_all(inode);
 }
 
 int
@@ -1549,13 +1586,6 @@ int nfs_sync_file(struct inode *inode, s
 
 int nfs_init_nfspagecache(void)
 {
-	nfs_page_cachep = kmem_cache_create("nfs_page",
-					    sizeof(struct nfs_page),
-					    0, SLAB_HWCACHE_ALIGN,
-					    NULL, NULL);
-	if (nfs_page_cachep == NULL)
-		return -ENOMEM;
-
 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
 					     sizeof(struct nfs_write_data),
 					     0, SLAB_HWCACHE_ALIGN,
@@ -1563,7 +1593,7 @@ int nfs_init_nfspagecache(void)
 	if (nfs_wdata_cachep == NULL)
 		return -ENOMEM;
 
-	return 0;
+	return nfs_init_wbcache();
 }
 
 void nfs_destroy_nfspagecache(void)
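
nfs_init_wbcache() sizes its bucket array the way the 2.4 dcache and inode-cache hash tables are sized: budget roughly one bucket per 64KB of physical memory, round up to a whole power-of-two page order, then derive the power-of-two mask and the shift used by nfs_wb_hashfn(). The arithmetic, pulled out into a standalone userland sketch -- the memory size and the two addresses are invented for illustration, and L1_CACHE_BYTES is assumed to be 32:

	/* hash_size_demo.c -- models nfs_init_wbcache() table sizing
	 * and nfs_wb_hashfn() bucket selection */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define L1_CACHE_BYTES	32

	int main(void)
	{
		unsigned long num_physpages = 32768; /* say, 128MB in 4K pages */
		unsigned long mempages = num_physpages;
		unsigned long nr_hash, mask, shift, tmp;
		long order;

		/* one table entry (two pointers) per 64KB of memory */
		mempages >>= (16 - PAGE_SHIFT);
		mempages *= 2 * sizeof(void *);

		/* smallest page order that holds that many bytes */
		for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
			;

		nr_hash = ((1UL << order) * PAGE_SIZE) / (2 * sizeof(void *));
		mask = nr_hash - 1;	/* nr_hash is a power of two */
		for (shift = 0, tmp = nr_hash; (tmp >>= 1) != 0; shift++)
			;

		printf("%lu buckets (order %ld), mask %#lx, shift %lu\n",
		       nr_hash, order, mask, shift);

		/* bucket selection as in nfs_wb_hashfn() */
		{
			unsigned long inode_addr = 0xc1234560UL;
			unsigned long page_addr  = 0xc2345670UL;
			unsigned long index;

			index  = inode_addr / L1_CACHE_BYTES;
			index += page_addr  / L1_CACHE_BYTES;
			index += index >> shift;
			printf("request hashes to bucket %lu\n", index & mask);
		}
		return 0;
	}

Dividing the addresses by L1_CACHE_BYTES strips low bits that carry little information for cache-aligned objects, and the index >> shift fold mixes the high-order bits into the range that survives the mask.
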
diff -pruN Linux-2.4.4/include/linux/nfs_flushd.h linux/include/linux/nfs_flushd.h
--- Linux-2.4.4/include/linux/nfs_flushd.h	Thu Jan  4 17:51:09 2001
+++ linux/include/linux/nfs_flushd.h	Tue Jul 24 17:44:48 2001
@@ -1,44 +1,19 @@
 #ifndef NFS_CLUSTER_H
 #define NFS_CLUSTER_H
 
-
-
 #ifdef __KERNEL__
 #include <asm/atomic.h>
+#include <linux/list.h>
 #include <linux/nfs_fs_sb.h>
 
 /*
- * Counters of total number and pending number of requests.
- * When the total number of requests exceeds the soft limit, we start
- * flushing out requests. If it exceeds the hard limit, we stall until
- * it drops again.
- */
-#define MAX_REQUEST_SOFT        192
-#define MAX_REQUEST_HARD        256
-
-/*
- * Maximum number of requests per write cluster.
- * 32 requests per cluster account for 128K of data on an intel box.
- * Note: it's a good idea to make this number smaller than MAX_REQUEST_SOFT.
- *
- * For 100Mbps Ethernet, 128 pages (i.e. 256K) per cluster gives much
- * better performance.
- */
-#define REQUEST_HASH_SIZE	16
-#define REQUEST_NR(off)		((off) >> PAGE_CACHE_SHIFT)
-#define REQUEST_HASH(ino, off)	(((ino) ^ REQUEST_NR(off)) & (REQUEST_HASH_SIZE - 1))
-
-
-/*
  * Functions
  */
 extern int		nfs_reqlist_alloc(struct nfs_server *);
 extern void		nfs_reqlist_free(struct nfs_server *);
-extern int		nfs_reqlist_init(struct nfs_server *);
 extern void		nfs_reqlist_exit(struct nfs_server *);
 extern void		inode_schedule_scan(struct inode *, unsigned long);
 extern void		inode_remove_flushd(struct inode *);
-extern void		nfs_wake_flushd(void);
 
 /*
  * This is the per-mount writeback cache.
@@ -57,7 +32,7 @@ struct nfs_reqlist {
 	struct rpc_auth		*auth;
 
 	/* The list of all inodes with pending writebacks.  */
-	struct inode		*inodes;
+	struct list_head	inodes;
 };
 
 #endif
diff -pruN Linux-2.4.4/include/linux/nfs_fs_i.h linux/include/linux/nfs_fs_i.h
--- Linux-2.4.4/include/linux/nfs_fs_i.h	Sun Dec  3 21:01:01 2000
+++ linux/include/linux/nfs_fs_i.h	Tue Jul 24 17:45:19 2001
@@ -70,8 +70,7 @@ struct nfs_inode_info {
 				npages;
 
 	/* Flush daemon info */
-	struct inode		*hash_next,
-				*hash_prev;
+	struct list_head	flush;
 	unsigned long		nextscan;
 };
 
diff -pruN Linux-2.4.4/include/linux/nfs_page.h linux/include/linux/nfs_page.h
--- Linux-2.4.4/include/linux/nfs_page.h	Thu Jan  4 17:51:10 2001
+++ linux/include/linux/nfs_page.h	Tue Jul 24 17:47:30 2001
@@ -23,6 +23,7 @@
 
 struct nfs_page {
 	struct list_head	wb_hash,	/* Inode */
+				wb_cache,	/* writeback cache bucket */
 				wb_list,	/* Defines state of page: */
 				*wb_list_head;	/*      read/write/commit */
 	struct file		*wb_file;
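
With wb_cache added, each request is on two lists at once: the per-inode writeback list via wb_hash, and a global hash bucket via wb_cache. This is also why the fs/nfs/write.c changes above use list_del_init() and INIT_LIST_HEAD(&p->wb_cache): a node unlinked with plain list_del() would still carry stale pointers, while list_del_init() re-links it to itself, keeping tests like the !list_empty(&req->wb_cache) check in nfs_release_request() meaningful. A small self-contained illustration (userland model; names are illustrative):

	/* del_init_demo.c -- why unhashing uses list_del_init() */
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };
	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	static void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = entry->prev = entry;
	}

	static int list_empty(struct list_head *head)
	{
		return head->next == head;
	}

	int main(void)
	{
		struct list_head bucket = LIST_HEAD_INIT(bucket);
		struct list_head req = LIST_HEAD_INIT(req); /* INIT_LIST_HEAD */

		printf("hashed? %s\n", list_empty(&req) ? "no" : "yes");
		list_add(&req, &bucket);
		printf("hashed? %s\n", list_empty(&req) ? "no" : "yes");
		list_del_init(&req);	/* unhash; node is self-linked again */
		printf("hashed? %s\n", list_empty(&req) ? "no" : "yes");
		return 0;
	}
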
diff -pruN Linux-2.4.4/net/sunrpc/sched.c linux/net/sunrpc/sched.c
--- Linux-2.4.4/net/sunrpc/sched.c	Fri Jul 20 10:29:38 2001
+++ linux/net/sunrpc/sched.c	Wed Jul 25 11:47:26 2001
@@ -1053,23 +1053,19 @@ rpciod(void *ptr)
 
 	MOD_INC_USE_COUNT;
 	lock_kernel();
+	daemonize();
+
 	/*
 	 * Let our maker know we're running ...
 	 */
 	rpciod_pid = current->pid;
 	up(&rpciod_running);
 
-	exit_fs(current);
-	exit_files(current);
-	exit_mm(current);
-
 	spin_lock_irq(&current->sigmask_lock);
 	siginitsetinv(&current->blocked, sigmask(SIGKILL));
 	recalc_sigpending(current);
 	spin_unlock_irq(&current->sigmask_lock);
 
-	current->session = 1;
-	current->pgrp = 1;
 	strcpy(current->comm, "rpciod");
 
 	current->flags |= PF_MEMALLOC;
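
The rpciod change above leans on daemonize() instead of setting the thread up by hand. In 2.4, daemonize() (kernel/sched.c) covers the same ground as the removed lines -- it releases the mm, moves the thread into session 1 and process group 1, and swaps in init's fs and files structures -- which is why the open-coded exit_*() calls and the session/pgrp assignments can simply be dropped. A condensed paraphrase follows; consult kernel/sched.c for the authoritative version:

	void daemonize(void)	/* paraphrased, not verbatim */
	{
		struct fs_struct *fs;

		exit_mm(current);		/* release user-space pages */

		current->session = 1;		/* detach from the caller's */
		current->pgrp = 1;		/* session and process group */

		exit_fs(current);		/* become one with init: */
		fs = init_task.fs;		/* share its fs ... */
		current->fs = fs;
		atomic_inc(&fs->count);

		exit_files(current);		/* ... and its files */
		current->files = init_task.files;
		atomic_inc(&current->files->count);
	}

Note that daemonize() touches neither the signal state nor current->comm, so the sigmask setup and the strcpy() into current->comm above still have to be done by rpciod itself.
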
diff -pruN Linux-2.4.4/net/sunrpc/xprt.c linux/net/sunrpc/xprt.c
--- Linux-2.4.4/net/sunrpc/xprt.c	Fri Jul 20 10:29:38 2001
+++ linux/net/sunrpc/xprt.c	Wed Jul 25 11:51:52 2001
@@ -59,6 +59,7 @@
 #include <linux/unistd.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/file.h>
+#include <linux/smp_lock.h>
 
 #include <net/sock.h>
 #include <net/checksum.h>
@@ -195,6 +196,8 @@ xprt_sendmsg(struct rpc_xprt *xprt, stru
 	if (!sock)
 		return -ENOTCONN;
 
+	unlock_kernel();
+
 	xprt_pktdump("packet data:",
 				req->rq_svec->iov_base,
 				req->rq_svec->iov_len);
@@ -216,6 +219,8 @@ xprt_sendmsg(struct rpc_xprt *xprt, stru
 	set_fs(oldfs);
 
 	dprintk("RPC:      xprt_sendmsg(%d) = %d\n", slen, result);
+
+	lock_kernel();
 
 	if (result >= 0)
 		return result;
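
xprt_sendmsg() is entered with the BKL held -- the RPC-layer convention noted in the fs/nfs/flushd.c comment above -- but the socket send may block, so the patch drops the lock across it and retakes it before returning. The rule with this pattern is that nothing protected by the dropped lock may be relied on between the unlock and the relock. A sketch, with do_blocking_send() as a hypothetical stand-in for the sock_sendmsg() path:

	/* sketch of the unlock-send-relock pattern in xprt_sendmsg() */
	static int send_without_bkl(struct rpc_xprt *xprt, struct rpc_rqst *req)
	{
		int result;

		/* entered with the BKL held, per RPC-layer convention */
		unlock_kernel();
		result = do_blocking_send(xprt, req);	/* may sleep */
		lock_kernel();

		/* BKL-protected state may have changed while we slept;
		 * revalidate anything read before the unlock */
		return result;
	}
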
