diff -dpruN Linux-2.4.4/fs/nfs/write.c linux/fs/nfs/write.c
--- Linux-2.4.4/fs/nfs/write.c	Tue May  1 14:33:01 2001
+++ linux/fs/nfs/write.c	Mon May  7 15:17:09 2001
@@ -65,6 +65,13 @@
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
 
 /*
+ * If an inode has more than MAX_REQUEST_INODE requests pending,
+ * flush its requests and wait for them to finish.  This throttles
+ * processes that dirty lots of pages.
+ */
+#define MAX_REQUEST_INODE	(32)
+
+/*
  * Spinlock
  */
 spinlock_t nfs_wreq_lock = SPIN_LOCK_UNLOCKED;
@@ -507,44 +514,9 @@ struct nfs_page *nfs_create_request(stru
 	struct nfs_page		*req = NULL;
 	long			timeout;
 
-	/* Deal with hard/soft limits.
-	 */
-	do {
-		/* If we're over the global soft limit, wake up all requests */
-		if (atomic_read(&nfs_nr_requests) >= MAX_REQUEST_SOFT) {
-			dprintk("NFS:      hit soft limit (%d requests)\n",
-				atomic_read(&nfs_nr_requests));
-			if (!cache->task)
-				nfs_reqlist_init(NFS_SERVER(inode));
-			nfs_wake_flushd();
-		}
-
-		/* If we haven't reached the local hard limit yet,
-		 * try to allocate the request struct */
-		if (atomic_read(&cache->nr_requests) < MAX_REQUEST_HARD) {
-			req = nfs_page_alloc();
-			if (req != NULL)
-				break;
-		}
-
-		/* We're over the hard limit. Wait for better times */
-		dprintk("NFS:      create_request sleeping (total %d pid %d)\n",
-			atomic_read(&cache->nr_requests), current->pid);
-
-		timeout = 1 * HZ;
-		if (NFS_SERVER(inode)->flags & NFS_MOUNT_INTR) {
-			interruptible_sleep_on_timeout(&cache->request_wait,
-						       timeout);
-			if (signalled())
-				break;
-		} else
-			sleep_on_timeout(&cache->request_wait, timeout);
-
-		dprintk("NFS:      create_request waking up (tot %d pid %d)\n",
-			atomic_read(&cache->nr_requests), current->pid);
-	} while (!req);
+	req = nfs_page_alloc();
 	if (!req)
-		return NULL;
+		return req;
 
 	/* Initialize the request struct. Initially, we assume a
 	 * long write-back delay. This will be adjusted in
@@ -882,11 +854,6 @@ nfs_update_request(struct file* file, st
 		}
 		spin_unlock(&nfs_wreq_lock);
 
-		/*
-		 * If we're over the soft limit, flush out old requests
-		 */
-		if (inode->u.nfs_i.npages >= MAX_REQUEST_SOFT)
-			nfs_wb_file(inode, file);
 		new = nfs_create_request(file, inode, page, offset, bytes);
 		if (!new)
 			return ERR_PTR(-ENOMEM);
@@ -955,6 +922,7 @@ nfs_strategy(struct inode *inode)
 
 	dirty  = inode->u.nfs_i.ndirty;
 	wpages = NFS_SERVER(inode)->wpages;
+
 #ifdef CONFIG_NFS_V3
 	if (NFS_PROTO(inode)->version == 2) {
 		if (dirty >= NFS_STRATEGY_PAGES * wpages)
@@ -962,20 +930,19 @@ nfs_strategy(struct inode *inode)
 	} else {
 		if (dirty >= wpages)
 			nfs_flush_file(inode, NULL, 0, 0, 0);
-		if (inode->u.nfs_i.ncommit > NFS_STRATEGY_PAGES * wpages &&
-		    atomic_read(&nfs_nr_requests) > MAX_REQUEST_SOFT)
+		if (inode->u.nfs_i.ncommit > NFS_STRATEGY_PAGES * wpages)
 			nfs_commit_file(inode, NULL, 0, 0, 0);
 	}
 #else
 	if (dirty >= NFS_STRATEGY_PAGES * wpages)
 		nfs_flush_file(inode, NULL, 0, 0, 0);
 #endif
+
 	/*
-	 * If we're running out of free requests, flush out everything
-	 * in order to reduce memory useage...
+	 * If this inode has too many outstanding requests, throttle the writer.
 	 */
-	if (inode->u.nfs_i.npages > MAX_REQUEST_SOFT)
-		nfs_wb_all(inode);
+	if (inode->u.nfs_i.npages > MAX_REQUEST_INODE)
+		nfs_wait_on_requests(inode, NULL, 0, 0);
 }
 
 int
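
(Note: for reference, this is how nfs_strategy() reads with the two hunks
above applied.  The function header, the declaration of dirty/wpages, and
the one nfs_flush_file() line that falls between the hunks are reconstructed
from context and may differ slightly from the actual tree; everything else
is taken verbatim from the patch.)

static inline void
nfs_strategy(struct inode *inode)
{
	unsigned int	dirty, wpages;	/* declaration assumed from usage */

	dirty  = inode->u.nfs_i.ndirty;
	wpages = NFS_SERVER(inode)->wpages;

#ifdef CONFIG_NFS_V3
	if (NFS_PROTO(inode)->version == 2) {
		if (dirty >= NFS_STRATEGY_PAGES * wpages)
			nfs_flush_file(inode, NULL, 0, 0, 0);
	} else {
		if (dirty >= wpages)
			nfs_flush_file(inode, NULL, 0, 0, 0);
		if (inode->u.nfs_i.ncommit > NFS_STRATEGY_PAGES * wpages)
			nfs_commit_file(inode, NULL, 0, 0, 0);
	}
#else
	if (dirty >= NFS_STRATEGY_PAGES * wpages)
		nfs_flush_file(inode, NULL, 0, 0, 0);
#endif

	/*
	 * If this inode has too many outstanding requests, throttle the writer.
	 */
	if (inode->u.nfs_i.npages > MAX_REQUEST_INODE)
		nfs_wait_on_requests(inode, NULL, 0, 0);
}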
diff -dpruN Linux-2.4.4/include/linux/nfs_flushd.h linux/include/linux/nfs_flushd.h
--- Linux-2.4.4/include/linux/nfs_flushd.h	Thu Jan  4 17:51:09 2001
+++ linux/include/linux/nfs_flushd.h	Mon May  7 15:54:57 2001
@@ -8,28 +8,6 @@
 #include <linux/nfs_fs_sb.h>
 
 /*
- * Counters of total number and pending number of requests.
- * When the total number of requests exceeds the soft limit, we start
- * flushing out requests. If it exceeds the hard limit, we stall until
- * it drops again.
- */
-#define MAX_REQUEST_SOFT        192
-#define MAX_REQUEST_HARD        256
-
-/*
- * Maximum number of requests per write cluster.
- * 32 requests per cluster account for 128K of data on an intel box.
- * Note: it's a good idea to make this number smaller than MAX_REQUEST_SOFT.
- *
- * For 100Mbps Ethernet, 128 pages (i.e. 256K) per cluster gives much
- * better performance.
- */
-#define REQUEST_HASH_SIZE	16
-#define REQUEST_NR(off)		((off) >> PAGE_CACHE_SHIFT)
-#define REQUEST_HASH(ino, off)	(((ino) ^ REQUEST_NR(off)) & (REQUEST_HASH_SIZE - 1))
-
-
-/*
  * Functions
  */
 extern int		nfs_reqlist_alloc(struct nfs_server *);

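(Note: to make the behavioral change concrete, the fragment below is a
minimal userspace sketch, not kernel code; the struct and function names
are hypothetical stand-ins.  Under the removed scheme, all writers to one
server share a single MAX_REQUEST_HARD pool of requests, so a single heavy
writer can put every other writer on that server to sleep; under the new
scheme, only the inode that exceeds MAX_REQUEST_INODE is made to wait.)

#include <stdio.h>

#define MAX_REQUEST_HARD	256	/* old: shared per-server hard limit (removed) */
#define MAX_REQUEST_INODE	32	/* new: per-inode limit (added) */

/* Hypothetical stand-in for the kernel's per-inode request count. */
struct demo_inode {
	const char	*name;
	unsigned int	npages;		/* outstanding write requests */
};

/* Old scheme: every writer sleeps once the shared pool is full. */
static int old_writer_must_sleep(unsigned int server_requests)
{
	return server_requests >= MAX_REQUEST_HARD;
}

/* New scheme: a writer waits only when its own inode is over the limit. */
static int new_writer_must_wait(const struct demo_inode *inode)
{
	return inode->npages > MAX_REQUEST_INODE;
}

int main(void)
{
	struct demo_inode heavy = { "heavy writer", 250 };
	struct demo_inode light = { "light writer", 8 };
	unsigned int server_requests = heavy.npages + light.npages;

	printf("old scheme: %s sleeps=%d, %s sleeps=%d (shared pool: %u)\n",
	       heavy.name, old_writer_must_sleep(server_requests),
	       light.name, old_writer_must_sleep(server_requests),
	       server_requests);
	printf("new scheme: %s waits=%d, %s waits=%d\n",
	       heavy.name, new_writer_must_wait(&heavy),
	       light.name, new_writer_must_wait(&light));
	return 0;
}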