diff -druN Linux-2.4.14/Documentation/Configure.help linux-2.4.14-odirect/Documentation/Configure.help
--- Linux-2.4.14/Documentation/Configure.help	Fri Nov  9 16:39:06 2001
+++ linux-2.4.14-odirect/Documentation/Configure.help	Fri Nov 16 10:15:30 2001
@@ -12524,6 +12524,30 @@
  
   If unsure, say N.
 
+Allow O_DIRECT on files in NFS
+CONFIG_NFS_ODIRECT
+  There are important applications whose performance or correctness
+  depends on uncached access to file data.  Database clusters (multiple
+  copies of the same instance running on separate hosts) implement their
+  own cache coherency protocol that subsumes the NFS cache protocols.
+  Applications that process datasets considerably larger than the client's
+  memory do not always benefit from a local cache.  A streaming video
+  server, for instance, has no need to cache the contents of a file.
+
+  Normally the O_DIRECT flag has no effect on NFS files.  This option
+  enables applications to use the O_DIRECT flag when opening or fcntl'ing
+  a file contained on an NFS partition.  When O_DIRECT is set for such
+  files, their data is not cached in the system's page cache.
+
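+  For example, a database engine might pass O_DIRECT among its open(2)
+  flags, or set it afterwards with fcntl(2), so that its own buffer
+  pool stays authoritative and the client's page cache is bypassed for
+  that file.
+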
+  Even without enabling NFS_ODIRECT, the "nodc" mount option is still
+  available to disable data caching for whole partitions.
+
+  Unless your program is designed to use O_DIRECT properly, you are much
+  better off allowing the NFS client to manage caching for you.  Misusing
+  O_DIRECT can cause poor server performance or network storms.
+
+  If unsure, say N.
+
 Root file system on NFS
 CONFIG_ROOT_NFS
   If you want your Linux box to mount its whole root file system (the
diff -druN Linux-2.4.14/fs/Config.in linux-2.4.14-odirect/fs/Config.in
--- Linux-2.4.14/fs/Config.in	Fri Nov  9 16:39:16 2001
+++ linux-2.4.14-odirect/fs/Config.in	Fri Nov 16 15:48:30 2001
@@ -86,6 +86,7 @@
    dep_tristate 'Coda file system support (advanced network fs)' CONFIG_CODA_FS $CONFIG_INET
    dep_tristate 'NFS file system support' CONFIG_NFS_FS $CONFIG_INET
    dep_mbool '  Provide NFSv3 client support' CONFIG_NFS_V3 $CONFIG_NFS_FS
+   dep_bool '  Allow O_DIRECT on NFS files' CONFIG_NFS_ODIRECT $CONFIG_NFS_FS
    dep_bool '  Root file system on NFS' CONFIG_ROOT_NFS $CONFIG_NFS_FS $CONFIG_IP_PNP
 
    dep_tristate 'NFS server support' CONFIG_NFSD $CONFIG_INET
diff -druN Linux-2.4.14/fs/nfs/Makefile linux-2.4.14-odirect/fs/nfs/Makefile
--- Linux-2.4.14/fs/nfs/Makefile	Fri Dec 29 17:07:23 2000
+++ linux-2.4.14-odirect/fs/nfs/Makefile	Tue Dec  4 12:40:00 2001
@@ -14,6 +14,7 @@
 
 obj-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o      
 obj-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
+obj-$(CONFIG_NFS_ODIRECT) += direct.o
 
 obj-m   := $(O_TARGET)
 
diff -druN Linux-2.4.14/fs/nfs/direct.c linux-2.4.14-odirect/fs/nfs/direct.c
--- Linux-2.4.14/fs/nfs/direct.c	Wed Dec 31 19:00:00 1969
+++ linux-2.4.14-odirect/fs/nfs/direct.c	Mon Dec  3 16:41:30 2001
@@ -0,0 +1,522 @@
+/*
+ * linux/fs/nfs/direct.c
+ *
+ * High-performance direct I/O for the NFS client
+ *
+ * When an application requests uncached I/O, all read and write requests
+ * are made directly to the server; data stored or fetched via these
+ * requests is not cached in the Linux page cache.  The client does not
+ * correct unaligned requests from applications.  All requested bytes are
+ * held on permanent storage before a direct write system call returns to
+ * an application.  Applications that manage their own data caching, such
+ * as databases, can make very good use of direct I/O.
+ *
+ * Solaris implements an uncached I/O facility called directio() that
+ * is used for backups and sequential I/O to very large files.  Solaris
+ * also supports uncaching whole NFS partitions with "-o forcedirectio,"
+ * an undocumented mount option.
+ *
+ * Note that I/O to read in executables (kernel_read, for example) cannot
+ * use direct (kiobuf) reads because there is no vma backing the passed-in
+ * data buffer.
+ *
+ * Simple-minded prototype  November 2001, by Chuck Lever <cel@netapp.com>
+ *
+ * TODO:
+ *
+ * 1.  Use concurrent asynchronous network requests rather than
+ *     serialized synchronous network requests for normal (non-sync)
+ *     direct I/O.
+ */
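+
+/*
+ * A typical caller simply opens the file with O_DIRECT set, for
+ * example (user-space sketch only, error handling omitted):
+ *
+ *	fd = open("/mnt/nfs/dbfile", O_RDWR | O_DIRECT);
+ *	read(fd, buffer, len);
+ *
+ * after which reads and writes on that descriptor bypass the client's
+ * page cache and go straight to the server.
+ */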
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/nfs_fs.h>
+#include <linux/smp_lock.h>
+#include <linux/nfs_mount.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/iobuf.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#define NFSDBG_FACILITY		(NFSDBG_PAGECACHE | NFSDBG_VFS)
+#define VERF_SIZE		(2 * sizeof(__u32))
+
+/*
+ * Generate a single network request
+ */
+static /* inline */ int
+nfs_direct_read_one(struct file *file, struct inode *inode, loff_t offset,
+	size_t count, const char *dst, struct kiobuf *iobuf)
+{
+	int result, i;
+	unsigned long offset_in_page;
+	struct nfs_fattr fattr;
+	struct nfs_readres res = { &fattr, count, 0 };
+	struct nfs_readargs arg;
+	struct iovec *iovec = arg.iov;
+	struct rpc_message msg;
+
+	arg.fh = NFS_FH(inode);
+	arg.offset = offset;
+	arg.count = count;
+	arg.nriov = 0;
+
+	result = map_user_kiobuf(READ, iobuf, (unsigned long) dst, count);
+	if (result)
+		return result;
+
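+	/*
+	 * Build one iovec entry per mapped page.  Only the first entry
+	 * may begin part-way into a page; later entries start on page
+	 * boundaries, and the final one is trimmed to the remaining
+	 * byte count.
+	 */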
+	offset_in_page = ((unsigned long) dst) & (PAGE_SIZE - 1);
+	for (i = 0; i < iobuf->nr_pages; i++) {
+		struct page *page = iobuf->maplist[i];
+
+		if (!page) {
+			while (i--)
+				kunmap(iobuf->maplist[i]);
+			unmap_kiobuf(iobuf);
+			return -EFAULT;
+		}
+
+		iovec->iov_base = kmap(page) + offset_in_page;
+		iovec->iov_len = (PAGE_SIZE - offset_in_page);
+		if ((offset_in_page + count) < PAGE_SIZE)
+			iovec->iov_len = count;
+
+		count -= iovec->iov_len;
+		offset_in_page = 0;		/* zero after the first page */
+		iovec++;
+		arg.nriov++;
+	}
+
+#ifdef CONFIG_NFS_V3
+	msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ?
+						NFS3PROC_READ : NFSPROC_READ;
+#else
+	msg.rpc_proc = NFSPROC_READ;
+#endif
+	msg.rpc_argp = &arg;
+	msg.rpc_resp = &res;
+
+	lock_kernel();
+	msg.rpc_cred = nfs_file_cred(file);
+	fattr.valid = 0;
+	result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	nfs_refresh_inode(inode, &fattr);
+	unlock_kernel();
+
+	for (i = 0; i < iobuf->nr_pages; i++) {
+		flush_dcache_page(iobuf->maplist[i]);
+		kunmap(iobuf->maplist[i]);
+	}
+	unmap_kiobuf(iobuf);
+
+	if (result == -EISDIR)
+		return -EINVAL;
+
+	return result;
+}
+
+static /* inline */ int
+nfs_direct_write_one(struct file *file, struct inode *inode,
+	struct kiobuf *iobuf, const char *dst, loff_t offset, size_t count,
+	int flags, struct nfs_writeverf *verf)
+{
+	int result, i;
+	unsigned long offset_in_page;
+	struct nfs_fattr fattr;
+	struct nfs_writeres res = { &fattr, verf, 0 };
+	struct nfs_writeargs arg;
+	struct iovec *iovec = arg.iov;
+	struct rpc_message msg;
+
+	arg.fh = NFS_FH(inode);
+	arg.offset = offset;
+	arg.count = count;
+	arg.stable = flags;
+	arg.nriov = 0;
+
+	result = map_user_kiobuf(WRITE, iobuf, (unsigned long) dst, count);
+	if (result)
+		return result;
+
+	offset_in_page = ((unsigned long) dst) & (PAGE_SIZE - 1);
+	for (i = 0; i < iobuf->nr_pages; i++) {
+		struct page *page = iobuf->maplist[i];
+
+		if (!page) {
+			while (i--)
+				kunmap(iobuf->maplist[i]);
+			unmap_kiobuf(iobuf);
+			return -EFAULT;
+		}
+
+		iovec->iov_base = kmap(page) + offset_in_page;
+		iovec->iov_len = (PAGE_SIZE - offset_in_page);
+		if ((offset_in_page + count) < PAGE_SIZE)
+			iovec->iov_len = count;
+
+		count -= iovec->iov_len;
+		offset_in_page = 0;		/* zero after the first page */
+		iovec++;
+		arg.nriov++;
+	}
+
+#ifdef CONFIG_NFS_V3
+	msg.rpc_proc = (NFS_PROTO(inode)->version == 3) ?
+						NFS3PROC_WRITE : NFSPROC_WRITE;
+#else
+	msg.rpc_proc = NFSPROC_WRITE;
+#endif
+	msg.rpc_argp = &arg;
+	msg.rpc_resp = &res;
+
+	lock_kernel();
+	msg.rpc_cred = get_rpccred(nfs_file_cred(file));
+	fattr.valid = 0;
+	result = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	nfs_write_attributes(inode, &fattr);
+	put_rpccred(msg.rpc_cred);
+	unlock_kernel();
+
+	for (i = 0; i < iobuf->nr_pages; i++) {
+		flush_dcache_page(iobuf->maplist[i]);
+		kunmap(iobuf->maplist[i]);
+	}
+	unmap_kiobuf(iobuf);
+
+	if (result > 0) {
+		if ((flags == NFS_RW_SYNC) &&
+					(verf->committed != NFS_FILE_SYNC)) {
+			printk(KERN_ERR __FUNCTION__
+				": server didn't sync stable write request\n");
+			return -EIO;
+		}
+
+		if (result != count)
+			printk(KERN_INFO __FUNCTION__
+				": short write, count=%u, result=%d\n",
+								count, result);
+	}
+
+	return result;
+}
+
+#ifdef CONFIG_NFS_V3
+static /* inline */ int
+nfs_direct_commit_one(struct inode *inode, loff_t offset, size_t count,
+	struct nfs_writeverf *verf)
+{
+	int result;
+	struct nfs_fattr fattr;
+
+	lock_kernel();
+	result = NFS_PROTO(inode)->commit(inode, &fattr, offset, count, verf);
+	nfs_write_attributes(inode, &fattr);
+	unlock_kernel();
+
+	return result;
+}
+#else
+static inline int
+nfs_direct_commit_one(struct inode *inode, loff_t offset, size_t count,
+	struct nfs_writeverf *verf)
+{
+	return 0;
+}
+#endif
+
+static int
+nfs_direct_read_multiple(struct file *file, const char *dst, size_t count,
+	loff_t offset)
+{
+	int new_iobuf;
+	int total = 0;
+	struct kiobuf *iobuf;
+	struct inode *inode = file->f_dentry->d_inode;
+	int rsize = NFS_SERVER(inode)->rsize;
+
+	/*
+	 * Try to use cached iobuf.  Parallel reads on this file
+	 * descriptor will go a little slower.
+	 *
+	 * XXX: !iobuf test not needed
+	 */
+	new_iobuf = 0;
+	iobuf = file->f_iobuf;
+	if (test_and_set_bit(0, &file->f_iobuf_lock) || !iobuf) {
+		if (alloc_kiovec(1, &iobuf))
+			return -ENOMEM;
+		new_iobuf = 1;
+	}
+
+	while (count) {
+		int request, result;
+
+		request = count;
+		if (count > rsize)
+			request = rsize;
+
+		result = nfs_direct_read_one(file, inode, offset,
+							request, dst, iobuf);
+		if (result < 0) {
+			total = result;
+			break;
+		}
+
+		total += result;
+		count -= result;
+		offset += result;
+		dst += result;
+
+		if (result < request)	/* NFSv2ism */
+			break;
+	}
+
+	if (!new_iobuf)
+		clear_bit(0, &file->f_iobuf_lock);
+	else
+		free_kiovec(1, &iobuf);
+
+	return total;
+}
+
+/*
+ * Push one stable network write.
+ */
+static int
+nfs_direct_write_single(struct file *file, const char *dst, size_t count,
+	loff_t pos)
+{
+	int result, new_iobuf;
+	struct kiobuf *iobuf;
+	struct nfs_writeverf verf;
+
+	/*
+	 * Try to use cached iobuf.  Parallel writes on this file
+	 * descriptor will go a little slower.
+	 *
+	 * XXX: !iobuf test not needed
+	 */
+	new_iobuf = 0;
+	iobuf = file->f_iobuf;
+	if (test_and_set_bit(0, &file->f_iobuf_lock) || !iobuf) {
+		if (alloc_kiovec(1, &iobuf))
+			return -ENOMEM;
+		new_iobuf = 1;
+	}
+
+	result = nfs_direct_write_one(file, file->f_dentry->d_inode,
+						iobuf, dst, pos, count,
+						NFS_FILE_SYNC, &verf);
+
+	if (!new_iobuf)
+		clear_bit(0, &file->f_iobuf_lock);
+	else
+		free_kiovec(1, &iobuf);
+
+	return result;
+}
+
+/*
+ * Push multiple network writes.  If this is V3, push unstable writes
+ * and finish with a commit.
+ *
+ * The "sync" mount flag forces every V3 write to be stable (FILE_SYNC),
+ * so each network write reaches the server's *disk* synchronously and
+ * in byte order before the next one is issued, giving database
+ * applications an added degree of durability.
+ */
+static int
+nfs_direct_write_multiple(struct file *file, const char *user, size_t count,
+	loff_t pos)
+{
+	int flags, total, remaining, need_commit, new_iobuf;
+	loff_t offset;
+	const char *dst;
+	struct kiobuf *iobuf;
+	struct inode *inode = file->f_dentry->d_inode;
+	struct nfs_writeverf first_verf, ret_verf;
+
+	/*
+	 * Try to use cached iobuf.  Parallel writes on this file
+	 * descriptor will go a little slower.
+	 *
+	 * XXX: !iobuf test not needed
+	 */
+	new_iobuf = 0;
+	iobuf = file->f_iobuf;
+	if (test_and_set_bit(0, &file->f_iobuf_lock) || !iobuf) {
+		if (alloc_kiovec(1, &iobuf))
+			return -ENOMEM;
+		new_iobuf = 1;
+	}
+
+	flags = NFS_FILE_SYNC;
+#ifdef CONFIG_NFS_V3
+	if ((NFS_PROTO(inode)->version == 3) && !IS_SYNC(inode))
+		flags = NFS_UNSTABLE;
+#endif
+
+retry:
+	need_commit = 0;
+	total = 0;
+	remaining = count;
+	offset = pos;
+	dst = user;
+	while (remaining) {
+		int request, result;
+		int wsize = NFS_SERVER(inode)->wsize;
+
+		request = remaining;
+		if (remaining > wsize)
+			request = wsize;
+
+		result = nfs_direct_write_one(file, inode, iobuf, dst, offset,
+						    request, flags, &ret_verf);
+		if (result < 0) {
+			total = result;
+			break;
+		}
+
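+		/*
+		 * Remember the verifier from the first reply.  If a later
+		 * reply (or the commit) carries a different verifier, the
+		 * server has rebooted and may have discarded our unstable
+		 * data, so the whole transfer is retried with FILE_SYNC.
+		 */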
+		if (!total)
+			memcpy(&first_verf.verifier, &ret_verf.verifier,
+								VERF_SIZE);
+		if (ret_verf.committed != NFS_FILE_SYNC) {
+			need_commit = 1;
+			if (memcmp(&first_verf.verifier, &ret_verf.verifier,
+								VERF_SIZE))
+				goto print_retry;
+		}
+
+		total += result;
+		remaining -= result;
+		offset += result;
+		dst += result;
+	}
+
+	if (need_commit) {
+		if (nfs_direct_commit_one(inode, pos, count - remaining,
+								&ret_verf))
+			goto print_retry;
+		if (memcmp(&first_verf.verifier, &ret_verf.verifier,
+								VERF_SIZE))
+			goto print_retry;
+	}
+
+	if (!new_iobuf)
+		clear_bit(0, &file->f_iobuf_lock);
+	else
+		free_kiovec(1, &iobuf);
+
+	return total;
+
+print_retry:
+	printk(KERN_INFO __FUNCTION__
+		    ": detected server restart; retrying with FILE_SYNC\n");
+	flags = NFS_FILE_SYNC;
+	goto retry;
+}
+
+int
+nfs_direct_read(struct file *file, const char *dst, size_t count,
+	loff_t *ppos)
+{
+	int result;
+	loff_t pos = *ppos;
+	struct dentry *dentry = file->f_dentry;
+
+	dfprintk(VFS, "NFS: direct_read(%s/%s, %d@%Lu)\n",
+			dentry->d_parent->d_name.name, dentry->d_name.name,
+				count, pos);
+
+	if (((ssize_t) count < 0) || (pos < 0))
+		return -EINVAL;
+	if (!access_ok(VERIFY_WRITE, dst, count))
+		return -EFAULT;
+
+	result = nfs_direct_read_multiple(file, dst, count, pos);
+	if (result > 0)
+		*ppos = pos + result;
+
+	return result;
+}
+
+int
+nfs_direct_write(struct file *file, const char *dst, size_t count,
+	loff_t *ppos)
+{
+	int result;
+	unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+	loff_t pos = *ppos;
+	struct dentry *dentry = file->f_dentry;
+	struct inode *inode = dentry->d_inode;
+
+	dfprintk(VFS, "NFS: direct_write(%s/%s, %d@%Lu)\n",
+			dentry->d_parent->d_name.name, dentry->d_name.name,
+				count, pos);
+
+	if (((ssize_t) count < 0) || (pos < 0))
+		return -EINVAL;
+	if (!access_ok(VERIFY_READ, dst, count))
+		return -EFAULT;
+
+	down(&inode->i_sem);
+
+	/*
+	 * Flush pending cached writes for the requested range.
+	 * Don't wait -- we just need to maintain write ordering.
+	 */
+	if (nfs_have_writebacks(inode))
+		nfs_sync_file(inode, file, pos,
+					(count >> PAGE_CACHE_SHIFT) + 1, 0);
+
+	if (file->f_flags & O_APPEND)
+		pos = inode->i_size;
+
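+	/*
+	 * Enforce RLIMIT_FSIZE and the 2GB boundary for files opened
+	 * without O_LARGEFILE, as generic_file_write() does.
+	 */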
+	result = -EFBIG;
+	if (limit != RLIM_INFINITY) {
+		if (pos >= limit) {
+			send_sig(SIGXFSZ, current, 0);
+			goto out;
+		}
+		if ((pos > 0xFFFFFFFFULL) || (count > (limit - (u32)pos)))
+			count = limit - (u32)pos;
+	}
+	if (((pos + count) > MAX_NON_LFS) && !(file->f_flags & O_LARGEFILE)) {
+		if (pos >= MAX_NON_LFS) {
+			send_sig(SIGXFSZ, current, 0);
+			goto out;
+		}
+		if (count > (MAX_NON_LFS - (u32)pos))
+			count = MAX_NON_LFS - (u32)pos;
+	}
+
+	if (count <= NFS_SERVER(inode)->wsize)
+		result = nfs_direct_write_single(file, dst, count, pos);
+	else
+		result = nfs_direct_write_multiple(file, dst, count, pos);
+
+	/*
+	 * Because we didn't write into the page cache, anything cached
+	 * there for this file is now stale.  nfs_write_attributes was
+	 * used to refresh the attribute cache after each network write,
+	 * which avoids purging the data cache on every request; instead
+	 * the cached pages are invalidated just once, here, at the end
+	 * of the whole write operation.
+	 */
+	invalidate_inode_pages(inode);
+
+	if (result > 0)
+		*ppos = pos + result;
+
+out:
+	up(&inode->i_sem);
+	return result;
+}
+
+int nfs_init_directcache(void)
+{
+	return 0;
+}
+
+void nfs_destroy_directcache(void)
+{
+	return;
+}
diff -druN Linux-2.4.14/fs/nfs/file.c linux-2.4.14-odirect/fs/nfs/file.c
--- Linux-2.4.14/fs/nfs/file.c	Fri Nov  9 16:38:19 2001
+++ linux-2.4.14-odirect/fs/nfs/file.c	Tue Dec  4 12:40:40 2001
@@ -99,6 +99,11 @@
 		dentry->d_parent->d_name.name, dentry->d_name.name,
 		(unsigned long) count, (unsigned long) *ppos);
 
+#ifdef CONFIG_NFS_ODIRECT
+	if (file->f_flags & O_DIRECT)
+		return nfs_direct_read(file, buf, count, ppos);
+#endif
+
 	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
 	if (!result)
 		result = generic_file_read(file, buf, count, ppos);
@@ -224,6 +229,12 @@
 	result = -EBUSY;
 	if (IS_SWAPFILE(inode))
 		goto out_swapfile;
+
+#ifdef CONFIG_NFS_ODIRECT
+	if (file->f_flags & O_DIRECT)
+		return nfs_direct_write(file, buf, count, ppos);
+#endif
+
 	result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
 	if (result)
 		goto out;
diff -druN Linux-2.4.14/fs/nfs/inode.c linux-2.4.14-odirect/fs/nfs/inode.c
--- Linux-2.4.14/fs/nfs/inode.c	Fri Nov  9 16:38:45 2001
+++ linux-2.4.14-odirect/fs/nfs/inode.c	Tue Dec  4 12:41:46 2001
@@ -1071,7 +1071,9 @@
 extern int nfs_init_nfspagecache(void);
 extern void nfs_destroy_nfspagecache(void);
 extern int nfs_init_readpagecache(void);
-extern int nfs_destroy_readpagecache(void);
+extern void nfs_destroy_readpagecache(void);
+extern int nfs_init_directcache(void);
+extern void nfs_destroy_directcache(void);
 
 /*
  * Initialize NFS
@@ -1088,6 +1090,12 @@
 	if (err)
 		return err;
 
+#ifdef CONFIG_NFS_ODIRECT
+	err = nfs_init_directcache();
+	if (err)
+		return err;
+#endif
+
 #ifdef CONFIG_PROC_FS
 	rpc_proc_register(&nfs_rpcstat);
 #endif
@@ -1098,6 +1106,9 @@
 {
 	nfs_destroy_readpagecache();
 	nfs_destroy_nfspagecache();
+#ifdef CONFIG_NFS_ODIRECT
+	nfs_destroy_directcache();
+#endif
 #ifdef CONFIG_PROC_FS
 	rpc_proc_unregister("nfs");
 #endif
diff -druN Linux-2.4.14/fs/nfs/nfs3proc.c linux-2.4.14-odirect/fs/nfs/nfs3proc.c
--- Linux-2.4.14/fs/nfs/nfs3proc.c	Fri Nov  9 16:38:45 2001
+++ linux-2.4.14-odirect/fs/nfs/nfs3proc.c	Fri Nov  9 16:50:02 2001
@@ -177,10 +177,31 @@
 
 	status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
 
-	dprintk("NFS reply read: %d\n", status);
+	dprintk("NFS reply write: %d\n", status);
 	return status < 0? status : res.count;
 }
 
+static int
+nfs3_proc_commit(struct inode *inode, struct nfs_fattr *fattr, loff_t offset,
+	unsigned int count, struct nfs_writeverf *verf)
+{
+	struct nfs_writeargs	arg = { NFS_FH(inode), offset, count, 0, 0,
+					{{0, 0}, {0,0}, {0,0}, {0,0},
+					 {0,0}, {0,0}, {0,0}, {0,0}} };
+	struct nfs_writeres	res = { fattr, verf, 0 };
+	struct rpc_message	msg = { NFS3PROC_COMMIT, &arg, &res, NULL };
+	int			status;
+
+	dprintk("NFS call  commit %d @ %Ld\n", count, (long long)offset);
+
+	fattr->valid = 0;
+
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+
+	dprintk("NFS reply commit: %d\n", status);
+	return status;
+}
+
 /*
  * Create a regular file.
  * For now, we don't implement O_EXCL.
@@ -495,7 +516,7 @@
 	nfs3_proc_readlink,
 	nfs3_proc_read,
 	nfs3_proc_write,
-	NULL,			/* commit */
+	nfs3_proc_commit,
 	nfs3_proc_create,
 	nfs3_proc_remove,
 	nfs3_proc_unlink_setup,
diff -druN Linux-2.4.14/fs/nfs/write.c linux-2.4.14-odirect/fs/nfs/write.c
--- Linux-2.4.14/fs/nfs/write.c	Fri Nov  9 16:39:01 2001
+++ linux-2.4.14-odirect/fs/nfs/write.c	Fri Nov  9 16:50:02 2001
@@ -147,23 +147,6 @@
 }
 
 /*
- * This function will be used to simulate weak cache consistency
- * under NFSv2 when the NFSv3 attribute patch is included.
- * For the moment, we just call nfs_refresh_inode().
- */
-static __inline__ int
-nfs_write_attributes(struct inode *inode, struct nfs_fattr *fattr)
-{
-	if ((fattr->valid & NFS_ATTR_FATTR) && !(fattr->valid & NFS_ATTR_WCC)) {
-		fattr->pre_size  = NFS_CACHE_ISIZE(inode);
-		fattr->pre_mtime = NFS_CACHE_MTIME(inode);
-		fattr->pre_ctime = NFS_CACHE_CTIME(inode);
-		fattr->valid |= NFS_ATTR_WCC;
-	}
-	return nfs_refresh_inode(inode, fattr);
-}
-
-/*
  * Write a page synchronously.
  * Offset is the data offset within the page.
  */
diff -druN Linux-2.4.14/include/linux/nfs_fs.h linux-2.4.14-odirect/include/linux/nfs_fs.h
--- Linux-2.4.14/include/linux/nfs_fs.h	Fri Nov  9 16:37:50 2001
+++ linux-2.4.14-odirect/include/linux/nfs_fs.h	Tue Nov 20 14:47:12 2001
@@ -260,6 +260,12 @@
 extern int  nfs_pagein_timeout(struct inode *);
 
 /*
+ * linux/fs/nfs/direct.c
+ */
+extern int  nfs_direct_write(struct file *, const char *, size_t, loff_t *);
+extern int  nfs_direct_read(struct file *, const char *, size_t, loff_t *);
+
+/*
  * linux/fs/mount_clnt.c
  * (Used only by nfsroot module)
  */
@@ -283,6 +289,23 @@
 	if ((fattr->valid & NFS_ATTR_FATTR) == 0)
 		return 0;
 	return __nfs_refresh_inode(inode,fattr);
+}
+
+/*
+ * This function will be used to simulate weak cache consistency
+ * under NFSv2 when the NFSv3 attribute patch is included.
+ * For the moment, we just call nfs_refresh_inode().
+ */
+static __inline__ int
+nfs_write_attributes(struct inode *inode, struct nfs_fattr *fattr)
+{
+	if ((fattr->valid & NFS_ATTR_FATTR) && !(fattr->valid & NFS_ATTR_WCC)) {
+		fattr->pre_size  = NFS_CACHE_ISIZE(inode);
+		fattr->pre_mtime = NFS_CACHE_MTIME(inode);
+		fattr->pre_ctime = NFS_CACHE_CTIME(inode);
+		fattr->valid |= NFS_ATTR_WCC;
+	}
+	return nfs_refresh_inode(inode, fattr);
 }
 
 static inline loff_t
diff -druN Linux-2.4.14/include/linux/nfs_xdr.h linux-2.4.14-odirect/include/linux/nfs_xdr.h
--- Linux-2.4.14/include/linux/nfs_xdr.h	Mon Jan 29 15:07:43 2001
+++ linux-2.4.14-odirect/include/linux/nfs_xdr.h	Fri Nov  9 16:50:02 2001
@@ -331,8 +331,8 @@
 			    struct nfs_fattr *,
 			    int, loff_t, unsigned int,
 			    void *buffer, struct nfs_writeverf *verfp);
-	int	(*commit)  (struct inode *, struct nfs_fattr *,
-			    unsigned long, unsigned int);
+	int	(*commit)  (struct inode *, struct nfs_fattr *, loff_t,
+			    unsigned int, struct nfs_writeverf *verfp);
 	int	(*create)  (struct inode *, struct qstr *, struct iattr *,
 			    int, struct nfs_fh *, struct nfs_fattr *);
 	int	(*remove)  (struct inode *, struct qstr *);
diff -druN Linux-2.4.14/net/sunrpc/xprt.c linux-2.4.14-odirect/net/sunrpc/xprt.c
--- Linux-2.4.14/net/sunrpc/xprt.c	Fri Nov  9 16:38:48 2001
+++ linux-2.4.14-odirect/net/sunrpc/xprt.c	Thu Nov 29 13:19:59 2001
@@ -630,10 +630,14 @@
 			int to_move = cur_len;
 			if (to_move > copied)
 				to_move = copied;
-			if (need_csum)
-				csum = skb_copy_and_csum_bits(skb, offset, cur_ptr,
-							      to_move, csum);
-			else
+			if (need_csum) {
+				unsigned int csum2;
+
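+				/*
+				 * Checksum this fragment on its own, then
+				 * fold it into the running sum at its
+				 * offset, so fragments that land on odd
+				 * boundaries are rotated correctly.
+				 */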
+				csum2 = skb_copy_and_csum_bits(skb, offset,
+							       cur_ptr,
+							       to_move, 0);
+				csum = csum_block_add(csum, csum2, offset);
+			} else
 				skb_copy_bits(skb, offset, cur_ptr, to_move);
 			offset += to_move;
 			copied -= to_move;
@@ -647,8 +651,12 @@
 		}
 	}
 	if (need_csum) {
-		if (slack > 0)
-			csum = skb_checksum(skb, offset, slack, csum);
+		if (slack > 0) {
+			unsigned int csum2;
+
+			csum2 = skb_checksum(skb, offset, slack, 0);
+			csum = csum_block_add(csum, csum2, offset);
+		}
 		if ((unsigned short)csum_fold(csum))
 			return -1;
 	}
