An abstraction layer to retrieve the file layout.  The layout is fetched
from the server only if it has not already been retrieved for the
specified byte range.

Open question: do we want a separate cache that doesn't disappear with
the inode?

ASSUMPTIONS:
a) the layout applies to the entire file
b) the layout is immutable
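
Roughly, the intended use on the I/O paths looks like the sketch below.
This is illustrative only and not part of this patch: pnfs_file_read()
is a hypothetical caller, virtual_update_layout() is still static to
fs/nfs/pnfs.c here, passing FMODE_READ as the access_type is an
assumption, and the open context is assumed to live in filp->private_data.

	/* Hypothetical read-path hook: make sure a layout for this inode
	 * is cached before handing the byte range to the layout driver.
	 * Under assumptions (a) and (b) above, the first call per inode
	 * triggers a LAYOUTGET and every later call returns immediately.
	 */
	static ssize_t pnfs_file_read(struct file *filp, char __user *buf,
				      size_t count, loff_t pos)
	{
		struct inode *inode = filp->f_dentry->d_inode;
		struct nfs_open_context *ctx = filp->private_data;
		int err;

		err = virtual_update_layout(inode, ctx, count, pos, FMODE_READ);
		if (err)
			return err;	/* a real caller would fall back
					 * to plain NFS I/O here */

		/* ... pass the request to the layout driver's read method ... */
		return 0;
	}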

---
 linux-2.6.14-pnfs-current-dhildebz/fs/nfs/nfs4proc.c      |    4 
 linux-2.6.14-pnfs-current-dhildebz/fs/nfs/pnfs.c          |  106 ++++++++++++++
 linux-2.6.14-pnfs-current-dhildebz/include/linux/nfs_fs.h |    3 
 3 files changed, 113 insertions(+)

diff -puN fs/nfs/nfs4proc.c~client-layoutcache fs/nfs/nfs4proc.c
--- linux-2.6.14-pnfs-current/fs/nfs/nfs4proc.c~client-layoutcache	2006-01-12 13:13:37.878614000 -0500
+++ linux-2.6.14-pnfs-current-dhildebz/fs/nfs/nfs4proc.c	2006-01-12 13:13:37.917614000 -0500
@@ -749,6 +749,10 @@ static int _nfs4_do_open(struct inode *d
 	update_open_stateid(state, &o_res.stateid, flags);
 	if (o_res.delegation_type != 0)
 		nfs_inode_set_delegation(inode, cred, &o_res);
+
+	/* No layout cached yet for this inode */
+	NFS_I(inode)->current_layout = NULL;
+
 	nfs_free_seqid(o_arg.seqid);
 	nfs4_put_state_owner(sp);
 	up_read(&clp->cl_sem);
diff -puN fs/nfs/pnfs.c~client-layoutcache fs/nfs/pnfs.c
--- linux-2.6.14-pnfs-current/fs/nfs/pnfs.c~client-layoutcache	2006-01-12 13:13:37.888614000 -0500
+++ linux-2.6.14-pnfs-current-dhildebz/fs/nfs/pnfs.c	2006-01-12 13:18:08.508623000 -0500
@@ -72,6 +72,14 @@ struct pnfs_module {
 	struct list_head        pnfs_tblid;
 };
 
+/*
+ * pnfs_layout_extents: Keep track of all byte ranges for
+ * which we have requested layout information.
+ */
+struct pnfs_layout_extents {
+	struct list_head	ple_hash;	/* hash by "struct inode *" */
+};
+
 static void
 initialize_pnfs(void)
 {
@@ -241,5 +249,103 @@ get_layout(struct inode* ino, struct nfs
 	return status;
 }
 
+/* DH: Inject layout blob into the I/O module.  This must happen before
+ *     the I/O module has its read/write methods called.  Reuse the cached
+ *     layout if one exists; otherwise ask the layout driver to allocate one.
+ *     FIXME: in the allocation case nfsi->current_layout is NULL and cannot
+ *     supply the mount id; that must come from the layout driver's mount state.
+ */
+static struct pnfs_layout_type *
+pnfs_inject_layout(struct nfs_inode *nfsi, struct layoutdriver_io_operations *io_ops, void *new_layout)
+{
+	struct pnfs_layout_type *layid;
+	struct inode *inode = &nfsi->vfs_inode;
+
+	dprintk("%s Begin\n", __FUNCTION__);
+	if (nfsi->current_layout == NULL)
+		layid = io_ops->alloc_layout(nfsi->current_layout->mountid, inode);
+	else
+		layid = nfsi->current_layout;
+	return io_ops->set_layout(layid, inode, new_layout);
+}
+
+/* Check to see if the module is handling which layouts need to be
+ * retrieved from the server.  If they are not, then use retrieve based
+ * upon the returned data ranges from get_layout.
+ */
+static int
+virtual_update_layout(struct inode *ino, struct nfs_open_context *ctx, size_t count, loff_t pos, int access_type)
+{
+	struct nfs4_pnfs_layoutget_res *res = NULL;
+	struct nfs4_pnfs_layoutget_arg *arg = NULL;
+	struct nfs_inode *nfsi = NFS_I(ino);
+	struct nfs_server *nfss = NFS_SERVER(ino);
+	struct pnfs_layout_type *layout_temp;
+	int result;
+
+	if (0) {
+		/* Check to see if the pnfs module is handling data layout
+		 * range caching itself.  Something like:
+		 *
+		 * return nfss->pnfs_module->pnfs_io_interface->have_layout(..);
+		 */
+	}
+
+	/* Check to see if the layout for the given range already exists */
+	if (nfsi->current_layout != NULL) {
+		/* To make this generic, I would need to compare the extents
+		 * of the existing layout information.
+		 * For now, assume that whole file layouts are always returned.
+		 */
+		dprintk("%s: Using cached layout for %lu@%lu\n", __FUNCTION__, (unsigned long)count, (unsigned long)pos);
+
+		return 0; /* Already have layout information */
+	}
+
+	res = kmalloc(sizeof(*res), GFP_KERNEL);
+	if (!res) {
+		result = -ENOMEM;
+		goto out;
+	}
+
+	arg = kmalloc(sizeof(*arg), GFP_KERNEL);
+	if (!arg) {
+		result = -ENOMEM;
+		goto mem_res;
+	}
+
+	if ((result = get_layout(ino, ctx, count, pos, access_type, arg, res))) {
+		printk("%s: ERROR retrieving layout %d\n", __FUNCTION__, result);
+		result = -EIO;
+		goto mem_arg;
+	}
+
+	if (res->layout.len <= 0) {
+		printk("%s: ERROR!  Layout size is ZERO!\n", __FUNCTION__);
+		result = -EIO;
+		goto mem_arg;
+	}
+
+	/* Inject the layout blob into the I/O (layout) driver */
+	layout_temp = pnfs_inject_layout(nfsi, nfss->pnfs_curr_ld->ld_io_ops,
+					 res->layout.buf);
+	if (layout_temp == NULL) {
+		printk("%s: ERROR!  Could not inject layout\n", __FUNCTION__);
+		result = -EIO;
+		goto mem_arg;
+	}
+	nfsi->current_layout = layout_temp;
+
+	return 0;
+
+mem_arg:
+	kfree(arg);
+mem_res:
+	kfree(res);
+out:
+	dprintk("%s end (err:%d)\n", __FUNCTION__, result);
+	return result;
+}
+
 EXPORT_SYMBOL(pnfs_unregister_layoutdriver);
 EXPORT_SYMBOL(pnfs_register_layoutdriver);
diff -puN fs/nfs/pnfs.h~client-layoutcache fs/nfs/pnfs.h
diff -puN include/linux/nfs_fs.h~client-layoutcache include/linux/nfs_fs.h
--- linux-2.6.14-pnfs-current/include/linux/nfs_fs.h~client-layoutcache	2006-01-12 13:13:37.900614000 -0500
+++ linux-2.6.14-pnfs-current-dhildebz/include/linux/nfs_fs.h	2006-01-12 13:17:17.930394000 -0500
@@ -187,6 +187,9 @@ struct nfs_inode {
 	struct nfs_delegation	*delegation;
 	int			 delegation_state;
 	struct rw_semaphore	rwsem;
+
+	/* pNFS layout information */
+	struct pnfs_layout_type *current_layout;
 #endif /* CONFIG_NFS_V4*/
 	struct inode		vfs_inode;
 };
_
