author     Trond Myklebust <Trond.Myklebust@netapp.com>    2008-06-10 18:31:00 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2008-07-09 12:08:45 -0400
commit     efc91ed0191e3fc62bb1c556ac93fc4e661214d2 (patch)
tree       291dba382da5d609c5bd35b5e369324ecbb95c00 /fs/nfs
parent     b390c2b55c830eb3b64633fa8d8b8837e073e458 (diff)
NFS: Optimise append writes with holes
If a file is being extended, and we're creating a hole, we might as well
declare the entire page to be up to date.

This patch significantly improves the write performance for sparse files
in the case where lseek(SEEK_END) is used to append several non-contiguous
writes at intervals of < PAGE_SIZE.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
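For context (not part of the commit message), a minimal user-space sketch of the write pattern this optimisation targets; the mount point, file name, hole size and record size are illustrative assumptions. Each iteration seeks past the current end of file and appends a small record, so successive writes leave holes smaller than a page between them.

/*
 * Illustrative only: sparse-append workload with sub-PAGE_SIZE holes.
 * The path "/mnt/nfs/sparse.log" and the sizes below are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char record[512] = "payload";
	int fd = open("/mnt/nfs/sparse.log", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (int i = 0; i < 1000; i++) {
		/* Leave a 1 KiB hole past the current EOF, then append. */
		if (lseek(fd, 1024, SEEK_END) < 0 ||
		    write(fd, record, sizeof(record)) != (ssize_t)sizeof(record)) {
			perror("lseek/write");
			close(fd);
			return 1;
		}
	}
	close(fd);
	return 0;
}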
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/file.c	20
-rw-r--r--	fs/nfs/write.c	12
2 files changed, 23 insertions, 9 deletions
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 7c73f06692b..7ac89a845a5 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -344,6 +344,26 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
 	int status;
 
+	/*
+	 * Zero any uninitialised parts of the page, and then mark the page
+	 * as up to date if it turns out that we're extending the file.
+	 */
+	if (!PageUptodate(page)) {
+		unsigned pglen = nfs_page_length(page);
+		unsigned end = offset + len;
+
+		if (pglen == 0) {
+			zero_user_segments(page, 0, offset,
+					end, PAGE_CACHE_SIZE);
+			SetPageUptodate(page);
+		} else if (end >= pglen) {
+			zero_user_segment(page, end, PAGE_CACHE_SIZE);
+			if (offset == 0)
+				SetPageUptodate(page);
+		} else
+			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+	}
+
 	lock_kernel();
 	status = nfs_updatepage(file, page, offset, copied);
 	unlock_kernel();
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index dc62bc50469..eea2d2b5278 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -616,7 +616,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 			spin_unlock(&inode->i_lock);
 			radix_tree_preload_end();
 			req = new;
-			goto zero_page;
+			goto out;
 		}
 		spin_unlock(&inode->i_lock);
@@ -649,19 +649,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		req->wb_offset = offset;
 		req->wb_pgbase = offset;
 		req->wb_bytes = max(end, rqend) - req->wb_offset;
-		goto zero_page;
+		goto out;
 	}
 
 	if (end > rqend)
 		req->wb_bytes = end - req->wb_offset;
 
-	return req;
-zero_page:
-	/* If this page might potentially be marked as up to date,
-	 * then we need to zero any uninitalised data. */
-	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
-			&& !PageUptodate(req->wb_page))
-		zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
+out:
 	return req;
 }
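Aside (not part of the patch): the new logic in nfs_write_end() distinguishes three cases for a page that is not yet up to date. The sketch below is a hypothetical user-space model of that decision, assuming a 4096-byte PAGE_CACHE_SIZE; classify_write() is an invented helper that only reports which byte ranges would be zeroed and whether the page would be marked up to date. Because the zeroing now happens here, before nfs_updatepage() is called, write.c can drop its zero_page label as shown above.

/*
 * Hypothetical illustration, not kernel code: models the three zeroing
 * cases added to nfs_write_end(). Assumes a 4096-byte page.
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u

static void classify_write(unsigned offset, unsigned len, unsigned pglen)
{
	unsigned end = offset + len;

	if (pglen == 0) {
		/* Page lies entirely beyond the old EOF: zero both sides
		 * of the copied data and mark the whole page up to date. */
		printf("zero [0,%u) and [%u,%u); page up to date\n",
		       offset, end, PAGE_CACHE_SIZE);
	} else if (end >= pglen) {
		/* Write reaches or passes the old EOF inside this page:
		 * zero the tail; up to date only if the write began at 0. */
		printf("zero [%u,%u); up to date: %s\n",
		       end, PAGE_CACHE_SIZE, offset == 0 ? "yes" : "no");
	} else {
		/* Write falls inside existing data: only zero past EOF. */
		printf("zero [%u,%u); page left not up to date\n",
		       pglen, PAGE_CACHE_SIZE);
	}
}

int main(void)
{
	classify_write(1024, 512, 0);	/* page wholly past the old EOF */
	classify_write(0, 512, 256);	/* write from 0 covers the old EOF */
	classify_write(128, 64, 1024);	/* write inside existing data */
	return 0;
}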