field | value | date
---|---|---
author | David Woodhouse <dwmw2@infradead.org> | 2007-04-26 09:31:28 +0100
committer | David Woodhouse <dwmw2@infradead.org> | 2007-04-26 09:31:28 +0100
commit | ef2e58ea6b9931c3a4816c66593da49bb20e3b24 (patch) | 
tree | ce7432add3becbe78de4ea06425cd2d9e91f4ada /fs/nfs/write.c | 
parent | 06d63cc51d47f572009138a7f3ac34d95773405d (diff) | 
parent | de46c33745f5e2ad594c72f2cf5f490861b16ce1 (diff) | 
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r-- | fs/nfs/write.c | 301 |
1 file changed, 188 insertions, 113 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index febdade9167..79755894174 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
+#include <linux/swap.h>
 
 #include <linux/sunrpc/clnt.h>
 #include <linux/nfs_fs.h>
@@ -37,8 +38,6 @@
 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					struct page *,
					unsigned int, unsigned int);
-static void nfs_mark_request_dirty(struct nfs_page *req);
-static int nfs_wait_on_write_congestion(struct address_space *, int);
 static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
@@ -48,8 +47,6 @@
 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
 static mempool_t *nfs_commit_mempool;
-static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
-
 struct nfs_write_data *nfs_commit_alloc(void)
 {
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
@@ -211,6 +208,43 @@ static int wb_priority(struct writeback_control *wbc)
 }
 
 /*
+ * NFS congestion control
+ */
+
+int nfs_congestion_kb;
+
+#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
+#define NFS_CONGESTION_OFF_THRESH	\
+	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
+
+static int nfs_set_page_writeback(struct page *page)
+{
+	int ret = test_set_page_writeback(page);
+
+	if (!ret) {
+		struct inode *inode = page->mapping->host;
+		struct nfs_server *nfss = NFS_SERVER(inode);
+
+		if (atomic_inc_return(&nfss->writeback) >
+				NFS_CONGESTION_ON_THRESH)
+			set_bdi_congested(&nfss->backing_dev_info, WRITE);
+	}
+	return ret;
+}
+
+static void nfs_end_page_writeback(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct nfs_server *nfss = NFS_SERVER(inode);
+
+	end_page_writeback(page);
+	if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
+		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
+		congestion_end(WRITE);
+	}
+}
+
+/*
  * Find an associated nfs write request, and prepare to flush it out
  * Returns 1 if there was no write request, or if the request was
  * already tagged by nfs_set_page_dirty.Returns 0 if the request
@@ -220,7 +254,8 @@ static int nfs_page_mark_flush(struct page *page)
 {
	struct nfs_page *req;
-	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+	spinlock_t *req_lock = &nfsi->req_lock;
	int ret;
 
	spin_lock(req_lock);
@@ -244,11 +279,23 @@ static int nfs_page_mark_flush(struct page *page)
			return ret;
		spin_lock(req_lock);
	}
-	spin_unlock(req_lock);
-	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
-		nfs_mark_request_dirty(req);
-		set_page_writeback(page);
+	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+		/* This request is marked for commit */
+		spin_unlock(req_lock);
+		nfs_unlock_request(req);
+		return 1;
	}
+	if (nfs_set_page_writeback(page) == 0) {
+		nfs_list_remove_request(req);
+		/* add the request to the inode's dirty list. */
+		radix_tree_tag_set(&nfsi->nfs_page_tree,
+				req->wb_index, NFS_PAGE_TAG_DIRTY);
+		nfs_list_add_request(req, &nfsi->dirty);
+		nfsi->ndirty++;
+		spin_unlock(req_lock);
+		__mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES);
+	} else
+		spin_unlock(req_lock);
	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfs_unlock_request(req);
	return ret;
@@ -302,13 +349,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
	return err;
 }
 
-/*
- * Note: causes nfs_update_request() to block on the assumption
- * that the writeback is generated due to memory pressure.
- */
 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int err;
 
@@ -317,20 +359,12 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
-	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
-		if (wbc->nonblocking)
-			return 0;
-		nfs_wait_on_write_congestion(mapping, 0);
-	}
	err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
	if (err < 0)
		goto out;
	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	err = 0;
 out:
-	clear_bit(BDI_write_congested, &bdi->state);
-	wake_up_all(&nfs_write_congestion);
-	congestion_end(WRITE);
	return err;
 }
 
@@ -354,13 +388,15 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
+	if (PageDirty(req->wb_page))
+		set_bit(PG_NEED_FLUSH, &req->wb_flags);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
 }
 
 /*
- * Insert a write request into an inode
+ * Remove a write request from an inode
  */
 static void nfs_inode_remove_request(struct nfs_page *req)
 {
@@ -373,6 +409,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
+		__set_page_dirty_nobuffers(req->wb_page);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfsi->req_lock);
@@ -384,28 +422,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
	nfs_release_request(req);
 }
 
-/*
- * Add a request to the inode's dirty list.
- */
-static void
-nfs_mark_request_dirty(struct nfs_page *req)
-{
-	struct inode *inode = req->wb_context->dentry->d_inode;
-	struct nfs_inode *nfsi = NFS_I(inode);
-
-	spin_lock(&nfsi->req_lock);
-	radix_tree_tag_set(&nfsi->nfs_page_tree,
-			req->wb_index, NFS_PAGE_TAG_DIRTY);
-	nfs_list_add_request(req, &nfsi->dirty);
-	nfsi->ndirty++;
-	spin_unlock(&nfsi->req_lock);
-	__mark_inode_dirty(inode, I_DIRTY_PAGES);
-}
-
 static void
 nfs_redirty_request(struct nfs_page *req)
 {
-	clear_bit(PG_FLUSHING, &req->wb_flags);
	__set_page_dirty_nobuffers(req->wb_page);
 }
 
@@ -415,7 +434,11 @@ nfs_redirty_request(struct nfs_page *req)
 static inline int
 nfs_dirty_request(struct nfs_page *req)
 {
-	return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
+	struct page *page = req->wb_page;
+
+	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
+		return 0;
+	return !PageWriteback(req->wb_page);
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -431,10 +454,48 @@ nfs_mark_request_commit(struct nfs_page *req)
	spin_lock(&nfsi->req_lock);
	nfs_list_add_request(req, &nfsi->commit);
	nfsi->ncommit++;
+	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
	spin_unlock(&nfsi->req_lock);
	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+	return data->verf.committed != NFS_FILE_SYNC;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+		nfs_mark_request_commit(req);
+		return 1;
+	}
+	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+		nfs_redirty_request(req);
+		return 1;
+	}
+	return 0;
+}
+#else
+static inline void
+nfs_mark_request_commit(struct nfs_page *req)
+{
+}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+	return 0;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+	return 0;
+}
 #endif
 
 /*
@@ -481,6 +542,7 @@ static void nfs_cancel_dirty_list(struct list_head *head)
	while(!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
+		nfs_end_page_writeback(req->wb_page);
		nfs_inode_remove_request(req);
		nfs_clear_page_writeback(req);
	}
@@ -494,6 +556,7 @@ static void nfs_cancel_commit_list(struct list_head *head)
		req = nfs_list_entry(head->next);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
		nfs_list_remove_request(req);
+		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		nfs_inode_remove_request(req);
		nfs_unlock_request(req);
	}
@@ -531,10 +594,10 @@ static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, un
 }
 #endif
 
-static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
+static int nfs_wait_on_write_congestion(struct address_space *mapping)
 {
+	struct inode *inode = mapping->host;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
-	DEFINE_WAIT(wait);
	int ret = 0;
 
	might_sleep();
@@ -542,31 +605,23 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
	if (!bdi_write_congested(bdi))
		return 0;
 
-	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);
+	nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT);
 
-	if (intr) {
-		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
+	do {
+		struct rpc_clnt *clnt = NFS_CLIENT(inode);
		sigset_t oldset;
 
		rpc_clnt_sigmask(clnt, &oldset);
-		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
-		if (bdi_write_congested(bdi)) {
-			if (signalled())
-				ret = -ERESTARTSYS;
-			else
-				schedule();
-		}
+		ret = congestion_wait_interruptible(WRITE, HZ/10);
		rpc_clnt_sigunmask(clnt, &oldset);
-	} else {
-		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
-		if (bdi_write_congested(bdi))
-			schedule();
-	}
-	finish_wait(&nfs_write_congestion, &wait);
+		if (ret == -ERESTARTSYS)
+			break;
+		ret = 0;
+	} while (bdi_write_congested(bdi));
+
	return ret;
 }
-
 /*
  * Try to update any existing write request, or create one if there is none.
  * In order to match, the request's credentials must match those of
@@ -577,14 +632,15 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
 static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
 {
-	struct inode *inode = page->mapping->host;
+	struct address_space *mapping = page->mapping;
+	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page	*req, *new = NULL;
	unsigned long	rqend, end;
 
	end = offset + bytes;
 
-	if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
+	if (nfs_wait_on_write_congestion(mapping))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
@@ -727,26 +783,12 @@ int nfs_updatepage(struct file *file, struct page *page,
 
 static void nfs_writepage_release(struct nfs_page *req)
 {
-	end_page_writeback(req->wb_page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-	if (!PageError(req->wb_page)) {
-		if (NFS_NEED_RESCHED(req)) {
-			nfs_redirty_request(req);
-			goto out;
-		} else if (NFS_NEED_COMMIT(req)) {
-			nfs_mark_request_commit(req);
-			goto out;
-		}
-	}
-	nfs_inode_remove_request(req);
-
-out:
-	nfs_clear_commit(req);
-	nfs_clear_reschedule(req);
-#else
-	nfs_inode_remove_request(req);
-#endif
+	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
+		nfs_end_page_writeback(req->wb_page);
+		nfs_inode_remove_request(req);
+	} else
+		nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_writeback(req);
 }
 
@@ -879,6 +921,7 @@ out_bad:
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
+	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_writeback(req);
	return -ENOMEM;
 }
@@ -924,6 +967,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
		struct nfs_page *req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
+		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_writeback(req);
	}
	return -ENOMEM;
@@ -959,6 +1003,7 @@ out_err:
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
+		nfs_end_page_writeback(req->wb_page);
		nfs_clear_page_writeback(req);
	}
	return error;
@@ -986,22 +1031,28 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
		nfs_set_pageerror(page);
		req->wb_context->error = task->tk_status;
		dprintk(", error = %d\n", task->tk_status);
-	} else {
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-		if (data->verf.committed < NFS_FILE_SYNC) {
-			if (!NFS_NEED_COMMIT(req)) {
-				nfs_defer_commit(req);
-				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-				dprintk(" defer commit\n");
-			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
-				nfs_defer_reschedule(req);
-				dprintk(" server reboot detected\n");
-			}
-		} else
-#endif
-			dprintk(" OK\n");
+		goto out;
	}
+	if (nfs_write_need_commit(data)) {
+		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+
+		spin_lock(req_lock);
+		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+			/* Do nothing we need to resend the writes */
+		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+			dprintk(" defer commit\n");
+		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
+			set_bit(PG_NEED_RESCHED, &req->wb_flags);
+			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
+			dprintk(" server reboot detected\n");
+		}
+		spin_unlock(req_lock);
+	} else
+		dprintk(" OK\n");
+
+out:
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
 }
@@ -1042,25 +1093,21 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
		if (task->tk_status < 0) {
			nfs_set_pageerror(page);
			req->wb_context->error = task->tk_status;
-			end_page_writeback(page);
-			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
-			goto next;
+			goto remove_request;
		}
-		end_page_writeback(page);
 
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
-			nfs_inode_remove_request(req);
-			dprintk(" OK\n");
+		if (nfs_write_need_commit(data)) {
+			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+			nfs_mark_request_commit(req);
+			nfs_end_page_writeback(page);
+			dprintk(" marked for commit\n");
			goto next;
		}
-		memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-		nfs_mark_request_commit(req);
-		dprintk(" marked for commit\n");
-#else
+		dprintk(" OK\n");
+remove_request:
+		nfs_end_page_writeback(page);
		nfs_inode_remove_request(req);
-#endif
	next:
		nfs_clear_page_writeback(req);
	}
@@ -1248,6 +1295,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
+		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 
		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
@@ -1483,15 +1531,22 @@ int nfs_wb_page(struct inode *inode, struct page* page)
 
 int nfs_set_page_dirty(struct page *page)
 {
+	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
	struct nfs_page *req;
+	int ret;
 
-	req = nfs_page_find_request(page);
+	spin_lock(req_lock);
+	req = nfs_page_find_request_locked(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
-		set_bit(PG_NEED_FLUSH, &req->wb_flags);
+		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
+		spin_unlock(req_lock);
		nfs_release_request(req);
+		return ret;
	}
-	return __set_page_dirty_nobuffers(page);
+	ret = __set_page_dirty_nobuffers(page);
+	spin_unlock(req_lock);
+	return ret;
 }
 
@@ -1514,6 +1569,26 @@ int __init nfs_init_writepagecache(void)
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;
 
+	/*
+	 * NFS congestion size, scale with available memory.
+	 *
+	 *  64MB:    8192k
+	 * 128MB:   11585k
+	 * 256MB:   16384k
+	 * 512MB:   23170k
+	 *   1GB:   32768k
+	 *   2GB:   46340k
+	 *   4GB:   65536k
+	 *   8GB:   92681k
+	 *  16GB:  131072k
+	 *
+	 * This allows larger machines to have larger/more transfers.
+	 * Limit the default to 256M
+	 */
+	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+	if (nfs_congestion_kb > 256*1024)
+		nfs_congestion_kb = 256*1024;
+
	return 0;
 }
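The per-server writeback accounting introduced by this merge gates the BDI congestion flag with a simple hysteresis: nfs_set_page_writeback() raises congestion once the number of pages under writeback exceeds NFS_CONGESTION_ON_THRESH, and nfs_end_page_writeback() only clears it again after the count drops below NFS_CONGESTION_OFF_THRESH, i.e. 75% of the on-threshold. The stand-alone sketch below mimics that on/off behaviour in plain C; the counter, the threshold values and the `congested` flag are illustrative stand-ins for the kernel's atomic_t and set_bdi_congested()/clear_bdi_congested(), not the kernel API.

```c
/*
 * Sketch of the writeback congestion hysteresis used by
 * nfs_set_page_writeback()/nfs_end_page_writeback() above.
 * Names and values are illustrative, not the kernel API.
 */
#include <stdio.h>

static int nfs_congestion_kb = 8192;	/* assumed tunable, in KiB */
#define PAGE_SHIFT 12			/* assumed 4 KiB pages */

#define ON_THRESH  (nfs_congestion_kb >> (PAGE_SHIFT - 10))	/* pages */
#define OFF_THRESH (ON_THRESH - (ON_THRESH >> 2))		/* 75% of ON */

static int writeback;	/* pages currently under writeback for one server */
static int congested;	/* stands in for the BDI write-congested bit */

static void writeback_begin(void)
{
	if (++writeback > ON_THRESH)
		congested = 1;		/* set_bdi_congested() in the kernel */
}

static void writeback_done(void)
{
	if (--writeback < OFF_THRESH)
		congested = 0;		/* clear_bdi_congested() */
}

int main(void)
{
	int i;

	for (i = 0; i < ON_THRESH + 10; i++)
		writeback_begin();
	printf("%d pages in flight: congested=%d\n", writeback, congested);

	/* Dropping back to just below the ON threshold is not enough... */
	while (writeback > ON_THRESH - 100)
		writeback_done();
	printf("%d pages in flight: congested=%d\n", writeback, congested);

	/* ...only falling below the OFF threshold clears the flag. */
	while (writeback >= OFF_THRESH)
		writeback_done();
	printf("%d pages in flight: congested=%d\n", writeback, congested);
	return 0;
}
```

The gap between the two thresholds keeps the congestion bit from flapping while the number of in-flight pages hovers around the limit.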
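nfs_init_writepagecache() now also derives the default nfs_congestion_kb from total memory: 16 * int_sqrt(totalram_pages) pages, converted to kilobytes and capped at 256 MB. A small userspace check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12) and a local integer square root in place of the kernel's int_sqrt():

```c
/*
 * Userspace check of the nfs_congestion_kb default computed in
 * nfs_init_writepagecache().  Assumes PAGE_SHIFT == 12 (4 KiB pages);
 * isqrt() is a local stand-in for the kernel's int_sqrt().
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long long isqrt(unsigned long long x)
{
	unsigned long long r = x, y = (x + 1) / 2;

	while (y < r) {		/* Babylonian iteration, truncating */
		r = y;
		y = (r + x / r) / 2;
	}
	return r;
}

int main(void)
{
	unsigned long long mb;

	for (mb = 64; mb <= 16 * 1024; mb *= 2) {
		unsigned long long totalram_pages = (mb << 20) >> PAGE_SHIFT;
		unsigned long long kb =
			(16 * isqrt(totalram_pages)) << (PAGE_SHIFT - 10);

		if (kb > 256 * 1024)
			kb = 256 * 1024;	/* same 256 MB cap as the kernel */
		printf("%6lluMB: %8lluk\n", mb, kb);
	}
	return 0;
}
```

The even powers of two (64MB, 256MB, 1GB, 4GB, 16GB) reproduce the table in the comment exactly; the in-between sizes come out a few kilobytes lower because the integer square root truncates where the comment's figures appear to be rounded from an exact square root.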
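In the completion paths, nfs_write_need_commit() checks whether the server committed the data to stable storage (anything short of NFS_FILE_SYNC still needs a COMMIT), and nfs_writeback_done_partial() remembers the write verifier from the first unstable reply; if a later reply carries a different verifier, the server has rebooted and the request is flagged for resend rather than commit. A compact sketch of that decision, using hypothetical stand-in types for the verifier and the PG_NEED_COMMIT/PG_NEED_RESCHED bits:

```c
/*
 * Sketch of the defer-commit / server-reboot decision made in
 * nfs_writeback_done_partial().  Types, flags and function names are
 * hypothetical stand-ins, not the kernel's structures.
 */
#include <stdio.h>
#include <string.h>

enum stable_how { NFS_UNSTABLE, NFS_DATA_SYNC, NFS_FILE_SYNC };

struct verifier { unsigned char data[8]; };	/* NFS write verifier */

struct request {
	struct verifier wb_verf;	/* verifier from the first unstable reply */
	int need_commit;		/* PG_NEED_COMMIT stand-in */
	int need_resched;		/* PG_NEED_RESCHED stand-in */
};

/* Mirrors nfs_write_need_commit(): anything short of FILE_SYNC
 * still needs a COMMIT before the data is durable. */
static int write_need_commit(enum stable_how committed)
{
	return committed != NFS_FILE_SYNC;
}

static void write_reply(struct request *req, enum stable_how committed,
			const struct verifier *verf)
{
	if (!write_need_commit(committed)) {
		printf("stable write: nothing to commit\n");
		return;
	}
	if (req->need_resched) {
		/* already queued for resend: nothing to do */
	} else if (!req->need_commit) {
		req->need_commit = 1;
		req->wb_verf = *verf;
		printf("defer commit\n");
	} else if (memcmp(&req->wb_verf, verf, sizeof(*verf))) {
		/* verifier changed: the server rebooted, resend the data */
		req->need_resched = 1;
		req->need_commit = 0;
		printf("server reboot detected\n");
	}
}

int main(void)
{
	struct request req;
	struct verifier v1 = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
	struct verifier v2 = { { 9, 9, 9, 9, 9, 9, 9, 9 } };

	memset(&req, 0, sizeof(req));
	write_reply(&req, NFS_UNSTABLE, &v1);	/* first unstable reply */
	write_reply(&req, NFS_UNSTABLE, &v2);	/* verifier changed     */
	write_reply(&req, NFS_FILE_SYNC, &v1);	/* server wrote stably  */
	return 0;
}
```

Comparing verifiers rather than trusting the unstable reply alone is what lets the client notice that previously "written" data was lost across a server restart and must be sent again before it can be committed.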