author    Paul Mundt <lethal@linux-sh.org>  2009-12-10 15:40:31 +0900
committer Paul Mundt <lethal@linux-sh.org>  2009-12-10 15:40:31 +0900
commit    b5c00a3a412857d6f07970984068c450429e051c (patch)
tree      1fde50630cbc24e11a45169f717f281db8eb6dcc /fs
parent    ca6f2d7fafd2d48b2f6943f5c6787beaec2014d0 (diff)
parent    3067e02f8f3ae2f3f02ba76400d03b8bcb4942b0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into sh/for-2.6.33
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/cache.c | 14
-rw-r--r--  fs/afs/file.c | 15
-rw-r--r--  fs/aio.c | 62
-rw-r--r--  fs/binfmt_elf.c | 2
-rw-r--r--  fs/bio.c | 14
-rw-r--r--  fs/block_dev.c | 12
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/cachefiles/interface.c | 32
-rw-r--r--  fs/cachefiles/namei.c | 187
-rw-r--r--  fs/cachefiles/rdwr.c | 130
-rw-r--r--  fs/cifs/CHANGES | 9
-rw-r--r--  fs/cifs/README | 2
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifspdu.h | 2
-rw-r--r--  fs/cifs/dir.c | 8
-rw-r--r--  fs/cifs/inode.c | 4
-rw-r--r--  fs/cifs/smbdes.c | 2
-rw-r--r--  fs/coda/sysctl.c | 10
-rw-r--r--  fs/compat_ioctl.c | 747
-rw-r--r--  fs/debugfs/inode.c | 6
-rw-r--r--  fs/direct-io.c | 10
-rw-r--r--  fs/dlm/plock.c | 2
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/ext3/inode.c | 2
-rw-r--r--  fs/ext4/inode.c | 8
-rw-r--r--  fs/ext4/mballoc.c | 2
-rw-r--r--  fs/file_table.c | 2
-rw-r--r--  fs/fs-writeback.c | 28
-rw-r--r--  fs/fscache/Kconfig | 7
-rw-r--r--  fs/fscache/Makefile | 1
-rw-r--r--  fs/fscache/cache.c | 5
-rw-r--r--  fs/fscache/cookie.c | 26
-rw-r--r--  fs/fscache/internal.h | 56
-rw-r--r--  fs/fscache/main.c | 6
-rw-r--r--  fs/fscache/object-list.c | 432
-rw-r--r--  fs/fscache/object.c | 104
-rw-r--r--  fs/fscache/operation.c | 120
-rw-r--r--  fs/fscache/page.c | 273
-rw-r--r--  fs/fscache/proc.c | 13
-rw-r--r--  fs/fscache/stats.c | 94
-rw-r--r--  fs/fuse/dir.c | 3
-rw-r--r--  fs/gfs2/Kconfig | 2
-rw-r--r--  fs/gfs2/acl.c | 357
-rw-r--r--  fs/gfs2/acl.h | 24
-rw-r--r--  fs/gfs2/aops.c | 20
-rw-r--r--  fs/gfs2/dir.c | 34
-rw-r--r--  fs/gfs2/glock.c | 31
-rw-r--r--  fs/gfs2/glock.h | 9
-rw-r--r--  fs/gfs2/glops.c | 5
-rw-r--r--  fs/gfs2/incore.h | 5
-rw-r--r--  fs/gfs2/inode.c | 4
-rw-r--r--  fs/gfs2/log.c | 2
-rw-r--r--  fs/gfs2/lops.c | 4
-rw-r--r--  fs/gfs2/main.c | 4
-rw-r--r--  fs/gfs2/ops_fstype.c | 154
-rw-r--r--  fs/gfs2/quota.c | 393
-rw-r--r--  fs/gfs2/quota.h | 5
-rw-r--r--  fs/gfs2/recovery.c | 4
-rw-r--r--  fs/gfs2/rgrp.c | 14
-rw-r--r--  fs/gfs2/super.c | 110
-rw-r--r--  fs/gfs2/super.h | 4
-rw-r--r--  fs/gfs2/sys.c | 14
-rw-r--r--  fs/gfs2/xattr.c | 74
-rw-r--r--  fs/gfs2/xattr.h | 8
-rw-r--r--  fs/inode.c | 10
-rw-r--r--  fs/jffs2/compr.c | 2
-rw-r--r--  fs/jffs2/read.c | 9
-rw-r--r--  fs/jffs2/readinode.c | 2
-rw-r--r--  fs/jffs2/xattr.c | 2
-rw-r--r--  fs/jfs/jfs_dmap.c | 4
-rw-r--r--  fs/lockd/svc.c | 26
-rw-r--r--  fs/namespace.c | 20
-rw-r--r--  fs/ncpfs/ioctl.c | 2
-rw-r--r--  fs/nfs/fscache.c | 10
-rw-r--r--  fs/nfs/sysctl.c | 22
-rw-r--r--  fs/nfs/write.c | 2
-rw-r--r--  fs/notify/inotify/inotify_user.c | 18
-rw-r--r--  fs/ntfs/compress.c | 2
-rw-r--r--  fs/ntfs/file.c | 4
-rw-r--r--  fs/ntfs/logfile.c | 2
-rw-r--r--  fs/ntfs/sysctl.c | 4
-rw-r--r--  fs/ocfs2/alloc.c | 2
-rw-r--r--  fs/ocfs2/blockcheck.c | 2
-rw-r--r--  fs/ocfs2/cluster/netdebug.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 2
-rw-r--r--  fs/ocfs2/journal.c | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 2
-rw-r--r--  fs/ocfs2/stackglue.c | 15
-rw-r--r--  fs/omfs/bitmap.c | 2
-rw-r--r--  fs/open.c | 27
-rw-r--r--  fs/partitions/check.c | 12
-rw-r--r--  fs/partitions/efi.c | 30
-rw-r--r--  fs/partitions/efi.h | 8
-rw-r--r--  fs/proc/array.c | 23
-rw-r--r--  fs/proc/proc_sysctl.c | 4
-rw-r--r--  fs/proc/stat.c | 19
-rw-r--r--  fs/qnx4/bitmap.c | 2
-rw-r--r--  fs/qnx4/dir.c | 6
-rw-r--r--  fs/qnx4/inode.c | 26
-rw-r--r--  fs/qnx4/namei.c | 6
-rw-r--r--  fs/quota/Kconfig | 2
-rw-r--r--  fs/quota/dquot.c | 128
-rw-r--r--  fs/quota/quota.c | 93
-rw-r--r--  fs/read_write.c | 2
-rw-r--r--  fs/reiserfs/Makefile | 2
-rw-r--r--  fs/reiserfs/bitmap.c | 4
-rw-r--r--  fs/reiserfs/dir.c | 10
-rw-r--r--  fs/reiserfs/do_balan.c | 17
-rw-r--r--  fs/reiserfs/file.c | 2
-rw-r--r--  fs/reiserfs/fix_node.c | 21
-rw-r--r--  fs/reiserfs/inode.c | 97
-rw-r--r--  fs/reiserfs/ioctl.c | 77
-rw-r--r--  fs/reiserfs/journal.c | 130
-rw-r--r--  fs/reiserfs/lock.c | 88
-rw-r--r--  fs/reiserfs/namei.c | 20
-rw-r--r--  fs/reiserfs/prints.c | 4
-rw-r--r--  fs/reiserfs/resize.c | 2
-rw-r--r--  fs/reiserfs/stree.c | 53
-rw-r--r--  fs/reiserfs/super.c | 52
-rw-r--r--  fs/reiserfs/xattr.c | 6
-rw-r--r--  fs/splice.c | 24
-rw-r--r--  fs/ubifs/recovery.c | 2
-rw-r--r--  fs/xattr_acl.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 9
-rw-r--r--  fs/xfs/linux-2.6/xfs_sysctl.c | 62
-rw-r--r--  fs/xfs/quota/xfs_dquot.h | 2
129 files changed, 3051 insertions, 1948 deletions
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 51c94e26a34..e777961939f 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
BUG_ON(!vcookie->fscache);
- if (PageFsCache(page)) {
- if (fscache_check_page_write(vcookie->fscache, page)) {
- if (!(gfp & __GFP_WAIT))
- return 0;
- fscache_wait_on_page_write(vcookie->fscache, page);
- }
-
- fscache_uncache_page(vcookie->fscache, page);
- ClearPageFsCache(page);
- }
-
- return 1;
+ return fscache_maybe_release_page(vcookie->fscache, page, gfp);
}
void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
fscache_wait_on_page_write(vcookie->fscache, page);
BUG_ON(!PageLocked(page));
fscache_uncache_page(vcookie->fscache, page);
- ClearPageFsCache(page);
}
}
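
The open-coded release logic above collapses into a single FS-Cache helper. A minimal sketch of what fscache_maybe_release_page() must do, reconstructed from the removed call-site code (the real helper is added to the FS-Cache core by other commits in this merge and may differ in detail):

/*
 * Sketch only: returns 1 if the netfs may release the page, 0 if the
 * caller must back off (the page is still being written to the cache
 * and the allocation context may not wait).
 */
static inline
int fscache_maybe_release_page(struct fscache_cookie *cookie,
			       struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		if (fscache_check_page_write(cookie, page)) {
			if (!(gfp & __GFP_WAIT))
				return 0;
			fscache_wait_on_page_write(cookie, page);
		}
		fscache_uncache_page(cookie, page);
	}
	return 1;
}

Note that the ClearPageFsCache() calls disappear from both hunks above, which suggests fscache_uncache_page() now clears PG_fscache itself; AFS below loses the same boilerplate, which is what motivates hoisting the pattern into the core.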
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 681c2a7b013..39b301662f2 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
fscache_wait_on_page_write(vnode->cache, page);
fscache_uncache_page(vnode->cache, page);
- ClearPageFsCache(page);
}
#endif
@@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
/* deny if page is being written to the cache and the caller hasn't
* elected to wait */
#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page)) {
- if (fscache_check_page_write(vnode->cache, page)) {
- if (!(gfp_flags & __GFP_WAIT)) {
- _leave(" = F [cache busy]");
- return 0;
- }
- fscache_wait_on_page_write(vnode->cache, page);
- }
-
- fscache_uncache_page(vnode->cache, page);
- ClearPageFsCache(page);
+ if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
+ _leave(" = F [cache busy]");
+ return 0;
}
#endif
diff --git a/fs/aio.c b/fs/aio.c
index 02a2c934057..c30dfc00610 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -15,6 +15,7 @@
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
+#include <linux/backing-dev.h>
#include <linux/uio.h>
#define DEBUG 0
@@ -32,6 +33,9 @@
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/hash.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -60,6 +64,14 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
+#define AIO_BATCH_HASH_BITS 3 /* allocated on-stack, so don't go crazy */
+#define AIO_BATCH_HASH_SIZE (1 << AIO_BATCH_HASH_BITS)
+struct aio_batch_entry {
+ struct hlist_node list;
+ struct address_space *mapping;
+};
+mempool_t *abe_pool;
+
static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
@@ -73,6 +85,8 @@ static int __init aio_setup(void)
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
aio_wq = create_workqueue("aio");
+ abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
+ BUG_ON(!abe_pool);
pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
@@ -1531,8 +1545,44 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
return 1;
}
+static void aio_batch_add(struct address_space *mapping,
+ struct hlist_head *batch_hash)
+{
+ struct aio_batch_entry *abe;
+ struct hlist_node *pos;
+ unsigned bucket;
+
+ bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
+ hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
+ if (abe->mapping == mapping)
+ return;
+ }
+
+ abe = mempool_alloc(abe_pool, GFP_KERNEL);
+ BUG_ON(!igrab(mapping->host));
+ abe->mapping = mapping;
+ hlist_add_head(&abe->list, &batch_hash[bucket]);
+ return;
+}
+
+static void aio_batch_free(struct hlist_head *batch_hash)
+{
+ struct aio_batch_entry *abe;
+ struct hlist_node *pos, *n;
+ int i;
+
+ for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
+ blk_run_address_space(abe->mapping);
+ iput(abe->mapping->host);
+ hlist_del(&abe->list);
+ mempool_free(abe, abe_pool);
+ }
+ }
+}
+
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- struct iocb *iocb)
+ struct iocb *iocb, struct hlist_head *batch_hash)
{
struct kiocb *req;
struct file *file;
@@ -1608,6 +1658,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
;
}
spin_unlock_irq(&ctx->ctx_lock);
+ if (req->ki_opcode == IOCB_CMD_PREAD ||
+ req->ki_opcode == IOCB_CMD_PREADV ||
+ req->ki_opcode == IOCB_CMD_PWRITE ||
+ req->ki_opcode == IOCB_CMD_PWRITEV)
+ aio_batch_add(file->f_mapping, batch_hash);
+
aio_put_req(req); /* drop extra ref to req */
return 0;
@@ -1635,6 +1691,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
struct kioctx *ctx;
long ret = 0;
int i;
+ struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
if (unlikely(nr < 0))
return -EINVAL;
@@ -1666,10 +1723,11 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
break;
}
- ret = io_submit_one(ctx, user_iocb, &tmp);
+ ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
if (ret)
break;
}
+ aio_batch_free(batch_hash);
put_ioctx(ctx);
return i ? i : ret;
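
The net effect is that blk_run_address_space() gets kicked once per distinct backing mapping per io_submit() call instead of once per request (the direct-io.c hunk at the end of this diff is the other half of the change). A standalone sketch of the dedup idea; hash_ptr() here is a crude stand-in for the kernel's version and the list handling is simplified:

/* userspace sketch of the aio batch hash; error handling omitted */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define BATCH_HASH_BITS 3
#define BATCH_HASH_SIZE (1u << BATCH_HASH_BITS)

struct batch_entry {
	struct batch_entry *next;
	const void *mapping;	/* stands in for struct address_space * */
};

static struct batch_entry *batch_hash[BATCH_HASH_SIZE];

/* crude stand-in for the kernel's hash_ptr(ptr, bits) */
static unsigned int hash_ptr(const void *p)
{
	return ((uintptr_t)p / sizeof(void *)) & (BATCH_HASH_SIZE - 1);
}

/* record a mapping once per batch, as aio_batch_add() does */
static void batch_add(const void *mapping)
{
	unsigned int b = hash_ptr(mapping);
	struct batch_entry *e;

	for (e = batch_hash[b]; e; e = e->next)
		if (e->mapping == mapping)
			return;		/* already queued for unplug */

	e = malloc(sizeof(*e));
	e->mapping = mapping;
	e->next = batch_hash[b];
	batch_hash[b] = e;
}

/* walk every bucket and "unplug" each mapping exactly once */
static void batch_free(void)
{
	unsigned int i;

	for (i = 0; i < BATCH_HASH_SIZE; i++) {
		while (batch_hash[i]) {
			struct batch_entry *e = batch_hash[i];

			batch_hash[i] = e->next;
			printf("unplug mapping %p\n", e->mapping);
			free(e);
		}
	}
}

int main(void)
{
	int a, b;

	batch_add(&a);
	batch_add(&b);
	batch_add(&a);	/* duplicate: ignored */
	batch_free();	/* prints two unplugs, not three */
	return 0;
}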
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index b9b3bb51b1e..d15ea1790bf 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -767,7 +767,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->start_stack = bprm->p;
- /* Now we do a little grungy work by mmaping the ELF image into
+ /* Now we do a little grungy work by mmapping the ELF image into
the correct location in memory. */
for(i = 0, elf_ppnt = elf_phdata;
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
diff --git a/fs/bio.c b/fs/bio.c
index 12da5db8682..76e6713abf9 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -272,7 +272,7 @@ EXPORT_SYMBOL(bio_init);
* for a &struct bio to become free. If a %NULL @bs is passed in, we will
* fall back to just using @kmalloc to allocate the required memory.
*
- * Note that the caller must set ->bi_destructor on succesful return
+ * Note that the caller must set ->bi_destructor on successful return
* of a bio, to do the appropriate freeing of the bio once the reference
* count drops to zero.
**/
@@ -1393,6 +1393,18 @@ void bio_check_pages_dirty(struct bio *bio)
}
}
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+void bio_flush_dcache_pages(struct bio *bi)
+{
+ int i;
+ struct bio_vec *bvec;
+
+ bio_for_each_segment(bvec, bi, i)
+ flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL(bio_flush_dcache_pages);
+#endif
+
/**
* bio_endio - end I/O on a bio
* @bio: bio
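
bio_flush_dcache_pages() is only built where the architecture defines ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE; elsewhere it presumably compiles away to a no-op. A hypothetical fragment showing how a block driver that fills bio pages by CPU copy might use it (my_driver_complete_read() is an illustration, not a real driver):

/* hypothetical driver fragment: flush D-cache aliases after filling
 * the bio's pages by CPU copy, then complete the request */
static void my_driver_complete_read(struct bio *bio, int error)
{
	if (!error)
		bio_flush_dcache_pages(bio);
	bio_endio(bio, error);
}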
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 8bed0557d88..73d6a735b8f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -405,7 +405,17 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
- return sync_blockdev(I_BDEV(filp->f_mapping->host));
+ struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+ int error;
+
+ error = sync_blockdev(bdev);
+ if (error)
+ return error;
+
+ error = blkdev_issue_flush(bdev, NULL);
+ if (error == -EOPNOTSUPP)
+ error = 0;
+ return error;
}
/*
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index ccbdcb54ec5..46bea0f4dc7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -256,7 +256,7 @@ out:
* Insert @em into @tree or perform a simple forward/backward merge with
* existing mappings. The extent_map struct passed in will be inserted
* into the tree directly, with an additional reference taken, or a
- * reference dropped if the merge attempt was sucessfull.
+ * reference dropped if the merge attempt was successful.
*/
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em)
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 431accd475a..27089311fbe 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -114,8 +114,9 @@ nomem_lookup_data:
/*
* attempt to look up the nominated node in this cache
+ * - return -ETIMEDOUT to be scheduled again
*/
-static void cachefiles_lookup_object(struct fscache_object *_object)
+static int cachefiles_lookup_object(struct fscache_object *_object)
{
struct cachefiles_lookup_data *lookup_data;
struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
cachefiles_attr_changed(&object->fscache);
- if (ret < 0) {
- printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n",
- ret);
+ if (ret < 0 && ret != -ETIMEDOUT) {
+ if (ret != -ENOBUFS)
+ printk(KERN_WARNING
+ "CacheFiles: Lookup failed error %d\n", ret);
fscache_object_lookup_error(&object->fscache);
}
_leave(" [%d]", ret);
+ return ret;
}
/*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
}
cache = object->fscache.cache;
+ fscache_object_destroy(&object->fscache);
kmem_cache_free(cachefiles_object_jar, object);
fscache_object_destroyed(cache);
}
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
if (oi_size == ni_size)
return 0;
- newattrs.ia_size = ni_size;
- newattrs.ia_valid = ATTR_SIZE;
-
cachefiles_begin_secure(cache, &saved_cred);
mutex_lock(&object->backer->d_inode->i_mutex);
+
+ /* if there's an extension to a partial page at the end of the backing
+ * file, we need to discard the partial page so that we pick up new
+ * data after it */
+ if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+ _debug("discard tail %llx", oi_size);
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = oi_size & PAGE_MASK;
+ ret = notify_change(object->backer, &newattrs);
+ if (ret < 0)
+ goto truncate_failed;
+ }
+
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = ni_size;
ret = notify_change(object->backer, &newattrs);
+
+truncate_failed:
mutex_unlock(&object->backer->d_inode->i_mutex);
cachefiles_end_secure(cache, saved_cred);
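
The tail discard only matters when the old size ends mid-page and the object is growing: truncating down to the page boundary first forces the stale partial page to be re-read from the netfs instead of served from cache. A standalone sketch of the arithmetic, assuming 4 KiB pages for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long oi_size = 0x1a80;	/* old i_size, mid-page */
	unsigned long long ni_size = 0x5000;	/* new, larger size */

	if ((oi_size & ~PAGE_MASK) && ni_size > oi_size) {
		/* first shrink to the page boundary below old EOF... */
		printf("discard tail: truncate to %#llx\n",
		       oi_size & PAGE_MASK);	/* prints 0x1000 */
	}
	/* ...then extend to the new size */
	printf("extend to %#llx\n", ni_size);
	return 0;
}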
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ce818ae39e..14ac4806e29 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -21,17 +21,81 @@
#include <linux/security.h>
#include "internal.h"
-static int cachefiles_wait_bit(void *flags)
+#define CACHEFILES_KEYBUF_SIZE 512
+
+/*
+ * dump debugging info about an object
+ */
+static noinline
+void __cachefiles_printk_object(struct cachefiles_object *object,
+ const char *prefix,
+ u8 *keybuf)
{
- schedule();
- return 0;
+ struct fscache_cookie *cookie;
+ unsigned keylen, loop;
+
+ printk(KERN_ERR "%sobject: OBJ%x\n",
+ prefix, object->fscache.debug_id);
+ printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
+ prefix, fscache_object_states[object->fscache.state],
+ object->fscache.flags, object->fscache.work.flags,
+ object->fscache.events,
+ object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
+ printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
+ prefix, object->fscache.n_ops, object->fscache.n_in_progress,
+ object->fscache.n_exclusive);
+ printk(KERN_ERR "%sparent=%p\n",
+ prefix, object->fscache.parent);
+
+ spin_lock(&object->fscache.lock);
+ cookie = object->fscache.cookie;
+ if (cookie) {
+ printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
+ prefix,
+ object->fscache.cookie,
+ object->fscache.cookie->parent,
+ object->fscache.cookie->netfs_data,
+ object->fscache.cookie->flags);
+ if (keybuf)
+ keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
+ CACHEFILES_KEYBUF_SIZE);
+ else
+ keylen = 0;
+ } else {
+ printk(KERN_ERR "%scookie=NULL\n", prefix);
+ keylen = 0;
+ }
+ spin_unlock(&object->fscache.lock);
+
+ if (keylen) {
+ printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
+ for (loop = 0; loop < keylen; loop++)
+ printk("%02x", keybuf[loop]);
+ printk("'\n");
+ }
+}
+
+/*
+ * dump debugging info about a pair of objects
+ */
+static noinline void cachefiles_printk_object(struct cachefiles_object *object,
+ struct cachefiles_object *xobject)
+{
+ u8 *keybuf;
+
+ keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
+ if (object)
+ __cachefiles_printk_object(object, "", keybuf);
+ if (xobject)
+ __cachefiles_printk_object(xobject, "x", keybuf);
+ kfree(keybuf);
}
/*
* record the fact that an object is now active
*/
-static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
- struct cachefiles_object *object)
+static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
+ struct cachefiles_object *object)
{
struct cachefiles_object *xobject;
struct rb_node **_p, *_parent = NULL;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
try_again:
write_lock(&cache->active_lock);
- if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
+ if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
+ printk(KERN_ERR "CacheFiles: Error: Object already active\n");
+ cachefiles_printk_object(object, NULL);
BUG();
+ }
dentry = object->dentry;
_p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ try_again:
rb_insert_color(&object->active_node, &cache->active_nodes);
write_unlock(&cache->active_lock);
- _leave("");
- return;
+ _leave(" = 0");
+ return 0;
/* an old object from a previous incarnation is hogging the slot - we
* need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ wait_for_old_object:
printk(KERN_ERR "\n");
printk(KERN_ERR "CacheFiles: Error:"
" Unexpected object collision\n");
- printk(KERN_ERR "xobject: OBJ%x\n",
- xobject->fscache.debug_id);
- printk(KERN_ERR "xobjstate=%s\n",
- fscache_object_states[xobject->fscache.state]);
- printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
- printk(KERN_ERR "xobjevent=%lx [%lx]\n",
- xobject->fscache.events, xobject->fscache.event_mask);
- printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
- xobject->fscache.n_ops, xobject->fscache.n_in_progress,
- xobject->fscache.n_exclusive);
- printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
- xobject->fscache.cookie,
- xobject->fscache.cookie->parent,
- xobject->fscache.cookie->netfs_data,
- xobject->fscache.cookie->flags);
- printk(KERN_ERR "xparent=%p\n",
- xobject->fscache.parent);
- printk(KERN_ERR "object: OBJ%x\n",
- object->fscache.debug_id);
- printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
- object->fscache.cookie,
- object->fscache.cookie->parent,
- object->fscache.cookie->netfs_data,
- object->fscache.cookie->flags);
- printk(KERN_ERR "parent=%p\n",
- object->fscache.parent);
+ cachefiles_printk_object(object, xobject);
BUG();
}
atomic_inc(&xobject->usage);
write_unlock(&cache->active_lock);
- _debug(">>> wait");
- wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE,
- cachefiles_wait_bit, TASK_UNINTERRUPTIBLE);
- _debug("<<< waited");
+ if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+ wait_queue_head_t *wq;
+
+ signed long timeout = 60 * HZ;
+ wait_queue_t wait;
+ bool requeue;
+
+ /* if the object we're waiting for is queued for processing,
+ * then just put ourselves on the queue behind it */
+ if (slow_work_is_queued(&xobject->fscache.work)) {
+ _debug("queue OBJ%x behind OBJ%x immediately",
+ object->fscache.debug_id,
+ xobject->fscache.debug_id);
+ goto requeue;
+ }
+
+ /* otherwise we sleep until either the object we're waiting for
+ * is done, or the slow-work facility wants the thread back to
+ * do other work */
+ wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
+ init_wait(&wait);
+ requeue = false;
+ do {
+ prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
+ break;
+ requeue = slow_work_sleep_till_thread_needed(
+ &object->fscache.work, &timeout);
+ } while (timeout > 0 && !requeue);
+ finish_wait(wq, &wait);
+
+ if (requeue &&
+ test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+ _debug("queue OBJ%x behind OBJ%x after wait",
+ object->fscache.debug_id,
+ xobject->fscache.debug_id);
+ goto requeue;
+ }
+
+ if (timeout <= 0) {
+ printk(KERN_ERR "\n");
+ printk(KERN_ERR "CacheFiles: Error: Overlong"
+ " wait for old active object to go away\n");
+ cachefiles_printk_object(object, xobject);
+ goto requeue;
+ }
+ }
+
+ ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
cache->cache.ops->put_object(&xobject->fscache);
goto try_again;
+
+requeue:
+ clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
+ cache->cache.ops->put_object(&xobject->fscache);
+ _leave(" = -ETIMEDOUT");
+ return -ETIMEDOUT;
}
/*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
dir = dget_parent(object->dentry);
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
ret = cachefiles_bury_object(cache, dir, object->dentry);
dput(dir);
@@ -307,7 +400,7 @@ lookup_again:
/* search the current directory for the element name */
_debug("lookup '%s'", name);
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
start = jiffies;
next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ lookup_again:
}
/* note that we're now using this object */
- cachefiles_mark_object_active(cache, object);
+ ret = cachefiles_mark_object_active(cache, object);
mutex_unlock(&dir->d_inode->i_mutex);
dput(dir);
dir = NULL;
+ if (ret == -ETIMEDOUT)
+ goto mark_active_timed_out;
+
_debug("=== OBTAINED_OBJECT ===");
if (object->new) {
@@ -467,6 +563,10 @@ create_error:
cachefiles_io_error(cache, "Create/mkdir failed");
goto error;
+mark_active_timed_out:
+ _debug("mark active timed out");
+ goto release_dentry;
+
check_error:
_debug("check error %d", ret);
write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ check_error:
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
write_unlock(&cache->active_lock);
-
+release_dentry:
dput(object->dentry);
object->dentry = NULL;
goto error_out;
@@ -495,9 +595,6 @@ error:
error_out2:
dput(dir);
error_out:
- if (ret == -ENOSPC)
- ret = -ENOBUFS;
-
_leave(" = error %d", -ret);
return ret;
}
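
Stripped of the slow-work cooperation (slow_work_is_queued() and slow_work_sleep_till_thread_needed() let the worker thread be reclaimed while waiting), the core of the new wait is the standard bit-waitqueue idiom with a timeout, roughly:

/* simplified sketch: sleep until the old object's ACTIVE bit clears
 * or 60 seconds elapse; not the full requeue-capable version above */
wait_queue_head_t *wq = bit_waitqueue(&xobject->flags,
				      CACHEFILES_OBJECT_ACTIVE);
signed long timeout = 60 * HZ;
DEFINE_WAIT(wait);

while (timeout > 0) {
	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
		break;
	timeout = schedule_timeout(timeout);
}
finish_wait(wq, &wait);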
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index a69787e7dd9..a6c8c6fe8df 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -11,6 +11,7 @@
#include <linux/mount.h>
#include <linux/file.h>
+#include <linux/ima.h>
#include "internal.h"
/*
@@ -40,8 +41,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
_debug("--- monitor %p %lx ---", page, page->flags);
- if (!PageUptodate(page) && !PageError(page))
- dump_stack();
+ if (!PageUptodate(page) && !PageError(page)) {
+ /* unlocked, not uptodate and not erroneous? */
+ _debug("page probably truncated");
+ }
/* remove from the waitqueue */
list_del(&wait->task_list);
@@ -61,6 +64,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
}
/*
+ * handle a probably truncated page
+ * - check to see if the page is still relevant and reissue the read if
+ * possible
+ * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
+ * must wait again and 0 if successful
+ */
+static int cachefiles_read_reissue(struct cachefiles_object *object,
+ struct cachefiles_one_read *monitor)
+{
+ struct address_space *bmapping = object->backer->d_inode->i_mapping;
+ struct page *backpage = monitor->back_page, *backpage2;
+ int ret;
+
+ kenter("{ino=%lx},{%lx,%lx}",
+ object->backer->d_inode->i_ino,
+ backpage->index, backpage->flags);
+
+ /* skip if the page was truncated away completely */
+ if (backpage->mapping != bmapping) {
+ kleave(" = -ENODATA [mapping]");
+ return -ENODATA;
+ }
+
+ backpage2 = find_get_page(bmapping, backpage->index);
+ if (!backpage2) {
+ kleave(" = -ENODATA [gone]");
+ return -ENODATA;
+ }
+
+ if (backpage != backpage2) {
+ put_page(backpage2);
+ kleave(" = -ENODATA [different]");
+ return -ENODATA;
+ }
+
+ /* the page is still there and we already have a ref on it, so we don't
+ * need a second */
+ put_page(backpage2);
+
+ INIT_LIST_HEAD(&monitor->op_link);
+ add_page_wait_queue(backpage, &monitor->monitor);
+
+ if (trylock_page(backpage)) {
+ ret = -EIO;
+ if (PageError(backpage))
+ goto unlock_discard;
+ ret = 0;
+ if (PageUptodate(backpage))
+ goto unlock_discard;
+
+ kdebug("reissue read");
+ ret = bmapping->a_ops->readpage(NULL, backpage);
+ if (ret < 0)
+ goto unlock_discard;
+ }
+
+ /* but the page may have been read before the monitor was installed, so
+ * the monitor may miss the event - so we have to ensure that we do get
+ * one in such a case */
+ if (trylock_page(backpage)) {
+ _debug("jumpstart %p {%lx}", backpage, backpage->flags);
+ unlock_page(backpage);
+ }
+
+ /* it'll reappear on the todo list */
+ kleave(" = -EINPROGRESS");
+ return -EINPROGRESS;
+
+unlock_discard:
+ unlock_page(backpage);
+ spin_lock_irq(&object->work_lock);
+ list_del(&monitor->op_link);
+ spin_unlock_irq(&object->work_lock);
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
* copy data from backing pages to netfs pages to complete a read operation
* - driven by FS-Cache's thread pool
*/
@@ -92,20 +173,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
_debug("- copy {%lu}", monitor->back_page->index);
- error = -EIO;
+ recheck:
if (PageUptodate(monitor->back_page)) {
copy_highpage(monitor->netfs_page, monitor->back_page);
pagevec_add(&pagevec, monitor->netfs_page);
fscache_mark_pages_cached(monitor->op, &pagevec);
error = 0;
- }
-
- if (error)
+ } else if (!PageError(monitor->back_page)) {
+ /* the page has probably been truncated */
+ error = cachefiles_read_reissue(object, monitor);
+ if (error == -EINPROGRESS)
+ goto next;
+ goto recheck;
+ } else {
cachefiles_io_error_obj(
object,
"Readpage failed on backing file %lx",
(unsigned long) monitor->back_page->flags);
+ error = -EIO;
+ }
page_cache_release(monitor->back_page);
@@ -114,6 +201,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
fscache_put_retrieval(op);
kfree(monitor);
+ next:
/* let the thread pool have some air occasionally */
max--;
if (max < 0 || need_resched()) {
@@ -333,7 +421,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
- op->op.flags = FSCACHE_OP_FAST;
+ op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+ op->op.flags |= FSCACHE_OP_FAST;
op->op.processor = cachefiles_read_copier;
pagevec_init(&pagevec, 0);
@@ -639,7 +728,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
pagevec_init(&pagevec, 0);
- op->op.flags = FSCACHE_OP_FAST;
+ op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+ op->op.flags |= FSCACHE_OP_FAST;
op->op.processor = cachefiles_read_copier;
INIT_LIST_HEAD(&backpages);
@@ -801,7 +891,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
struct cachefiles_cache *cache;
mm_segment_t old_fs;
struct file *file;
- loff_t pos;
+ loff_t pos, eof;
+ size_t len;
void *data;
int ret;
@@ -832,18 +923,33 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
if (IS_ERR(file)) {
ret = PTR_ERR(file);
} else {
+ ima_counts_get(file);
ret = -EIO;
if (file->f_op->write) {
pos = (loff_t) page->index << PAGE_SHIFT;
+
+ /* we mustn't write more data than we have, so we have
+ * to beware of a partial page at EOF */
+ eof = object->fscache.store_limit_l;
+ len = PAGE_SIZE;
+ if (eof & ~PAGE_MASK) {
+ ASSERTCMP(pos, <, eof);
+ if (eof - pos < PAGE_SIZE) {
+ _debug("cut short %llx to %llx",
+ pos, eof);
+ len = eof - pos;
+ ASSERTCMP(pos + len, ==, eof);
+ }
+ }
+
data = kmap(page);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = file->f_op->write(
- file, (const void __user *) data, PAGE_SIZE,
- &pos);
+ file, (const void __user *) data, len, &pos);
set_fs(old_fs);
kunmap(page);
- if (ret != PAGE_SIZE)
+ if (ret != len)
ret = -EIO;
}
fput(file);
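
The clamp keeps the backing file from growing past the netfs object's store limit when the final page is only partially valid. A standalone sketch of the length computation, again assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long eof = 0x2a40;  /* store_limit_l, mid-page */
	unsigned long index = 2;          /* final page of the object */
	unsigned long long pos = (unsigned long long)index << PAGE_SHIFT;
	unsigned long long len = PAGE_SIZE;

	if ((eof & ~PAGE_MASK) && eof - pos < PAGE_SIZE)
		len = eof - pos;          /* short write: 0xa40 bytes */

	printf("pos=%#llx len=%#llx\n", pos, len);
	return 0;
}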
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 145540a316a..094ea65afc8 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,12 @@
+Version 1.61
+------------
+Fix append problem to Samba servers (files opened with O_APPEND could
+have duplicated data). Fix oops in cifs_lookup. Workaround problem
+mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
+Disable use of server inode numbers when server only
+partially supports them (e.g. for one server querying inode numbers on
+FindFirst fails but QPathInfo queries work).
+
Version 1.60
-------------
Fix memory leak in reconnect. Fix oops in DFS mount error path.
diff --git a/fs/cifs/README b/fs/cifs/README
index 79c1a93400b..a727b7cb075 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -423,7 +423,7 @@ A partial list of the supported mount options follows:
source name to use to represent the client netbios machine
name when doing the RFC1001 netbios session initialize.
direct Do not do inode data caching on files opened on this mount.
- This precludes mmaping files on this mount. In some cases
+ This precludes mmapping files on this mount. In some cases
with fast networks and little or no caching benefits on the
client (e.g. when the application is doing large sequential
reads bigger than page size without rereading the same data)
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9a5e4f5f312..29f1da761bb 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1037,7 +1037,7 @@ init_cifs(void)
if (rc)
goto out_unregister_key_type;
#endif
- rc = slow_work_register_user();
+ rc = slow_work_register_user(THIS_MODULE);
if (rc)
goto out_unregister_resolver_key;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 5d0fde18039..4b35f7ec058 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -39,7 +39,7 @@
/*
* MAX_REQ is the maximum number of requests that WE will send
- * on one socket concurently. It also matches the most common
+ * on one socket concurrently. It also matches the most common
* value of max multiplex returned by servers. We may
* eventually want to use the negotiated value (in case
* future servers can handle more) when we are more confident that
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 2d07f890a84..3877737f96a 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1227,7 +1227,7 @@ typedef struct smb_com_setattr_rsp {
/* empty wct response to setattr */
/*******************************************************/
-/* NT Transact structure defintions follow */
+/* NT Transact structure definitions follow */
/* Currently only ioctl, acl (get security descriptor) */
/* and notify are implemented */
/*******************************************************/
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 627a60a6c1b..1f42f772865 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -214,8 +214,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
posix_flags |= SMB_O_EXCL;
if (oflags & O_TRUNC)
posix_flags |= SMB_O_TRUNC;
- if (oflags & O_APPEND)
- posix_flags |= SMB_O_APPEND;
if (oflags & O_SYNC)
posix_flags |= SMB_O_SYNC;
if (oflags & O_DIRECTORY)
@@ -643,9 +641,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* O_EXCL: optimize away the lookup, but don't hash the dentry. Let
* the VFS handle the create.
*/
- if (nd->flags & LOOKUP_EXCL) {
+ if (nd && (nd->flags & LOOKUP_EXCL)) {
d_instantiate(direntry, NULL);
- return 0;
+ return NULL;
}
/* can not grab the rename sem here since it would
@@ -675,7 +673,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
* reduction in network traffic in the other paths.
*/
if (pTcon->unix_ext) {
- if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
+ if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cababd8a52d..cf18ee76559 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -914,8 +914,8 @@ undo_setattr:
/*
* If dentry->d_inode is null (usually meaning the cached dentry
* is a negative dentry) then we would attempt a standard SMB delete, but
- * if that fails we can not attempt the fall back mechanisms on EACESS
- * but will return the EACESS to the caller. Note that the VFS does not call
+ * if that fails we can not attempt the fall back mechanisms on EACCESS
+ * but will return the EACCESS to the caller. Note that the VFS does not call
* unlink on negative dentries currently.
*/
int cifs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index 224a1f47896..b6b6dcb500b 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -371,7 +371,7 @@ E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
smbhash(p24 + 16, c8, p21 + 14, 1);
}
-#if 0 /* currently unsued */
+#if 0 /* currently unused */
static void
D_P16(unsigned char *p14, unsigned char *in, unsigned char *out)
{
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index 43c96ce2961..c6405ce3c50 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -17,28 +17,25 @@ static struct ctl_table_header *fs_table_header;
static ctl_table coda_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "timeout",
.data = &coda_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "hard",
.data = &coda_hard,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "fake_statfs",
.data = &coda_fake_statfs,
.maxlen = sizeof(int),
.mode = 0600,
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{}
};
@@ -46,7 +43,6 @@ static ctl_table coda_table[] = {
#ifdef CONFIG_SYSCTL
static ctl_table fs_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "coda",
.mode = 0555,
.child = coda_table
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index d84e7058c29..2346895b3a7 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -246,428 +246,6 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
return err;
}
-#ifdef CONFIG_NET
-static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct compat_timeval __user *up = compat_ptr(arg);
- struct timeval ktv;
- mm_segment_t old_fs = get_fs();
- int err;
-
- set_fs(KERNEL_DS);
- err = sys_ioctl(fd, cmd, (unsigned long)&ktv);
- set_fs(old_fs);
- if(!err) {
- err = put_user(ktv.tv_sec, &up->tv_sec);
- err |= __put_user(ktv.tv_usec, &up->tv_usec);
- }
- return err;
-}
-
-static int do_siocgstampns(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct compat_timespec __user *up = compat_ptr(arg);
- struct timespec kts;
- mm_segment_t old_fs = get_fs();
- int err;
-
- set_fs(KERNEL_DS);
- err = sys_ioctl(fd, cmd, (unsigned long)&kts);
- set_fs(old_fs);
- if (!err) {
- err = put_user(kts.tv_sec, &up->tv_sec);
- err |= __put_user(kts.tv_nsec, &up->tv_nsec);
- }
- return err;
-}
-
-struct ifmap32 {
- compat_ulong_t mem_start;
- compat_ulong_t mem_end;
- unsigned short base_addr;
- unsigned char irq;
- unsigned char dma;
- unsigned char port;
-};
-
-struct ifreq32 {
-#define IFHWADDRLEN 6
-#define IFNAMSIZ 16
- union {
- char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
- } ifr_ifrn;
- union {
- struct sockaddr ifru_addr;
- struct sockaddr ifru_dstaddr;
- struct sockaddr ifru_broadaddr;
- struct sockaddr ifru_netmask;
- struct sockaddr ifru_hwaddr;
- short ifru_flags;
- compat_int_t ifru_ivalue;
- compat_int_t ifru_mtu;
- struct ifmap32 ifru_map;
- char ifru_slave[IFNAMSIZ]; /* Just fits the size */
- char ifru_newname[IFNAMSIZ];
- compat_caddr_t ifru_data;
- /* XXXX? ifru_settings should be here */
- } ifr_ifru;
-};
-
-struct ifconf32 {
- compat_int_t ifc_len; /* size of buffer */
- compat_caddr_t ifcbuf;
-};
-
-static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct ifreq __user *uifr;
- int err;
-
- uifr = compat_alloc_user_space(sizeof(struct ifreq));
- if (copy_in_user(uifr, compat_ptr(arg), sizeof(struct ifreq32)))
- return -EFAULT;
-
- err = sys_ioctl(fd, SIOCGIFNAME, (unsigned long)uifr);
- if (err)
- return err;
-
- if (copy_in_user(compat_ptr(arg), uifr, sizeof(struct ifreq32)))
- return -EFAULT;
-
- return 0;
-}
-
-static int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct ifconf32 ifc32;
- struct ifconf ifc;
- struct ifconf __user *uifc;
- struct ifreq32 __user *ifr32;
- struct ifreq __user *ifr;
- unsigned int i, j;
- int err;
-
- if (copy_from_user(&ifc32, compat_ptr(arg), sizeof(struct ifconf32)))
- return -EFAULT;
-
- if (ifc32.ifcbuf == 0) {
- ifc32.ifc_len = 0;
- ifc.ifc_len = 0;
- ifc.ifc_req = NULL;
- uifc = compat_alloc_user_space(sizeof(struct ifconf));
- } else {
- size_t len =((ifc32.ifc_len / sizeof (struct ifreq32)) + 1) *
- sizeof (struct ifreq);
- uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
- ifc.ifc_len = len;
- ifr = ifc.ifc_req = (void __user *)(uifc + 1);
- ifr32 = compat_ptr(ifc32.ifcbuf);
- for (i = 0; i < ifc32.ifc_len; i += sizeof (struct ifreq32)) {
- if (copy_in_user(ifr, ifr32, sizeof(struct ifreq32)))
- return -EFAULT;
- ifr++;
- ifr32++;
- }
- }
- if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
- return -EFAULT;
-
- err = sys_ioctl (fd, SIOCGIFCONF, (unsigned long)uifc);
- if (err)
- return err;
-
- if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
- return -EFAULT;
-
- ifr = ifc.ifc_req;
- ifr32 = compat_ptr(ifc32.ifcbuf);
- for (i = 0, j = 0;
- i + sizeof (struct ifreq32) <= ifc32.ifc_len && j < ifc.ifc_len;
- i += sizeof (struct ifreq32), j += sizeof (struct ifreq)) {
- if (copy_in_user(ifr32, ifr, sizeof (struct ifreq32)))
- return -EFAULT;
- ifr32++;
- ifr++;
- }
-
- if (ifc32.ifcbuf == 0) {
- /* Translate from 64-bit structure multiple to
- * a 32-bit one.
- */
- i = ifc.ifc_len;
- i = ((i / sizeof(struct ifreq)) * sizeof(struct ifreq32));
- ifc32.ifc_len = i;
- } else {
- ifc32.ifc_len = i;
- }
- if (copy_to_user(compat_ptr(arg), &ifc32, sizeof(struct ifconf32)))
- return -EFAULT;
-
- return 0;
-}
-
-static int ethtool_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct ifreq __user *ifr;
- struct ifreq32 __user *ifr32;
- u32 data;
- void __user *datap;
-
- ifr = compat_alloc_user_space(sizeof(*ifr));
- ifr32 = compat_ptr(arg);
-
- if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
- return -EFAULT;
-
- if (get_user(data, &ifr32->ifr_ifru.ifru_data))
- return -EFAULT;
-
- datap = compat_ptr(data);
- if (put_user(datap, &ifr->ifr_ifru.ifru_data))
- return -EFAULT;
-
- return sys_ioctl(fd, cmd, (unsigned long) ifr);
-}
-
-static int bond_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct ifreq kifr;
- struct ifreq __user *uifr;
- struct ifreq32 __user *ifr32 = compat_ptr(arg);
- mm_segment_t old_fs;
- int err;
- u32 data;
- void __user *datap;
-
- switch (cmd) {
- case SIOCBONDENSLAVE:
- case SIOCBONDRELEASE:
- case SIOCBONDSETHWADDR:
- case SIOCBONDCHANGEACTIVE:
- if (copy_from_user(&kifr, ifr32, sizeof(struct ifreq32)))
- return -EFAULT;
-
- old_fs = get_fs();
- set_fs (KERNEL_DS);
- err = sys_ioctl (fd, cmd, (unsigned long)&kifr);
- set_fs (old_fs);
-
- return err;
- case SIOCBONDSLAVEINFOQUERY:
- case SIOCBONDINFOQUERY:
- uifr = compat_alloc_user_space(sizeof(*uifr));
- if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
- return -EFAULT;
-
- if (get_user(data, &ifr32->ifr_ifru.ifru_data))
- return -EFAULT;
-
- datap = compat_ptr(data);
- if (put_user(datap, &uifr->ifr_ifru.ifru_data))
- return -EFAULT;
-
- return sys_ioctl (fd, cmd, (unsigned long)uifr);
- default:
- return -EINVAL;
- };
-}
-
-static int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct ifreq __user *u_ifreq64;
- struct ifreq32 __user *u_ifreq32 = compat_ptr(arg);
- char tmp_buf[IFNAMSIZ];
- void __user *data64;
- u32 data32;
-
- if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
- IFNAMSIZ))
- return -EFAULT;
- if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
- return -EFAULT;
- data64 = compat_ptr(data32);
-
- u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
-
- /* Don't check these user accesses, just let that get trapped
- * in the ioctl handler instead.
- */
- if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
- IFNAMSIZ))
- return -EFAULT;
- if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
- return -EFAULT;
-
- return sys_ioctl(fd, cmd, (unsigned long) u_ifreq64);
-}
-
-static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct ifreq ifr;
- struct ifreq32 __user *uifr32;
- struct ifmap32 __user *uifmap32;
- mm_segment_t old_fs;
- int err;
-
- uifr32 = compat_ptr(arg);
- uifmap32 = &uifr32->ifr_ifru.ifru_map;
- switch (cmd) {
- case SIOCSIFMAP:
- err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
- err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
- err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
- err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
- err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
- err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
- err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
- if (err)
- return -EFAULT;
- break;
- case SIOCSHWTSTAMP:
- if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
- return -EFAULT;
- ifr.ifr_data = compat_ptr(uifr32->ifr_ifru.ifru_data);
- break;
- default:
- if (copy_from_user(&ifr, uifr32, sizeof(*uifr32)))
- return -EFAULT;
- break;
- }
- old_fs = get_fs();
- set_fs (KERNEL_DS);
- err = sys_ioctl (fd, cmd, (unsigned long)&ifr);
- set_fs (old_fs);
- if (!err) {
- switch (cmd) {
- /* TUNSETIFF is defined as _IOW, it should be _IORW
- * as the data is copied back to user space, but that
- * cannot be fixed without breaking all existing apps.
- */
- case TUNSETIFF:
- case TUNGETIFF:
- case SIOCGIFFLAGS:
- case SIOCGIFMETRIC:
- case SIOCGIFMTU:
- case SIOCGIFMEM:
- case SIOCGIFHWADDR:
- case SIOCGIFINDEX:
- case SIOCGIFADDR:
- case SIOCGIFBRDADDR:
- case SIOCGIFDSTADDR:
- case SIOCGIFNETMASK:
- case SIOCGIFTXQLEN:
- if (copy_to_user(uifr32, &ifr, sizeof(*uifr32)))
- return -EFAULT;
- break;
- case SIOCGIFMAP:
- err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
- err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
- err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
- err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
- err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
- err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
- err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
- if (err)
- err = -EFAULT;
- break;
- }
- }
- return err;
-}
-
-struct rtentry32 {
- u32 rt_pad1;
- struct sockaddr rt_dst; /* target address */
- struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
- struct sockaddr rt_genmask; /* target network mask (IP) */
- unsigned short rt_flags;
- short rt_pad2;
- u32 rt_pad3;
- unsigned char rt_tos;
- unsigned char rt_class;
- short rt_pad4;
- short rt_metric; /* +1 for binary compatibility! */
- /* char * */ u32 rt_dev; /* forcing the device at add */
- u32 rt_mtu; /* per route MTU/Window */
- u32 rt_window; /* Window clamping */
- unsigned short rt_irtt; /* Initial RTT */
-
-};
-
-struct in6_rtmsg32 {
- struct in6_addr rtmsg_dst;
- struct in6_addr rtmsg_src;
- struct in6_addr rtmsg_gateway;
- u32 rtmsg_type;
- u16 rtmsg_dst_len;
- u16 rtmsg_src_len;
- u32 rtmsg_metric;
- u32 rtmsg_info;
- u32 rtmsg_flags;
- s32 rtmsg_ifindex;
-};
-
-static int routing_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- int ret;
- void *r = NULL;
- struct in6_rtmsg r6;
- struct rtentry r4;
- char devname[16];
- u32 rtdev;
- mm_segment_t old_fs = get_fs();
-
- struct socket *mysock = sockfd_lookup(fd, &ret);
-
- if (mysock && mysock->sk && mysock->sk->sk_family == AF_INET6) { /* ipv6 */
- struct in6_rtmsg32 __user *ur6 = compat_ptr(arg);
- ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst),
- 3 * sizeof(struct in6_addr));
- ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type));
- ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
- ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
- ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric));
- ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info));
- ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags));
- ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
-
- r = (void *) &r6;
- } else { /* ipv4 */
- struct rtentry32 __user *ur4 = compat_ptr(arg);
- ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst),
- 3 * sizeof(struct sockaddr));
- ret |= __get_user (r4.rt_flags, &(ur4->rt_flags));
- ret |= __get_user (r4.rt_metric, &(ur4->rt_metric));
- ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu));
- ret |= __get_user (r4.rt_window, &(ur4->rt_window));
- ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt));
- ret |= __get_user (rtdev, &(ur4->rt_dev));
- if (rtdev) {
- ret |= copy_from_user (devname, compat_ptr(rtdev), 15);
- r4.rt_dev = devname; devname[15] = 0;
- } else
- r4.rt_dev = NULL;
-
- r = (void *) &r4;
- }
-
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
- set_fs (KERNEL_DS);
- ret = sys_ioctl (fd, cmd, (unsigned long) r);
- set_fs (old_fs);
-
-out:
- if (mysock)
- sockfd_put(mysock);
-
- return ret;
-}
-#endif
-
#ifdef CONFIG_BLOCK
typedef struct sg_io_hdr32 {
compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
@@ -1212,170 +790,6 @@ static int do_smb_getmountuid(unsigned int fd, unsigned int cmd, unsigned long a
return err;
}
-struct atmif_sioc32 {
- compat_int_t number;
- compat_int_t length;
- compat_caddr_t arg;
-};
-
-struct atm_iobuf32 {
- compat_int_t length;
- compat_caddr_t buffer;
-};
-
-#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct atmif_sioc32)
-#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct atm_iobuf32)
-#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct atmif_sioc32)
-#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct atmif_sioc32)
-#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct atmif_sioc32)
-#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct atmif_sioc32)
-#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct atmif_sioc32)
-#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct atmif_sioc32)
-#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct atmif_sioc32)
-#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct atmif_sioc32)
-#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct atmif_sioc32)
-#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct atmif_sioc32)
-#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct atmif_sioc32)
-#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct atmif_sioc32)
-#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct atmif_sioc32)
-#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct atmif_sioc32)
-#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct atmif_sioc32)
-
-static struct {
- unsigned int cmd32;
- unsigned int cmd;
-} atm_ioctl_map[] = {
- { ATM_GETLINKRATE32, ATM_GETLINKRATE },
- { ATM_GETNAMES32, ATM_GETNAMES },
- { ATM_GETTYPE32, ATM_GETTYPE },
- { ATM_GETESI32, ATM_GETESI },
- { ATM_GETADDR32, ATM_GETADDR },
- { ATM_RSTADDR32, ATM_RSTADDR },
- { ATM_ADDADDR32, ATM_ADDADDR },
- { ATM_DELADDR32, ATM_DELADDR },
- { ATM_GETCIRANGE32, ATM_GETCIRANGE },
- { ATM_SETCIRANGE32, ATM_SETCIRANGE },
- { ATM_SETESI32, ATM_SETESI },
- { ATM_SETESIF32, ATM_SETESIF },
- { ATM_GETSTAT32, ATM_GETSTAT },
- { ATM_GETSTATZ32, ATM_GETSTATZ },
- { ATM_GETLOOP32, ATM_GETLOOP },
- { ATM_SETLOOP32, ATM_SETLOOP },
- { ATM_QUERYLOOP32, ATM_QUERYLOOP }
-};
-
-#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
-
-static int do_atm_iobuf(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct atm_iobuf __user *iobuf;
- struct atm_iobuf32 __user *iobuf32;
- u32 data;
- void __user *datap;
- int len, err;
-
- iobuf = compat_alloc_user_space(sizeof(*iobuf));
- iobuf32 = compat_ptr(arg);
-
- if (get_user(len, &iobuf32->length) ||
- get_user(data, &iobuf32->buffer))
- return -EFAULT;
- datap = compat_ptr(data);
- if (put_user(len, &iobuf->length) ||
- put_user(datap, &iobuf->buffer))
- return -EFAULT;
-
- err = sys_ioctl(fd, cmd, (unsigned long)iobuf);
-
- if (!err) {
- if (copy_in_user(&iobuf32->length, &iobuf->length,
- sizeof(int)))
- err = -EFAULT;
- }
-
- return err;
-}
-
-static int do_atmif_sioc(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct atmif_sioc __user *sioc;
- struct atmif_sioc32 __user *sioc32;
- u32 data;
- void __user *datap;
- int err;
-
- sioc = compat_alloc_user_space(sizeof(*sioc));
- sioc32 = compat_ptr(arg);
-
- if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
- get_user(data, &sioc32->arg))
- return -EFAULT;
- datap = compat_ptr(data);
- if (put_user(datap, &sioc->arg))
- return -EFAULT;
-
- err = sys_ioctl(fd, cmd, (unsigned long) sioc);
-
- if (!err) {
- if (copy_in_user(&sioc32->length, &sioc->length,
- sizeof(int)))
- err = -EFAULT;
- }
- return err;
-}
-
-static int do_atm_ioctl(unsigned int fd, unsigned int cmd32, unsigned long arg)
-{
- int i;
- unsigned int cmd = 0;
-
- switch (cmd32) {
- case SONET_GETSTAT:
- case SONET_GETSTATZ:
- case SONET_GETDIAG:
- case SONET_SETDIAG:
- case SONET_CLRDIAG:
- case SONET_SETFRAMING:
- case SONET_GETFRAMING:
- case SONET_GETFRSENSE:
- return do_atmif_sioc(fd, cmd32, arg);
- }
-
- for (i = 0; i < NR_ATM_IOCTL; i++) {
- if (cmd32 == atm_ioctl_map[i].cmd32) {
- cmd = atm_ioctl_map[i].cmd;
- break;
- }
- }
- if (i == NR_ATM_IOCTL)
- return -EINVAL;
-
- switch (cmd) {
- case ATM_GETNAMES:
- return do_atm_iobuf(fd, cmd, arg);
-
- case ATM_GETLINKRATE:
- case ATM_GETTYPE:
- case ATM_GETESI:
- case ATM_GETADDR:
- case ATM_RSTADDR:
- case ATM_ADDADDR:
- case ATM_DELADDR:
- case ATM_GETCIRANGE:
- case ATM_SETCIRANGE:
- case ATM_SETESI:
- case ATM_SETESIF:
- case ATM_GETSTAT:
- case ATM_GETSTATZ:
- case ATM_GETLOOP:
- case ATM_SETLOOP:
- case ATM_QUERYLOOP:
- return do_atmif_sioc(fd, cmd, arg);
- }
-
- return -EINVAL;
-}
-
static __used int
ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
{
@@ -1718,21 +1132,6 @@ static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd, unsigned long a
return sys_ioctl(fd, cmd, (unsigned long)tdata);
}
-/* Since old style bridge ioctl's endup using SIOCDEVPRIVATE
- * for some operations; this forces use of the newer bridge-utils that
- * use compatible ioctls
- */
-static int old_bridge_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- u32 tmp;
-
- if (get_user(tmp, (u32 __user *) arg))
- return -EFAULT;
- if (tmp == BRCTL_GET_VERSION)
- return BRCTL_VERSION + 1;
- return -EINVAL;
-}
-
#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
@@ -1979,18 +1378,6 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
#endif
-/* Big T */
-COMPATIBLE_IOCTL(TUNSETNOCSUM)
-COMPATIBLE_IOCTL(TUNSETDEBUG)
-COMPATIBLE_IOCTL(TUNSETPERSIST)
-COMPATIBLE_IOCTL(TUNSETOWNER)
-COMPATIBLE_IOCTL(TUNSETLINK)
-COMPATIBLE_IOCTL(TUNSETGROUP)
-COMPATIBLE_IOCTL(TUNGETFEATURES)
-COMPATIBLE_IOCTL(TUNSETOFFLOAD)
-COMPATIBLE_IOCTL(TUNSETTXFILTER)
-COMPATIBLE_IOCTL(TUNGETSNDBUF)
-COMPATIBLE_IOCTL(TUNSETSNDBUF)
/* Big V */
COMPATIBLE_IOCTL(VT_SETMODE)
COMPATIBLE_IOCTL(VT_GETMODE)
@@ -2032,30 +1419,6 @@ COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
COMPATIBLE_IOCTL(MTIOCTOP)
/* Socket level stuff */
COMPATIBLE_IOCTL(FIOQSIZE)
-COMPATIBLE_IOCTL(FIOSETOWN)
-COMPATIBLE_IOCTL(SIOCSPGRP)
-COMPATIBLE_IOCTL(FIOGETOWN)
-COMPATIBLE_IOCTL(SIOCGPGRP)
-COMPATIBLE_IOCTL(SIOCATMARK)
-COMPATIBLE_IOCTL(SIOCSIFLINK)
-COMPATIBLE_IOCTL(SIOCSIFENCAP)
-COMPATIBLE_IOCTL(SIOCGIFENCAP)
-COMPATIBLE_IOCTL(SIOCSIFNAME)
-COMPATIBLE_IOCTL(SIOCSARP)
-COMPATIBLE_IOCTL(SIOCGARP)
-COMPATIBLE_IOCTL(SIOCDARP)
-COMPATIBLE_IOCTL(SIOCSRARP)
-COMPATIBLE_IOCTL(SIOCGRARP)
-COMPATIBLE_IOCTL(SIOCDRARP)
-COMPATIBLE_IOCTL(SIOCADDDLCI)
-COMPATIBLE_IOCTL(SIOCDELDLCI)
-COMPATIBLE_IOCTL(SIOCGMIIPHY)
-COMPATIBLE_IOCTL(SIOCGMIIREG)
-COMPATIBLE_IOCTL(SIOCSMIIREG)
-COMPATIBLE_IOCTL(SIOCGIFVLAN)
-COMPATIBLE_IOCTL(SIOCSIFVLAN)
-COMPATIBLE_IOCTL(SIOCBRADDBR)
-COMPATIBLE_IOCTL(SIOCBRDELBR)
#ifdef CONFIG_BLOCK
/* SG stuff */
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
@@ -2311,22 +1674,6 @@ COMPATIBLE_IOCTL(RAW_SETBIND)
COMPATIBLE_IOCTL(RAW_GETBIND)
/* SMB ioctls which do not need any translations */
COMPATIBLE_IOCTL(SMB_IOC_NEWCONN)
-/* Little a */
-COMPATIBLE_IOCTL(ATMSIGD_CTRL)
-COMPATIBLE_IOCTL(ATMARPD_CTRL)
-COMPATIBLE_IOCTL(ATMLEC_CTRL)
-COMPATIBLE_IOCTL(ATMLEC_MCAST)
-COMPATIBLE_IOCTL(ATMLEC_DATA)
-COMPATIBLE_IOCTL(ATM_SETSC)
-COMPATIBLE_IOCTL(SIOCSIFATMTCP)
-COMPATIBLE_IOCTL(SIOCMKCLIP)
-COMPATIBLE_IOCTL(ATMARP_MKIP)
-COMPATIBLE_IOCTL(ATMARP_SETENTRY)
-COMPATIBLE_IOCTL(ATMARP_ENCAP)
-COMPATIBLE_IOCTL(ATMTCP_CREATE)
-COMPATIBLE_IOCTL(ATMTCP_REMOVE)
-COMPATIBLE_IOCTL(ATMMPC_CTRL)
-COMPATIBLE_IOCTL(ATMMPC_DATA)
/* Watchdog */
COMPATIBLE_IOCTL(WDIOC_GETSUPPORT)
COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
@@ -2532,63 +1879,6 @@ COMPATIBLE_IOCTL(JSIOCGBUTTONS)
COMPATIBLE_IOCTL(JSIOCGNAME(0))
/* now things that need handlers */
-#ifdef CONFIG_NET
-HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
-HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
-HANDLE_IOCTL(SIOCGIFFLAGS, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFFLAGS, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFMETRIC, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFMETRIC, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFMTU, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFMTU, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFMEM, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFMEM, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFHWADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFHWADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCADDMULTI, dev_ifsioc)
-HANDLE_IOCTL(SIOCDELMULTI, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFINDEX, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFHWBROADCAST, dev_ifsioc)
-HANDLE_IOCTL(SIOCSHWTSTAMP, dev_ifsioc)
-
-/* ioctls used by appletalk ddp.c */
-HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCDIFADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCSARP, dev_ifsioc)
-HANDLE_IOCTL(SIOCDARP, dev_ifsioc)
-
-HANDLE_IOCTL(SIOCGIFBRDADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFBRDADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFDSTADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFDSTADDR, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFNETMASK, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFNETMASK, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFPFLAGS, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
-HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
-HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
-HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
-HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
-HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
-HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
-HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)
-HANDLE_IOCTL(SIOCBONDSETHWADDR, bond_ioctl)
-HANDLE_IOCTL(SIOCBONDSLAVEINFOQUERY, bond_ioctl)
-HANDLE_IOCTL(SIOCBONDINFOQUERY, bond_ioctl)
-HANDLE_IOCTL(SIOCBONDCHANGEACTIVE, bond_ioctl)
-HANDLE_IOCTL(SIOCADDRT, routing_ioctl)
-HANDLE_IOCTL(SIOCDELRT, routing_ioctl)
-HANDLE_IOCTL(SIOCBRADDIF, dev_ifsioc)
-HANDLE_IOCTL(SIOCBRDELIF, dev_ifsioc)
-/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
-HANDLE_IOCTL(SIOCRTMSG, ret_einval)
-HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
-HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns)
-#endif
#ifdef CONFIG_BLOCK
HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans)
@@ -2613,31 +1903,6 @@ HANDLE_IOCTL(KDFONTOP, do_kdfontop_ioctl)
/* One SMB ioctl needs translations. */
#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
HANDLE_IOCTL(SMB_IOC_GETMOUNTUID_32, do_smb_getmountuid)
-HANDLE_IOCTL(ATM_GETLINKRATE32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETNAMES32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETTYPE32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETESI32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETADDR32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_RSTADDR32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_ADDADDR32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_DELADDR32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETCIRANGE32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_SETCIRANGE32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_SETESI32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_SETESIF32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETSTAT32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETSTATZ32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_GETLOOP32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_SETLOOP32, do_atm_ioctl)
-HANDLE_IOCTL(ATM_QUERYLOOP32, do_atm_ioctl)
-HANDLE_IOCTL(SONET_GETSTAT, do_atm_ioctl)
-HANDLE_IOCTL(SONET_GETSTATZ, do_atm_ioctl)
-HANDLE_IOCTL(SONET_GETDIAG, do_atm_ioctl)
-HANDLE_IOCTL(SONET_SETDIAG, do_atm_ioctl)
-HANDLE_IOCTL(SONET_CLRDIAG, do_atm_ioctl)
-HANDLE_IOCTL(SONET_SETFRAMING, do_atm_ioctl)
-HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl)
-HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
/* block stuff */
#ifdef CONFIG_BLOCK
/* loop */
@@ -2655,7 +1920,7 @@ COMPATIBLE_IOCTL(TIOCSLTC)
#endif
#ifdef TIOCSTART
/*
- * For these two we have defintions in ioctls.h and/or termios.h on
+ * For these two we have definitions in ioctls.h and/or termios.h on
* some architectures but no actual implementation. Some applications
* like bash call them if they are defined in the headers, so we provide
* entries here to avoid syslog message spew.
@@ -2672,11 +1937,7 @@ COMPATIBLE_IOCTL(USBDEVFS_IOCTL32)
HANDLE_IOCTL(I2C_FUNCS, w_long)
HANDLE_IOCTL(I2C_RDWR, do_i2c_rdwr_ioctl)
HANDLE_IOCTL(I2C_SMBUS, do_i2c_smbus_ioctl)
-/* bridge */
-HANDLE_IOCTL(SIOCSIFBR, old_bridge_ioctl)
-HANDLE_IOCTL(SIOCGIFBR, old_bridge_ioctl)
/* Not implemented in the native kernel */
-IGNORE_IOCTL(SIOCGIFCOUNT)
HANDLE_IOCTL(RTC_IRQP_READ32, rtc_ioctl)
HANDLE_IOCTL(RTC_IRQP_SET32, rtc_ioctl)
HANDLE_IOCTL(RTC_EPOCH_READ32, rtc_ioctl)
@@ -2831,12 +2092,6 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
goto found_handler;
}
-#ifdef CONFIG_NET
- if (S_ISSOCK(filp->f_path.dentry->d_inode->i_mode) &&
- cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
- error = siocdevprivate_ioctl(fd, cmd, arg);
- } else
-#endif
{
static int count;
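
The COMPATIBLE_IOCTL()/HANDLE_IOCTL() entries above populate a translation table that compat_sys_ioctl() searches before falling back to the file's native handler. A minimal sketch of that table mechanism follows; the types, macro bodies and command numbers are simplified stand-ins, not the kernel's actual definitions:

struct file;

typedef int (*ioctl_trans_handler_t)(unsigned int fd, unsigned int cmd,
                                     unsigned long arg, struct file *filp);

struct ioctl_trans {
    unsigned long cmd;              /* 32-bit ioctl command number */
    ioctl_trans_handler_t handler;  /* conversion routine */
};

/* stand-in handler: forward the argument to the 64-bit path unchanged */
static int pass_through(unsigned int fd, unsigned int cmd,
                        unsigned long arg, struct file *filp)
{
    return 0;
}

#define HANDLE_IOCTL(cmd, handler)  { (cmd), (handler) },
#define COMPATIBLE_IOCTL(cmd)       HANDLE_IOCTL((cmd), pass_through)

static struct ioctl_trans ioctl_table_sketch[] = {
    COMPATIBLE_IOCTL(0x5401)            /* hypothetical command */
    HANDLE_IOCTL(0x8910, pass_through)  /* hypothetical command */
};
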
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index d22438ef767..0d23b52dd22 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -184,7 +184,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
/**
* debugfs_create_file - create a file in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
- * @mode: the permission that the file should have
+ * @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
@@ -195,8 +195,8 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
* this file.
*
* This is the basic "create a file" function for debugfs. It allows for a
- * wide range of flexibility in createing a file, or a directory (if you
- * want to create a directory, the debugfs_create_dir() function is
+ * wide range of flexibility in creating a file, or a directory (if you want
+ * to create a directory, the debugfs_create_dir() function is
* recommended to be used instead.)
*
* This function will return a pointer to a dentry if it succeeds. This
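
As a usage note for the API documented above: a minimal module that creates a debugfs directory and exposes a single integer through one of the typed helpers built on debugfs_create_file(). The names are illustrative:

#include <linux/module.h>
#include <linux/debugfs.h>

static u8 example_val;
static struct dentry *example_dir;

static int __init example_init(void)
{
    /* a NULL parent places the directory in the debugfs root */
    example_dir = debugfs_create_dir("example", NULL);
    if (!example_dir)
        return -ENOMEM;
    /* expose example_val as a read/write decimal file */
    if (!debugfs_create_u8("value", 0644, example_dir, &example_val)) {
        debugfs_remove(example_dir);
        return -ENOMEM;
    }
    return 0;
}

static void __exit example_exit(void)
{
    debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
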
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 8b10b87dc01..b912270942f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1028,9 +1028,6 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
if (dio->bio)
dio_bio_submit(dio);
- /* All IO is now issued, send it on its way */
- blk_run_address_space(inode->i_mapping);
-
/*
* It is possible that, we return short IO due to end of file.
* In that case, we need to release all the pages we got hold on.
@@ -1057,8 +1054,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
((rw & READ) || (dio->result == dio->size)))
ret = -EIOCBQUEUED;
- if (ret != -EIOCBQUEUED)
+ if (ret != -EIOCBQUEUED) {
+ /* All IO is now issued, send it on its way */
+ blk_run_address_space(inode->i_mapping);
dio_await_completion(dio);
+ }
/*
* Sync will always be dropping the final ref and completing the
@@ -1124,7 +1124,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
int acquire_i_mutex = 0;
if (rw & WRITE)
- rw = WRITE_ODIRECT;
+ rw = WRITE_ODIRECT_PLUG;
if (bdev)
bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
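
The reordering above means the request queue is only unplugged once it is known that this thread will wait for completion itself; an async submitter returning -EIOCBQUEUED leaves the plug for whoever finishes the I/O (hence the rename to WRITE_ODIRECT_PLUG). The resulting shape of the sync path, as a compilable sketch with stand-in types and values:

#define EIOCBQUEUED_SKETCH  529     /* stand-in for the kernel's EIOCBQUEUED */

struct dio_sketch { int pending; };

static void unplug_queue(void) { }  /* stands in for blk_run_address_space() */

static void await_completion(struct dio_sketch *dio)
{
    while (dio->pending)            /* stands in for dio_await_completion() */
        dio->pending--;
}

static int dio_finish(struct dio_sketch *dio, int ret)
{
    if (ret != -EIOCBQUEUED_SKETCH) {
        /* all IO issued and we will wait: push it to the device now */
        unplug_queue();
        await_completion(dio);
    }
    return ret;
}
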
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 16f682e26c0..b540aa5d1f6 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -143,7 +143,7 @@ out:
}
EXPORT_SYMBOL_GPL(dlm_posix_lock);
-/* Returns failure iff a succesful lock operation should be canceled */
+/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
struct file *file;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 085c5c06342..366c503f965 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -251,10 +251,10 @@ ctl_table epoll_table[] = {
.data = &max_user_watches,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
- { .ctl_name = 0 }
+ { }
};
#endif /* CONFIG_SYSCTL */
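
The sentinel entry of a ctl_table no longer needs to name ctl_name explicitly: an all-zero initializer is enough, and proc_handler is assigned as a plain function designator. A sketch of a table in the new form (names illustrative):

#include <linux/sysctl.h>

static int example_value;
static int example_min;

static ctl_table example_table[] = {
    {
        .procname     = "example_value",
        .data         = &example_value,
        .maxlen       = sizeof(int),
        .mode         = 0644,
        .proc_handler = proc_dointvec_minmax,
        .extra1       = &example_min,
    },
    { }    /* all-zero sentinel terminates the table */
};
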
diff --git a/fs/exec.c b/fs/exec.c
index ba112bd4a33..c0c636e34f6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -46,7 +46,6 @@
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
-#include <linux/ima.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
@@ -1209,9 +1208,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
retval = security_bprm_check(bprm);
if (retval)
return retval;
- retval = ima_bprm_check(bprm);
- if (retval)
- return retval;
/* kernel module loader fixup */
/* so we don't try to load run modprobe in kernel space. */
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 354ed3b47b3..2db95777890 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2033,7 +2033,7 @@ static Indirect *ext3_find_shared(struct inode *inode, int depth,
int k, err;
*top = 0;
- /* Make k index the deepest non-null offest + 1 */
+ /* Make k index the deepest non-null offset + 1 */
for (k = depth; k > 1 && !offsets[k-1]; k--)
;
partial = ext3_get_branch(inode, k, offsets, chain, &err);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2c8caa51add..4e8e2f15b8b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2933,7 +2933,7 @@ retry:
ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
&mpd);
/*
- * If we have a contigous extent of pages and we
+ * If we have a contiguous extent of pages and we
* haven't done the I/O yet, map the blocks and submit
* them for I/O.
*/
@@ -4064,7 +4064,7 @@ static Indirect *ext4_find_shared(struct inode *inode, int depth,
int k, err;
*top = 0;
- /* Make k index the deepest non-null offest + 1 */
+ /* Make k index the deepest non-null offset + 1 */
for (k = depth; k > 1 && !offsets[k-1]; k--)
;
partial = ext4_get_branch(inode, k, offsets, chain, &err);
@@ -5376,7 +5376,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
* worst case, the index blocks spread over different block groups
*
* If datablocks are discontiguous, they are possible to spread over
- * different block groups too. If they are contiugous, with flexbg,
+ * different block groups too. If they are contiguous, with flexbg,
* they could still cross a block group boundary.
*
* Also account for superblock, inode, quota and xattr blocks
@@ -5452,7 +5452,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
* Calculate the journal credits for a chunk of data modification.
*
* This is called from DIO, fallocate or whoever is calling
- * ext4_get_blocks() to map/allocate a chunk of contigous disk blocks.
+ * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
*
* journal buffers for data blocks are not included here, as DIO
* and fallocate do not need to journal data buffers.
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index bba12824def..74e495dabe0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -142,7 +142,7 @@
* 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
* value of s_mb_order2_reqs can be tuned via
* /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
- * stripe size (sbi->s_stripe), we try to search for contigous block in
+ * stripe size (sbi->s_stripe), we try to search for contiguous block in
* stripe size. This should result in better allocation on RAID setups. If
* not, we search in the specific group using bitmap for best extents. The
* tunable min_to_scan and max_to_scan control the behaviour here.
diff --git a/fs/file_table.c b/fs/file_table.c
index 8eb44042e00..4bef4c01ec6 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
-#include <linux/ima.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
@@ -280,7 +279,6 @@ void __fput(struct file *file)
if (file->f_op && file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
- ima_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
cdev_put(inode->i_cdev);
fops_put(file->f_op);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 9d5360c4c2a..49bc1b8e8f1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -614,7 +614,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
struct writeback_control *wbc)
{
struct super_block *sb = wbc->sb, *pin_sb = NULL;
- const int is_blkdev_sb = sb_is_blkdev_sb(sb);
const unsigned long start = jiffies; /* livelock avoidance */
spin_lock(&inode_lock);
@@ -635,36 +634,11 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
continue;
}
- if (!bdi_cap_writeback_dirty(wb->bdi)) {
- redirty_tail(inode);
- if (is_blkdev_sb) {
- /*
- * Dirty memory-backed blockdev: the ramdisk
- * driver does this. Skip just this inode
- */
- continue;
- }
- /*
- * Dirty memory-backed inode against a filesystem other
- * than the kernel-internal bdev filesystem. Skip the
- * entire superblock.
- */
- break;
- }
-
if (inode->i_state & (I_NEW | I_WILL_FREE)) {
requeue_io(inode);
continue;
}
- if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
- wbc->encountered_congestion = 1;
- if (!is_blkdev_sb)
- break; /* Skip a congested fs */
- requeue_io(inode);
- continue; /* Skip a congested blockdev */
- }
-
/*
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
@@ -756,6 +730,7 @@ static long wb_writeback(struct bdi_writeback *wb,
.sync_mode = args->sync_mode,
.older_than_this = NULL,
.for_kupdate = args->for_kupdate,
+ .for_background = args->for_background,
.range_cyclic = args->range_cyclic,
};
unsigned long oldest_jif;
@@ -787,7 +762,6 @@ static long wb_writeback(struct bdi_writeback *wb,
break;
wbc.more_io = 0;
- wbc.encountered_congestion = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
wbc.pages_skipped = 0;
writeback_inodes_wb(wb, &wbc);
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index 9bbb8ce7bea..864dac20a24 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -54,3 +54,10 @@ config FSCACHE_DEBUG
enabled by setting bits in /sys/modules/fscache/parameter/debug.
See Documentation/filesystems/caching/fscache.txt for more information.
+
+config FSCACHE_OBJECT_LIST
+ bool "Maintain global object list for debugging purposes"
+ depends on FSCACHE && PROC_FS
+ help
+ Maintain a global list of active fscache objects that can be
+ retrieved through /proc/fs/fscache/objects for debugging purposes
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
index 91571b95aac..6d561531cb3 100644
--- a/fs/fscache/Makefile
+++ b/fs/fscache/Makefile
@@ -15,5 +15,6 @@ fscache-y := \
fscache-$(CONFIG_PROC_FS) += proc.o
fscache-$(CONFIG_FSCACHE_STATS) += stats.o
fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
+fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index e21985bbb1f..6a3c48abd67 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache,
spin_lock(&cache->object_list_lock);
list_add_tail(&ifsdef->cache_link, &cache->object_list);
spin_unlock(&cache->object_list_lock);
+ fscache_objlist_add(ifsdef);
/* add the cache's netfs definition index object to the top level index
* cookie as a known backing object */
@@ -380,11 +381,15 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
/* make sure all pages pinned by operations on behalf of the netfs are
* written to disk */
+ fscache_stat(&fscache_n_cop_sync_cache);
cache->ops->sync_cache(cache);
+ fscache_stat_d(&fscache_n_cop_sync_cache);
/* dissociate all the netfs pages backed by this cache from the block
* mappings in the cache */
+ fscache_stat(&fscache_n_cop_dissociate_pages);
cache->ops->dissociate_pages(cache);
+ fscache_stat_d(&fscache_n_cop_dissociate_pages);
/* we now have to destroy all the active objects pertaining to this
* cache - which we do by passing them off to thread pool to be
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 72fd18f6c71..990535071a8 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie)
memset(cookie, 0, sizeof(*cookie));
spin_lock_init(&cookie->lock);
+ spin_lock_init(&cookie->stores_lock);
INIT_HLIST_HEAD(&cookie->backing_objects);
}
@@ -102,7 +103,9 @@ struct fscache_cookie *__fscache_acquire_cookie(
cookie->netfs_data = netfs_data;
cookie->flags = 0;
- INIT_RADIX_TREE(&cookie->stores, GFP_NOFS);
+ /* radix tree insertion won't use the preallocation pool unless it's
+ * told it may not wait */
+ INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);
switch (cookie->def->type) {
case FSCACHE_COOKIE_TYPE_INDEX:
@@ -249,7 +252,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
/* ask the cache to allocate an object (we may end up with duplicate
* objects at this stage, but we sort that out later) */
+ fscache_stat(&fscache_n_cop_alloc_object);
object = cache->ops->alloc_object(cache, cookie);
+ fscache_stat_d(&fscache_n_cop_alloc_object);
if (IS_ERR(object)) {
fscache_stat(&fscache_n_object_no_alloc);
ret = PTR_ERR(object);
@@ -270,8 +275,11 @@ static int fscache_alloc_object(struct fscache_cache *cache,
/* only attach if we managed to allocate all we needed, otherwise
* discard the object we just allocated and instead use the one
* attached to the cookie */
- if (fscache_attach_object(cookie, object) < 0)
+ if (fscache_attach_object(cookie, object) < 0) {
+ fscache_stat(&fscache_n_cop_put_object);
cache->ops->put_object(object);
+ fscache_stat_d(&fscache_n_cop_put_object);
+ }
_leave(" = 0");
return 0;
@@ -287,7 +295,9 @@ object_already_extant:
return 0;
error_put:
+ fscache_stat(&fscache_n_cop_put_object);
cache->ops->put_object(object);
+ fscache_stat_d(&fscache_n_cop_put_object);
error:
_leave(" = %d", ret);
return ret;
@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
object->cookie = cookie;
atomic_inc(&cookie->usage);
hlist_add_head(&object->cookie_link, &cookie->backing_objects);
+
+ fscache_objlist_add(object);
ret = 0;
cant_attach_object:
@@ -403,6 +415,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
unsigned long event;
fscache_stat(&fscache_n_relinquishes);
+ if (retire)
+ fscache_stat(&fscache_n_relinquishes_retire);
if (!cookie) {
fscache_stat(&fscache_n_relinquishes_null);
@@ -428,12 +442,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
- /* detach pointers back to the netfs */
spin_lock(&cookie->lock);
- cookie->netfs_data = NULL;
- cookie->def = NULL;
-
/* break links with all the active objects */
while (!hlist_empty(&cookie->backing_objects)) {
object = hlist_entry(cookie->backing_objects.first,
@@ -456,6 +466,10 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
BUG();
}
+ /* detach pointers back to the netfs */
+ cookie->netfs_data = NULL;
+ cookie->def = NULL;
+
spin_unlock(&cookie->lock);
if (cookie->parent) {
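
Masking out __GFP_WAIT in the tree's gfp mask forces radix_tree_insert() to take its nodes from the per-CPU preload pool rather than allocating (and possibly sleeping) on the spot. The standard pairing, sketched on the assumption that insertion happens under a spinlock:

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

static RADIX_TREE(example_tree, GFP_NOFS & ~__GFP_WAIT);
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long index, void *item)
{
    int ret;

    /* may sleep: tops up this CPU's pool of preallocated nodes */
    ret = radix_tree_preload(GFP_NOFS);
    if (ret < 0)
        return ret;
    spin_lock(&example_lock);
    /* cannot sleep here; node allocation comes from the preload pool */
    ret = radix_tree_insert(&example_tree, index, item);
    spin_unlock(&example_lock);
    radix_tree_preload_end();    /* re-enables preemption */
    return ret;
}
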
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 1c341304621..edd7434ab6e 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -17,6 +17,7 @@
* - cache->object_list_lock
* - object->lock
* - object->parent->lock
+ * - cookie->stores_lock
* - fscache_thread_lock
*
*/
@@ -88,17 +89,31 @@ extern int fscache_wait_bit_interruptible(void *);
/*
* object.c
*/
+extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
+
extern void fscache_withdrawing_object(struct fscache_cache *,
struct fscache_object *);
extern void fscache_enqueue_object(struct fscache_object *);
/*
+ * object-list.c
+ */
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+extern const struct file_operations fscache_objlist_fops;
+
+extern void fscache_objlist_add(struct fscache_object *);
+#else
+#define fscache_objlist_add(object) do {} while (0)
+#endif
+
+/*
* operation.c
*/
extern int fscache_submit_exclusive_op(struct fscache_object *,
struct fscache_operation *);
extern int fscache_submit_op(struct fscache_object *,
struct fscache_operation *);
+extern int fscache_cancel_op(struct fscache_operation *);
extern void fscache_abort_object(struct fscache_object *);
extern void fscache_start_operations(struct fscache_object *);
extern void fscache_operation_gc(struct work_struct *);
@@ -127,6 +142,8 @@ extern atomic_t fscache_n_op_enqueue;
extern atomic_t fscache_n_op_deferred_release;
extern atomic_t fscache_n_op_release;
extern atomic_t fscache_n_op_gc;
+extern atomic_t fscache_n_op_cancelled;
+extern atomic_t fscache_n_op_rejected;
extern atomic_t fscache_n_attr_changed;
extern atomic_t fscache_n_attr_changed_ok;
@@ -138,6 +155,8 @@ extern atomic_t fscache_n_allocs;
extern atomic_t fscache_n_allocs_ok;
extern atomic_t fscache_n_allocs_wait;
extern atomic_t fscache_n_allocs_nobufs;
+extern atomic_t fscache_n_allocs_intr;
+extern atomic_t fscache_n_allocs_object_dead;
extern atomic_t fscache_n_alloc_ops;
extern atomic_t fscache_n_alloc_op_waits;
@@ -148,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata;
extern atomic_t fscache_n_retrievals_nobufs;
extern atomic_t fscache_n_retrievals_intr;
extern atomic_t fscache_n_retrievals_nomem;
+extern atomic_t fscache_n_retrievals_object_dead;
extern atomic_t fscache_n_retrieval_ops;
extern atomic_t fscache_n_retrieval_op_waits;
@@ -158,6 +178,14 @@ extern atomic_t fscache_n_stores_nobufs;
extern atomic_t fscache_n_stores_oom;
extern atomic_t fscache_n_store_ops;
extern atomic_t fscache_n_store_calls;
+extern atomic_t fscache_n_store_pages;
+extern atomic_t fscache_n_store_radix_deletes;
+extern atomic_t fscache_n_store_pages_over_limit;
+
+extern atomic_t fscache_n_store_vmscan_not_storing;
+extern atomic_t fscache_n_store_vmscan_gone;
+extern atomic_t fscache_n_store_vmscan_busy;
+extern atomic_t fscache_n_store_vmscan_cancelled;
extern atomic_t fscache_n_marks;
extern atomic_t fscache_n_uncaches;
@@ -176,6 +204,7 @@ extern atomic_t fscache_n_updates_run;
extern atomic_t fscache_n_relinquishes;
extern atomic_t fscache_n_relinquishes_null;
extern atomic_t fscache_n_relinquishes_waitcrt;
+extern atomic_t fscache_n_relinquishes_retire;
extern atomic_t fscache_n_cookie_index;
extern atomic_t fscache_n_cookie_data;
@@ -186,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc;
extern atomic_t fscache_n_object_lookups;
extern atomic_t fscache_n_object_lookups_negative;
extern atomic_t fscache_n_object_lookups_positive;
+extern atomic_t fscache_n_object_lookups_timed_out;
extern atomic_t fscache_n_object_created;
extern atomic_t fscache_n_object_avail;
extern atomic_t fscache_n_object_dead;
@@ -195,15 +225,41 @@ extern atomic_t fscache_n_checkaux_okay;
extern atomic_t fscache_n_checkaux_update;
extern atomic_t fscache_n_checkaux_obsolete;
+extern atomic_t fscache_n_cop_alloc_object;
+extern atomic_t fscache_n_cop_lookup_object;
+extern atomic_t fscache_n_cop_lookup_complete;
+extern atomic_t fscache_n_cop_grab_object;
+extern atomic_t fscache_n_cop_update_object;
+extern atomic_t fscache_n_cop_drop_object;
+extern atomic_t fscache_n_cop_put_object;
+extern atomic_t fscache_n_cop_sync_cache;
+extern atomic_t fscache_n_cop_attr_changed;
+extern atomic_t fscache_n_cop_read_or_alloc_page;
+extern atomic_t fscache_n_cop_read_or_alloc_pages;
+extern atomic_t fscache_n_cop_allocate_page;
+extern atomic_t fscache_n_cop_allocate_pages;
+extern atomic_t fscache_n_cop_write_page;
+extern atomic_t fscache_n_cop_uncache_page;
+extern atomic_t fscache_n_cop_dissociate_pages;
+
static inline void fscache_stat(atomic_t *stat)
{
atomic_inc(stat);
}
+static inline void fscache_stat_d(atomic_t *stat)
+{
+ atomic_dec(stat);
+}
+
+#define __fscache_stat(stat) (stat)
+
extern const struct file_operations fscache_stats_fops;
#else
+#define __fscache_stat(stat) (NULL)
#define fscache_stat(stat) do {} while (0)
+#define fscache_stat_d(stat) do {} while (0)
#endif
/*
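
The fscache_stat()/fscache_stat_d() pair above is used to bracket every call into the cache backend, so the counter's instantaneous value is the number of such calls currently executing and a stats snapshot can report operations in flight. The pattern in miniature, with a GCC-builtin atomic standing in for atomic_t:

typedef struct { volatile long counter; } atomic_sketch_t;

static atomic_sketch_t n_cop_sync_cache_sketch;

static void stat_inc(atomic_sketch_t *s) { __sync_fetch_and_add(&s->counter, 1); }
static void stat_dec(atomic_sketch_t *s) { __sync_fetch_and_sub(&s->counter, 1); }

static void sync_cache(void (*backend_sync)(void))
{
    stat_inc(&n_cop_sync_cache_sketch);  /* shows as "in flight" */
    backend_sync();
    stat_dec(&n_cop_sync_cache_sketch);  /* call has returned */
}
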
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 4de41b59749..add6bdb53f0 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
{
int ret;
- ret = slow_work_register_user();
+ ret = slow_work_register_user(THIS_MODULE);
if (ret < 0)
goto error_slow_work;
@@ -80,7 +80,7 @@ error_kobj:
error_cookie_jar:
fscache_proc_cleanup();
error_proc:
- slow_work_unregister_user();
+ slow_work_unregister_user(THIS_MODULE);
error_slow_work:
return ret;
}
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
kobject_put(fscache_root);
kmem_cache_destroy(fscache_cookie_jar);
fscache_proc_cleanup();
- slow_work_unregister_user();
+ slow_work_unregister_user(THIS_MODULE);
printk(KERN_NOTICE "FS-Cache: Unloaded\n");
}
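
slow_work_register_user() now takes the caller's module so that queued items can pin the module until they have finished executing; correspondingly, the slow_work_ops in the object.c and operation.c hunks below gain a .owner field. A sketch of a slow-work user under that assumed API:

#include <linux/module.h>
#include <linux/slow-work.h>

static int __init example_init(void)
{
    /* ties this module's queued items to its lifetime */
    int ret = slow_work_register_user(THIS_MODULE);
    if (ret < 0)
        return ret;
    /* ... initialise items whose slow_work_ops set .owner = THIS_MODULE ... */
    return 0;
}

static void __exit example_exit(void)
{
    slow_work_unregister_user(THIS_MODULE);
}

module_init(example_init);
module_exit(example_exit);
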
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
new file mode 100644
index 00000000000..e590242fa41
--- /dev/null
+++ b/fs/fscache/object-list.c
@@ -0,0 +1,432 @@
+/* Global fscache object list maintainer and viewer
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/key.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static struct rb_root fscache_object_list;
+static DEFINE_RWLOCK(fscache_object_list_lock);
+
+struct fscache_objlist_data {
+ unsigned long config; /* display configuration */
+#define FSCACHE_OBJLIST_CONFIG_KEY 0x00000001 /* show object keys */
+#define FSCACHE_OBJLIST_CONFIG_AUX 0x00000002 /* show object auxdata */
+#define FSCACHE_OBJLIST_CONFIG_COOKIE 0x00000004 /* show objects with cookies */
+#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE 0x00000008 /* show objects without cookies */
+#define FSCACHE_OBJLIST_CONFIG_BUSY 0x00000010 /* show busy objects */
+#define FSCACHE_OBJLIST_CONFIG_IDLE 0x00000020 /* show idle objects */
+#define FSCACHE_OBJLIST_CONFIG_PENDWR 0x00000040 /* show objects with pending writes */
+#define FSCACHE_OBJLIST_CONFIG_NOPENDWR 0x00000080 /* show objects without pending writes */
+#define FSCACHE_OBJLIST_CONFIG_READS 0x00000100 /* show objects with active reads */
+#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */
+#define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */
+#define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without events */
+#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with slow work */
+#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without slow work */
+
+ u8 buf[512]; /* key and aux data buffer */
+};
+
+/*
+ * Add an object to the object list
+ * - we use the address of the fscache_object structure as the key into the
+ * tree
+ */
+void fscache_objlist_add(struct fscache_object *obj)
+{
+ struct fscache_object *xobj;
+ struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
+
+ write_lock(&fscache_object_list_lock);
+
+ while (*p) {
+ parent = *p;
+ xobj = rb_entry(parent, struct fscache_object, objlist_link);
+
+ if (obj < xobj)
+ p = &(*p)->rb_left;
+ else if (obj > xobj)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&obj->objlist_link, parent, p);
+ rb_insert_color(&obj->objlist_link, &fscache_object_list);
+
+ write_unlock(&fscache_object_list_lock);
+}
+
+/**
+ * fscache_object_destroy - Note that a cache object is about to be destroyed
+ * @object: The object to be destroyed
+ *
+ * Note the imminent destruction and deallocation of a cache object record.
+ */
+void fscache_object_destroy(struct fscache_object *obj)
+{
+ write_lock(&fscache_object_list_lock);
+
+ BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
+ rb_erase(&obj->objlist_link, &fscache_object_list);
+
+ write_unlock(&fscache_object_list_lock);
+}
+EXPORT_SYMBOL(fscache_object_destroy);
+
+/*
+ * find the object in the tree on or after the specified index
+ */
+static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
+{
+ struct fscache_object *pobj, *obj, *minobj = NULL;
+ struct rb_node *p;
+ unsigned long pos;
+
+ if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
+ return NULL;
+ pos = *_pos;
+
+ /* banners (can't represent line 0 by pos 0 as that would involve
+ * returning a NULL pointer) */
+ if (pos == 0)
+ return (struct fscache_object *) ++(*_pos);
+ if (pos < 3)
+ return (struct fscache_object *)pos;
+
+ pobj = (struct fscache_object *)pos;
+ p = fscache_object_list.rb_node;
+ while (p) {
+ obj = rb_entry(p, struct fscache_object, objlist_link);
+ if (pobj < obj) {
+ if (!minobj || minobj > obj)
+ minobj = obj;
+ p = p->rb_left;
+ } else if (pobj > obj) {
+ p = p->rb_right;
+ } else {
+ minobj = obj;
+ break;
+ }
+ obj = NULL;
+ }
+
+ if (!minobj)
+ *_pos = (unsigned long) ERR_PTR(-ENOENT);
+ else if (minobj != obj)
+ *_pos = (unsigned long) minobj;
+ return minobj;
+}
+
+/*
+ * set up the iterator to start reading from the first line
+ */
+static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
+ __acquires(&fscache_object_list_lock)
+{
+ read_lock(&fscache_object_list_lock);
+ return fscache_objlist_lookup(_pos);
+}
+
+/*
+ * move to the next line
+ */
+static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ (*_pos)++;
+ return fscache_objlist_lookup(_pos);
+}
+
+/*
+ * clean up after reading
+ */
+static void fscache_objlist_stop(struct seq_file *m, void *v)
+ __releases(&fscache_object_list_lock)
+{
+ read_unlock(&fscache_object_list_lock);
+}
+
+/*
+ * display an object
+ */
+static int fscache_objlist_show(struct seq_file *m, void *v)
+{
+ struct fscache_objlist_data *data = m->private;
+ struct fscache_object *obj = v;
+ unsigned long config = data->config;
+ uint16_t keylen, auxlen;
+ char _type[3], *type;
+ bool no_cookie;
+ u8 *buf = data->buf, *p;
+
+ if ((unsigned long) v == 1) {
+ seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS"
+ " EM EV F S"
+ " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
+ if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
+ FSCACHE_OBJLIST_CONFIG_AUX))
+ seq_puts(m, " ");
+ if (config & FSCACHE_OBJLIST_CONFIG_KEY)
+ seq_puts(m, "OBJECT_KEY");
+ if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
+ FSCACHE_OBJLIST_CONFIG_AUX)) ==
+ (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
+ seq_puts(m, ", ");
+ if (config & FSCACHE_OBJLIST_CONFIG_AUX)
+ seq_puts(m, "AUX_DATA");
+ seq_puts(m, "\n");
+ return 0;
+ }
+
+ if ((unsigned long) v == 2) {
+ seq_puts(m, "======== ======== ==== ===== === === === == ====="
+ " == == = ="
+ " | ================ == == ================");
+ if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
+ FSCACHE_OBJLIST_CONFIG_AUX))
+ seq_puts(m, " ================");
+ seq_puts(m, "\n");
+ return 0;
+ }
+
+ /* filter out any unwanted objects */
+#define FILTER(criterion, _yes, _no) \
+ do { \
+ unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \
+ unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \
+ if (criterion) { \
+ if (!(config & yes)) \
+ return 0; \
+ } else { \
+ if (!(config & no)) \
+ return 0; \
+ } \
+ } while (0)
+
+ if (~config) {
+ FILTER(obj->cookie,
+ COOKIE, NOCOOKIE);
+ FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
+ obj->n_ops != 0 ||
+ obj->n_obj_ops != 0 ||
+ obj->flags ||
+ !list_empty(&obj->dependents),
+ BUSY, IDLE);
+ FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
+ PENDWR, NOPENDWR);
+ FILTER(atomic_read(&obj->n_reads),
+ READS, NOREADS);
+ FILTER(obj->events & obj->event_mask,
+ EVENTS, NOEVENTS);
+ FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
+ WORK, NOWORK);
+ }
+
+ seq_printf(m,
+ "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
+ obj->debug_id,
+ obj->parent ? obj->parent->debug_id : -1,
+ fscache_object_states_short[obj->state],
+ obj->n_children,
+ obj->n_ops,
+ obj->n_obj_ops,
+ obj->n_in_progress,
+ obj->n_exclusive,
+ atomic_read(&obj->n_reads),
+ obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
+ obj->events,
+ obj->flags,
+ obj->work.flags);
+
+ no_cookie = true;
+ keylen = auxlen = 0;
+ if (obj->cookie) {
+ spin_lock(&obj->lock);
+ if (obj->cookie) {
+ switch (obj->cookie->def->type) {
+ case 0:
+ type = "IX";
+ break;
+ case 1:
+ type = "DT";
+ break;
+ default:
+ sprintf(_type, "%02u",
+ obj->cookie->def->type);
+ type = _type;
+ break;
+ }
+
+ seq_printf(m, "%-16s %s %2lx %16p",
+ obj->cookie->def->name,
+ type,
+ obj->cookie->flags,
+ obj->cookie->netfs_data);
+
+ if (obj->cookie->def->get_key &&
+ config & FSCACHE_OBJLIST_CONFIG_KEY)
+ keylen = obj->cookie->def->get_key(
+ obj->cookie->netfs_data,
+ buf, 400);
+
+ if (obj->cookie->def->get_aux &&
+ config & FSCACHE_OBJLIST_CONFIG_AUX)
+ auxlen = obj->cookie->def->get_aux(
+ obj->cookie->netfs_data,
+ buf + keylen, 512 - keylen);
+
+ no_cookie = false;
+ }
+ spin_unlock(&obj->lock);
+
+ if (!no_cookie && (keylen > 0 || auxlen > 0)) {
+ seq_printf(m, " ");
+ for (p = buf; keylen > 0; keylen--)
+ seq_printf(m, "%02x", *p++);
+ if (auxlen > 0) {
+ if (config & FSCACHE_OBJLIST_CONFIG_KEY)
+ seq_printf(m, ", ");
+ for (; auxlen > 0; auxlen--)
+ seq_printf(m, "%02x", *p++);
+ }
+ }
+ }
+
+ if (no_cookie)
+ seq_printf(m, "<no_cookie>\n");
+ else
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static const struct seq_operations fscache_objlist_ops = {
+ .start = fscache_objlist_start,
+ .stop = fscache_objlist_stop,
+ .next = fscache_objlist_next,
+ .show = fscache_objlist_show,
+};
+
+/*
+ * get the configuration for filtering the list
+ */
+static void fscache_objlist_config(struct fscache_objlist_data *data)
+{
+#ifdef CONFIG_KEYS
+ struct user_key_payload *confkey;
+ unsigned long config;
+ struct key *key;
+ const char *buf;
+ int len;
+
+ key = request_key(&key_type_user, "fscache:objlist", NULL);
+ if (IS_ERR(key))
+ goto no_config;
+
+ config = 0;
+ rcu_read_lock();
+
+ confkey = key->payload.data;
+ buf = confkey->data;
+
+ for (len = confkey->datalen - 1; len >= 0; len--) {
+ switch (buf[len]) {
+ case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY; break;
+ case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX; break;
+ case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE; break;
+ case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE; break;
+ case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY; break;
+ case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE; break;
+ case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR; break;
+ case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR; break;
+ case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS; break;
+ case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS; break;
+ case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK; break;
+ case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK; break;
+ }
+ }
+
+ rcu_read_unlock();
+ key_put(key);
+
+ if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
+ config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
+ if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
+ config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
+ if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
+ config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
+ if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
+ config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
+ if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
+ config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
+ if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
+ config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;
+
+ data->config = config;
+ return;
+
+no_config:
+#endif
+ data->config = ULONG_MAX;
+}
+
+/*
+ * open "/proc/fs/fscache/objects" to provide a list of active objects
+ * - can be configured by a user-defined key added to the caller's keyrings
+ */
+static int fscache_objlist_open(struct inode *inode, struct file *file)
+{
+ struct fscache_objlist_data *data;
+ struct seq_file *m;
+ int ret;
+
+ ret = seq_open(file, &fscache_objlist_ops);
+ if (ret < 0)
+ return ret;
+
+ m = file->private_data;
+
+ /* buffer for key extraction */
+ data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL);
+ if (!data) {
+ seq_release(inode, file);
+ return -ENOMEM;
+ }
+
+ /* get the configuration key */
+ fscache_objlist_config(data);
+
+ m->private = data;
+ return 0;
+}
+
+/*
+ * clean up on close
+ */
+static int fscache_objlist_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+
+ kfree(m->private);
+ m->private = NULL;
+ return seq_release(inode, file);
+}
+
+const struct file_operations fscache_objlist_fops = {
+ .owner = THIS_MODULE,
+ .open = fscache_objlist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = fscache_objlist_release,
+};
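
Two details of the new file are worth noting. First, the seq_file position doubles as the cursor: positions 1 and 2 stand for the two banner lines, and from 3 upward the position is the object's address itself, so a resumed read restarts at the first object at or after that address in the rbtree. Reduced to its essentials (illustrative only):

static void *objlist_decode_pos(unsigned long *pos)
{
    if (*pos == 0)          /* line 0 can't be encoded: 0 would read as NULL */
        return (void *)++(*pos);
    if (*pos < 3)           /* positions 1 and 2: the banner lines */
        return (void *)*pos;
    /* from 3 up, *pos is an object address: the real code walks the
     * rbtree for the first object >= *pos */
    return (void *)*pos;
}

Second, the column filter is read from a user-type key named "fscache:objlist" in the caller's keyrings; for example, running keyctl add user fscache:objlist "KB" @s before reading /proc/fs/fscache/objects would select the object-key column plus busy objects only (going by the letter codes in fscache_objlist_config() above), while unset show/hide pairs default to showing both.
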
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 392a41b1b79..e513ac599c8 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -14,9 +14,10 @@
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
+#include <linux/seq_file.h>
#include "internal.h"
-const char *fscache_object_states[] = {
+const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
[FSCACHE_OBJECT_INIT] = "OBJECT_INIT",
[FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP",
[FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING",
@@ -33,9 +34,28 @@ const char *fscache_object_states[] = {
};
EXPORT_SYMBOL(fscache_object_states);
+const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
+ [FSCACHE_OBJECT_INIT] = "INIT",
+ [FSCACHE_OBJECT_LOOKING_UP] = "LOOK",
+ [FSCACHE_OBJECT_CREATING] = "CRTN",
+ [FSCACHE_OBJECT_AVAILABLE] = "AVBL",
+ [FSCACHE_OBJECT_ACTIVE] = "ACTV",
+ [FSCACHE_OBJECT_UPDATING] = "UPDT",
+ [FSCACHE_OBJECT_DYING] = "DYNG",
+ [FSCACHE_OBJECT_LC_DYING] = "LCDY",
+ [FSCACHE_OBJECT_ABORT_INIT] = "ABTI",
+ [FSCACHE_OBJECT_RELEASING] = "RELS",
+ [FSCACHE_OBJECT_RECYCLING] = "RCYC",
+ [FSCACHE_OBJECT_WITHDRAWING] = "WTHD",
+ [FSCACHE_OBJECT_DEAD] = "DEAD",
+};
+
static void fscache_object_slow_work_put_ref(struct slow_work *);
static int fscache_object_slow_work_get_ref(struct slow_work *);
static void fscache_object_slow_work_execute(struct slow_work *);
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
+#endif
static void fscache_initialise_object(struct fscache_object *);
static void fscache_lookup_object(struct fscache_object *);
static void fscache_object_available(struct fscache_object *);
@@ -45,9 +65,13 @@ static void fscache_enqueue_dependents(struct fscache_object *);
static void fscache_dequeue_object(struct fscache_object *);
const struct slow_work_ops fscache_object_slow_work_ops = {
+ .owner = THIS_MODULE,
.get_ref = fscache_object_slow_work_get_ref,
.put_ref = fscache_object_slow_work_put_ref,
.execute = fscache_object_slow_work_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+ .desc = fscache_object_slow_work_desc,
+#endif
};
EXPORT_SYMBOL(fscache_object_slow_work_ops);
@@ -81,6 +105,7 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
static void fscache_object_state_machine(struct fscache_object *object)
{
enum fscache_object_state new_state;
+ struct fscache_cookie *cookie;
ASSERT(object != NULL);
@@ -120,20 +145,31 @@ static void fscache_object_state_machine(struct fscache_object *object)
case FSCACHE_OBJECT_UPDATING:
clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
fscache_stat(&fscache_n_updates_run);
+ fscache_stat(&fscache_n_cop_update_object);
object->cache->ops->update_object(object);
+ fscache_stat_d(&fscache_n_cop_update_object);
goto active_transit;
/* handle an object dying during lookup or creation */
case FSCACHE_OBJECT_LC_DYING:
object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
+ fscache_stat(&fscache_n_cop_lookup_complete);
object->cache->ops->lookup_complete(object);
+ fscache_stat_d(&fscache_n_cop_lookup_complete);
spin_lock(&object->lock);
object->state = FSCACHE_OBJECT_DYING;
- if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
- &object->cookie->flags))
- wake_up_bit(&object->cookie->flags,
- FSCACHE_COOKIE_CREATING);
+ cookie = object->cookie;
+ if (cookie) {
+ if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
+ &cookie->flags))
+ wake_up_bit(&cookie->flags,
+ FSCACHE_COOKIE_LOOKING_UP);
+ if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
+ &cookie->flags))
+ wake_up_bit(&cookie->flags,
+ FSCACHE_COOKIE_CREATING);
+ }
spin_unlock(&object->lock);
fscache_done_parent_op(object);
@@ -165,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
}
spin_unlock(&object->lock);
fscache_enqueue_dependents(object);
+ fscache_start_operations(object);
goto terminal_transit;
/* handle an abort during initialisation */
@@ -316,14 +353,29 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
_enter("{OBJ%x}", object->debug_id);
- clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
-
start = jiffies;
fscache_object_state_machine(object);
fscache_hist(fscache_objs_histogram, start);
if (object->events & object->event_mask)
fscache_enqueue_object(object);
+ clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+}
+
+/*
+ * describe an object for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *work,
+ struct seq_file *m)
+{
+ struct fscache_object *object =
+ container_of(work, struct fscache_object, work);
+
+ seq_printf(m, "FSC: OBJ%x: %s",
+ object->debug_id,
+ fscache_object_states_short[object->state]);
}
+#endif
/*
* initialise an object
@@ -376,7 +428,9 @@ static void fscache_initialise_object(struct fscache_object *object)
* binding on to us, so we need to make sure we don't
* add ourself to the list multiple times */
if (list_empty(&object->dep_link)) {
+ fscache_stat(&fscache_n_cop_grab_object);
object->cache->ops->grab_object(object);
+ fscache_stat_d(&fscache_n_cop_grab_object);
list_add(&object->dep_link,
&parent->dependents);
@@ -414,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
struct fscache_object *parent;
+ int ret;
_enter("");
@@ -438,11 +493,20 @@ static void fscache_lookup_object(struct fscache_object *object)
object->cache->tag->name);
fscache_stat(&fscache_n_object_lookups);
- object->cache->ops->lookup_object(object);
+ fscache_stat(&fscache_n_cop_lookup_object);
+ ret = object->cache->ops->lookup_object(object);
+ fscache_stat_d(&fscache_n_cop_lookup_object);
if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
+ if (ret == -ETIMEDOUT) {
+ /* probably stuck behind another object, so move this one to
+ * the back of the queue */
+ fscache_stat(&fscache_n_object_lookups_timed_out);
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ }
+
_leave("");
}
@@ -546,7 +610,8 @@ static void fscache_object_available(struct fscache_object *object)
spin_lock(&object->lock);
- if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
+ if (object->cookie &&
+ test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
fscache_done_parent_op(object);
@@ -562,7 +627,9 @@ static void fscache_object_available(struct fscache_object *object)
}
spin_unlock(&object->lock);
+ fscache_stat(&fscache_n_cop_lookup_complete);
object->cache->ops->lookup_complete(object);
+ fscache_stat_d(&fscache_n_cop_lookup_complete);
fscache_enqueue_dependents(object);
fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
@@ -581,11 +648,16 @@ static void fscache_drop_object(struct fscache_object *object)
_enter("{OBJ%x,%d}", object->debug_id, object->n_children);
+ ASSERTCMP(object->cookie, ==, NULL);
+ ASSERT(hlist_unhashed(&object->cookie_link));
+
spin_lock(&cache->object_list_lock);
list_del_init(&object->cache_link);
spin_unlock(&cache->object_list_lock);
+ fscache_stat(&fscache_n_cop_drop_object);
cache->ops->drop_object(object);
+ fscache_stat_d(&fscache_n_cop_drop_object);
if (parent) {
_debug("release parent OBJ%x {%d}",
@@ -600,7 +672,9 @@ static void fscache_drop_object(struct fscache_object *object)
}
/* this just shifts the object release to the slow work processor */
+ fscache_stat(&fscache_n_cop_put_object);
object->cache->ops->put_object(object);
+ fscache_stat_d(&fscache_n_cop_put_object);
_leave("");
}
@@ -690,8 +764,12 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
{
struct fscache_object *object =
container_of(work, struct fscache_object, work);
+ int ret;
- return object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+ fscache_stat(&fscache_n_cop_grab_object);
+ ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+ fscache_stat_d(&fscache_n_cop_grab_object);
+ return ret;
}
/*
@@ -702,7 +780,9 @@ static void fscache_object_slow_work_put_ref(struct slow_work *work)
struct fscache_object *object =
container_of(work, struct fscache_object, work);
- return object->cache->ops->put_object(object);
+ fscache_stat(&fscache_n_cop_put_object);
+ object->cache->ops->put_object(object);
+ fscache_stat_d(&fscache_n_cop_put_object);
}
/*
@@ -739,7 +819,9 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
/* sort onto appropriate lists */
fscache_enqueue_object(dep);
+ fscache_stat(&fscache_n_cop_put_object);
dep->cache->ops->put_object(dep);
+ fscache_stat_d(&fscache_n_cop_put_object);
if (!list_empty(&object->dependents))
cond_resched_lock(&object->lock);
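
The [FSCACHE_OBJECT__NSTATES][5] bound on the short-name table added above lets the compiler reject any name longer than four characters plus the NUL, and the designated initializers keep the table in lockstep with the enum regardless of ordering. The same idiom in miniature (names illustrative):

enum sketch_state {
    ST_INIT,
    ST_LOOKING_UP,
    ST_ACTIVE,
    ST__NSTATES         /* must stay last */
};

/* a five-letter name here would be a compile error */
static const char state_short[ST__NSTATES][5] = {
    [ST_INIT]       = "INIT",
    [ST_LOOKING_UP] = "LOOK",
    [ST_ACTIVE]     = "ACTV",
};
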
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e7f8d53b8b6..313e79a1426 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -13,6 +13,7 @@
#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
+#include <linux/seq_file.h>
#include "internal.h"
atomic_t fscache_op_debug_id;
@@ -31,32 +32,33 @@ void fscache_enqueue_operation(struct fscache_operation *op)
_enter("{OBJ%x OP%x,%u}",
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+ fscache_set_op_state(op, "EnQ");
+
+ ASSERT(list_empty(&op->pend_link));
ASSERT(op->processor != NULL);
ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
ASSERTCMP(atomic_read(&op->usage), >, 0);
- if (list_empty(&op->pend_link)) {
- switch (op->flags & FSCACHE_OP_TYPE) {
- case FSCACHE_OP_FAST:
- _debug("queue fast");
- atomic_inc(&op->usage);
- if (!schedule_work(&op->fast_work))
- fscache_put_operation(op);
- break;
- case FSCACHE_OP_SLOW:
- _debug("queue slow");
- slow_work_enqueue(&op->slow_work);
- break;
- case FSCACHE_OP_MYTHREAD:
- _debug("queue for caller's attention");
- break;
- default:
- printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
- op->flags);
- BUG();
- break;
- }
- fscache_stat(&fscache_n_op_enqueue);
+ fscache_stat(&fscache_n_op_enqueue);
+ switch (op->flags & FSCACHE_OP_TYPE) {
+ case FSCACHE_OP_FAST:
+ _debug("queue fast");
+ atomic_inc(&op->usage);
+ if (!schedule_work(&op->fast_work))
+ fscache_put_operation(op);
+ break;
+ case FSCACHE_OP_SLOW:
+ _debug("queue slow");
+ slow_work_enqueue(&op->slow_work);
+ break;
+ case FSCACHE_OP_MYTHREAD:
+ _debug("queue for caller's attention");
+ break;
+ default:
+ printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
+ op->flags);
+ BUG();
+ break;
}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
@@ -67,6 +69,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
static void fscache_run_op(struct fscache_object *object,
struct fscache_operation *op)
{
+ fscache_set_op_state(op, "Run");
+
object->n_in_progress++;
if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -87,9 +91,12 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
+ fscache_set_op_state(op, "SubmitX");
+
spin_lock(&object->lock);
ASSERTCMP(object->n_ops, >=, object->n_in_progress);
ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+ ASSERT(list_empty(&op->pend_link));
ret = -ENOBUFS;
if (fscache_object_is_active(object)) {
@@ -190,9 +197,12 @@ int fscache_submit_op(struct fscache_object *object,
ASSERTCMP(atomic_read(&op->usage), >, 0);
+ fscache_set_op_state(op, "Submit");
+
spin_lock(&object->lock);
ASSERTCMP(object->n_ops, >=, object->n_in_progress);
ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+ ASSERT(list_empty(&op->pend_link));
ostate = object->state;
smp_rmb();
@@ -222,6 +232,11 @@ int fscache_submit_op(struct fscache_object *object,
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
ret = 0;
+ } else if (object->state == FSCACHE_OBJECT_DYING ||
+ object->state == FSCACHE_OBJECT_LC_DYING ||
+ object->state == FSCACHE_OBJECT_WITHDRAWING) {
+ fscache_stat(&fscache_n_op_rejected);
+ ret = -ENOBUFS;
} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
fscache_report_unexpected_submission(object, op, ostate);
ASSERT(!fscache_object_is_active(object));
@@ -264,12 +279,7 @@ void fscache_start_operations(struct fscache_object *object)
stop = true;
}
list_del_init(&op->pend_link);
- object->n_in_progress++;
-
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- if (op->processor)
- fscache_enqueue_operation(op);
+ fscache_run_op(object, op);
/* the pending queue was holding a ref on the object */
fscache_put_operation(op);
@@ -282,6 +292,36 @@ void fscache_start_operations(struct fscache_object *object)
}
/*
+ * cancel an operation that's pending on an object
+ */
+int fscache_cancel_op(struct fscache_operation *op)
+{
+ struct fscache_object *object = op->object;
+ int ret;
+
+ _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
+
+ spin_lock(&object->lock);
+
+ ret = -EBUSY;
+ if (!list_empty(&op->pend_link)) {
+ fscache_stat(&fscache_n_op_cancelled);
+ list_del_init(&op->pend_link);
+ object->n_ops--;
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+ object->n_exclusive--;
+ if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+ wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+ fscache_put_operation(op);
+ ret = 0;
+ }
+
+ spin_unlock(&object->lock);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
* release an operation
* - queues pending ops if this is the last in-progress op
*/
@@ -298,6 +338,8 @@ void fscache_put_operation(struct fscache_operation *op)
if (!atomic_dec_and_test(&op->usage))
return;
+ fscache_set_op_state(op, "Put");
+
_debug("PUT OP");
if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
BUG();
@@ -311,6 +353,9 @@ void fscache_put_operation(struct fscache_operation *op)
object = op->object;
+ if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+ atomic_dec(&object->n_reads);
+
/* now... we may get called with the object spinlock held, so we
* complete the cleanup here only if we can immediately acquire the
* lock, and defer it otherwise */
@@ -452,8 +497,27 @@ static void fscache_op_execute(struct slow_work *work)
_leave("");
}
+/*
+ * describe an operation for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
+{
+ struct fscache_operation *op =
+ container_of(work, struct fscache_operation, slow_work);
+
+ seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
+ op->object->debug_id, op->debug_id,
+ op->name, op->state, op->flags);
+}
+#endif
+
const struct slow_work_ops fscache_op_slow_work_ops = {
+ .owner = THIS_MODULE,
.get_ref = fscache_op_get_ref,
.put_ref = fscache_op_put_ref,
.execute = fscache_op_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+ .desc = fscache_op_desc,
+#endif
};
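
fscache_cancel_op() above succeeds only while the operation is still sitting on the pending list; once the cache has started it, the function returns -EBUSY and the caller must wait for it instead, which is exactly how fscache_wait_for_retrieval_activation() in the page.c hunks below uses it. The caller-side shape, sketched with stand-in hooks:

#define ERESTARTSYS_SKETCH  512     /* stand-in for the kernel's ERESTARTSYS */

struct op_sketch;

static int wait_or_cancel(struct op_sketch *op, int interrupted,
                          int (*cancel)(struct op_sketch *),
                          void (*wait_uninterruptible)(struct op_sketch *))
{
    if (interrupted) {
        if (cancel(op) == 0)
            return -ERESTARTSYS_SKETCH;  /* never ran: safe to back out */
        /* already dequeued by the cache, so it will run shortly: wait
         * non-interruptibly before touching the shared pages */
        wait_uninterruptible(op);
    }
    return 0;
}
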
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 2568e0eb644..c598ea4c4e7 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -43,18 +43,102 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
EXPORT_SYMBOL(__fscache_wait_on_page_write);
/*
- * note that a page has finished being written to the cache
+ * decide whether a page can be released, possibly by cancelling a store to it
+ * - we're allowed to sleep if __GFP_WAIT is flagged
*/
-static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
+bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
{
struct page *xpage;
+ void *val;
+
+ _enter("%p,%p,%x", cookie, page, gfp);
+
+ rcu_read_lock();
+ val = radix_tree_lookup(&cookie->stores, page->index);
+ if (!val) {
+ rcu_read_unlock();
+ fscache_stat(&fscache_n_store_vmscan_not_storing);
+ __fscache_uncache_page(cookie, page);
+ return true;
+ }
+
+ /* see if the page is actually undergoing storage - if so we can't get
+ * rid of it till the cache has finished with it */
+ if (radix_tree_tag_get(&cookie->stores, page->index,
+ FSCACHE_COOKIE_STORING_TAG)) {
+ rcu_read_unlock();
+ goto page_busy;
+ }
+
+ /* the page is pending storage, so we attempt to cancel the store and
+ * discard the store request so that the page can be reclaimed */
+ spin_lock(&cookie->stores_lock);
+ rcu_read_unlock();
+
+ if (radix_tree_tag_get(&cookie->stores, page->index,
+ FSCACHE_COOKIE_STORING_TAG)) {
+ /* the page started to undergo storage whilst we were looking,
+ * so now we can only wait or return */
+ spin_unlock(&cookie->stores_lock);
+ goto page_busy;
+ }
- spin_lock(&cookie->lock);
xpage = radix_tree_delete(&cookie->stores, page->index);
- spin_unlock(&cookie->lock);
- ASSERT(xpage != NULL);
+ spin_unlock(&cookie->stores_lock);
+
+ if (xpage) {
+ fscache_stat(&fscache_n_store_vmscan_cancelled);
+ fscache_stat(&fscache_n_store_radix_deletes);
+ ASSERTCMP(xpage, ==, page);
+ } else {
+ fscache_stat(&fscache_n_store_vmscan_gone);
+ }
wake_up_bit(&cookie->flags, 0);
+ if (xpage)
+ page_cache_release(xpage);
+ __fscache_uncache_page(cookie, page);
+ return true;
+
+page_busy:
+ /* we might want to wait here, but that could deadlock the allocator as
+ * the slow-work threads writing to the cache may all end up sleeping
+ * on memory allocation */
+ fscache_stat(&fscache_n_store_vmscan_busy);
+ return false;
+}
+EXPORT_SYMBOL(__fscache_maybe_release_page);
+
+/*
+ * note that a page has finished being written to the cache
+ */
+static void fscache_end_page_write(struct fscache_object *object,
+ struct page *page)
+{
+ struct fscache_cookie *cookie;
+ struct page *xpage = NULL;
+
+ spin_lock(&object->lock);
+ cookie = object->cookie;
+ if (cookie) {
+ /* delete the page from the tree if it is now no longer
+ * pending */
+ spin_lock(&cookie->stores_lock);
+ radix_tree_tag_clear(&cookie->stores, page->index,
+ FSCACHE_COOKIE_STORING_TAG);
+ if (!radix_tree_tag_get(&cookie->stores, page->index,
+ FSCACHE_COOKIE_PENDING_TAG)) {
+ fscache_stat(&fscache_n_store_radix_deletes);
+ xpage = radix_tree_delete(&cookie->stores, page->index);
+ }
+ spin_unlock(&cookie->stores_lock);
+ wake_up_bit(&cookie->flags, 0);
+ }
+ spin_unlock(&object->lock);
+ if (xpage)
+ page_cache_release(xpage);
}
/*
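
__fscache_maybe_release_page() is intended to be called from a netfs's releasepage address-space operation: vmscan offers the page back, and the function either cancels the pending store (page reclaimable) or reports it busy. A sketch of the caller side, assuming the declaration exported by this series:

#include <linux/fs.h>
#include <linux/fscache.h>

static int example_releasepage(struct fscache_cookie *cookie,
                               struct page *page, gfp_t gfp)
{
    /* may sleep only if gfp includes __GFP_WAIT; a false return means
     * the page is still being written to the cache and must be kept */
    if (!__fscache_maybe_release_page(cookie, page, gfp))
        return 0;
    return 1;    /* cache is done with the page: VM may reclaim it */
}
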
@@ -63,14 +147,21 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p
static void fscache_attr_changed_op(struct fscache_operation *op)
{
struct fscache_object *object = op->object;
+ int ret;
_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
fscache_stat(&fscache_n_attr_changed_calls);
- if (fscache_object_is_active(object) &&
- object->cache->ops->attr_changed(object) < 0)
- fscache_abort_object(object);
+ if (fscache_object_is_active(object)) {
+ fscache_set_op_state(op, "CallFS");
+ fscache_stat(&fscache_n_cop_attr_changed);
+ ret = object->cache->ops->attr_changed(object);
+ fscache_stat_d(&fscache_n_cop_attr_changed);
+ fscache_set_op_state(op, "Done");
+ if (ret < 0)
+ fscache_abort_object(object);
+ }
_leave("");
}
@@ -99,6 +190,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
fscache_operation_init(op, NULL);
fscache_operation_init_slow(op, fscache_attr_changed_op);
op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+ fscache_set_op_name(op, "Attr");
spin_lock(&cookie->lock);
@@ -184,6 +276,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
op->start_time = jiffies;
INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
INIT_LIST_HEAD(&op->to_do);
+ fscache_set_op_name(&op->op, "Retr");
return op;
}
@@ -221,6 +314,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
}
/*
+ * wait for an object to become active (or dead)
+ */
+static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+ struct fscache_retrieval *op,
+ atomic_t *stat_op_waits,
+ atomic_t *stat_object_dead)
+{
+ int ret;
+
+ if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
+ goto check_if_dead;
+
+ _debug(">>> WT");
+ fscache_stat(stat_op_waits);
+ if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) < 0) {
+ ret = fscache_cancel_op(&op->op);
+ if (ret == 0)
+ return -ERESTARTSYS;
+
+ /* it's been removed from the pending queue by another party,
+ * so we should get to run shortly */
+ wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ }
+ _debug("<<< GO");
+
+check_if_dead:
+ if (unlikely(fscache_object_is_dead(object))) {
+ fscache_stat(stat_object_dead);
+ return -ENOBUFS;
+ }
+ return 0;
+}
+
+/*
* read a page from the cache or allocate a block in which to store it
* - we return:
* -ENOMEM - out of memory, nothing done
@@ -257,6 +387,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
_leave(" = -ENOMEM");
return -ENOMEM;
}
+ fscache_set_op_name(&op->op, "RetrRA1");
spin_lock(&cookie->lock);
@@ -267,6 +398,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
+ atomic_inc(&object->n_reads);
+ set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
if (fscache_submit_op(object, &op->op) < 0)
goto nobufs_unlock;
spin_unlock(&cookie->lock);
@@ -279,23 +413,27 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
/* we wait for the operation to become active, and then process it
* *here*, in this thread, and not in the thread pool */
- if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
- _debug(">>> WT");
- fscache_stat(&fscache_n_retrieval_op_waits);
- wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
- fscache_wait_bit, TASK_UNINTERRUPTIBLE);
- _debug("<<< GO");
- }
+ ret = fscache_wait_for_retrieval_activation(
+ object, op,
+ __fscache_stat(&fscache_n_retrieval_op_waits),
+ __fscache_stat(&fscache_n_retrievals_object_dead));
+ if (ret < 0)
+ goto error;
/* ask the cache to honour the operation */
if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+ fscache_stat(&fscache_n_cop_allocate_page);
ret = object->cache->ops->allocate_page(op, page, gfp);
+ fscache_stat_d(&fscache_n_cop_allocate_page);
if (ret == 0)
ret = -ENODATA;
} else {
+ fscache_stat(&fscache_n_cop_read_or_alloc_page);
ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
+ fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
}
+error:
if (ret == -ENOMEM)
fscache_stat(&fscache_n_retrievals_nomem);
else if (ret == -ERESTARTSYS)
@@ -347,7 +485,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
void *context,
gfp_t gfp)
{
- fscache_pages_retrieval_func_t func;
struct fscache_retrieval *op;
struct fscache_object *object;
int ret;
@@ -369,6 +506,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
op = fscache_alloc_retrieval(mapping, end_io_func, context);
if (!op)
return -ENOMEM;
+ fscache_set_op_name(&op->op, "RetrRAN");
spin_lock(&cookie->lock);
@@ -377,6 +515,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
object = hlist_entry(cookie->backing_objects.first,
struct fscache_object, cookie_link);
+ atomic_inc(&object->n_reads);
+ set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
if (fscache_submit_op(object, &op->op) < 0)
goto nobufs_unlock;
spin_unlock(&cookie->lock);
@@ -389,21 +530,27 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
/* we wait for the operation to become active, and then process it
* *here*, in this thread, and not in the thread pool */
- if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
- _debug(">>> WT");
- fscache_stat(&fscache_n_retrieval_op_waits);
- wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
- fscache_wait_bit, TASK_UNINTERRUPTIBLE);
- _debug("<<< GO");
- }
+ ret = fscache_wait_for_retrieval_activation(
+ object, op,
+ __fscache_stat(&fscache_n_retrieval_op_waits),
+ __fscache_stat(&fscache_n_retrievals_object_dead));
+ if (ret < 0)
+ goto error;
/* ask the cache to honour the operation */
- if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags))
- func = object->cache->ops->allocate_pages;
- else
- func = object->cache->ops->read_or_alloc_pages;
- ret = func(op, pages, nr_pages, gfp);
+ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+ fscache_stat(&fscache_n_cop_allocate_pages);
+ ret = object->cache->ops->allocate_pages(
+ op, pages, nr_pages, gfp);
+ fscache_stat_d(&fscache_n_cop_allocate_pages);
+ } else {
+ fscache_stat(&fscache_n_cop_read_or_alloc_pages);
+ ret = object->cache->ops->read_or_alloc_pages(
+ op, pages, nr_pages, gfp);
+ fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
+ }
+error:
if (ret == -ENOMEM)
fscache_stat(&fscache_n_retrievals_nomem);
else if (ret == -ERESTARTSYS)
@@ -461,6 +608,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
if (!op)
return -ENOMEM;
+ fscache_set_op_name(&op->op, "RetrAL1");
spin_lock(&cookie->lock);
@@ -475,18 +623,22 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
fscache_stat(&fscache_n_alloc_ops);
- if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
- _debug(">>> WT");
- fscache_stat(&fscache_n_alloc_op_waits);
- wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
- fscache_wait_bit, TASK_UNINTERRUPTIBLE);
- _debug("<<< GO");
- }
+ ret = fscache_wait_for_retrieval_activation(
+ object, op,
+ __fscache_stat(&fscache_n_alloc_op_waits),
+ __fscache_stat(&fscache_n_allocs_object_dead));
+ if (ret < 0)
+ goto error;
/* ask the cache to honour the operation */
+ fscache_stat(&fscache_n_cop_allocate_page);
ret = object->cache->ops->allocate_page(op, page, gfp);
+ fscache_stat_d(&fscache_n_cop_allocate_page);
- if (ret < 0)
+error:
+ if (ret == -ERESTARTSYS)
+ fscache_stat(&fscache_n_allocs_intr);
+ else if (ret < 0)
fscache_stat(&fscache_n_allocs_nobufs);
else
fscache_stat(&fscache_n_allocs_ok);
@@ -521,7 +673,7 @@ static void fscache_write_op(struct fscache_operation *_op)
struct fscache_storage *op =
container_of(_op, struct fscache_storage, op);
struct fscache_object *object = op->op.object;
- struct fscache_cookie *cookie = object->cookie;
+ struct fscache_cookie *cookie;
struct page *page;
unsigned n;
void *results[1];
@@ -529,16 +681,19 @@ static void fscache_write_op(struct fscache_operation *_op)
_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
- spin_lock(&cookie->lock);
+ fscache_set_op_state(&op->op, "GetPage");
+
spin_lock(&object->lock);
+ cookie = object->cookie;
- if (!fscache_object_is_active(object)) {
+ if (!fscache_object_is_active(object) || !cookie) {
spin_unlock(&object->lock);
- spin_unlock(&cookie->lock);
_leave("");
return;
}
+ spin_lock(&cookie->stores_lock);
+
fscache_stat(&fscache_n_store_calls);
/* find a page to store */
@@ -549,23 +704,35 @@ static void fscache_write_op(struct fscache_operation *_op)
goto superseded;
page = results[0];
_debug("gang %d [%lx]", n, page->index);
- if (page->index > op->store_limit)
+ if (page->index > op->store_limit) {
+ fscache_stat(&fscache_n_store_pages_over_limit);
goto superseded;
+ }
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
+ if (page) {
+ radix_tree_tag_set(&cookie->stores, page->index,
+ FSCACHE_COOKIE_STORING_TAG);
+ radix_tree_tag_clear(&cookie->stores, page->index,
+ FSCACHE_COOKIE_PENDING_TAG);
+ }
+ spin_unlock(&cookie->stores_lock);
spin_unlock(&object->lock);
- spin_unlock(&cookie->lock);
if (page) {
+ fscache_set_op_state(&op->op, "Store");
+ fscache_stat(&fscache_n_store_pages);
+ fscache_stat(&fscache_n_cop_write_page);
ret = object->cache->ops->write_page(op, page);
- fscache_end_page_write(cookie, page);
- page_cache_release(page);
- if (ret < 0)
+ fscache_stat_d(&fscache_n_cop_write_page);
+ fscache_set_op_state(&op->op, "EndWrite");
+ fscache_end_page_write(object, page);
+ if (ret < 0) {
+ fscache_set_op_state(&op->op, "Abort");
fscache_abort_object(object);
- else
+ } else {
fscache_enqueue_operation(&op->op);
+ }
}
_leave("");
@@ -575,9 +742,9 @@ superseded:
/* this writer is going away and there aren't any more things to
* write */
_debug("cease");
+ spin_unlock(&cookie->stores_lock);
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
spin_unlock(&object->lock);
- spin_unlock(&cookie->lock);
_leave("");
}
@@ -634,6 +801,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
fscache_operation_init(&op->op, fscache_release_write_op);
fscache_operation_init_slow(&op->op, fscache_write_op);
op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+ fscache_set_op_name(&op->op, "Write1");
ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
if (ret < 0)
@@ -652,6 +820,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
/* add the page to the pending-storage radix tree on the backing
* object */
spin_lock(&object->lock);
+ spin_lock(&cookie->stores_lock);
_debug("store limit %llx", (unsigned long long) object->store_limit);
@@ -672,6 +841,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
goto already_pending;
+ spin_unlock(&cookie->stores_lock);
spin_unlock(&object->lock);
op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
@@ -693,6 +863,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
already_queued:
fscache_stat(&fscache_n_stores_again);
already_pending:
+ spin_unlock(&cookie->stores_lock);
spin_unlock(&object->lock);
spin_unlock(&cookie->lock);
radix_tree_preload_end();
@@ -702,7 +873,9 @@ already_pending:
return 0;
submit_failed:
+ spin_lock(&cookie->stores_lock);
radix_tree_delete(&cookie->stores, page->index);
+ spin_unlock(&cookie->stores_lock);
page_cache_release(page);
ret = -ENOBUFS;
goto nobufs;
@@ -763,7 +936,9 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
if (TestClearPageFsCache(page) &&
object->cache->ops->uncache_page) {
/* the cache backend releases the cookie lock */
+ fscache_stat(&fscache_n_cop_uncache_page);
object->cache->ops->uncache_page(object, page);
+ fscache_stat_d(&fscache_n_cop_uncache_page);
goto done;
}
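A pattern repeated through the page.c hunks above is bracketing every call into the cache backend with fscache_stat()/fscache_stat_d(). Assuming these are the atomic increment/decrement wrappers their pairing suggests, each fscache_n_cop_* counter acts as a gauge of backend calls currently in flight rather than a cumulative total:

	/* sketch: in-flight gauge around a backend call (as used above) */
	fscache_stat(&fscache_n_cop_write_page);	/* inc on entry */
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);	/* dec on exit */

A counter that never returns to zero then points at an unbalanced bracket, which is presumably why the CacheOp lines added to stats.c below print these with %d rather than %u.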
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index beeab44bc31..1d9e4951a59 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -37,10 +37,20 @@ int __init fscache_proc_init(void)
goto error_histogram;
#endif
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+ if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
+ &fscache_objlist_fops))
+ goto error_objects;
+#endif
+
_leave(" = 0");
return 0;
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+error_objects:
+#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
+ remove_proc_entry("fs/fscache/histogram", NULL);
error_histogram:
#endif
#ifdef CONFIG_FSCACHE_STATS
@@ -58,6 +68,9 @@ error_dir:
*/
void fscache_proc_cleanup(void)
{
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+ remove_proc_entry("fs/fscache/objects", NULL);
+#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
remove_proc_entry("fs/fscache/histogram", NULL);
#endif
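The hunk above extends the usual conditional unwind ladder in fscache_proc_init(): each optional proc file gets an error label that falls through into the removal of everything registered before it, so a failure at any step unwinds exactly the earlier steps. Condensed to its shape, with the names from this hunk:

	#ifdef CONFIG_FSCACHE_OBJECT_LIST
		if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
				 &fscache_objlist_fops))
			goto error_objects;
	#endif
		return 0;

	#ifdef CONFIG_FSCACHE_OBJECT_LIST
	error_objects:
	#endif
	#ifdef CONFIG_FSCACHE_HISTOGRAM
		remove_proc_entry("fs/fscache/histogram", NULL);
	error_histogram:
	#endif
		/* ... and so on down to error_dir */

This is also why the hunk adds the remove_proc_entry("fs/fscache/histogram", ...) call: without it, a failure creating the objects file would leak the histogram entry.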
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 65deb99e756..46435f3aae6 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -25,6 +25,8 @@ atomic_t fscache_n_op_requeue;
atomic_t fscache_n_op_deferred_release;
atomic_t fscache_n_op_release;
atomic_t fscache_n_op_gc;
+atomic_t fscache_n_op_cancelled;
+atomic_t fscache_n_op_rejected;
atomic_t fscache_n_attr_changed;
atomic_t fscache_n_attr_changed_ok;
@@ -36,6 +38,8 @@ atomic_t fscache_n_allocs;
atomic_t fscache_n_allocs_ok;
atomic_t fscache_n_allocs_wait;
atomic_t fscache_n_allocs_nobufs;
+atomic_t fscache_n_allocs_intr;
+atomic_t fscache_n_allocs_object_dead;
atomic_t fscache_n_alloc_ops;
atomic_t fscache_n_alloc_op_waits;
@@ -46,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata;
atomic_t fscache_n_retrievals_nobufs;
atomic_t fscache_n_retrievals_intr;
atomic_t fscache_n_retrievals_nomem;
+atomic_t fscache_n_retrievals_object_dead;
atomic_t fscache_n_retrieval_ops;
atomic_t fscache_n_retrieval_op_waits;
@@ -56,6 +61,14 @@ atomic_t fscache_n_stores_nobufs;
atomic_t fscache_n_stores_oom;
atomic_t fscache_n_store_ops;
atomic_t fscache_n_store_calls;
+atomic_t fscache_n_store_pages;
+atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_pages_over_limit;
+
+atomic_t fscache_n_store_vmscan_not_storing;
+atomic_t fscache_n_store_vmscan_gone;
+atomic_t fscache_n_store_vmscan_busy;
+atomic_t fscache_n_store_vmscan_cancelled;
atomic_t fscache_n_marks;
atomic_t fscache_n_uncaches;
@@ -74,6 +87,7 @@ atomic_t fscache_n_updates_run;
atomic_t fscache_n_relinquishes;
atomic_t fscache_n_relinquishes_null;
atomic_t fscache_n_relinquishes_waitcrt;
+atomic_t fscache_n_relinquishes_retire;
atomic_t fscache_n_cookie_index;
atomic_t fscache_n_cookie_data;
@@ -84,6 +98,7 @@ atomic_t fscache_n_object_no_alloc;
atomic_t fscache_n_object_lookups;
atomic_t fscache_n_object_lookups_negative;
atomic_t fscache_n_object_lookups_positive;
+atomic_t fscache_n_object_lookups_timed_out;
atomic_t fscache_n_object_created;
atomic_t fscache_n_object_avail;
atomic_t fscache_n_object_dead;
@@ -93,6 +108,23 @@ atomic_t fscache_n_checkaux_okay;
atomic_t fscache_n_checkaux_update;
atomic_t fscache_n_checkaux_obsolete;
+atomic_t fscache_n_cop_alloc_object;
+atomic_t fscache_n_cop_lookup_object;
+atomic_t fscache_n_cop_lookup_complete;
+atomic_t fscache_n_cop_grab_object;
+atomic_t fscache_n_cop_update_object;
+atomic_t fscache_n_cop_drop_object;
+atomic_t fscache_n_cop_put_object;
+atomic_t fscache_n_cop_sync_cache;
+atomic_t fscache_n_cop_attr_changed;
+atomic_t fscache_n_cop_read_or_alloc_page;
+atomic_t fscache_n_cop_read_or_alloc_pages;
+atomic_t fscache_n_cop_allocate_page;
+atomic_t fscache_n_cop_allocate_pages;
+atomic_t fscache_n_cop_write_page;
+atomic_t fscache_n_cop_uncache_page;
+atomic_t fscache_n_cop_dissociate_pages;
+
/*
* display the general statistics
*/
@@ -129,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_acquires_nobufs),
atomic_read(&fscache_n_acquires_oom));
- seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n",
+ seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
atomic_read(&fscache_n_object_lookups),
atomic_read(&fscache_n_object_lookups_negative),
atomic_read(&fscache_n_object_lookups_positive),
- atomic_read(&fscache_n_object_created));
+ atomic_read(&fscache_n_object_created),
+ atomic_read(&fscache_n_object_lookups_timed_out));
seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
@@ -140,10 +173,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_updates_null),
atomic_read(&fscache_n_updates_run));
- seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n",
+ seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
atomic_read(&fscache_n_relinquishes),
atomic_read(&fscache_n_relinquishes_null),
- atomic_read(&fscache_n_relinquishes_waitcrt));
+ atomic_read(&fscache_n_relinquishes_waitcrt),
+ atomic_read(&fscache_n_relinquishes_retire));
seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
atomic_read(&fscache_n_attr_changed),
@@ -152,14 +186,16 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_attr_changed_nomem),
atomic_read(&fscache_n_attr_changed_calls));
- seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n",
+ seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
atomic_read(&fscache_n_allocs),
atomic_read(&fscache_n_allocs_ok),
atomic_read(&fscache_n_allocs_wait),
- atomic_read(&fscache_n_allocs_nobufs));
- seq_printf(m, "Allocs : ops=%u owt=%u\n",
+ atomic_read(&fscache_n_allocs_nobufs),
+ atomic_read(&fscache_n_allocs_intr));
+ seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
atomic_read(&fscache_n_alloc_ops),
- atomic_read(&fscache_n_alloc_op_waits));
+ atomic_read(&fscache_n_alloc_op_waits),
+ atomic_read(&fscache_n_allocs_object_dead));
seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
" int=%u oom=%u\n",
@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_retrievals_nobufs),
atomic_read(&fscache_n_retrievals_intr),
atomic_read(&fscache_n_retrievals_nomem));
- seq_printf(m, "Retrvls: ops=%u owt=%u\n",
+ seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
atomic_read(&fscache_n_retrieval_ops),
- atomic_read(&fscache_n_retrieval_op_waits));
+ atomic_read(&fscache_n_retrieval_op_waits),
+ atomic_read(&fscache_n_retrievals_object_dead));
seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
atomic_read(&fscache_n_stores),
@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v)
atomic_read(&fscache_n_stores_again),
atomic_read(&fscache_n_stores_nobufs),
atomic_read(&fscache_n_stores_oom));
- seq_printf(m, "Stores : ops=%u run=%u\n",
+ seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
atomic_read(&fscache_n_store_ops),
- atomic_read(&fscache_n_store_calls));
+ atomic_read(&fscache_n_store_calls),
+ atomic_read(&fscache_n_store_pages),
+ atomic_read(&fscache_n_store_radix_deletes),
+ atomic_read(&fscache_n_store_pages_over_limit));
- seq_printf(m, "Ops : pend=%u run=%u enq=%u\n",
+ seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+ atomic_read(&fscache_n_store_vmscan_not_storing),
+ atomic_read(&fscache_n_store_vmscan_gone),
+ atomic_read(&fscache_n_store_vmscan_busy),
+ atomic_read(&fscache_n_store_vmscan_cancelled));
+
+ seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
atomic_read(&fscache_n_op_pend),
atomic_read(&fscache_n_op_run),
- atomic_read(&fscache_n_op_enqueue));
+ atomic_read(&fscache_n_op_enqueue),
+ atomic_read(&fscache_n_op_cancelled),
+ atomic_read(&fscache_n_op_rejected));
seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
atomic_read(&fscache_n_op_deferred_release),
atomic_read(&fscache_n_op_release),
atomic_read(&fscache_n_op_gc));
+
+ seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+ atomic_read(&fscache_n_cop_alloc_object),
+ atomic_read(&fscache_n_cop_lookup_object),
+ atomic_read(&fscache_n_cop_lookup_complete),
+ atomic_read(&fscache_n_cop_grab_object));
+ seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+ atomic_read(&fscache_n_cop_update_object),
+ atomic_read(&fscache_n_cop_drop_object),
+ atomic_read(&fscache_n_cop_put_object),
+ atomic_read(&fscache_n_cop_attr_changed),
+ atomic_read(&fscache_n_cop_sync_cache));
+ seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
+ atomic_read(&fscache_n_cop_read_or_alloc_page),
+ atomic_read(&fscache_n_cop_read_or_alloc_pages),
+ atomic_read(&fscache_n_cop_allocate_page),
+ atomic_read(&fscache_n_cop_allocate_pages),
+ atomic_read(&fscache_n_cop_write_page),
+ atomic_read(&fscache_n_cop_uncache_page),
+ atomic_read(&fscache_n_cop_dissociate_pages));
return 0;
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8ada78aade5..4787ae6c5c1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -385,6 +385,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
if (fc->no_create)
return -ENOSYS;
+ if (flags & O_DIRECT)
+ return -EINVAL;
+
forget_req = fuse_get_req(fc);
if (IS_ERR(forget_req))
return PTR_ERR(forget_req);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 5971359d209..4dcddf83326 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -8,6 +8,8 @@ config GFS2_FS
select FS_POSIX_ACL
select CRC32
select SLOW_WORK
+ select QUOTA
+ select QUOTACTL
help
A cluster filesystem.
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3fc4e3ac7d8..3eb1ea84617 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
+#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/gfs2_ondisk.h>
@@ -26,108 +27,44 @@
#include "trans.h"
#include "util.h"
-#define ACL_ACCESS 1
-#define ACL_DEFAULT 0
-
-int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
- struct gfs2_ea_request *er, int *remove, mode_t *mode)
+static const char *gfs2_acl_name(int type)
{
- struct posix_acl *acl;
- int error;
-
- error = gfs2_acl_validate_remove(ip, access);
- if (error)
- return error;
-
- if (!er->er_data)
- return -EINVAL;
-
- acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
- if (!acl) {
- *remove = 1;
- return 0;
- }
-
- error = posix_acl_valid(acl);
- if (error)
- goto out;
-
- if (access) {
- error = posix_acl_equiv_mode(acl, mode);
- if (!error)
- *remove = 1;
- else if (error > 0)
- error = 0;
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ return GFS2_POSIX_ACL_ACCESS;
+ case ACL_TYPE_DEFAULT:
+ return GFS2_POSIX_ACL_DEFAULT;
}
-
-out:
- posix_acl_release(acl);
- return error;
-}
-
-int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
-{
- if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
- return -EOPNOTSUPP;
- if (!is_owner_or_cap(&ip->i_inode))
- return -EPERM;
- if (S_ISLNK(ip->i_inode.i_mode))
- return -EOPNOTSUPP;
- if (!access && !S_ISDIR(ip->i_inode.i_mode))
- return -EACCES;
-
- return 0;
+ return NULL;
}
-static int acl_get(struct gfs2_inode *ip, const char *name,
- struct posix_acl **acl, struct gfs2_ea_location *el,
- char **datap, unsigned int *lenp)
+static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
{
+ struct posix_acl *acl;
+ const char *name;
char *data;
- unsigned int len;
- int error;
-
- el->el_bh = NULL;
+ int len;
if (!ip->i_eattr)
- return 0;
-
- error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, el);
- if (error)
- return error;
- if (!el->el_ea)
- return 0;
- if (!GFS2_EA_DATA_LEN(el->el_ea))
- goto out;
+ return NULL;
- len = GFS2_EA_DATA_LEN(el->el_ea);
- data = kmalloc(len, GFP_NOFS);
- error = -ENOMEM;
- if (!data)
- goto out;
+ acl = get_cached_acl(&ip->i_inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
- error = gfs2_ea_get_copy(ip, el, data, len);
- if (error < 0)
- goto out_kfree;
- error = 0;
+ name = gfs2_acl_name(type);
+ if (name == NULL)
+ return ERR_PTR(-EINVAL);
- if (acl) {
- *acl = posix_acl_from_xattr(data, len);
- if (IS_ERR(*acl))
- error = PTR_ERR(*acl);
- }
+ len = gfs2_xattr_acl_get(ip, name, &data);
+ if (len < 0)
+ return ERR_PTR(len);
+ if (len == 0)
+ return NULL;
-out_kfree:
- if (error || !datap) {
- kfree(data);
- } else {
- *datap = data;
- *lenp = len;
- }
-out:
- return error;
+ acl = posix_acl_from_xattr(data, len);
+ kfree(data);
+ return acl;
}
/**
@@ -140,14 +77,12 @@ out:
int gfs2_check_acl(struct inode *inode, int mask)
{
- struct gfs2_ea_location el;
- struct posix_acl *acl = NULL;
+ struct posix_acl *acl;
int error;
- error = acl_get(GFS2_I(inode), GFS2_POSIX_ACL_ACCESS, &acl, &el, NULL, NULL);
- brelse(el.el_bh);
- if (error)
- return error;
+ acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
if (acl) {
error = posix_acl_permission(inode, acl, mask);
@@ -158,57 +93,75 @@ int gfs2_check_acl(struct inode *inode, int mask)
return -EAGAIN;
}
-static int munge_mode(struct gfs2_inode *ip, mode_t mode)
+static int gfs2_set_mode(struct inode *inode, mode_t mode)
{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct buffer_head *dibh;
- int error;
+ int error = 0;
- error = gfs2_trans_begin(sdp, RES_DINODE, 0);
- if (error)
- return error;
+ if (mode != inode->i_mode) {
+ struct iattr iattr;
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (!error) {
- gfs2_assert_withdraw(sdp,
- (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT));
- ip->i_inode.i_mode = mode;
- gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
+ iattr.ia_valid = ATTR_MODE;
+ iattr.ia_mode = mode;
+
+ error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
}
- gfs2_trans_end(sdp);
+ return error;
+}
+
+static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
+{
+ int error;
+ int len;
+ char *data;
+ const char *name = gfs2_acl_name(type);
- return 0;
+ BUG_ON(name == NULL);
+ len = posix_acl_to_xattr(acl, NULL, 0);
+ if (len == 0)
+ return 0;
+ data = kmalloc(len, GFP_NOFS);
+ if (data == NULL)
+ return -ENOMEM;
+ error = posix_acl_to_xattr(acl, data, len);
+ if (error < 0)
+ goto out;
+ error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
+ if (!error)
+ set_cached_acl(inode, type, acl);
+out:
+ kfree(data);
+ return error;
}
-int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
+int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
{
- struct gfs2_ea_location el;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct posix_acl *acl = NULL, *clone;
- mode_t mode = ip->i_inode.i_mode;
- char *data = NULL;
- unsigned int len;
- int error;
+ struct posix_acl *acl, *clone;
+ mode_t mode = inode->i_mode;
+ int error = 0;
if (!sdp->sd_args.ar_posix_acl)
return 0;
- if (S_ISLNK(ip->i_inode.i_mode))
+ if (S_ISLNK(inode->i_mode))
return 0;
- error = acl_get(dip, GFS2_POSIX_ACL_DEFAULT, &acl, &el, &data, &len);
- brelse(el.el_bh);
- if (error)
- return error;
+ acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
if (!acl) {
mode &= ~current_umask();
- if (mode != ip->i_inode.i_mode)
- error = munge_mode(ip, mode);
+ if (mode != inode->i_mode)
+ error = gfs2_set_mode(inode, mode);
return error;
}
+ if (S_ISDIR(inode->i_mode)) {
+ error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl);
+ if (error)
+ goto out;
+ }
+
clone = posix_acl_clone(acl, GFP_NOFS);
error = -ENOMEM;
if (!clone)
@@ -216,43 +169,32 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
posix_acl_release(acl);
acl = clone;
- if (S_ISDIR(ip->i_inode.i_mode)) {
- error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
- GFS2_POSIX_ACL_DEFAULT, data, len, 0);
- if (error)
- goto out;
- }
-
error = posix_acl_create_masq(acl, &mode);
if (error < 0)
goto out;
if (error == 0)
goto munge;
- posix_acl_to_xattr(acl, data, len);
- error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
- GFS2_POSIX_ACL_ACCESS, data, len, 0);
+ error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl);
if (error)
goto out;
munge:
- error = munge_mode(ip, mode);
+ error = gfs2_set_mode(inode, mode);
out:
posix_acl_release(acl);
- kfree(data);
return error;
}
int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
{
- struct posix_acl *acl = NULL, *clone;
- struct gfs2_ea_location el;
+ struct posix_acl *acl, *clone;
char *data;
unsigned int len;
int error;
- error = acl_get(ip, GFS2_POSIX_ACL_ACCESS, &acl, &el, &data, &len);
- if (error)
- goto out_brelse;
+ acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
if (!acl)
return gfs2_setattr_simple(ip, attr);
@@ -265,15 +207,134 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
error = posix_acl_chmod_masq(acl, attr->ia_mode);
if (!error) {
+ len = posix_acl_to_xattr(acl, NULL, 0);
+ data = kmalloc(len, GFP_NOFS);
+ error = -ENOMEM;
+ if (data == NULL)
+ goto out;
posix_acl_to_xattr(acl, data, len);
- error = gfs2_ea_acl_chmod(ip, &el, attr, data);
+ error = gfs2_xattr_acl_chmod(ip, attr, data);
+ kfree(data);
+ set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
}
out:
posix_acl_release(acl);
- kfree(data);
-out_brelse:
- brelse(el.el_bh);
return error;
}
+static int gfs2_acl_type(const char *name)
+{
+ if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
+ return ACL_TYPE_ACCESS;
+ if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
+ return ACL_TYPE_DEFAULT;
+ return -EINVAL;
+}
+
+static int gfs2_xattr_system_get(struct inode *inode, const char *name,
+ void *buffer, size_t size)
+{
+ struct posix_acl *acl;
+ int type;
+ int error;
+
+ type = gfs2_acl_type(name);
+ if (type < 0)
+ return type;
+
+ acl = gfs2_acl_get(GFS2_I(inode), type);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl == NULL)
+ return -ENODATA;
+
+ error = posix_acl_to_xattr(acl, buffer, size);
+ posix_acl_release(acl);
+
+ return error;
+}
+
+static int gfs2_xattr_system_set(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct posix_acl *acl = NULL;
+ int error = 0, type;
+
+ if (!sdp->sd_args.ar_posix_acl)
+ return -EOPNOTSUPP;
+
+ type = gfs2_acl_type(name);
+ if (type < 0)
+ return type;
+ if (flags & XATTR_CREATE)
+ return -EINVAL;
+ if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ return value ? -EACCES : 0;
+ if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
+ return -EPERM;
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
+ if (!value)
+ goto set_acl;
+
+ acl = posix_acl_from_xattr(value, size);
+ if (!acl) {
+ /*
+ * acl_set_file(3) may request that we set default ACLs with
+ * zero length -- defend (gracefully) against that here.
+ */
+ goto out;
+ }
+ if (IS_ERR(acl)) {
+ error = PTR_ERR(acl);
+ goto out;
+ }
+
+ error = posix_acl_valid(acl);
+ if (error)
+ goto out_release;
+
+ error = -EINVAL;
+ if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
+ goto out_release;
+
+ if (type == ACL_TYPE_ACCESS) {
+ mode_t mode = inode->i_mode;
+ error = posix_acl_equiv_mode(acl, &mode);
+
+ if (error <= 0) {
+ posix_acl_release(acl);
+ acl = NULL;
+
+ if (error < 0)
+ return error;
+ }
+
+ error = gfs2_set_mode(inode, mode);
+ if (error)
+ goto out_release;
+ }
+
+set_acl:
+ error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
+ if (!error) {
+ if (acl)
+ set_cached_acl(inode, type, acl);
+ else
+ forget_cached_acl(inode, type);
+ }
+out_release:
+ posix_acl_release(acl);
+out:
+ return error;
+}
+
+struct xattr_handler gfs2_xattr_system_handler = {
+ .prefix = XATTR_SYSTEM_PREFIX,
+ .get = gfs2_xattr_system_get,
+ .set = gfs2_xattr_system_set,
+};
+
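gfs2_xattr_system_handler plugs the ACL code above into the generic xattr dispatch: the VFS matches the "system." prefix (XATTR_SYSTEM_PREFIX) and routes getxattr/setxattr on system.posix_acl_access and system.posix_acl_default to the functions defined here. A sketch of the wiring, assuming a handler table shaped like this (the sibling handlers are assumptions; only the mount-time assignment appears as a context line in the ops_fstype.c hunk below):

	struct xattr_handler *gfs2_xattr_handlers[] = {
		&gfs2_xattr_user_handler,	/* assumed, not in this diff */
		&gfs2_xattr_security_handler,	/* assumed, not in this diff */
		&gfs2_xattr_system_handler,	/* defined in this hunk */
		NULL,
	};

	/* at mount time: sb->s_xattr = gfs2_xattr_handlers; */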
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 6751930bfb6..9306a2e6620 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -13,26 +13,12 @@
#include "incore.h"
#define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
-#define GFS2_POSIX_ACL_ACCESS_LEN 16
#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
-#define GFS2_POSIX_ACL_DEFAULT_LEN 17
+#define GFS2_ACL_MAX_ENTRIES 25
-#define GFS2_ACL_IS_ACCESS(name, len) \
- ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
- !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))
-
-#define GFS2_ACL_IS_DEFAULT(name, len) \
- ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
- !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
-
-struct gfs2_ea_request;
-
-int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
- struct gfs2_ea_request *er,
- int *remove, mode_t *mode);
-int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
-int gfs2_check_acl(struct inode *inode, int mask);
-int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
-int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
+extern int gfs2_check_acl(struct inode *inode, int mask);
+extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
+extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
+extern struct xattr_handler gfs2_xattr_system_handler;
#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 694b5d48f03..7b8da941526 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
- struct backing_dev_info *bdi = mapping->backing_dev_info;
int i;
int ret;
@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
if (ret || (--(wbc->nr_to_write) <= 0))
ret = 1;
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- ret = 1;
- }
-
}
gfs2_trans_end(sdp);
return ret;
@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
static int gfs2_write_cache_jdata(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
int ret = 0;
int done = 0;
struct pagevec pvec;
@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
int scanned = 0;
int range_whole = 0;
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- return 0;
- }
-
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
mark_inode_dirty(inode);
}
- if (inode == sdp->sd_rindex)
+ if (inode == sdp->sd_rindex) {
adjust_fs_space(inode);
+ ip->i_gh.gh_flags |= GL_NOCACHE;
+ }
brelse(dibh);
gfs2_trans_end(sdp);
@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
mark_inode_dirty(inode);
}
- if (inode == sdp->sd_rindex)
+ if (inode == sdp->sd_rindex) {
adjust_fs_space(inode);
+ ip->i_gh.gh_flags |= GL_NOCACHE;
+ }
brelse(dibh);
gfs2_trans_end(sdp);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 297d7e5ceba..25fddc100f1 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -525,38 +525,6 @@ consist_inode:
return ERR_PTR(-EIO);
}
-
-/**
- * dirent_first - Return the first dirent
- * @dip: the directory
- * @bh: The buffer
- * @dent: Pointer to list of dirents
- *
- * return first dirent whether bh points to leaf or stuffed dinode
- *
- * Returns: IS_LEAF, IS_DINODE, or -errno
- */
-
-static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
- struct gfs2_dirent **dent)
-{
- struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
-
- if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
- if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
- return -EIO;
- *dent = (struct gfs2_dirent *)(bh->b_data +
- sizeof(struct gfs2_leaf));
- return IS_LEAF;
- } else {
- if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
- return -EIO;
- *dent = (struct gfs2_dirent *)(bh->b_data +
- sizeof(struct gfs2_dinode));
- return IS_DINODE;
- }
-}
-
static int dirent_check_reclen(struct gfs2_inode *dip,
const struct gfs2_dirent *d, const void *end_p)
{
@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
divider = (start + half_len) << (32 - dip->i_depth);
/* Copy the entries */
- dirent_first(dip, obh, &dent);
+ dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));
do {
next = dent;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8b674b1f3a5..f455a03a09e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
int rv = 0;
write_lock(gl_lock_addr(gl->gl_hash));
- if (atomic_dec_and_test(&gl->gl_ref)) {
+ if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
hlist_del(&gl->gl_list);
- write_unlock(gl_lock_addr(gl->gl_hash));
- spin_lock(&lru_lock);
if (!list_empty(&gl->gl_lru)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
}
spin_unlock(&lru_lock);
+ write_unlock(gl_lock_addr(gl->gl_hash));
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
glock_free(gl);
rv = 1;
@@ -513,7 +512,6 @@ retry:
GLOCK_BUG_ON(gl, 1);
}
spin_unlock(&gl->gl_spin);
- gfs2_glock_put(gl);
return;
}
@@ -524,8 +522,6 @@ retry:
if (glops->go_xmote_bh) {
spin_unlock(&gl->gl_spin);
rv = glops->go_xmote_bh(gl, gh);
- if (rv == -EAGAIN)
- return;
spin_lock(&gl->gl_spin);
if (rv) {
do_error(gl, rv);
@@ -540,7 +536,6 @@ out:
clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
spin_unlock(&gl->gl_spin);
- gfs2_glock_put(gl);
}
static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
if (!(ret & LM_OUT_ASYNC)) {
finish_xmote(gl, ret);
- gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
} else {
@@ -672,12 +666,17 @@ out:
return;
out_sched:
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ smp_mb__after_clear_bit();
gfs2_glock_hold(gl);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl);
+ return;
+
out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
- goto out;
+ smp_mb__after_clear_bit();
+ return;
}
static void delete_work_func(struct work_struct *work)
@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
{
unsigned long delay = 0;
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+ int drop_ref = 0;
- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+ if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
finish_xmote(gl, gl->gl_reply);
+ drop_ref = 1;
+ }
down_read(&gfs2_umount_flush_sem);
spin_lock(&gl->gl_spin);
if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
if (!delay ||
queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
+ if (drop_ref)
+ gfs2_glock_put(gl);
}
/**
@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
- /* Check if glock is about to be freed */
- if (atomic_read(&gl->gl_ref) == 0)
- continue;
-
/* Test for being demotable */
if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
gfs2_glock_hold(gl);
@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
handle_callback(gl, LM_ST_UNLOCKED, 0);
nr--;
}
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ smp_mb__after_clear_bit();
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put_nolock(gl);
spin_unlock(&gl->gl_spin);
- clear_bit(GLF_LOCK, &gl->gl_flags);
spin_lock(&lru_lock);
continue;
}
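The gfs2_glock_put() hunk swaps a decrement-then-relock sequence for atomic_dec_and_lock(), which takes lru_lock atomically with the final decrement to zero; a concurrent user can therefore no longer observe a zero-ref glock still sitting on the LRU, which is also why the shrinker's "about to be freed" recheck could be deleted further down. A minimal sketch of the primitive, with hypothetical obj/free_obj names:

	struct obj {
		atomic_t ref;
		struct list_head lru;	/* guarded by lru_lock */
	};
	static DEFINE_SPINLOCK(lru_lock);

	static void put_obj(struct obj *o)
	{
		/* returns true only when the count hits zero, and then
		 * lru_lock is already held across that transition */
		if (atomic_dec_and_lock(&o->ref, &lru_lock)) {
			if (!list_empty(&o->lru))
				list_del_init(&o->lru);
			spin_unlock(&lru_lock);
			free_obj(o);	/* hypothetical destructor */
		}
	}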
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c609894ec0d..13f0bd22813 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
return gl->gl_state == LM_ST_SHARED;
}
-static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
-{
- int ret;
- spin_lock(&gl->gl_spin);
- ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
- return ret;
-}
-
int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 6985eef06c3..78554acc060 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
+#include <linux/posix_acl.h>
#include "gfs2.h"
#include "incore.h"
@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
if (flags & DIO_METADATA) {
struct address_space *mapping = gl->gl_aspace->i_mapping;
truncate_inode_pages(mapping, 0);
- if (ip)
+ if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
+ forget_all_cached_acls(&ip->i_inode);
+ }
}
if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6edb423f90b..4792200978c 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -429,7 +429,11 @@ struct gfs2_args {
unsigned int ar_meta:1; /* mount metafs */
unsigned int ar_discard:1; /* discard requests */
unsigned int ar_errors:2; /* errors=withdraw | panic */
+ unsigned int ar_nobarrier:1; /* do not send barriers */
int ar_commit; /* Commit interval */
+ int ar_statfs_quantum; /* The fast statfs interval */
+ int ar_quota_quantum; /* The quota interval */
+ int ar_statfs_percent; /* The % change to force sync */
};
struct gfs2_tune {
@@ -558,6 +562,7 @@ struct gfs2_sbd {
spinlock_t sd_statfs_spin;
struct gfs2_statfs_change_host sd_statfs_master;
struct gfs2_statfs_change_host sd_statfs_local;
+ int sd_statfs_force_sync;
/* Resource group stuff */
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fb15d3b1f40..26ba2a4c4a2 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
if (error)
goto fail_gunlock2;
- error = gfs2_acl_create(dip, GFS2_I(inode));
+ error = gfs2_acl_create(dip, inode);
if (error)
goto fail_gunlock2;
@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
- str->di_header.__pad0 = 0;
str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
- str->di_header.__pad1 = 0;
str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 13c6237c5f6..4511b08fc45 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
memset(lh, 0, sizeof(struct gfs2_log_header));
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
+ lh->lh_header.__pad0 = cpu_to_be64(0);
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
+ lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
lh->lh_flags = cpu_to_be32(flags);
lh->lh_tail = cpu_to_be32(tail);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 9969ff062c5..de97632ba32 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
+ struct gfs2_meta_header *mh;
struct gfs2_trans *tr;
lock_buffer(bd->bd_bh);
@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
gfs2_meta_check(sdp, bd->bd_bh);
gfs2_pin(sdp, bd->bd_bh);
+ mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
+ mh->__pad0 = cpu_to_be64(0);
+ mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
sdp->sd_log_num_buf++;
list_add(&le->le_list, &sdp->sd_log_le_buf);
tr->tr_num_buf_new++;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index eacd78a5d08..5b31f7741a8 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
if (error)
goto fail_unregister;
- error = slow_work_register_user();
+ error = slow_work_register_user(THIS_MODULE);
if (error)
goto fail_slow;
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
- slow_work_unregister_user();
+ slow_work_unregister_user(THIS_MODULE);
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 52fb6c04898..edfee24f363 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -18,6 +18,7 @@
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/slow-work.h>
+#include <linux/quotaops.h>
#include "gfs2.h"
#include "incore.h"
@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
- gt->gt_quota_quantum = 60;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18;
gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10;
- gt->gt_statfs_quantum = 30;
- gt->gt_statfs_slow = 0;
}
static struct gfs2_sbd *init_sbd(struct super_block *sb)
@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
* Returns: errno
*/
-static int fill_super(struct super_block *sb, void *data, int silent)
+static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
{
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
return -ENOMEM;
}
-
- sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
- sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
- sdp->sd_args.ar_commit = 60;
- sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT;
-
- error = gfs2_mount_args(sdp, &sdp->sd_args, data);
- if (error) {
- printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
- goto fail;
- }
+ sdp->sd_args = *args;
if (sdp->sd_args.ar_spectator) {
sb->s_flags |= MS_RDONLY;
@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
}
if (sdp->sd_args.ar_posix_acl)
sb->s_flags |= MS_POSIXACL;
+ if (sdp->sd_args.ar_nobarrier)
+ set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
sb->s_export_op = &gfs2_export_ops;
sb->s_xattr = gfs2_xattr_handlers;
+ sb->s_qcop = &gfs2_quotactl_ops;
+ sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
sb->s_time_gran = 1;
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
+ sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
+ if (sdp->sd_args.ar_statfs_quantum) {
+ sdp->sd_tune.gt_statfs_slow = 0;
+ sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
+ } else {
+ sdp->sd_tune.gt_statfs_slow = 1;
+ sdp->sd_tune.gt_statfs_quantum = 30;
+ }
error = init_names(sdp, silent);
if (error)
@@ -1243,18 +1244,127 @@ fail:
return error;
}
-static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data, struct vfsmount *mnt)
+static int set_gfs2_super(struct super_block *s, void *data)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt);
+ s->s_bdev = data;
+ s->s_dev = s->s_bdev->bd_dev;
+
+ /*
+ * We set the bdi here to the queue's backing_dev_info; file systems
+ * can override this in ->fill_super()
+ */
+ s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+ return 0;
}
-static int test_meta_super(struct super_block *s, void *ptr)
+static int test_gfs2_super(struct super_block *s, void *ptr)
{
struct block_device *bdev = ptr;
return (bdev == s->s_bdev);
}
+/**
+ * gfs2_get_sb - Get the GFS2 superblock
+ * @fs_type: The GFS2 filesystem type
+ * @flags: Mount flags
+ * @dev_name: The name of the device
+ * @data: The mount arguments
+ * @mnt: The vfsmnt for this mount
+ *
+ * Q. Why not use get_sb_bdev()?
+ * A. We need to select one of two root directories to mount, independent
+ * of whether this is the initial or a subsequent mount of this sb
+ *
+ * Returns: 0 or -ve on error
+ */
+
+static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ struct block_device *bdev;
+ struct super_block *s;
+ fmode_t mode = FMODE_READ;
+ int error;
+ struct gfs2_args args;
+ struct gfs2_sbd *sdp;
+
+ if (!(flags & MS_RDONLY))
+ mode |= FMODE_WRITE;
+
+ bdev = open_bdev_exclusive(dev_name, mode, fs_type);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+
+ /*
+ * once the super is inserted into the list by sget, s_umount
+ * will protect the lockfs code from trying to start a snapshot
+ * while we are mounting
+ */
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (bdev->bd_fsfreeze_count > 0) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ error = -EBUSY;
+ goto error_bdev;
+ }
+ s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ error = PTR_ERR(s);
+ if (IS_ERR(s))
+ goto error_bdev;
+
+ memset(&args, 0, sizeof(args));
+ args.ar_quota = GFS2_QUOTA_DEFAULT;
+ args.ar_data = GFS2_DATA_DEFAULT;
+ args.ar_commit = 60;
+ args.ar_statfs_quantum = 30;
+ args.ar_quota_quantum = 60;
+ args.ar_errors = GFS2_ERRORS_DEFAULT;
+
+ error = gfs2_mount_args(&args, data);
+ if (error) {
+ printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
+ if (s->s_root)
+ goto error_super;
+ deactivate_locked_super(s);
+ return error;
+ }
+
+ if (s->s_root) {
+ error = -EBUSY;
+ if ((flags ^ s->s_flags) & MS_RDONLY)
+ goto error_super;
+ close_bdev_exclusive(bdev, mode);
+ } else {
+ char b[BDEVNAME_SIZE];
+
+ s->s_flags = flags;
+ s->s_mode = mode;
+ strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
+ sb_set_blocksize(s, block_size(bdev));
+ error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
+ if (error) {
+ deactivate_locked_super(s);
+ return error;
+ }
+ s->s_flags |= MS_ACTIVE;
+ bdev->bd_super = s;
+ }
+
+ sdp = s->s_fs_info;
+ mnt->mnt_sb = s;
+ if (args.ar_meta)
+ mnt->mnt_root = dget(sdp->sd_master_dir);
+ else
+ mnt->mnt_root = dget(sdp->sd_root_dir);
+ return 0;
+
+error_super:
+ deactivate_locked_super(s);
+error_bdev:
+ close_bdev_exclusive(bdev, mode);
+ return error;
+}
+
static int set_meta_super(struct super_block *s, void *ptr)
{
return -EINVAL;
@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
dev_name, error);
return error;
}
- s = sget(&gfs2_fs_type, test_meta_super, set_meta_super,
+ s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
path.dentry->d_inode->i_sb->s_bdev);
path_put(&path);
if (IS_ERR(s)) {
printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
return PTR_ERR(s);
}
+ if ((flags ^ s->s_flags) & MS_RDONLY) {
+ deactivate_locked_super(s);
+ return -EBUSY;
+ }
sdp = s->s_fs_info;
mnt->mnt_sb = s;
mnt->mnt_root = dget(sdp->sd_master_dir);
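The rewritten gfs2_get_sb() open-codes what get_sb_bdev() did, for the reason stated in its Q/A comment: the generic helper always roots the mount at sb->s_root, while GFS2 must choose between sd_root_dir and sd_master_dir on every mount, including mounts of an already-active superblock. Parsing the mount arguments before committing to the superblock also lets a second mount be validated up front (the MS_RDONLY mismatch check that returns -EBUSY). For contrast, the entire removed implementation was the one-liner shown in the '-' lines above:

	static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
			       const char *dev_name, void *data,
			       struct vfsmount *mnt)
	{
		return get_sb_bdev(fs_type, flags, dev_name, data,
				   fill_super, mnt);
	}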
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2e9b9326bfc..e3bf6eab875 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -15,7 +15,7 @@
* fuzziness in the current usage value of IDs that are being used on different
* nodes in the cluster simultaneously. So, it is possible for a user on
* multiple nodes to overrun their quota, but that overrun is controllable.
- * Since quota tags are part of transactions, there is no need to a quota check
+ * Since quota tags are part of transactions, there is no need for a quota check
* program to be run on node crashes or anything like that.
*
* There are a couple of knobs that let the administrator manage the quota
@@ -47,6 +47,8 @@
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
#include "gfs2.h"
#include "incore.h"
@@ -65,13 +67,6 @@
#define QUOTA_USER 1
#define QUOTA_GROUP 0
-struct gfs2_quota_host {
- u64 qu_limit;
- u64 qu_warn;
- s64 qu_value;
- u32 qu_ll_next;
-};
-
struct gfs2_quota_change_host {
u64 qc_change;
u32 qc_flags; /* GFS2_QCF_... */
@@ -164,7 +159,7 @@ fail:
return error;
}
-static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
+static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
spin_unlock(&qd_lru_lock);
- if (qd || !create) {
+ if (qd) {
if (new_qd) {
gfs2_glock_put(new_qd->qd_gl);
kmem_cache_free(gfs2_quotad_cachep, new_qd);
@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
qd_put(qd);
}
-static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
+static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
struct gfs2_quota_data **qdp)
{
int error;
- error = qd_get(sdp, user, id, create, qdp);
+ error = qd_get(sdp, user, id, qdp);
if (error)
return error;
@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
- error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
+ error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
if (error)
goto out;
al->al_qd_num++;
qd++;
- error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
+ error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
if (error)
goto out;
al->al_qd_num++;
qd++;
if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
- error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
+ error = qdsb_get(sdp, QUOTA_USER, uid, qd);
if (error)
goto out;
al->al_qd_num++;
@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
}
if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
- error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
+ error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
if (error)
goto out;
al->al_qd_num++;
@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
mutex_unlock(&sdp->sd_quota_mutex);
}
-static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
-{
- const struct gfs2_quota *str = buf;
-
- qu->qu_limit = be64_to_cpu(str->qu_limit);
- qu->qu_warn = be64_to_cpu(str->qu_warn);
- qu->qu_value = be64_to_cpu(str->qu_value);
- qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
-}
-
-static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
-{
- struct gfs2_quota *str = buf;
-
- str->qu_limit = cpu_to_be64(qu->qu_limit);
- str->qu_warn = cpu_to_be64(qu->qu_warn);
- str->qu_value = cpu_to_be64(qu->qu_value);
- str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
- memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
-}
-
/**
- * gfs2_adjust_quota
+ * gfs2_adjust_quota - adjust record of current block usage
+ * @ip: The quota inode
+ * @loc: Offset of the entry in the quota file
+ * @change: The amount of usage change to record
+ * @qd: The quota data
+ * @fdq: The updated limits to record
*
* This function was mostly borrowed from gfs2_block_truncate_page, which was
* in turn mostly borrowed from ext3.
+ *
+ * Returns: 0 or -ve on error
*/
+
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
- s64 change, struct gfs2_quota_data *qd)
+ s64 change, struct gfs2_quota_data *qd,
+ struct fs_disk_quota *fdq)
{
struct inode *inode = &ip->i_inode;
struct address_space *mapping = inode->i_mapping;
unsigned long index = loc >> PAGE_CACHE_SHIFT;
unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
unsigned blocksize, iblock, pos;
- struct buffer_head *bh;
+ struct buffer_head *bh, *dibh;
struct page *page;
void *kaddr;
- char *ptr;
- struct gfs2_quota_host qp;
+ struct gfs2_quota *qp;
s64 value;
int err = -EIO;
+ u64 size;
if (gfs2_is_stuffed(ip))
gfs2_unstuff_dinode(ip, NULL);
@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
gfs2_trans_add_bh(ip->i_gl, bh, 0);
kaddr = kmap_atomic(page, KM_USER0);
- ptr = kaddr + offset;
- gfs2_quota_in(&qp, ptr);
- qp.qu_value += change;
- value = qp.qu_value;
- gfs2_quota_out(&qp, ptr);
+ qp = kaddr + offset;
+ value = (s64)be64_to_cpu(qp->qu_value) + change;
+ qp->qu_value = cpu_to_be64(value);
+ qd->qd_qb.qb_value = qp->qu_value;
+ if (fdq) {
+ if (fdq->d_fieldmask & FS_DQ_BSOFT) {
+ qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
+ qd->qd_qb.qb_warn = qp->qu_warn;
+ }
+ if (fdq->d_fieldmask & FS_DQ_BHARD) {
+ qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
+ qd->qd_qb.qb_limit = qp->qu_limit;
+ }
+ }
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
- err = 0;
- qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
- qd->qd_qb.qb_value = cpu_to_be64(value);
- ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
- ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
+
+ err = gfs2_meta_inode_buffer(ip, &dibh);
+ if (err)
+ goto unlock;
+
+ size = loc + sizeof(struct gfs2_quota);
+ if (size > inode->i_size) {
+ ip->i_disksize = size;
+ i_size_write(inode, size);
+ }
+ inode->i_mtime = inode->i_atime = CURRENT_TIME;
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+ gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
+ mark_inode_dirty(inode);
+
unlock:
unlock_page(page);
page_cache_release(page);
@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
return -ENOMEM;
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
+ mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
for (qx = 0; qx < num_qd; qx++) {
- error = gfs2_glock_nq_init(qda[qx]->qd_gl,
- LM_ST_EXCLUSIVE,
+ error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]);
if (error)
goto out;
@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
for (x = 0; x < num_qd; x++) {
qd = qda[x];
offset = qd2offset(qd);
- error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
- (struct gfs2_quota_data *)
- qd);
+ error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
if (error)
goto out_end_trans;
@@ -817,21 +818,44 @@ out_gunlock:
out:
while (qx--)
gfs2_glock_dq_uninit(&ghs[qx]);
+ mutex_unlock(&ip->i_inode.i_mutex);
kfree(ghs);
gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
return error;
}
+static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
+{
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+ struct gfs2_quota q;
+ struct gfs2_quota_lvb *qlvb;
+ loff_t pos;
+ int error;
+
+ memset(&q, 0, sizeof(struct gfs2_quota));
+ pos = qd2offset(qd);
+ error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
+ if (error < 0)
+ return error;
+
+ qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+ qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
+ qlvb->__pad = 0;
+ qlvb->qb_limit = q.qu_limit;
+ qlvb->qb_warn = q.qu_warn;
+ qlvb->qb_value = q.qu_value;
+ qd->qd_qb = *qlvb;
+
+ return 0;
+}
+
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
struct gfs2_holder *q_gh)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_holder i_gh;
- struct gfs2_quota_host q;
- char buf[sizeof(struct gfs2_quota)];
int error;
- struct gfs2_quota_lvb *qlvb;
restart:
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
@@ -841,11 +865,9 @@ restart:
qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
- loff_t pos;
gfs2_glock_dq_uninit(q_gh);
- error = gfs2_glock_nq_init(qd->qd_gl,
- LM_ST_EXCLUSIVE, GL_NOCACHE,
- q_gh);
+ error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
+ GL_NOCACHE, q_gh);
if (error)
return error;
@@ -853,29 +875,14 @@ restart:
if (error)
goto fail;
- memset(buf, 0, sizeof(struct gfs2_quota));
- pos = qd2offset(qd);
- error = gfs2_internal_read(ip, NULL, buf, &pos,
- sizeof(struct gfs2_quota));
- if (error < 0)
+ error = update_qd(sdp, qd);
+ if (error)
goto fail_gunlock;
gfs2_glock_dq_uninit(&i_gh);
-
- gfs2_quota_in(&q, buf);
- qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
- qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
- qlvb->__pad = 0;
- qlvb->qb_limit = cpu_to_be64(q.qu_limit);
- qlvb->qb_warn = cpu_to_be64(q.qu_warn);
- qlvb->qb_value = cpu_to_be64(q.qu_value);
- qd->qd_qb = *qlvb;
-
- if (gfs2_glock_is_blocking(qd->qd_gl)) {
- gfs2_glock_dq_uninit(q_gh);
- force_refresh = 0;
- goto restart;
- }
+ gfs2_glock_dq_uninit(q_gh);
+ force_refresh = 0;
+ goto restart;
}
return 0;
@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
+ printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
sdp->sd_fsname, type,
(test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
qd->qd_id);
@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
print_message(qd, "exceeded");
+ quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
+ USRQUOTA : GRPQUOTA, qd->qd_id,
+ sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
+
error = -EDQUOT;
break;
} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
time_after_eq(jiffies, qd->qd_last_warn +
gfs2_tune_get(sdp,
gt_quota_warn_period) * HZ)) {
+ quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
+ USRQUOTA : GRPQUOTA, qd->qd_id,
+ sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
error = print_message(qd, "warning");
qd->qd_last_warn = jiffies;
}
@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
}
}
-int gfs2_quota_sync(struct gfs2_sbd *sdp)
+int gfs2_quota_sync(struct super_block *sb, int type)
{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
unsigned int num_qd;
@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
struct gfs2_holder q_gh;
int error;
- error = qd_get(sdp, user, id, CREATE, &qd);
+ error = qd_get(sdp, user, id, &qd);
if (error)
return error;
@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
gfs2_glock_dq_uninit(&q_gh);
qd_put(qd);
-
return error;
}
@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
}
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
- int (*fxn)(struct gfs2_sbd *sdp),
+ int (*fxn)(struct super_block *sb, int type),
unsigned long t, unsigned long *timeo,
unsigned int *new_timeo)
{
if (t >= *timeo) {
- int error = fxn(sdp);
+ int error = fxn(sdp->sd_vfs, 0);
quotad_error(sdp, msg, error);
*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
} else {
@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
}
}
+void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
+ if (!sdp->sd_statfs_force_sync) {
+ sdp->sd_statfs_force_sync = 1;
+ wake_up(&sdp->sd_quota_wait);
+ }
+}
+
+
/**
* gfs2_quotad - Write cached quota changes into the quota file
* @sdp: Pointer to GFS2 superblock
@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data)
while (!kthread_should_stop()) {
/* Update the master statfs file */
- quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
- &statfs_timeo, &tune->gt_statfs_quantum);
+ if (sdp->sd_statfs_force_sync) {
+ int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
+ quotad_error(sdp, "statfs", error);
+ statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
+ }
+ else
+ quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
+ &statfs_timeo,
+ &tune->gt_statfs_quantum);
/* Update quota file */
quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data)
spin_lock(&sdp->sd_trunc_lock);
empty = list_empty(&sdp->sd_trunc_list);
spin_unlock(&sdp->sd_trunc_lock);
- if (empty)
+ if (empty && !sdp->sd_statfs_force_sync)
t -= schedule_timeout(t);
else
t = 0;
@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data)
return 0;
}
+static int gfs2_quota_get_xstate(struct super_block *sb,
+ struct fs_quota_stat *fqs)
+{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+
+ memset(fqs, 0, sizeof(struct fs_quota_stat));
+ fqs->qs_version = FS_QSTAT_VERSION;
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
+ fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
+ else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
+ fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+ if (sdp->sd_quota_inode) {
+ fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
+ fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
+ }
+ fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
+ fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
+ fqs->qs_incoredqs = atomic_read(&qd_lru_count);
+ return 0;
+}
+
+static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *fdq)
+{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+ struct gfs2_quota_lvb *qlvb;
+ struct gfs2_quota_data *qd;
+ struct gfs2_holder q_gh;
+ int error;
+
+ memset(fdq, 0, sizeof(struct fs_disk_quota));
+
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ return -ESRCH; /* Crazy XFS error code */
+
+ if (type == USRQUOTA)
+ type = QUOTA_USER;
+ else if (type == GRPQUOTA)
+ type = QUOTA_GROUP;
+ else
+ return -EINVAL;
+
+ error = qd_get(sdp, type, id, &qd);
+ if (error)
+ return error;
+ error = do_glock(qd, FORCE, &q_gh);
+ if (error)
+ goto out;
+
+ qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+ fdq->d_version = FS_DQUOT_VERSION;
+ fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+ fdq->d_id = id;
+ fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
+ fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
+ fdq->d_bcount = be64_to_cpu(qlvb->qb_value);
+
+ gfs2_glock_dq_uninit(&q_gh);
+out:
+ qd_put(qd);
+ return error;
+}
+
+/* GFS2 only supports a subset of the XFS fields */
+#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
+
+static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
+ struct fs_disk_quota *fdq)
+{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+ struct gfs2_quota_data *qd;
+ struct gfs2_holder q_gh, i_gh;
+ unsigned int data_blocks, ind_blocks;
+ unsigned int blocks = 0;
+ int alloc_required;
+ struct gfs2_alloc *al;
+ loff_t offset;
+ int error;
+
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ return -ESRCH; /* Crazy XFS error code */
+
+ switch(type) {
+ case USRQUOTA:
+ type = QUOTA_USER;
+ if (fdq->d_flags != XFS_USER_QUOTA)
+ return -EINVAL;
+ break;
+ case GRPQUOTA:
+ type = QUOTA_GROUP;
+ if (fdq->d_flags != XFS_GROUP_QUOTA)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
+ return -EINVAL;
+ if (fdq->d_id != id)
+ return -EINVAL;
+
+ error = qd_get(sdp, type, id, &qd);
+ if (error)
+ return error;
+
+ mutex_lock(&ip->i_inode.i_mutex);
+ error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
+ if (error)
+ goto out_put;
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
+ if (error)
+ goto out_q;
+
+ /* Check for existing entry, if none then alloc new blocks */
+ error = update_qd(sdp, qd);
+ if (error)
+ goto out_i;
+
+ /* If nothing has changed, this is a no-op */
+ if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
+ (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
+ fdq->d_fieldmask ^= FS_DQ_BSOFT;
+ if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
+ (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
+ fdq->d_fieldmask ^= FS_DQ_BHARD;
+ if (fdq->d_fieldmask == 0)
+ goto out_i;
+
+ offset = qd2offset(qd);
+ error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
+ &alloc_required);
+ if (error)
+ goto out_i;
+ if (alloc_required) {
+ al = gfs2_alloc_get(ip);
+ if (al == NULL)
+ goto out_i;
+ gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
+ &data_blocks, &ind_blocks);
+ blocks = al->al_requested = 1 + data_blocks + ind_blocks;
+ error = gfs2_inplace_reserve(ip);
+ if (error)
+ goto out_alloc;
+ }
+
+ error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
+ if (error)
+ goto out_release;
+
+ /* Apply changes */
+ error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
+
+ gfs2_trans_end(sdp);
+out_release:
+ if (alloc_required) {
+ gfs2_inplace_release(ip);
+out_alloc:
+ gfs2_alloc_put(ip);
+ }
+out_i:
+ gfs2_glock_dq_uninit(&i_gh);
+out_q:
+ gfs2_glock_dq_uninit(&q_gh);
+out_put:
+ mutex_unlock(&ip->i_inode.i_mutex);
+ qd_put(qd);
+ return error;
+}
+
+const struct quotactl_ops gfs2_quotactl_ops = {
+ .quota_sync = gfs2_quota_sync,
+ .get_xstate = gfs2_quota_get_xstate,
+ .get_xquota = gfs2_xquota_get,
+ .set_xquota = gfs2_xquota_set,
+};
+
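The gfs2_quotactl_ops table above plugs GFS2 into the generic XFS-style quotactl path, so the limits cached in the quota LVB become readable from userspace. A minimal sketch of a caller, assuming a GFS2 filesystem on the hypothetical device /dev/mapper/vg-gfs2 and an existing uid 1000:

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>

int main(void)
{
    struct fs_disk_quota fdq;

    /* Q_XGETQUOTA lands in gfs2_xquota_get(); expect ESRCH with quota=off */
    if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA),
                 "/dev/mapper/vg-gfs2", 1000, (caddr_t)&fdq) < 0) {
        perror("quotactl");
        return 1;
    }
    printf("hard=%llu soft=%llu used=%llu\n",
           (unsigned long long)fdq.d_blk_hardlimit,
           (unsigned long long)fdq.d_blk_softlimit,
           (unsigned long long)fdq.d_bcount);
    return 0;
}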
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 0fa5fa63d0e..e271fa07ad0 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid);
-extern int gfs2_quota_sync(struct gfs2_sbd *sdp);
+extern int gfs2_quota_sync(struct super_block *sb, int type);
extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
extern int gfs2_quota_init(struct gfs2_sbd *sdp);
extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
extern int gfs2_quotad(void *data);
+extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
+
static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
}
extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+extern const struct quotactl_ops gfs2_quotactl_ops;
#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 59d2695509d..4b9bece3d43 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -7,6 +7,7 @@
* of the GNU General Public License version 2.
*/
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
@@ -409,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
memset(lh, 0, sizeof(struct gfs2_log_header));
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
+ lh->lh_header.__pad0 = cpu_to_be64(0);
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
+ lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
lh->lh_blkno = cpu_to_be32(lblock);
@@ -593,6 +596,7 @@ fail:
}
struct slow_work_ops gfs2_recover_ops = {
+ .owner = THIS_MODULE,
.get_ref = gfs2_recover_get_ref,
.put_ref = gfs2_recover_put_ref,
.execute = gfs2_recover_work,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8f1cfb02a6c..0608f490c29 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
struct gfs2_rgrpd *rgd;
struct gfs2_holder ri_gh, rgd_gh;
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
+ int ri_locked = 0;
int error;
- error = gfs2_rindex_hold(sdp, &ri_gh);
- if (error)
- goto fail;
+ if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+ error = gfs2_rindex_hold(sdp, &ri_gh);
+ if (error)
+ goto fail;
+ ri_locked = 1;
+ }
error = -EINVAL;
rgd = gfs2_blk2rgrpd(sdp, no_addr);
@@ -1730,7 +1735,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
gfs2_glock_dq_uninit(&rgd_gh);
fail_rindex:
- gfs2_glock_dq_uninit(&ri_gh);
+ if (ri_locked)
+ gfs2_glock_dq_uninit(&ri_gh);
fail:
return error;
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0ec3ec672de..c282ad41f3d 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -70,6 +70,11 @@ enum {
Opt_commit,
Opt_err_withdraw,
Opt_err_panic,
+ Opt_statfs_quantum,
+ Opt_statfs_percent,
+ Opt_quota_quantum,
+ Opt_barrier,
+ Opt_nobarrier,
Opt_error,
};
@@ -101,18 +106,23 @@ static const match_table_t tokens = {
{Opt_commit, "commit=%d"},
{Opt_err_withdraw, "errors=withdraw"},
{Opt_err_panic, "errors=panic"},
+ {Opt_statfs_quantum, "statfs_quantum=%d"},
+ {Opt_statfs_percent, "statfs_percent=%d"},
+ {Opt_quota_quantum, "quota_quantum=%d"},
+ {Opt_barrier, "barrier"},
+ {Opt_nobarrier, "nobarrier"},
{Opt_error, NULL}
};
/**
* gfs2_mount_args - Parse mount options
- * @sdp:
- * @data:
+ * @args: The structure into which the parsed options will be written
+ * @options: The options to parse
*
* Return: errno
*/
-int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
+int gfs2_mount_args(struct gfs2_args *args, char *options)
{
char *o;
int token;
@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
break;
case Opt_debug:
if (args->ar_errors == GFS2_ERRORS_PANIC) {
- fs_info(sdp, "-o debug and -o errors=panic "
+ printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
"are mutually exclusive.\n");
return -EINVAL;
}
@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
case Opt_commit:
rv = match_int(&tmp[0], &args->ar_commit);
if (rv || args->ar_commit <= 0) {
- fs_info(sdp, "commit mount option requires a positive numeric argument\n");
+ printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
+ return rv ? rv : -EINVAL;
+ }
+ break;
+ case Opt_statfs_quantum:
+ rv = match_int(&tmp[0], &args->ar_statfs_quantum);
+ if (rv || args->ar_statfs_quantum < 0) {
+ printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
+ return rv ? rv : -EINVAL;
+ }
+ break;
+ case Opt_quota_quantum:
+ rv = match_int(&tmp[0], &args->ar_quota_quantum);
+ if (rv || args->ar_quota_quantum <= 0) {
+ printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
+ return rv ? rv : -EINVAL;
+ }
+ break;
+ case Opt_statfs_percent:
+ rv = match_int(&tmp[0], &args->ar_statfs_percent);
+ if (rv || args->ar_statfs_percent < 0 ||
+ args->ar_statfs_percent > 100) {
+ printk(KERN_WARNING "statfs_percent mount option requires a numeric argument between 0 and 100\n");
return rv ? rv : -EINVAL;
}
break;
@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
break;
case Opt_err_panic:
if (args->ar_debug) {
- fs_info(sdp, "-o debug and -o errors=panic "
+ printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
"are mutually exclusive.\n");
return -EINVAL;
}
args->ar_errors = GFS2_ERRORS_PANIC;
break;
+ case Opt_barrier:
+ args->ar_nobarrier = 0;
+ break;
+ case Opt_nobarrier:
+ args->ar_nobarrier = 1;
+ break;
case Opt_error:
default:
- fs_info(sdp, "invalid mount option: %s\n", o);
+ printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
return -EINVAL;
}
}
@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
{
struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+ struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
struct buffer_head *l_bh;
+ s64 x, y;
+ int need_sync = 0;
int error;
error = gfs2_meta_inode_buffer(l_ip, &l_bh);
@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
l_sc->sc_free += free;
l_sc->sc_dinodes += dinodes;
gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
+ if (sdp->sd_args.ar_statfs_percent) {
+ x = 100 * l_sc->sc_free;
+ y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
+ if (x >= y || x <= -y)
+ need_sync = 1;
+ }
spin_unlock(&sdp->sd_statfs_spin);
brelse(l_bh);
+ if (need_sync)
+ gfs2_wake_up_statfs(sdp);
}
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
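A standalone sketch of the statfs_percent trigger in gfs2_statfs_change() above: once the local free-space delta reaches the configured percentage of the master counter, quotad is woken to sync. The values here are made up for illustration:

#include <stdio.h>

int main(void)
{
    long long l_sc_free = -1500;   /* local delta: 1500 blocks consumed */
    long long m_sc_free = 100000;  /* master free-block count */
    int statfs_percent = 1;        /* mounted with -o statfs_percent=1 */

    long long x = 100 * l_sc_free;
    long long y = m_sc_free * statfs_percent;
    int need_sync = (x >= y || x <= -y);

    /* a delta of 1500 blocks is >= 1 percent of 100000, so sync */
    printf("need_sync=%d\n", need_sync);
    return 0;
}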
@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
}
-int gfs2_statfs_sync(struct gfs2_sbd *sdp)
+int gfs2_statfs_sync(struct super_block *sb, int type)
{
+ struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
goto out_bh2;
update_statfs(sdp, m_bh, l_bh);
+ sdp->sd_statfs_force_sync = 0;
gfs2_trans_end(sdp);
@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
int error;
flush_workqueue(gfs2_delete_workqueue);
- gfs2_quota_sync(sdp);
- gfs2_statfs_sync(sdp);
+ gfs2_quota_sync(sdp->sd_vfs, 0);
+ gfs2_statfs_sync(sdp->sd_vfs, 0);
error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
&t_gh);
@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
spin_lock(&gt->gt_spin);
args.ar_commit = gt->gt_log_flush_secs;
+ args.ar_quota_quantum = gt->gt_quota_quantum;
+ if (gt->gt_statfs_slow)
+ args.ar_statfs_quantum = 0;
+ else
+ args.ar_statfs_quantum = gt->gt_statfs_quantum;
spin_unlock(&gt->gt_spin);
- error = gfs2_mount_args(sdp, &args, data);
+ error = gfs2_mount_args(&args, data);
if (error)
return error;
@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
sb->s_flags |= MS_POSIXACL;
else
sb->s_flags &= ~MS_POSIXACL;
+ if (sdp->sd_args.ar_nobarrier)
+ set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
+ else
+ clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
spin_lock(&gt->gt_spin);
gt->gt_log_flush_secs = args.ar_commit;
+ gt->gt_quota_quantum = args.ar_quota_quantum;
+ if (args.ar_statfs_quantum) {
+ gt->gt_statfs_slow = 0;
+ gt->gt_statfs_quantum = args.ar_statfs_quantum;
+ }
+ else {
+ gt->gt_statfs_slow = 1;
+ gt->gt_statfs_quantum = 30;
+ }
spin_unlock(&gt->gt_spin);
gfs2_online_uevent(sdp);
@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{
struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
struct gfs2_args *args = &sdp->sd_args;
- int lfsecs;
+ int val;
if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
seq_printf(s, ",meta");
@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
}
if (args->ar_discard)
seq_printf(s, ",discard");
- lfsecs = sdp->sd_tune.gt_log_flush_secs;
- if (lfsecs != 60)
- seq_printf(s, ",commit=%d", lfsecs);
+ val = sdp->sd_tune.gt_log_flush_secs;
+ if (val != 60)
+ seq_printf(s, ",commit=%d", val);
+ val = sdp->sd_tune.gt_statfs_quantum;
+ if (val != 30)
+ seq_printf(s, ",statfs_quantum=%d", val);
+ val = sdp->sd_tune.gt_quota_quantum;
+ if (val != 60)
+ seq_printf(s, ",quota_quantum=%d", val);
+ if (args->ar_statfs_percent)
+ seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
const char *state;
@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
}
seq_printf(s, ",errors=%s", state);
}
+ if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
+ seq_printf(s, ",nobarrier");
+
return 0;
}
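The new mount options parsed above can be exercised directly through mount(2); the device, mountpoint and option values here are only examples:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
    const char *opts =
        "statfs_quantum=10,quota_quantum=30,statfs_percent=5,nobarrier";

    if (mount("/dev/mapper/vg-gfs2", "/mnt/gfs2", "gfs2", 0, opts) < 0) {
        perror("mount");
        return 1;
    }
    return 0;
}

With those options in effect, gfs2_show_options() above reports the non-default values back through /proc/mounts.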
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 235db368288..3df60f2d84e 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
-extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data);
+extern int gfs2_mount_args(struct gfs2_args *args, char *data);
extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
const void *buf);
extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
struct buffer_head *l_bh);
-extern int gfs2_statfs_sync(struct gfs2_sbd *sdp);
+extern int gfs2_statfs_sync(struct super_block *sb, int type);
extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 446329728d5..c5dad1eb7b9 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL;
- gfs2_statfs_sync(sdp);
+ gfs2_statfs_sync(sdp->sd_vfs, 0);
return len;
}
@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL;
- gfs2_quota_sync(sdp);
+ gfs2_quota_sync(sdp->sd_vfs, 0);
return len;
}
static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
size_t len)
{
+ int error;
u32 id;
if (!capable(CAP_SYS_ADMIN))
@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
id = simple_strtoul(buf, NULL, 0);
- gfs2_quota_refresh(sdp, 1, id);
- return len;
+ error = gfs2_quota_refresh(sdp, 1, id);
+ return error ? error : len;
}
static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
size_t len)
{
+ int error;
u32 id;
if (!capable(CAP_SYS_ADMIN))
@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
id = simple_strtoul(buf, NULL, 0);
- gfs2_quota_refresh(sdp, 0, id);
- return len;
+ error = gfs2_quota_refresh(sdp, 0, id);
+ return error ? error : len;
}
static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8a0f8ef6ee2..912f5cbc474 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -186,8 +186,8 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
return 0;
}
-int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
- struct gfs2_ea_location *el)
+static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
+ struct gfs2_ea_location *el)
{
struct ea_find ef;
int error;
@@ -516,8 +516,8 @@ out:
return error;
}
-int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
- char *data, size_t size)
+static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
+ char *data, size_t size)
{
int ret;
size_t len = GFS2_EA_DATA_LEN(el->el_ea);
@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
return len;
}
+int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
+{
+ struct gfs2_ea_location el;
+ int error;
+ int len;
+ char *data;
+
+ error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
+ if (error)
+ return error;
+ if (!el.el_ea)
+ goto out;
+ if (!GFS2_EA_DATA_LEN(el.el_ea))
+ goto out;
+
+ len = GFS2_EA_DATA_LEN(el.el_ea);
+ data = kmalloc(len, GFP_NOFS);
+ error = -ENOMEM;
+ if (data == NULL)
+ goto out;
+
+ error = gfs2_ea_get_copy(ip, &el, data, len);
+ if (error == 0)
+ error = len;
+ *ppdata = data;
+out:
+ brelse(el.el_bh);
+ return error;
+}
+
/**
* gfs2_xattr_get - Get a GFS2 extended attribute
* @inode: The inode
@@ -1259,22 +1289,26 @@ fail:
return error;
}
-int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
- struct iattr *attr, char *data)
+int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
{
+ struct gfs2_ea_location el;
struct buffer_head *dibh;
int error;
- if (GFS2_EA_IS_STUFFED(el->el_ea)) {
+ error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
+ if (error)
+ return error;
+
+ if (GFS2_EA_IS_STUFFED(el.el_ea)) {
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
if (error)
return error;
- gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
- memcpy(GFS2_EA2DATA(el->el_ea), data,
- GFS2_EA_DATA_LEN(el->el_ea));
+ gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
+ memcpy(GFS2_EA2DATA(el.el_ea), data,
+ GFS2_EA_DATA_LEN(el.el_ea));
} else
- error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);
+ error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
if (error)
return error;
@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
}
-static int gfs2_xattr_system_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
-}
-
-static int gfs2_xattr_system_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
-}
-
static int gfs2_xattr_security_get(struct inode *inode, const char *name,
void *buffer, size_t size)
{
@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
.set = gfs2_xattr_security_set,
};
-static struct xattr_handler gfs2_xattr_system_handler = {
- .prefix = XATTR_SYSTEM_PREFIX,
- .get = gfs2_xattr_system_get,
- .set = gfs2_xattr_system_set,
-};
-
struct xattr_handler *gfs2_xattr_handlers[] = {
&gfs2_xattr_user_handler,
&gfs2_xattr_security_handler,
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index cbdfd774373..8d6ae5813c4 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
/* Exported to acl.c */
-extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
- struct gfs2_ea_location *el);
-extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
- char *data, size_t size);
-extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
- struct iattr *attr, char *data);
+extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
+extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data);
#endif /* __EATTR_DOT_H__ */
diff --git a/fs/inode.c b/fs/inode.c
index 4d8e3be5597..06c1f02de61 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -18,7 +18,6 @@
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
-#include <linux/ima.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
@@ -157,11 +156,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
if (security_inode_alloc(inode))
goto out;
-
- /* allocate and initialize an i_integrity */
- if (ima_inode_alloc(inode))
- goto out_free_security;
-
spin_lock_init(&inode->i_lock);
lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
@@ -201,9 +195,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
#endif
return 0;
-
-out_free_security:
- security_inode_free(inode);
out:
return -ENOMEM;
}
@@ -235,7 +226,6 @@ static struct inode *alloc_inode(struct super_block *sb)
void __destroy_inode(struct inode *inode)
{
BUG_ON(inode_has_buffers(inode));
- ima_inode_free(inode);
security_inode_free(inode);
fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index f25e70c1b51..f0294410868 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -177,7 +177,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
spin_unlock(&jffs2_compressor_list_lock);
break;
default:
- printk(KERN_ERR "JFFS2: unknow compression mode.\n");
+ printk(KERN_ERR "JFFS2: unknown compression mode.\n");
}
out:
if (ret == JFFS2_COMPR_NONE) {
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index cfe05c1966a..3f39be1b045 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -164,12 +164,15 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
/* XXX FIXME: Where a single physical node actually shows up in two
frags, we read it twice. Don't do that. */
- /* Now we're pointing at the first frag which overlaps our page */
+ /* Now we're pointing at the first frag which overlaps our page
+ * (or perhaps is before it, if we've been asked to read off the
+ * end of the file). */
while(offset < end) {
D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
- if (unlikely(!frag || frag->ofs > offset)) {
+ if (unlikely(!frag || frag->ofs > offset ||
+ frag->ofs + frag->size <= offset)) {
uint32_t holesize = end - offset;
- if (frag) {
+ if (frag && frag->ofs > offset) {
D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
holesize = min(holesize, frag->ofs - offset);
}
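A sketch of the hole handling fixed above: when the current frag does not cover offset, the read is satisfied with zeroes up to either the end of the request or the start of the next frag, whichever comes first. Illustrative values only:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned int offset = 4096, end = 8192; /* requested range */
    unsigned int frag_ofs = 6144;           /* next frag starts here */

    unsigned int holesize = end - offset;
    if (frag_ofs > offset)                  /* frag lies ahead: bounded hole */
        holesize = min_u32(holesize, frag_ofs - offset);
    printf("zero-fill %u bytes at offset %u\n", holesize, offset);
    return 0;
}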
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 1a80301004b..378991cfe40 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -931,7 +931,7 @@ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_re
* Helper function for jffs2_get_inode_nodes().
* The function detects whether more data should be read and reads it if yes.
*
- * Returns: 0 on succes;
+ * Returns: 0 on success;
* negative error code on failure.
*/
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 082e844ab2d..4b107881acd 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -31,7 +31,7 @@
* is used to release xattr name/value pair and detach from c->xattrindex.
* reclaim_xattr_datum(c)
* is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
- * memory usage by cache is over c->xdatum_mem_threshold. Currentry, this threshold
+ * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold
* is hard coded as 32KiB.
* do_verify_xattr_datum(c, xd)
* is used to load the xdatum informations without name/value pair from the medium.
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 2bc7d8aa574..d9b031cf69f 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -755,7 +755,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
* allocation group.
*/
if ((blkno & (bmp->db_agsize - 1)) == 0)
- /* check if the AG is currenly being written to.
+ /* check if the AG is currently being written to.
* if so, call dbNextAG() to find a non-busy
* AG with sufficient free space.
*/
@@ -3337,7 +3337,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
for (i = 0, n = 0; i < agno; n++) {
bmp->db_agfree[n] = 0; /* init collection point */
- /* coalesce cotiguous k AGs; */
+ /* coalesce contiguous k AGs; */
for (j = 0; j < k && i < agno; j++, i++) {
/* merge AGi to AGn */
bmp->db_agfree[n] += bmp->db_agfree[i];
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 1a54ae14a19..e50cfa3d965 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -371,82 +371,74 @@ EXPORT_SYMBOL_GPL(lockd_down);
static ctl_table nlm_sysctls[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nlm_grace_period",
.data = &nlm_grace_period,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &proc_doulongvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_grace_period_min,
.extra2 = (unsigned long *) &nlm_grace_period_max,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nlm_timeout",
.data = &nlm_timeout,
.maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = &proc_doulongvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = (unsigned long *) &nlm_timeout_min,
.extra2 = (unsigned long *) &nlm_timeout_max,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nlm_udpport",
.data = &nlm_udpport,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nlm_tcpport",
.data = &nlm_tcpport,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = (int *) &nlm_port_min,
.extra2 = (int *) &nlm_port_max,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nsm_use_hostnames",
.data = &nsm_use_hostnames,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nsm_local_state",
.data = &nsm_local_state,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table nlm_sysctl_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nfs",
.mode = 0555,
.child = nlm_sysctls,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table nlm_sysctl_root[] = {
{
- .ctl_name = CTL_FS,
.procname = "fs",
.mode = 0555,
.child = nlm_sysctl_dir,
},
- { .ctl_name = 0 }
+ { }
};
#endif /* CONFIG_SYSCTL */
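The sysctl entries above lose their binary ctl_name/strategy fields, but the procname-based /proc/sys paths do not change, so existing consumers keep working. A quick reader sketch, assuming lockd is loaded on the running kernel:

#include <stdio.h>

int main(void)
{
    char buf[32];
    FILE *f = fopen("/proc/sys/fs/nfs/nlm_tcpport", "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("nlm_tcpport = %s", buf);
    fclose(f);
    return 0;
}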
diff --git a/fs/namespace.c b/fs/namespace.c
index bdc3cb4fd22..7d70d63ceb2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1921,6 +1921,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;
+ /* ... and get the mountpoint */
+ retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
+ if (retval)
+ return retval;
+
+ retval = security_sb_mount(dev_name, &path,
+ type_page, flags, data_page);
+ if (retval)
+ goto dput_out;
+
/* Default to relatime unless overriden */
if (!(flags & MS_NOATIME))
mnt_flags |= MNT_RELATIME;
@@ -1945,16 +1955,6 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
- /* ... and get the mountpoint */
- retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
- if (retval)
- return retval;
-
- retval = security_sb_mount(dev_name, &path,
- type_page, flags, data_page);
- if (retval)
- goto dput_out;
-
if (flags & MS_REMOUNT)
retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
data_page);
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 0d58caf4a6e..ec8f45f12e0 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -835,7 +835,7 @@ static int ncp_ioctl_need_write(unsigned int cmd)
case NCP_IOC_SETROOT:
return 0;
default:
- /* unkown IOCTL command, assume write */
+ /* unknown IOCTL command, assume write */
return 1;
}
}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 70fad69eb95..fa588006588 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
BUG_ON(!cookie);
- if (fscache_check_page_write(cookie, page)) {
- if (!(gfp & __GFP_WAIT))
- return 0;
- fscache_wait_on_page_write(cookie, page);
- }
-
if (PageFsCache(page)) {
dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
cookie, page, nfsi);
- fscache_uncache_page(cookie, page);
+ if (!fscache_maybe_release_page(cookie, page, gfp))
+ return 0;
+
nfs_add_fscache_stats(page->mapping->host,
NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
}
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index b62481dabae..70e1fbbaaea 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -22,63 +22,55 @@ static struct ctl_table_header *nfs_callback_sysctl_table;
static ctl_table nfs_cb_sysctls[] = {
#ifdef CONFIG_NFS_V4
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nfs_callback_tcpport",
.data = &nfs_callback_set_tcpport,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = (int *)&nfs_set_port_min,
.extra2 = (int *)&nfs_set_port_max,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "idmap_cache_timeout",
.data = &nfs_idmap_cache_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
},
#endif
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nfs_mountpoint_timeout",
.data = &nfs_mountpoint_expiry_timeout,
.maxlen = sizeof(nfs_mountpoint_expiry_timeout),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nfs_congestion_kb",
.data = &nfs_congestion_kb,
.maxlen = sizeof(nfs_congestion_kb),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table nfs_cb_sysctl_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "nfs",
.mode = 0555,
.child = nfs_cb_sysctls,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table nfs_cb_sysctl_root[] = {
{
- .ctl_name = CTL_FS,
.procname = "fs",
.mode = 0555,
.child = nfs_cb_sysctl_dir,
},
- { .ctl_name = 0 }
+ { }
};
int nfs_register_sysctl(void)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 53eb26c16b5..c84b5cc1a94 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -178,7 +178,7 @@ static int wb_priority(struct writeback_control *wbc)
{
if (wbc->for_reclaim)
return FLUSH_HIGHPRI | FLUSH_STABLE;
- if (wbc->for_kupdate)
+ if (wbc->for_kupdate || wbc->for_background)
return FLUSH_LOWPRI;
return 0;
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index dcd2040d330..5ef5f365a5c 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -69,36 +69,30 @@ static int zero;
ctl_table inotify_table[] = {
{
- .ctl_name = INOTIFY_MAX_USER_INSTANCES,
.procname = "max_user_instances",
.data = &inotify_max_user_instances,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
- .ctl_name = INOTIFY_MAX_USER_WATCHES,
.procname = "max_user_watches",
.data = &inotify_max_user_watches,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
- .ctl_name = INOTIFY_MAX_QUEUED_EVENTS,
.procname = "max_queued_events",
.data = &inotify_max_queued_events,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &zero
},
- { .ctl_name = 0 }
+ { }
};
#endif /* CONFIG_SYSCTL */
@@ -747,10 +741,6 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
- if (unlikely(ret))
- goto path_put_and_out;
-
-path_put_and_out:
path_put(&path);
fput_and_out:
fput_light(filp, fput_needed);
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index 9669541d011..08f7530e934 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -927,7 +927,7 @@ lock_retry_remap:
return 0;
ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
- "EOVERFLOW" : (!err ? "EIO" : "unkown error"));
+ "EOVERFLOW" : (!err ? "EIO" : "unknown error"));
return err < 0 ? err : -EIO;
read_err:
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 663c0e341f8..43179ddd336 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -399,7 +399,7 @@ static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
* @cached_page: allocated but as yet unused page
* @lru_pvec: lru-buffering pagevec of caller
*
- * Obtain @nr_pages locked page cache pages from the mapping @maping and
+ * Obtain @nr_pages locked page cache pages from the mapping @mapping and
* starting at index @index.
*
* If a page is newly created, increment its refcount and add it to the
@@ -1281,7 +1281,7 @@ rl_not_mapped_enoent:
/*
* Copy as much as we can into the pages and return the number of bytes which
- * were sucessfully copied. If a fault is encountered then clear the pages
+ * were successfully copied. If a fault is encountered then clear the pages
* out to (ofs + bytes) and return the number of bytes which were copied.
*/
static inline size_t ntfs_copy_from_user(struct page **pages,
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 89b02985c05..4dadcdf3d45 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -338,7 +338,7 @@ err_out:
* copy of the complete multi sector transfer deprotected page. On failure,
* *@wrp is undefined.
*
- * Simillarly, if @lsn is not NULL, on succes *@lsn will be set to the current
+ * Similarly, if @lsn is not NULL, on success *@lsn will be set to the current
* logfile lsn according to this restart page. On failure, *@lsn is undefined.
*
* The following error codes are defined:
diff --git a/fs/ntfs/sysctl.c b/fs/ntfs/sysctl.c
index 9ef85e628fe..79a89184cb5 100644
--- a/fs/ntfs/sysctl.c
+++ b/fs/ntfs/sysctl.c
@@ -36,12 +36,11 @@
/* Definition of the ntfs sysctl. */
static ctl_table ntfs_sysctls[] = {
{
- .ctl_name = CTL_UNNUMBERED, /* Binary and text IDs. */
.procname = "ntfs-debug",
.data = &debug_msgs, /* Data pointer and size. */
.maxlen = sizeof(debug_msgs),
.mode = 0644, /* Mode, proc handler. */
- .proc_handler = &proc_dointvec
+ .proc_handler = proc_dointvec
},
{}
};
@@ -49,7 +48,6 @@ static ctl_table ntfs_sysctls[] = {
/* Define the parent directory /proc/sys/fs. */
static ctl_table sysctls_root[] = {
{
- .ctl_name = CTL_FS,
.procname = "fs",
.mode = 0555,
.child = ntfs_sysctls
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 38a42f5d59f..7c7198a5bc9 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -2398,7 +2398,7 @@ static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
*
* The array is assumed to be large enough to hold an entire path (tree depth).
*
- * Upon succesful return from this function:
+ * Upon successful return from this function:
*
* - The 'right_path' array will contain a path to the leaf block
* whose range contains e_cpos.
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index a1163b8b417..b7428c5d0d3 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -47,7 +47,7 @@
* Calculate the bit offset in the hamming code buffer based on the bit's
* offset in the data buffer. Since the hamming code reserves all
* power-of-two bits for parity, the data bit number and the code bit
- * number are offest by all the parity bits beforehand.
+ * number are offset by all the parity bits beforehand.
*
* Recall that bit numbers in hamming code are 1-based. This function
* takes the 0-based data bit from the caller.
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index da794bc07a6..a3f150e52b0 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -294,10 +294,10 @@ static int sc_seq_show(struct seq_file *seq, void *v)
if (sc->sc_sock) {
inet = inet_sk(sc->sc_sock->sk);
/* the stack's structs aren't sparse endian clean */
- saddr = (__force __be32)inet->saddr;
- daddr = (__force __be32)inet->daddr;
- sport = (__force __be16)inet->sport;
- dport = (__force __be16)inet->dport;
+ saddr = (__force __be32)inet->inet_saddr;
+ daddr = (__force __be32)inet->inet_daddr;
+ sport = (__force __be16)inet->inet_sport;
+ dport = (__force __be16)inet->inet_dport;
}
/* XXX sigh, inet-> doesn't have sparse annotation so any
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 83bcaf266b3..03ccf9a7b1f 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2586,7 +2586,7 @@ fail:
* is complete everywhere. if the target dies while this is
* going on, some nodes could potentially see the target as the
* master, so it is important that my recovery finds the migration
- * mle and sets the master to UNKNONWN. */
+ * mle and sets the master to UNKNOWN. */
/* wait for new node to assert master */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 0d38d67194c..c5e4a49e3a1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1855,7 +1855,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock)
* outstanding lock request, so a cancel convert is
* required. We intentionally overwrite 'ret' - if the
* cancel fails and the lock was granted, it's easier
- * to just bubble sucess back up to the user.
+ * to just bubble success back up to the user.
*/
ret = ocfs2_flock_handle_signal(lockres, level);
} else if (!ret && (level > lockres->l_level)) {
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 54c16b66327..bf34c491ae9 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -659,7 +659,7 @@ static int __ocfs2_journal_access(handle_t *handle,
default:
status = -EINVAL;
- mlog(ML_ERROR, "Uknown access type!\n");
+ mlog(ML_ERROR, "Unknown access type!\n");
}
if (!status && ocfs2_meta_ecc(osb) && triggers)
jbd2_journal_set_triggers(bh, &triggers->ot_triggers);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3a0df7a1b81..30967e3f5e4 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2431,7 +2431,7 @@ out:
* we gonna touch and whether we need to create new blocks.
*
* Normally the refcount blocks store these refcount should be
- * continguous also, so that we can get the number easily.
+ * contiguous also, so that we can get the number easily.
* As for meta_ac, we will at most add split 2 refcount record and
* 2 more refcount block, so just check it in a rough way.
*
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 3f2f1c45b7b..f3df0baa9a4 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -620,51 +620,46 @@ error:
static ctl_table ocfs2_nm_table[] = {
{
- .ctl_name = 1,
.procname = "hb_ctl_path",
.data = ocfs2_hb_ctl_path,
.maxlen = OCFS2_MAX_HB_CTL_PATH,
.mode = 0644,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string,
+ .proc_handler = proc_dostring,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table ocfs2_mod_table[] = {
{
- .ctl_name = FS_OCFS2_NM,
.procname = "nm",
.data = NULL,
.maxlen = 0,
.mode = 0555,
.child = ocfs2_nm_table
},
- { .ctl_name = 0}
+ { }
};
static ctl_table ocfs2_kern_table[] = {
{
- .ctl_name = FS_OCFS2,
.procname = "ocfs2",
.data = NULL,
.maxlen = 0,
.mode = 0555,
.child = ocfs2_mod_table
},
- { .ctl_name = 0}
+ { }
};
static ctl_table ocfs2_root_table[] = {
{
- .ctl_name = CTL_FS,
.procname = "fs",
.data = NULL,
.maxlen = 0,
.mode = 0555,
.child = ocfs2_kern_table
},
- { .ctl_name = 0 }
+ { }
};
static struct ctl_table_header *ocfs2_table_header = NULL;
diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c
index e1c0ec0ae98..082234581d0 100644
--- a/fs/omfs/bitmap.c
+++ b/fs/omfs/bitmap.c
@@ -85,7 +85,7 @@ out:
}
/*
- * Tries to allocate exactly one block. Returns true if sucessful.
+ * Tries to allocate exactly one block. Returns true if successful.
*/
int omfs_allocate_block(struct super_block *sb, u64 block)
{
diff --git a/fs/open.c b/fs/open.c
index 4f01e06227c..b4b31d277f3 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -587,6 +587,9 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
error = -EPERM;
if (!capable(CAP_SYS_CHROOT))
goto dput_and_out;
+ error = security_path_chroot(&path);
+ if (error)
+ goto dput_and_out;
set_fs_root(current->fs, &path);
error = 0;
@@ -617,11 +620,15 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
if (err)
goto out_putf;
mutex_lock(&inode->i_mutex);
+ err = security_path_chmod(dentry, file->f_vfsmnt, mode);
+ if (err)
+ goto out_unlock;
if (mode == (mode_t) -1)
mode = inode->i_mode;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
err = notify_change(dentry, &newattrs);
+out_unlock:
mutex_unlock(&inode->i_mutex);
mnt_drop_write(file->f_path.mnt);
out_putf:
@@ -646,11 +653,15 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
if (error)
goto dput_and_out;
mutex_lock(&inode->i_mutex);
+ error = security_path_chmod(path.dentry, path.mnt, mode);
+ if (error)
+ goto out_unlock;
if (mode == (mode_t) -1)
mode = inode->i_mode;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
error = notify_change(path.dentry, &newattrs);
+out_unlock:
mutex_unlock(&inode->i_mutex);
mnt_drop_write(path.mnt);
dput_and_out:
@@ -664,9 +675,9 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
return sys_fchmodat(AT_FDCWD, filename, mode);
}
-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
+static int chown_common(struct path *path, uid_t user, gid_t group)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = path->dentry->d_inode;
int error;
struct iattr newattrs;
@@ -683,7 +694,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
newattrs.ia_valid |=
ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
mutex_lock(&inode->i_mutex);
- error = notify_change(dentry, &newattrs);
+ error = security_path_chown(path, user, group);
+ if (!error)
+ error = notify_change(path->dentry, &newattrs);
mutex_unlock(&inode->i_mutex);
return error;
@@ -700,7 +713,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
error = mnt_want_write(path.mnt);
if (error)
goto out_release;
- error = chown_common(path.dentry, user, group);
+ error = chown_common(&path, user, group);
mnt_drop_write(path.mnt);
out_release:
path_put(&path);
@@ -725,7 +738,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
error = mnt_want_write(path.mnt);
if (error)
goto out_release;
- error = chown_common(path.dentry, user, group);
+ error = chown_common(&path, user, group);
mnt_drop_write(path.mnt);
out_release:
path_put(&path);
@@ -744,7 +757,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
error = mnt_want_write(path.mnt);
if (error)
goto out_release;
- error = chown_common(path.dentry, user, group);
+ error = chown_common(&path, user, group);
mnt_drop_write(path.mnt);
out_release:
path_put(&path);
@@ -767,7 +780,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
goto out_fput;
dentry = file->f_path.dentry;
audit_inode(NULL, dentry);
- error = chown_common(dentry, user, group);
+ error = chown_common(&file->f_path, user, group);
mnt_drop_write(file->f_path.mnt);
out_fput:
fput(file);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 7b685e10cba..64bc8998ac9 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -226,6 +226,13 @@ ssize_t part_alignment_offset_show(struct device *dev,
return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset);
}
+ssize_t part_discard_alignment_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ return sprintf(buf, "%u\n", p->discard_alignment);
+}
+
ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -288,6 +295,8 @@ static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
+static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show,
+ NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -300,6 +309,7 @@ static struct attribute *part_attrs[] = {
&dev_attr_start.attr,
&dev_attr_size.attr,
&dev_attr_alignment_offset.attr,
+ &dev_attr_discard_alignment.attr,
&dev_attr_stat.attr,
&dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -403,6 +413,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
p->start_sect = start;
p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
+ p->discard_alignment = queue_sector_discard_alignment(disk->queue,
+ start);
p->nr_sects = len;
p->partno = partno;
p->policy = get_disk_ro(disk);
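The new discard_alignment attribute registered above appears next to alignment_offset in each partition's sysfs directory. A reader sketch; the partition name sda1 is only an example:

#include <stdio.h>

int main(void)
{
    char buf[32];
    FILE *f = fopen("/sys/class/block/sda1/discard_alignment", "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("discard_alignment = %s", buf);
    fclose(f);
    return 0;
}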
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 038a6022152..49cfd5f5423 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -1,7 +1,9 @@
/************************************************************
* EFI GUID Partition Table handling
- * Per Intel EFI Specification v1.02
- * http://developer.intel.com/technology/efi/efi.htm
+ *
+ * http://www.uefi.org/specs/
+ * http://www.intel.com/technology/efi/
+ *
* efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
* Copyright 2000,2001,2002,2004 Dell Inc.
*
@@ -92,6 +94,7 @@
*
************************************************************/
#include <linux/crc32.h>
+#include <linux/math64.h>
#include "check.h"
#include "efi.h"
@@ -141,7 +144,8 @@ last_lba(struct block_device *bdev)
{
if (!bdev || !bdev->bd_inode)
return 0;
- return (bdev->bd_inode->i_size >> 9) - 1ULL;
+ return div_u64(bdev->bd_inode->i_size,
+ bdev_logical_block_size(bdev)) - 1ULL;
}
static inline int
@@ -188,6 +192,7 @@ static size_t
read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
{
size_t totalreadcount = 0;
+ sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
if (!bdev || !buffer || lba > last_lba(bdev))
return 0;
@@ -195,7 +200,7 @@ read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
while (count) {
int copied = 512;
Sector sect;
- unsigned char *data = read_dev_sector(bdev, lba++, &sect);
+ unsigned char *data = read_dev_sector(bdev, n++, &sect);
if (!data)
break;
if (copied > count)
@@ -257,15 +262,16 @@ static gpt_header *
alloc_read_gpt_header(struct block_device *bdev, u64 lba)
{
gpt_header *gpt;
+ unsigned ssz = bdev_logical_block_size(bdev);
+
if (!bdev)
return NULL;
- gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL);
+ gpt = kzalloc(ssz, GFP_KERNEL);
if (!gpt)
return NULL;
- if (read_lba(bdev, lba, (u8 *) gpt,
- sizeof (gpt_header)) < sizeof (gpt_header)) {
+ if (read_lba(bdev, lba, (u8 *) gpt, ssz) < ssz) {
kfree(gpt);
gpt=NULL;
return NULL;
@@ -601,6 +607,7 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev)
gpt_header *gpt = NULL;
gpt_entry *ptes = NULL;
u32 i;
+ unsigned ssz = bdev_logical_block_size(bdev) / 512;
if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
kfree(gpt);
@@ -611,13 +618,14 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev)
pr_debug("GUID Partition Table is valid! Yea!\n");
for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
+ u64 start = le64_to_cpu(ptes[i].starting_lba);
+ u64 size = le64_to_cpu(ptes[i].ending_lba) -
+ le64_to_cpu(ptes[i].starting_lba) + 1ULL;
+
if (!is_pte_valid(&ptes[i], last_lba(bdev)))
continue;
- put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba),
- (le64_to_cpu(ptes[i].ending_lba) -
- le64_to_cpu(ptes[i].starting_lba) +
- 1ULL));
+ put_partition(state, i+1, start * ssz, size * ssz);
/* If this is a RAID volume, tell md */
if (!efi_guidcmp(ptes[i].partition_type_guid,
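A sketch of the LBA scaling introduced above: GPT entries count logical blocks, while put_partition() expects 512-byte sectors, so start and size are both multiplied by (logical block size / 512). Example arithmetic for a hypothetical 4KiB-sector disk:

#include <stdio.h>

int main(void)
{
    unsigned long long start_lba = 256;  /* ptes[i].starting_lba */
    unsigned long long end_lba = 2303;   /* ptes[i].ending_lba */
    unsigned ssz = 4096 / 512;           /* bdev_logical_block_size / 512 */

    unsigned long long size = end_lba - start_lba + 1ULL;
    printf("start=%llu size=%llu (512-byte sectors)\n",
           start_lba * ssz, size * ssz);
    return 0;
}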
diff --git a/fs/partitions/efi.h b/fs/partitions/efi.h
index 2cc89d0475b..6998b589abf 100644
--- a/fs/partitions/efi.h
+++ b/fs/partitions/efi.h
@@ -37,7 +37,6 @@
#define EFI_PMBR_OSTYPE_EFI 0xEF
#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
-#define GPT_BLOCK_SIZE 512
#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
#define GPT_HEADER_REVISION_V1 0x00010000
#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
@@ -79,7 +78,12 @@ typedef struct _gpt_header {
__le32 num_partition_entries;
__le32 sizeof_partition_entry;
__le32 partition_entry_array_crc32;
- u8 reserved2[GPT_BLOCK_SIZE - 92];
+
+ /* The rest of the logical block is reserved by UEFI and must be zero.
+ * EFI standard handles this by:
+ *
+ * uint8_t reserved2[ BlockSize - 92 ];
+ */
} __attribute__ ((packed)) gpt_header;
typedef struct _gpt_entry_attributes {
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 822c2d50651..4badde179b1 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -410,6 +410,16 @@ static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
}
#endif /* CONFIG_MMU */
+static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+{
+ seq_printf(m, "Cpus_allowed:\t");
+ seq_cpumask(m, &task->cpus_allowed);
+ seq_printf(m, "\n");
+ seq_printf(m, "Cpus_allowed_list:\t");
+ seq_cpumask_list(m, &task->cpus_allowed);
+ seq_printf(m, "\n");
+}
+
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
@@ -424,6 +434,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
}
task_sig(m, task);
task_cap(m, task);
+ task_cpus_allowed(m, task);
cpuset_task_status_allowed(m, task);
#if defined(CONFIG_S390)
task_show_regs(m, task);
@@ -495,20 +506,17 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
/* add up live thread stats at the group level */
if (whole) {
- struct task_cputime cputime;
struct task_struct *t = task;
do {
min_flt += t->min_flt;
maj_flt += t->maj_flt;
- gtime = cputime_add(gtime, task_gtime(t));
+ gtime = cputime_add(gtime, t->gtime);
t = next_thread(t);
} while (t != task);
min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
- thread_group_cputime(task, &cputime);
- utime = cputime.utime;
- stime = cputime.stime;
+ thread_group_times(task, &utime, &stime);
gtime = cputime_add(gtime, sig->gtime);
}
@@ -524,9 +532,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
if (!whole) {
min_flt = task->min_flt;
maj_flt = task->maj_flt;
- utime = task_utime(task);
- stime = task_stime(task);
- gtime = task_gtime(task);
+ task_times(task, &utime, &stime);
+ gtime = task->gtime;
}
/* scale priority and nice values from timeslices to -20..20 */
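The two lines emitted by task_cpus_allowed() above can be read straight out of /proc/<pid>/status; a minimal sketch using the calling process itself:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/proc/self/status", "r");

    if (!f)
        return 1;
    while (fgets(line, sizeof(line), f))
        if (!strncmp(line, "Cpus_allowed", 12))
            fputs(line, stdout);    /* mask and list forms */
    fclose(f);
    return 0;
}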
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f667e8aeabd..6ff9981f0a1 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -48,7 +48,7 @@ out:
static struct ctl_table *find_in_table(struct ctl_table *p, struct qstr *name)
{
int len;
- for ( ; p->ctl_name || p->procname; p++) {
+ for ( ; p->procname; p++) {
if (!p->procname)
continue;
@@ -218,7 +218,7 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
void *dirent, filldir_t filldir)
{
- for (; table->ctl_name || table->procname; table++, (*pos)++) {
+ for (; table->procname; table++, (*pos)++) {
int res;
/* Can't do anything without a proc name */
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 7cc726c6d70..b9b7aad2003 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -27,7 +27,7 @@ static int show_stat(struct seq_file *p, void *v)
int i, j;
unsigned long jif;
cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
- cputime64_t guest;
+ cputime64_t guest, guest_nice;
u64 sum = 0;
u64 sum_softirq = 0;
unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
@@ -36,7 +36,7 @@ static int show_stat(struct seq_file *p, void *v)
user = nice = system = idle = iowait =
irq = softirq = steal = cputime64_zero;
- guest = cputime64_zero;
+ guest = guest_nice = cputime64_zero;
getboottime(&boottime);
jif = boottime.tv_sec;
@@ -51,6 +51,8 @@ static int show_stat(struct seq_file *p, void *v)
softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
+ guest_nice = cputime64_add(guest_nice,
+ kstat_cpu(i).cpustat.guest_nice);
for_each_irq_nr(j) {
sum += kstat_irqs_cpu(j, i);
}
@@ -65,7 +67,8 @@ static int show_stat(struct seq_file *p, void *v)
}
sum += arch_irq_stat();
- seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+ "%llu\n",
(unsigned long long)cputime64_to_clock_t(user),
(unsigned long long)cputime64_to_clock_t(nice),
(unsigned long long)cputime64_to_clock_t(system),
@@ -74,7 +77,8 @@ static int show_stat(struct seq_file *p, void *v)
(unsigned long long)cputime64_to_clock_t(irq),
(unsigned long long)cputime64_to_clock_t(softirq),
(unsigned long long)cputime64_to_clock_t(steal),
- (unsigned long long)cputime64_to_clock_t(guest));
+ (unsigned long long)cputime64_to_clock_t(guest),
+ (unsigned long long)cputime64_to_clock_t(guest_nice));
for_each_online_cpu(i) {
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
@@ -88,8 +92,10 @@ static int show_stat(struct seq_file *p, void *v)
softirq = kstat_cpu(i).cpustat.softirq;
steal = kstat_cpu(i).cpustat.steal;
guest = kstat_cpu(i).cpustat.guest;
+ guest_nice = kstat_cpu(i).cpustat.guest_nice;
seq_printf(p,
- "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
+ "%llu\n",
i,
(unsigned long long)cputime64_to_clock_t(user),
(unsigned long long)cputime64_to_clock_t(nice),
@@ -99,7 +105,8 @@ static int show_stat(struct seq_file *p, void *v)
(unsigned long long)cputime64_to_clock_t(irq),
(unsigned long long)cputime64_to_clock_t(softirq),
(unsigned long long)cputime64_to_clock_t(steal),
- (unsigned long long)cputime64_to_clock_t(guest));
+ (unsigned long long)cputime64_to_clock_t(guest),
+ (unsigned long long)cputime64_to_clock_t(guest_nice));
}
seq_printf(p, "intr %llu", (unsigned long long)sum);
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 0afba069d56..32f5d131a64 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -67,7 +67,7 @@ unsigned long qnx4_count_free_blocks(struct super_block *sb)
while (total < size) {
if ((bh = sb_bread(sb, start + offset)) == NULL) {
- printk("qnx4: I/O error in counting free blocks\n");
+ printk(KERN_ERR "qnx4: I/O error in counting free blocks\n");
break;
}
count_bits(bh->b_data, size - total, &total_free);
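This qnx4 hunk and the ones that follow only add log levels to bare printk() calls. For reference, a tiny userspace model of how such a prefix travels with the message, using the 2.6.32-era marker strings (the kernel's own definitions live in linux/kernel.h):

#include <stdio.h>

#define KERN_ERR	"<3>"	/* error conditions */
#define KERN_WARNING	"<4>"	/* warning conditions */
#define KERN_INFO	"<6>"	/* informational */

int main(void)
{
	/* Adjacent string literals concatenate, exactly as in printk(). */
	printf(KERN_ERR "qnx4: I/O error in counting free blocks\n");
	return 0;
}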
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 86cc39cb139..6f30c3d5bcb 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -26,8 +26,8 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
int ix, ino;
int size;
- QNX4DEBUG(("qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
- QNX4DEBUG(("filp->f_pos = %ld\n", (long) filp->f_pos));
+ QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
+ QNX4DEBUG((KERN_INFO "filp->f_pos = %ld\n", (long) filp->f_pos));
lock_kernel();
@@ -50,7 +50,7 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
size = QNX4_NAME_MAX;
if ( ( de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK) ) != 0 ) {
- QNX4DEBUG(("qnx4_readdir:%.*s\n", size, de->di_fname));
+ QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
if ( ( de->di_status & QNX4_FILE_LINK ) == 0 )
ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
else {
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index d2cd1798d8c..449f5a66dd3 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -107,7 +107,7 @@ static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_h
{
unsigned long phys;
- QNX4DEBUG(("qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));
+ QNX4DEBUG((KERN_INFO "qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));
phys = qnx4_block_map( inode, iblock );
if ( phys ) {
@@ -142,12 +142,12 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
// read next xtnt block.
bh = sb_bread(inode->i_sb, i_xblk - 1);
if ( !bh ) {
- QNX4DEBUG(("qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
+ QNX4DEBUG((KERN_ERR "qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
return -EIO;
}
xblk = (struct qnx4_xblk*)bh->b_data;
if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) {
- QNX4DEBUG(("qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->i_xblk));
+ QNX4DEBUG((KERN_ERR "qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->i_xblk));
return -EIO;
}
}
@@ -168,7 +168,7 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
brelse( bh );
}
- QNX4DEBUG(("qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
+ QNX4DEBUG((KERN_INFO "qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
return block;
}
@@ -209,7 +209,7 @@ static const char *qnx4_checkroot(struct super_block *sb)
if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') {
return "no qnx4 filesystem (no root dir).";
} else {
- QNX4DEBUG(("QNX4 filesystem found on dev %s.\n", sb->s_id));
+ QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
for (j = 0; j < rl; j++) {
@@ -220,7 +220,7 @@ static const char *qnx4_checkroot(struct super_block *sb)
for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) {
rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE);
if (rootdir->di_fname != NULL) {
- QNX4DEBUG(("Rootdir entry found : [%s]\n", rootdir->di_fname));
+ QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
if (!strncmp(rootdir->di_fname, QNX4_BMNAME, sizeof QNX4_BMNAME)) {
found = 1;
qnx4_sb(sb)->BitMap = kmalloc( sizeof( struct qnx4_inode_entry ), GFP_KERNEL );
@@ -265,12 +265,12 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
if we don't belong here... */
bh = sb_bread(s, 1);
if (!bh) {
- printk("qnx4: unable to read the superblock\n");
+ printk(KERN_ERR "qnx4: unable to read the superblock\n");
goto outnobh;
}
if ( le32_to_cpup((__le32*) bh->b_data) != QNX4_SUPER_MAGIC ) {
if (!silent)
- printk("qnx4: wrong fsid in superblock.\n");
+ printk(KERN_ERR "qnx4: wrong fsid in superblock.\n");
goto out;
}
s->s_op = &qnx4_sops;
@@ -284,14 +284,14 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
errmsg = qnx4_checkroot(s);
if (errmsg != NULL) {
if (!silent)
- printk("qnx4: %s\n", errmsg);
+ printk(KERN_ERR "qnx4: %s\n", errmsg);
goto out;
}
/* does root not have inode number QNX4_ROOT_INO ?? */
root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
if (IS_ERR(root)) {
- printk("qnx4: get inode failed\n");
+ printk(KERN_ERR "qnx4: get inode failed\n");
ret = PTR_ERR(root);
goto out;
}
@@ -374,7 +374,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
qnx4_inode = qnx4_raw_inode(inode);
inode->i_mode = 0;
- QNX4DEBUG(("Reading inode : [%d]\n", ino));
+ QNX4DEBUG((KERN_INFO "reading inode : [%d]\n", ino));
if (!ino) {
printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is "
"out of range\n",
@@ -385,7 +385,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
block = ino / QNX4_INODES_PER_BLOCK;
if (!(bh = sb_bread(sb, block))) {
- printk("qnx4: major problem: unable to read inode from dev "
+ printk(KERN_ERR "qnx4: major problem: unable to read inode from dev "
"%s\n", sb->s_id);
iget_failed(inode);
return ERR_PTR(-EIO);
@@ -499,7 +499,7 @@ static int __init init_qnx4_fs(void)
return err;
}
- printk("QNX4 filesystem 0.2.3 registered.\n");
+ printk(KERN_INFO "QNX4 filesystem 0.2.3 registered.\n");
return 0;
}
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index ae1e7edbacd..58703ebba87 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -30,7 +30,7 @@ static int qnx4_match(int len, const char *name,
int namelen, thislen;
if (bh == NULL) {
- printk("qnx4: matching unassigned buffer !\n");
+ printk(KERN_WARNING "qnx4: matching unassigned buffer !\n");
return 0;
}
de = (struct qnx4_inode_entry *) (bh->b_data + *offset);
@@ -66,7 +66,7 @@ static struct buffer_head *qnx4_find_entry(int len, struct inode *dir,
*res_dir = NULL;
if (!dir->i_sb) {
- printk("qnx4: no superblock on dir.\n");
+ printk(KERN_WARNING "qnx4: no superblock on dir.\n");
return NULL;
}
bh = NULL;
@@ -124,7 +124,7 @@ struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nam
foundinode = qnx4_iget(dir->i_sb, ino);
if (IS_ERR(foundinode)) {
unlock_kernel();
- QNX4DEBUG(("qnx4: lookup->iget -> error %ld\n",
+ QNX4DEBUG((KERN_ERR "qnx4: lookup->iget -> error %ld\n",
PTR_ERR(foundinode)));
return ERR_CAST(foundinode);
}
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 8047e01ef46..353e78a9ebe 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -17,7 +17,7 @@ config QUOTA
config QUOTA_NETLINK_INTERFACE
bool "Report quota messages through netlink interface"
- depends on QUOTA && NET
+ depends on QUOTACTL && NET
help
If you say Y here, quota warnings (about exceeding softlimit, reaching
hardlimit, etc.) will be reported through netlink interface. If unsure,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 39b49c42a7e..eb5a755718f 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -77,10 +77,6 @@
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-#include <net/netlink.h>
-#include <net/genetlink.h>
-#endif
#include <asm/uaccess.h>
@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
}
#endif
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
-
-/* Netlink family structure for quota */
-static struct genl_family quota_genl_family = {
- .id = GENL_ID_GENERATE,
- .hdrsize = 0,
- .name = "VFS_DQUOT",
- .version = 1,
- .maxattr = QUOTA_NL_A_MAX,
-};
-
-/* Send warning to userspace about user which exceeded quota */
-static void send_warning(const struct dquot *dquot, const char warntype)
-{
- static atomic_t seq;
- struct sk_buff *skb;
- void *msg_head;
- int ret;
- int msg_size = 4 * nla_total_size(sizeof(u32)) +
- 2 * nla_total_size(sizeof(u64));
-
- /* We have to allocate using GFP_NOFS as we are called from a
- * filesystem performing write and thus further recursion into
- * the fs to free some data could cause deadlocks. */
- skb = genlmsg_new(msg_size, GFP_NOFS);
- if (!skb) {
- printk(KERN_ERR
- "VFS: Not enough memory to send quota warning.\n");
- return;
- }
- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
- &quota_genl_family, 0, QUOTA_NL_C_WARNING);
- if (!msg_head) {
- printk(KERN_ERR
- "VFS: Cannot store netlink header in quota warning.\n");
- goto err_out;
- }
- ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
- if (ret)
- goto attr_err_out;
- ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
- if (ret)
- goto attr_err_out;
- ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
- if (ret)
- goto attr_err_out;
- ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
- MAJOR(dquot->dq_sb->s_dev));
- if (ret)
- goto attr_err_out;
- ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
- MINOR(dquot->dq_sb->s_dev));
- if (ret)
- goto attr_err_out;
- ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
- if (ret)
- goto attr_err_out;
- genlmsg_end(skb, msg_head);
-
- genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
- return;
-attr_err_out:
- printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
-err_out:
- kfree_skb(skb);
-}
-#endif
/*
* Write warnings to the console and send warning messages over netlink.
*
@@ -1145,18 +1074,20 @@ err_out:
*/
static void flush_warnings(struct dquot *const *dquots, char *warntype)
{
+ struct dquot *dq;
int i;
- for (i = 0; i < MAXQUOTAS; i++)
- if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
- !warning_issued(dquots[i], warntype[i])) {
+ for (i = 0; i < MAXQUOTAS; i++) {
+ dq = dquots[i];
+ if (dq && warntype[i] != QUOTA_NL_NOWARN &&
+ !warning_issued(dq, warntype[i])) {
#ifdef CONFIG_PRINT_QUOTA_WARNING
- print_warning(dquots[i], warntype[i]);
-#endif
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
- send_warning(dquots[i], warntype[i]);
+ print_warning(dq, warntype[i]);
#endif
+ quota_send_warning(dq->dq_type, dq->dq_id,
+ dq->dq_sb->s_dev, warntype[i]);
}
+ }
}
static int ignore_hardlimit(struct dquot *dquot)
@@ -2473,100 +2404,89 @@ const struct quotactl_ops vfs_quotactl_ops = {
static ctl_table fs_dqstats_table[] = {
{
- .ctl_name = FS_DQ_LOOKUPS,
.procname = "lookups",
.data = &dqstats.lookups,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_DQ_DROPS,
.procname = "drops",
.data = &dqstats.drops,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_DQ_READS,
.procname = "reads",
.data = &dqstats.reads,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_DQ_WRITES,
.procname = "writes",
.data = &dqstats.writes,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_DQ_CACHE_HITS,
.procname = "cache_hits",
.data = &dqstats.cache_hits,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_DQ_ALLOCATED,
.procname = "allocated_dquots",
.data = &dqstats.allocated_dquots,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_DQ_FREE,
.procname = "free_dquots",
.data = &dqstats.free_dquots,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = FS_DQ_SYNCS,
.procname = "syncs",
.data = &dqstats.syncs,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#ifdef CONFIG_PRINT_QUOTA_WARNING
{
- .ctl_name = FS_DQ_WARNINGS,
.procname = "warnings",
.data = &flag_print_warnings,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
#endif
- { .ctl_name = 0 },
+ { },
};
static ctl_table fs_table[] = {
{
- .ctl_name = FS_DQSTATS,
.procname = "quota",
.mode = 0555,
.child = fs_dqstats_table,
},
- { .ctl_name = 0 },
+ { },
};
static ctl_table sys_table[] = {
{
- .ctl_name = CTL_FS,
.procname = "fs",
.mode = 0555,
.child = fs_table,
},
- { .ctl_name = 0 },
+ { },
};
static int __init dquot_init(void)
@@ -2607,12 +2527,6 @@ static int __init dquot_init(void)
register_shrinker(&dqcache_shrinker);
-#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
- if (genl_register_family(&quota_genl_family) != 0)
- printk(KERN_ERR
- "VFS: Failed to create quota netlink interface.\n");
-#endif
-
return 0;
}
module_init(dquot_init);
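The converted fs_dqstats_table still surfaces the same files under /proc/sys/fs/quota/. A userspace sketch that reads them back, with entry names taken from the table above:

#include <stdio.h>

int main(void)
{
	static const char *names[] = {
		"lookups", "drops", "reads", "writes",
		"cache_hits", "allocated_dquots", "free_dquots", "syncs",
	};
	char path[64];
	unsigned int i, val;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/fs/quota/%s",
			 names[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* quota support may be absent */
		if (fscanf(f, "%u", &val) == 1)
			printf("%s = %u\n", names[i], val);
		fclose(f);
	}
	return 0;
}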
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95c5b42384b..ee91e275695 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,8 @@
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
/* Check validity of generic quotactl commands */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
return ret;
}
#endif
+
+
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+
+/* Netlink family structure for quota */
+static struct genl_family quota_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = 0,
+ .name = "VFS_DQUOT",
+ .version = 1,
+ .maxattr = QUOTA_NL_A_MAX,
+};
+
+/**
+ * quota_send_warning - Send warning to userspace about exceeded quota
+ * @type: The quota type: USRQUOTA, GRPQUOTA,...
+ * @id: The user or group id of the quota that was exceeded
+ * @dev: The device on which the fs is mounted (sb->s_dev)
+ * @warntype: The type of the warning: QUOTA_NL_...
+ *
+ * This can be used by filesystems (including those which don't use
+ * dquot) to send a message to userspace relating to quota limits.
+ *
+ */
+
+void quota_send_warning(short type, unsigned int id, dev_t dev,
+ const char warntype)
+{
+ static atomic_t seq;
+ struct sk_buff *skb;
+ void *msg_head;
+ int ret;
+ int msg_size = 4 * nla_total_size(sizeof(u32)) +
+ 2 * nla_total_size(sizeof(u64));
+
+ /* We have to allocate using GFP_NOFS as we are called from a
+ * filesystem performing write and thus further recursion into
+ * the fs to free some data could cause deadlocks. */
+ skb = genlmsg_new(msg_size, GFP_NOFS);
+ if (!skb) {
+ printk(KERN_ERR
+ "VFS: Not enough memory to send quota warning.\n");
+ return;
+ }
+ msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
+ &quota_genl_family, 0, QUOTA_NL_C_WARNING);
+ if (!msg_head) {
+ printk(KERN_ERR
+ "VFS: Cannot store netlink header in quota warning.\n");
+ goto err_out;
+ }
+ ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
+ if (ret)
+ goto attr_err_out;
+ ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
+ if (ret)
+ goto attr_err_out;
+ genlmsg_end(skb, msg_head);
+
+ genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
+ return;
+attr_err_out:
+ printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
+err_out:
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(quota_send_warning);
+
+static int __init quota_init(void)
+{
+ if (genl_register_family(&quota_genl_family) != 0)
+ printk(KERN_ERR
+ "VFS: Failed to create quota netlink interface.\n");
+ return 0;
+}
+
+module_init(quota_init);
+#endif
+
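For a filesystem that does its own quota accounting, the newly exported helper can be called directly. A sketch of such a caller (kernel context assumed; example_warn_user is hypothetical, while quota_send_warning() and the USRQUOTA/QUOTA_NL_* constants come from the patch and <linux/quota.h>):

#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>

/* Hypothetical filesystem-side caller; error paths and rate
 * limiting elided. */
static void example_warn_user(struct super_block *sb, uid_t uid)
{
	/* Report that uid crossed its block soft limit on this fs. */
	quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BSOFTWARN);
}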
diff --git a/fs/read_write.c b/fs/read_write.c
index 3ac28987f22..b7f4a1f94d4 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -826,8 +826,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
if (!(out_file->f_mode & FMODE_WRITE))
goto fput_out;
retval = -EINVAL;
- if (!out_file->f_op || !out_file->f_op->sendpage)
- goto fput_out;
in_inode = in_file->f_path.dentry->d_inode;
out_inode = out_file->f_path.dentry->d_inode;
retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
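Dropping the sendpage check means the write side of sendfile(2) no longer has to be a socket on this kernel. A minimal userspace sketch exercising a file-to-file copy:

#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int in, out;
	struct stat st;
	off_t off = 0;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
		return 1;
	}
	in = open(argv[1], O_RDONLY);
	out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || fstat(in, &st) < 0) {
		perror("setup");
		return 1;
	}
	/* Whole-file copy in one call; short writes ignored for brevity. */
	if (sendfile(out, in, &off, st.st_size) < 0) {
		perror("sendfile");
		return 1;
	}
	close(in);
	close(out);
	return 0;
}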
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 7c5ab6330dd..6a9e30c041d 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs.o
reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
hashes.o tail_conversion.o journal.o resize.o \
- item_ops.o ioctl.o procfs.o xattr.o
+ item_ops.o ioctl.o procfs.o xattr.o lock.o
ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
reiserfs-objs += xattr_user.o xattr_trusted.o
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index e716161ab32..68549570718 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1249,14 +1249,18 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
else if (bitmap == 0)
block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, block);
+ reiserfs_write_lock(sb);
if (bh == NULL)
reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
"reading failed", __func__, block);
else {
if (buffer_locked(bh)) {
PROC_INFO_INC(sb, scan_bitmap.wait);
+ reiserfs_write_unlock(sb);
__wait_on_buffer(bh);
+ reiserfs_write_lock(sb);
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(atomic_read(&bh->b_count) == 0);
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 6d2668fdc38..c094f58c744 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -20,7 +20,7 @@ const struct file_operations reiserfs_dir_operations = {
.read = generic_read_dir,
.readdir = reiserfs_readdir,
.fsync = reiserfs_dir_fsync,
- .ioctl = reiserfs_ioctl,
+ .unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = reiserfs_compat_ioctl,
#endif
@@ -174,14 +174,22 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
// user space buffer is swapped out. At that time
// entry can move to somewhere else
memcpy(local_buf, d_name, d_reclen);
+
+ /*
+ * Since filldir might sleep, we can release
+ * the write lock here for other waiters
+ */
+ reiserfs_write_unlock(inode->i_sb);
if (filldir
(dirent, local_buf, d_reclen, d_off, d_ino,
DT_UNKNOWN) < 0) {
+ reiserfs_write_lock(inode->i_sb);
if (local_buf != small_buf) {
kfree(local_buf);
}
goto end;
}
+ reiserfs_write_lock(inode->i_sb);
if (local_buf != small_buf) {
kfree(local_buf);
}
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 128d3f7c8aa..60c08044066 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -21,14 +21,6 @@
#include <linux/buffer_head.h>
#include <linux/kernel.h>
-#ifdef CONFIG_REISERFS_CHECK
-
-struct tree_balance *cur_tb = NULL; /* detects whether more than one
- copy of tb exists as a means
- of checking whether schedule
- is interrupting do_balance */
-#endif
-
static inline void buffer_info_init_left(struct tree_balance *tb,
struct buffer_info *bi)
{
@@ -1840,11 +1832,12 @@ static int check_before_balancing(struct tree_balance *tb)
{
int retval = 0;
- if (cur_tb) {
+ if (REISERFS_SB(tb->tb_sb)->cur_tb) {
reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
"occurred based on cur_tb not being null at "
"this point in code. do_balance cannot properly "
- "handle schedule occurring while it runs.");
+ "handle concurrent tree accesses on a same "
+ "mount point.");
}
/* double check that buffers that we will modify are unlocked. (fix_nodes should already have
@@ -1986,7 +1979,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
"check");*/
RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
#ifdef CONFIG_REISERFS_CHECK
- cur_tb = tb;
+ REISERFS_SB(tb->tb_sb)->cur_tb = tb;
#endif
}
@@ -1996,7 +1989,7 @@ static inline void do_balance_completed(struct tree_balance *tb)
#ifdef CONFIG_REISERFS_CHECK
check_leaf_level(tb);
check_internal_levels(tb);
- cur_tb = NULL;
+ REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
#endif
/* reiserfs_free_block is no longer schedule safe. So, we need to
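Moving cur_tb into the per-superblock info makes the schedule-detection check per mount. A self-contained model (names hypothetical) of why the per-superblock cursor lets two mounts balance concurrently:

#include <stdio.h>

struct tree_balance { int unused; };

struct sb_info {
	struct tree_balance *cur_tb;	/* was: a single global cur_tb */
};

static void model_do_balance(struct sb_info *sbi, struct tree_balance *tb)
{
	if (sbi->cur_tb) {
		fprintf(stderr, "panic: concurrent balance on this sb\n");
		return;
	}
	sbi->cur_tb = tb;
	/* ... balance the tree ... */
	sbi->cur_tb = NULL;
}

int main(void)
{
	struct sb_info a = { NULL }, b = { NULL };
	struct tree_balance tb1, tb2;

	/* Two different mounts can now balance at the same time. */
	model_do_balance(&a, &tb1);
	model_do_balance(&b, &tb2);
	puts("ok");
	return 0;
}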
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9f436668b7f..da2dba082e2 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -284,7 +284,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
const struct file_operations reiserfs_file_operations = {
.read = do_sync_read,
.write = reiserfs_file_write,
- .ioctl = reiserfs_ioctl,
+ .unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = reiserfs_compat_ioctl,
#endif
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 5e5a4e6fbaf..6591cb21edf 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -563,9 +563,6 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
return needed_nodes;
}
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
/* Set parameters for balancing.
* Performs write of results of analysis of balancing into structure tb,
@@ -834,7 +831,7 @@ static int get_empty_nodes(struct tree_balance *tb, int h)
RFALSE(buffer_dirty(new_bh) ||
buffer_journaled(new_bh) ||
buffer_journal_dirty(new_bh),
- "PAP-8140: journlaled or dirty buffer %b for the new block",
+ "PAP-8140: journaled or dirty buffer %b for the new block",
new_bh);
/* Put empty buffers into the array. */
@@ -1022,7 +1019,11 @@ static int get_far_parent(struct tree_balance *tb,
/* Check whether the common parent is locked. */
if (buffer_locked(*pcom_father)) {
+
+ /* Release the write lock while the buffer is busy */
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(*pcom_father);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb)) {
brelse(*pcom_father);
return REPEAT_SEARCH;
@@ -1927,7 +1928,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
return REPEAT_SEARCH;
if (buffer_locked(bh)) {
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(bh);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -1965,7 +1968,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
FL[h]);
son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, son_number);
+ reiserfs_write_lock(sb);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2003,7 +2008,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
child_position =
(bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
+ reiserfs_write_unlock(sb);
bh = sb_bread(sb, son_number);
+ reiserfs_write_lock(sb);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2278,7 +2285,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
REPEAT_SEARCH : CARRY_ON;
}
#endif
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(locked);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -2349,12 +2358,14 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
/* if it possible in indirect_to_direct conversion */
if (buffer_locked(tbS0)) {
+ reiserfs_write_unlock(tb->tb_sb);
__wait_on_buffer(tbS0);
+ reiserfs_write_lock(tb->tb_sb);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
#ifdef CONFIG_REISERFS_CHECK
- if (cur_tb) {
+ if (REISERFS_SB(tb->tb_sb)->cur_tb) {
print_cur_tb("fix_nodes");
reiserfs_panic(tb->tb_sb, "PAP-8305",
"there is pending do_balance");
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a14d6cd9eed..3a28e7751b3 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -251,7 +251,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
struct cpu_key key;
struct buffer_head *bh;
struct item_head *ih, tmp_ih;
- int fs_gen;
b_blocknr_t blocknr;
char *p = NULL;
int chars;
@@ -265,7 +264,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
(loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
3);
- research:
result = search_for_position_by_key(inode->i_sb, &key, &path);
if (result != POSITION_FOUND) {
pathrelse(&path);
@@ -340,7 +338,6 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
}
// read file tail into part of page
offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1);
- fs_gen = get_generation(inode->i_sb);
copy_item_head(&tmp_ih, ih);
/* we only want to kmap if we are reading the tail into the page.
@@ -348,13 +345,9 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
** sure we need to. But, this means the item might move if
** kmap schedules
*/
- if (!p) {
+ if (!p)
p = (char *)kmap(bh_result->b_page);
- if (fs_changed(fs_gen, inode->i_sb)
- && item_moved(&tmp_ih, &path)) {
- goto research;
- }
- }
+
p += offset;
memset(p, 0, inode->i_sb->s_blocksize);
do {
@@ -489,10 +482,14 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
disappeared */
if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
int err;
- lock_kernel();
+
+ reiserfs_write_lock(inode->i_sb);
+
err = reiserfs_commit_for_inode(inode);
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
- unlock_kernel();
+
+ reiserfs_write_unlock(inode->i_sb);
+
if (err < 0)
ret = err;
}
@@ -601,6 +598,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
__le32 *item;
int done;
int fs_gen;
+ int lock_depth;
struct reiserfs_transaction_handle *th = NULL;
/* space reserved in transaction batch:
. 3 balancings in direct->indirect conversion
@@ -616,12 +614,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
loff_t new_offset =
(((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
- /* bad.... */
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
version = get_inode_item_key_version(inode);
if (!file_capable(inode, block)) {
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
return -EFBIG;
}
@@ -633,7 +630,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
/* find number of block-th logical block of the file */
ret = _get_block_create_0(inode, block, bh_result,
create | GET_BLOCK_READ_DIRECT);
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
return ret;
}
/*
@@ -751,7 +748,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (!dangle && th)
retval = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
/* the item was found, so new blocks were not added to the file
** there is no need to make sure the inode is updated with this
@@ -935,7 +932,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (blocks_needed == 1) {
un = &unf_single;
} else {
- un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC); // We need to avoid scheduling.
+ un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_NOFS);
if (!un) {
un = &unf_single;
blocks_needed = 1;
@@ -997,10 +994,16 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (retval)
goto failure;
}
- /* inserting indirect pointers for a hole can take a
- ** long time. reschedule if needed
+ /*
+ * inserting indirect pointers for a hole can take a
+ * long time. reschedule if needed and also release the write
+ * lock for others.
*/
- cond_resched();
+ if (need_resched()) {
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ schedule();
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ }
retval = search_for_position_by_key(inode->i_sb, &key, &path);
if (retval == IO_ERROR) {
@@ -1035,7 +1038,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
retval = err;
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
reiserfs_check_path(&path);
return retval;
}
@@ -2072,8 +2075,9 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
int error;
struct buffer_head *bh = NULL;
int err2;
+ int lock_depth;
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
if (inode->i_size > 0) {
error = grab_tail_page(inode, &page, &bh);
@@ -2142,14 +2146,17 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
page_cache_release(page);
}
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+
return 0;
out:
if (page) {
unlock_page(page);
page_cache_release(page);
}
- reiserfs_write_unlock(inode->i_sb);
+
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+
return error;
}
@@ -2608,7 +2615,10 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
int ret;
int old_ref = 0;
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
fix_tail_page_for_writing(page);
if (reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th;
@@ -2664,6 +2674,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
int update_sd = 0;
struct reiserfs_transaction_handle *th;
unsigned start;
+ int lock_depth = 0;
+ bool locked = false;
if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
pos ++;
@@ -2690,9 +2702,11 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
** to do the i_size updates here.
*/
pos += copied;
+
if (pos > inode->i_size) {
struct reiserfs_transaction_handle myth;
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ locked = true;
/* If the file have grown beyond the border where it
can have a tail, unmark it as needing a tail
packing */
@@ -2703,10 +2717,9 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
ret = journal_begin(&myth, inode->i_sb, 1);
- if (ret) {
- reiserfs_write_unlock(inode->i_sb);
+ if (ret)
goto journal_error;
- }
+
reiserfs_update_inode_transaction(inode);
inode->i_size = pos;
/*
@@ -2718,34 +2731,36 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
reiserfs_update_sd(&myth, inode);
update_sd = 1;
ret = journal_end(&myth, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto journal_error;
}
if (th) {
- reiserfs_write_lock(inode->i_sb);
+ if (!locked) {
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ locked = true;
+ }
if (!update_sd)
mark_inode_dirty(inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto out;
}
out:
+ if (locked)
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
unlock_page(page);
page_cache_release(page);
return ret == 0 ? copied : ret;
journal_error:
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ locked = false;
if (th) {
- reiserfs_write_lock(inode->i_sb);
if (!update_sd)
reiserfs_update_sd(th, inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
}
-
goto out;
}
@@ -2758,7 +2773,10 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int update_sd = 0;
struct reiserfs_transaction_handle *th = NULL;
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
if (reiserfs_transaction_running(inode->i_sb)) {
th = current->journal_info;
}
@@ -2770,7 +2788,6 @@ int reiserfs_commit_write(struct file *f, struct page *page,
*/
if (pos > inode->i_size) {
struct reiserfs_transaction_handle myth;
- reiserfs_write_lock(inode->i_sb);
/* If the file have grown beyond the border where it
can have a tail, unmark it as needing a tail
packing */
@@ -2781,10 +2798,9 @@ int reiserfs_commit_write(struct file *f, struct page *page,
REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
ret = journal_begin(&myth, inode->i_sb, 1);
- if (ret) {
- reiserfs_write_unlock(inode->i_sb);
+ if (ret)
goto journal_error;
- }
+
reiserfs_update_inode_transaction(inode);
inode->i_size = pos;
/*
@@ -2796,16 +2812,13 @@ int reiserfs_commit_write(struct file *f, struct page *page,
reiserfs_update_sd(&myth, inode);
update_sd = 1;
ret = journal_end(&myth, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto journal_error;
}
if (th) {
- reiserfs_write_lock(inode->i_sb);
if (!update_sd)
mark_inode_dirty(inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
if (ret)
goto out;
}
@@ -2815,11 +2828,9 @@ int reiserfs_commit_write(struct file *f, struct page *page,
journal_error:
if (th) {
- reiserfs_write_lock(inode->i_sb);
if (!update_sd)
reiserfs_update_sd(th, inode);
ret = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock(inode->i_sb);
}
return ret;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 0ccc3fdda7b..ace77451ceb 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -13,44 +13,52 @@
#include <linux/compat.h>
/*
-** reiserfs_ioctl - handler for ioctl for inode
-** supported commands:
-** 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
-** and prevent packing file (argument arg has to be non-zero)
-** 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
-** 3) That's all for a while ...
-*/
-int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+ * reiserfs_ioctl - handler for ioctl for inode
+ * supported commands:
+ * 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
+ * and prevent packing file (argument arg has to be non-zero)
+ * 2) REISERFS_IOC_[GS]ETFLAGS, REISERFS_IOC_[GS]ETVERSION
+ * 3) That's all for a while ...
+ */
+long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = filp->f_path.dentry->d_inode;
unsigned int flags;
int err = 0;
+ reiserfs_write_lock(inode->i_sb);
+
switch (cmd) {
case REISERFS_IOC_UNPACK:
if (S_ISREG(inode->i_mode)) {
if (arg)
- return reiserfs_unpack(inode, filp);
- else
- return 0;
+ err = reiserfs_unpack(inode, filp);
} else
- return -ENOTTY;
- /* following two cases are taken from fs/ext2/ioctl.c by Remy
- Card (card@masi.ibp.fr) */
+ err = -ENOTTY;
+ break;
+ /*
+ * following two cases are taken from fs/ext2/ioctl.c by Remy
+ * Card (card@masi.ibp.fr)
+ */
case REISERFS_IOC_GETFLAGS:
- if (!reiserfs_attrs(inode->i_sb))
- return -ENOTTY;
+ if (!reiserfs_attrs(inode->i_sb)) {
+ err = -ENOTTY;
+ break;
+ }
flags = REISERFS_I(inode)->i_attrs;
i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
- return put_user(flags, (int __user *)arg);
+ err = put_user(flags, (int __user *)arg);
+ break;
case REISERFS_IOC_SETFLAGS:{
- if (!reiserfs_attrs(inode->i_sb))
- return -ENOTTY;
+ if (!reiserfs_attrs(inode->i_sb)) {
+ err = -ENOTTY;
+ break;
+ }
err = mnt_want_write(filp->f_path.mnt);
if (err)
- return err;
+ break;
if (!is_owner_or_cap(inode)) {
err = -EPERM;
@@ -90,16 +98,18 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
mark_inode_dirty(inode);
setflags_out:
mnt_drop_write(filp->f_path.mnt);
- return err;
+ break;
}
case REISERFS_IOC_GETVERSION:
- return put_user(inode->i_generation, (int __user *)arg);
+ err = put_user(inode->i_generation, (int __user *)arg);
+ break;
case REISERFS_IOC_SETVERSION:
- if (!is_owner_or_cap(inode))
- return -EPERM;
+ if (!is_owner_or_cap(inode)) {
+ err = -EPERM;
+ break;
+ }
err = mnt_want_write(filp->f_path.mnt);
if (err)
- return err;
+ break;
if (get_user(inode->i_generation, (int __user *)arg)) {
err = -EFAULT;
goto setversion_out;
@@ -108,19 +118,20 @@ setflags_out:
mark_inode_dirty(inode);
setversion_out:
mnt_drop_write(filp->f_path.mnt);
- return err;
+ break;
default:
- return -ENOTTY;
+ err = -ENOTTY;
}
+
+ reiserfs_write_unlock(inode->i_sb);
+
+ return err;
}
#ifdef CONFIG_COMPAT
long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- int ret;
-
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
case REISERFS_IOC32_UNPACK:
@@ -141,10 +152,8 @@ long reiserfs_compat_ioctl(struct file *file, unsigned int cmd,
default:
return -ENOIOCTLCMD;
}
- lock_kernel();
- ret = reiserfs_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
- unlock_kernel();
- return ret;
+
+ return reiserfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
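With the handler converted to ->unlocked_ioctl, userspace is unchanged. A sketch that drives the GETVERSION path above through its generic alias FS_IOC_GETVERSION (the reiserfs handler stores an int):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd, version;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-on-reiserfs>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The handler above does put_user() of an int. */
	if (ioctl(fd, FS_IOC_GETVERSION, &version) < 0) {
		perror("ioctl");
		return 1;
	}
	printf("i_generation = %d\n", version);
	close(fd);
	return 0;
}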
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 90622200b39..2f8a7e7b8da 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -429,21 +429,6 @@ static void clear_prepared_bits(struct buffer_head *bh)
clear_buffer_journal_restore_dirty(bh);
}
-/* utility function to force a BUG if it is called without the big
-** kernel lock held. caller is the string printed just before calling BUG()
-*/
-void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
-{
-#ifdef CONFIG_SMP
- if (current->lock_depth < 0) {
- reiserfs_panic(sb, "journal-1", "%s called without kernel "
- "lock held", caller);
- }
-#else
- ;
-#endif
-}
-
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
super_block
@@ -556,7 +541,8 @@ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
static inline void lock_journal(struct super_block *sb)
{
PROC_INFO_INC(sb, journal.lock_journal);
- mutex_lock(&SB_JOURNAL(sb)->j_mutex);
+
+ reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);
}
/* unlock the current transaction */
@@ -708,7 +694,9 @@ static void check_barrier_completion(struct super_block *s,
disable_barrier(s);
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
+ reiserfs_write_unlock(s);
sync_dirty_buffer(bh);
+ reiserfs_write_lock(s);
}
}
@@ -996,8 +984,13 @@ static int reiserfs_async_progress_wait(struct super_block *s)
{
DEFINE_WAIT(wait);
struct reiserfs_journal *j = SB_JOURNAL(s);
- if (atomic_read(&j->j_async_throttle))
+
+ if (atomic_read(&j->j_async_throttle)) {
+ reiserfs_write_unlock(s);
congestion_wait(BLK_RW_ASYNC, HZ / 10);
+ reiserfs_write_lock(s);
+ }
+
return 0;
}
@@ -1043,7 +1036,8 @@ static int flush_commit_list(struct super_block *s,
}
/* make sure nobody is trying to flush this one at the same time */
- mutex_lock(&jl->j_commit_mutex);
+ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);
+
if (!journal_list_still_alive(s, trans_id)) {
mutex_unlock(&jl->j_commit_mutex);
goto put_jl;
@@ -1061,12 +1055,17 @@ static int flush_commit_list(struct super_block *s,
if (!list_empty(&jl->j_bh_list)) {
int ret;
- unlock_kernel();
+
+ /*
+ * We might sleep in numerous places inside
+ * write_ordered_buffers. Relax the write lock.
+ */
+ reiserfs_write_unlock(s);
ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_bh_list);
if (ret < 0 && retval == 0)
retval = ret;
- lock_kernel();
+ reiserfs_write_lock(s);
}
BUG_ON(!list_empty(&jl->j_bh_list));
/*
@@ -1085,8 +1084,11 @@ static int flush_commit_list(struct super_block *s,
SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
if (tbh) {
- if (buffer_dirty(tbh))
- ll_rw_block(WRITE, 1, &tbh) ;
+ if (buffer_dirty(tbh)) {
+ reiserfs_write_unlock(s);
+ ll_rw_block(WRITE, 1, &tbh);
+ reiserfs_write_lock(s);
+ }
put_bh(tbh) ;
}
}
@@ -1114,12 +1116,19 @@ static int flush_commit_list(struct super_block *s,
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
(jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
+
+ reiserfs_write_unlock(s);
wait_on_buffer(tbh);
+ reiserfs_write_lock(s);
// since we're using ll_rw_blk above, it might have skipped over
// a locked buffer. Double check here
//
- if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
+ /* redundant, sync_dirty_buffer() checks */
+ if (buffer_dirty(tbh)) {
+ reiserfs_write_unlock(s);
sync_dirty_buffer(tbh);
+ reiserfs_write_lock(s);
+ }
if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(s, "journal-601",
@@ -1143,10 +1152,15 @@ static int flush_commit_list(struct super_block *s,
if (buffer_dirty(jl->j_commit_bh))
BUG();
mark_buffer_dirty(jl->j_commit_bh) ;
+ reiserfs_write_unlock(s);
sync_dirty_buffer(jl->j_commit_bh) ;
+ reiserfs_write_lock(s);
}
- } else
+ } else {
+ reiserfs_write_unlock(s);
wait_on_buffer(jl->j_commit_bh);
+ reiserfs_write_lock(s);
+ }
check_barrier_completion(s, jl->j_commit_bh);
@@ -1286,7 +1300,9 @@ static int _update_journal_header_block(struct super_block *sb,
if (trans_id >= journal->j_last_flush_trans_id) {
if (buffer_locked((journal->j_header_bh))) {
+ reiserfs_write_unlock(sb);
wait_on_buffer((journal->j_header_bh));
+ reiserfs_write_lock(sb);
if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(sb, "journal-699",
@@ -1312,12 +1328,16 @@ static int _update_journal_header_block(struct super_block *sb,
disable_barrier(sb);
goto sync;
}
+ reiserfs_write_unlock(sb);
wait_on_buffer(journal->j_header_bh);
+ reiserfs_write_lock(sb);
check_barrier_completion(sb, journal->j_header_bh);
} else {
sync:
set_buffer_dirty(journal->j_header_bh);
+ reiserfs_write_unlock(sb);
sync_dirty_buffer(journal->j_header_bh);
+ reiserfs_write_lock(sb);
}
if (!buffer_uptodate(journal->j_header_bh)) {
reiserfs_warning(sb, "journal-837",
@@ -1409,7 +1429,7 @@ static int flush_journal_list(struct super_block *s,
/* if flushall == 0, the lock is already held */
if (flushall) {
- mutex_lock(&journal->j_flush_mutex);
+ reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
} else if (mutex_trylock(&journal->j_flush_mutex)) {
BUG();
}
@@ -1553,7 +1573,11 @@ static int flush_journal_list(struct super_block *s,
reiserfs_panic(s, "journal-1011",
"cn->bh is NULL");
}
+
+ reiserfs_write_unlock(s);
wait_on_buffer(cn->bh);
+ reiserfs_write_lock(s);
+
if (!cn->bh) {
reiserfs_panic(s, "journal-1012",
"cn->bh is NULL");
@@ -1769,7 +1793,7 @@ static int kupdate_transactions(struct super_block *s,
struct reiserfs_journal *journal = SB_JOURNAL(s);
chunk.nr = 0;
- mutex_lock(&journal->j_flush_mutex);
+ reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
if (!journal_list_still_alive(s, orig_trans_id)) {
goto done;
}
@@ -1973,11 +1997,19 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
reiserfs_mounted_fs_count--;
/* wait for all commits to finish */
cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
+
+ /*
+ * We must release the write lock here because
+ * the workqueue job (flush_async_commits) needs this lock
+ */
+ reiserfs_write_unlock(sb);
flush_workqueue(commit_wq);
+
if (!reiserfs_mounted_fs_count) {
destroy_workqueue(commit_wq);
commit_wq = NULL;
}
+ reiserfs_write_lock(sb);
free_journal_ram(sb);
@@ -2243,7 +2275,11 @@ static int journal_read_transaction(struct super_block *sb,
/* read in the log blocks, memcpy to the corresponding real block */
ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
+
+ reiserfs_write_unlock(sb);
wait_on_buffer(log_blocks[i]);
+ reiserfs_write_lock(sb);
+
if (!buffer_uptodate(log_blocks[i])) {
reiserfs_warning(sb, "journal-1212",
"REPLAY FAILURE fsck required! "
@@ -2765,11 +2801,27 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}
+ /*
+ * We need to unlock here to avoid creating the following
+ * dependency:
+ * reiserfs_lock -> sysfs_mutex
+ * Because the reiserfs mmap path creates the following dependency:
+ * mm->mmap -> reiserfs_lock, hence we have
+ * mm->mmap -> reiserfs_lock -> sysfs_mutex.
+ * This would end up in a circular dependency with the sysfs readdir path,
+ * which does sysfs_mutex -> mm->mmap_sem.
+ * This is fine because the reiserfs lock is useless in the mount path,
+ * at least until we call journal_begin. We keep it for paranoid
+ * reasons.
+ */
+ reiserfs_write_unlock(sb);
if (journal_init_dev(sb, journal, j_dev_name) != 0) {
+ reiserfs_write_lock(sb);
reiserfs_warning(sb, "sh-462",
"unable to initialize jornal device");
goto free_and_return;
}
+ reiserfs_write_lock(sb);
rs = SB_DISK_SUPER_BLOCK(sb);
@@ -2881,8 +2933,11 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
}
reiserfs_mounted_fs_count++;
- if (reiserfs_mounted_fs_count <= 1)
+ if (reiserfs_mounted_fs_count <= 1) {
+ reiserfs_write_unlock(sb);
commit_wq = create_workqueue("reiserfs");
+ reiserfs_write_lock(sb);
+ }
INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
journal->j_work_sb = sb;
@@ -2964,8 +3019,11 @@ static void queue_log_writer(struct super_block *s)
init_waitqueue_entry(&wait, current);
add_wait_queue(&journal->j_join_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
+ if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
+ reiserfs_write_unlock(s);
schedule();
+ reiserfs_write_lock(s);
+ }
__set_current_state(TASK_RUNNING);
remove_wait_queue(&journal->j_join_wait, &wait);
}
@@ -2982,7 +3040,9 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
struct reiserfs_journal *journal = SB_JOURNAL(sb);
unsigned long bcount = journal->j_bcount;
while (1) {
+ reiserfs_write_unlock(sb);
schedule_timeout_uninterruptible(1);
+ reiserfs_write_lock(sb);
journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
while ((atomic_read(&journal->j_wcount) > 0 ||
atomic_read(&journal->j_jlock)) &&
@@ -3033,7 +3093,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
unlock_journal(sb);
+ reiserfs_write_unlock(sb);
reiserfs_wait_on_write_block(sb);
+ reiserfs_write_lock(sb);
PROC_INFO_INC(sb, journal.journal_relock_writers);
goto relock;
}
@@ -3506,14 +3568,14 @@ static void flush_async_commits(struct work_struct *work)
struct reiserfs_journal_list *jl;
struct list_head *entry;
- lock_kernel();
+ reiserfs_write_lock(sb);
if (!list_empty(&journal->j_journal_list)) {
/* last entry is the youngest, commit it and you get everything */
entry = journal->j_journal_list.prev;
jl = JOURNAL_LIST_ENTRY(entry);
flush_commit_list(sb, jl, 1);
}
- unlock_kernel();
+ reiserfs_write_unlock(sb);
}
/*
@@ -4041,7 +4103,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* the new transaction is fully setup, and we've already flushed the
* ordered bh list
*/
- mutex_lock(&jl->j_commit_mutex);
+ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);
/* save the transaction id in case we need to commit it later */
commit_trans_id = jl->j_trans_id;
@@ -4156,7 +4218,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
next = cn->next;
free_cnode(sb, cn);
cn = next;
+ reiserfs_write_unlock(sb);
cond_resched();
+ reiserfs_write_lock(sb);
}
/* we are done with both the c_bh and d_bh, but
@@ -4203,10 +4267,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* is lost.
*/
if (!list_empty(&jl->j_tail_bh_list)) {
- unlock_kernel();
+ reiserfs_write_unlock(sb);
write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_tail_bh_list);
- lock_kernel();
+ reiserfs_write_lock(sb);
}
BUG_ON(!list_empty(&jl->j_tail_bh_list));
mutex_unlock(&jl->j_commit_mutex);
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
new file mode 100644
index 00000000000..ee2cfc0fd8a
--- /dev/null
+++ b/fs/reiserfs/lock.c
@@ -0,0 +1,88 @@
+#include <linux/reiserfs_fs.h>
+#include <linux/mutex.h>
+
+/*
+ * The previous reiserfs locking scheme was heavily based on
+ * the tricky properties of the Bkl:
+ *
+ * - it was acquired recursively by the same task
+ * - its performance relied on the release-while-schedule() property
+ *
+ * Now that we replace it by a mutex, we still want to keep the same
+ * recursive property to avoid big changes in the code structure.
+ * We use our own lock_owner here because the owner field on a mutex
+ * is only available with SMP or mutex debugging; besides, we only need
+ * this field for this mutex, so a system-wide facility is unnecessary.
+ *
+ * Also this lock is often released before a call that could block because
+ * reiserfs performances were partialy based on the release while schedule()
+ * property of the Bkl.
+ */
+void reiserfs_write_lock(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ if (sb_i->lock_owner != current) {
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ }
+
+ /* No need to protect it, only the current task touches it */
+ sb_i->lock_depth++;
+}
+
+void reiserfs_write_unlock(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ /*
+ * Are we unlocking without even holding the lock?
+ * Such a situation must raise a BUG() if we don't want
+ * to corrupt the data.
+ */
+ BUG_ON(sb_i->lock_owner != current);
+
+ if (--sb_i->lock_depth == -1) {
+ sb_i->lock_owner = NULL;
+ mutex_unlock(&sb_i->lock);
+ }
+}
+
+/*
+ * If we already own the lock, just exit and don't increase the depth.
+ * Useful when we don't want to lock more than once.
+ *
+ * We always return the lock_depth we had before calling
+ * this function.
+ */
+int reiserfs_write_lock_once(struct super_block *s)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ if (sb_i->lock_owner != current) {
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ return sb_i->lock_depth++;
+ }
+
+ return sb_i->lock_depth;
+}
+
+void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
+{
+ if (lock_depth == -1)
+ reiserfs_write_unlock(s);
+}
+
+/*
+ * Utility function to force a BUG if it is called without the superblock
+ * write lock held. caller is the string printed just before calling BUG()
+ */
+void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
+
+ if (sb_i->lock_depth < 0)
+ reiserfs_panic(sb, "%s called without kernel lock held %d",
+ caller);
+}
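A userspace model of the lock above may help: the same owner/depth bookkeeping, with pthread stand-ins for the kernel's mutex and task_struct (Linux assumed, so pthread_t is a scalar for the initializer; an owner_valid flag replaces the NULL owner check since pthread_t has no reserved null):

#include <pthread.h>
#include <stdio.h>

struct rsb {
	pthread_mutex_t lock;
	pthread_t lock_owner;	/* valid only when owner_valid is set */
	int owner_valid;
	int lock_depth;
};

static void write_lock(struct rsb *s)
{
	if (!s->owner_valid || !pthread_equal(s->lock_owner, pthread_self())) {
		pthread_mutex_lock(&s->lock);
		s->lock_owner = pthread_self();
		s->owner_valid = 1;
	}
	/* No need to protect it, only the owner touches it. */
	s->lock_depth++;
}

static void write_unlock(struct rsb *s)
{
	if (--s->lock_depth == -1) {
		s->owner_valid = 0;
		pthread_mutex_unlock(&s->lock);
	}
}

int main(void)
{
	struct rsb s = { PTHREAD_MUTEX_INITIALIZER, 0, 0, -1 };

	write_lock(&s);
	write_lock(&s);		/* recursive: depth 0 -> 1, no deadlock */
	write_unlock(&s);
	write_unlock(&s);	/* depth back to -1, mutex really released */
	printf("final depth %d\n", s.lock_depth);
	return 0;
}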
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 27157912863..e296ff72a6c 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -324,6 +324,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
int retval;
+ int lock_depth;
struct inode *inode = NULL;
struct reiserfs_dir_entry de;
INITIALIZE_PATH(path_to_entry);
@@ -331,7 +332,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
return ERR_PTR(-ENAMETOOLONG);
- reiserfs_write_lock(dir->i_sb);
+ /*
+ * Might be called with or without the write lock, must be careful
+ * to not recursively hold it in case we want to release the lock
+ * before rescheduling.
+ */
+ lock_depth = reiserfs_write_lock_once(dir->i_sb);
+
de.de_gen_number_bit_string = NULL;
retval =
reiserfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len,
@@ -341,7 +348,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
inode = reiserfs_iget(dir->i_sb,
(struct cpu_key *)&(de.de_dir_id));
if (!inode || IS_ERR(inode)) {
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
return ERR_PTR(-EACCES);
}
@@ -350,7 +357,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (IS_PRIVATE(dir))
inode->i_flags |= S_PRIVATE;
}
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
if (retval == IO_ERROR) {
return ERR_PTR(-EIO);
}
@@ -725,6 +732,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
struct inode *inode;
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
+ int lock_depth;
/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
@@ -748,7 +756,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return retval;
}
jbegin_count += retval;
- reiserfs_write_lock(dir->i_sb);
+ lock_depth = reiserfs_write_lock_once(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval) {
@@ -798,8 +806,8 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
d_instantiate(dentry, inode);
unlock_new_inode(inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
- out_failed:
- reiserfs_write_unlock(dir->i_sb);
+out_failed:
+ reiserfs_write_unlock_once(dir->i_sb, lock_depth);
return retval;
}
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 536eacaeb71..adbc6f53851 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -349,10 +349,6 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
. */
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
-
void __reiserfs_panic(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
{
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 18b315d3d10..b3a94d20f0f 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -141,7 +141,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
+ reiserfs_write_unlock(s);
sync_dirty_buffer(bh);
+ reiserfs_write_lock(s);
// update bitmap_info stuff
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index d036ee5b1c8..5fa7118f04e 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -222,9 +222,6 @@ static inline int bin_search(const void *key, /* Key to search for. */
return ITEM_NOT_FOUND;
}
-#ifdef CONFIG_REISERFS_CHECK
-extern struct tree_balance *cur_tb;
-#endif
/* Minimal possible key. It is never in the tree. */
const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };
@@ -519,25 +516,48 @@ static int is_tree_node(struct buffer_head *bh, int level)
#define SEARCH_BY_KEY_READA 16
-/* The function is NOT SCHEDULE-SAFE! */
-static void search_by_key_reada(struct super_block *s,
+/*
+ * The function is NOT SCHEDULE-SAFE!
+ * It might unlock the write lock if we needed to wait for a block
+ * to be read. Note that in this case it won't recover the lock to avoid
+ * high contention resulting from too much lock requests, especially
+ * the caller (search_by_key) will perform other schedule-unsafe
+ * operations just after calling this function.
+ *
+ * @return true if we have unlocked
+ */
+static bool search_by_key_reada(struct super_block *s,
struct buffer_head **bh,
b_blocknr_t *b, int num)
{
int i, j;
+ bool unlocked = false;
for (i = 0; i < num; i++) {
bh[i] = sb_getblk(s, b[i]);
}
+ /*
+ * We are going to read some blocks on which we
+ * hold a reference. That is safe: the blocks may be
+ * changed concurrently once we release the lock,
+ * but we check later whether the tree changed.
+ */
for (j = 0; j < i; j++) {
/*
* note, this needs attention if we are getting rid of the BKL
* you have to make sure the prepared bit isn't set on this buffer
*/
- if (!buffer_uptodate(bh[j]))
+ if (!buffer_uptodate(bh[j])) {
+ if (!unlocked) {
+ reiserfs_write_unlock(s);
+ unlocked = true;
+ }
ll_rw_block(READA, 1, bh + j);
+ }
brelse(bh[j]);
}
+ return unlocked;
}
/**************************************************************************
@@ -625,11 +645,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
have a pointer to it. */
if ((bh = last_element->pe_buffer =
sb_getblk(sb, block_number))) {
+ bool unlocked = false;
+
if (!buffer_uptodate(bh) && reada_count > 1)
- search_by_key_reada(sb, reada_bh,
+ /* may unlock the write lock */
+ unlocked = search_by_key_reada(sb, reada_bh,
reada_blocks, reada_count);
+ /*
+ * If we haven't already unlocked the write lock,
+ * then we need to do that here before reading
+ * the current block
+ */
+ if (!buffer_uptodate(bh) && !unlocked) {
+ reiserfs_write_unlock(sb);
+ unlocked = true;
+ }
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
+
+ if (unlocked)
+ reiserfs_write_lock(sb);
if (!buffer_uptodate(bh))
goto io_error;
} else {
@@ -673,7 +708,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
!key_in_buffer(search_path, key, sb),
"PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
- if (cur_tb) {
+ if (REISERFS_SB(sb)->cur_tb) {
print_cur_tb("5140");
reiserfs_panic(sb, "PAP-5140",
"schedule occurred in do_balance!");
@@ -1024,7 +1059,9 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
reiserfs_free_block(th, inode, block, 1);
}
+ reiserfs_write_unlock(sb);
cond_resched();
+ reiserfs_write_lock(sb);
if (item_moved (&s_ih, path)) {
need_re_search = 1;
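
Dropping the lock around cond_resched() matters because the task most likely to be scheduled next is one blocked on this very lock; yielding while still holding it would achieve nothing. A hypothetical helper that consolidates the dance these hunks open-code (the name is ours, not the tree's):

static inline void reiserfs_cond_resched(struct super_block *s)
{
	if (need_resched()) {
		reiserfs_write_unlock(s);
		cond_resched();
		reiserfs_write_lock(s);
	}
}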
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f0ad05f3802..339b0baf2af 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -465,7 +465,7 @@ static void reiserfs_put_super(struct super_block *s)
struct reiserfs_transaction_handle th;
th.t_trans_id = 0;
- lock_kernel();
+ reiserfs_write_lock(s);
if (s->s_dirt)
reiserfs_write_super(s);
@@ -499,10 +499,10 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_proc_info_done(s);
+ reiserfs_write_unlock(s);
+ mutex_destroy(&REISERFS_SB(s)->lock);
kfree(s->s_fs_info);
s->s_fs_info = NULL;
-
- unlock_kernel();
}
static struct kmem_cache *reiserfs_inode_cachep;
@@ -554,25 +554,28 @@ static void reiserfs_dirty_inode(struct inode *inode)
struct reiserfs_transaction_handle th;
int err = 0;
+ int lock_depth;
+
if (inode->i_sb->s_flags & MS_RDONLY) {
reiserfs_warning(inode->i_sb, "clm-6006",
"writing inode %lu on readonly FS",
inode->i_ino);
return;
}
- reiserfs_write_lock(inode->i_sb);
+ lock_depth = reiserfs_write_lock_once(inode->i_sb);
/* this is really only used for atime updates, so they don't have
** to be included in O_SYNC or fsync
*/
err = journal_begin(&th, inode->i_sb, 1);
- if (err) {
- reiserfs_write_unlock(inode->i_sb);
- return;
- }
+ if (err)
+ goto out;
+
reiserfs_update_sd(&th, inode);
journal_end(&th, inode->i_sb, 1);
- reiserfs_write_unlock(inode->i_sb);
+
+out:
+ reiserfs_write_unlock_once(inode->i_sb, lock_depth);
}
#ifdef CONFIG_QUOTA
@@ -1168,11 +1171,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
unsigned int qfmt = 0;
#ifdef CONFIG_QUOTA
int i;
+#endif
+
+ reiserfs_write_lock(s);
+#ifdef CONFIG_QUOTA
memcpy(qf_names, REISERFS_SB(s)->s_qf_names, sizeof(qf_names));
#endif
- lock_kernel();
rs = SB_DISK_SUPER_BLOCK(s);
if (!reiserfs_parse_options
@@ -1295,12 +1301,12 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
out_ok:
replace_mount_options(s, new_opts);
- unlock_kernel();
+ reiserfs_write_unlock(s);
return 0;
out_err:
kfree(new_opts);
- unlock_kernel();
+ reiserfs_write_unlock(s);
return err;
}
@@ -1404,7 +1410,9 @@ static int read_super_block(struct super_block *s, int offset)
static int reread_meta_blocks(struct super_block *s)
{
ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
+ reiserfs_write_unlock(s);
wait_on_buffer(SB_BUFFER_WITH_SB(s));
+ reiserfs_write_lock(s);
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
return 1;
@@ -1613,7 +1621,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
if (!sbi) {
errval = -ENOMEM;
- goto error;
+ goto error_alloc;
}
s->s_fs_info = sbi;
/* Set default values for options: non-aggressive tails, RO on errors */
@@ -1627,6 +1635,20 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
/* setup default block allocator options */
reiserfs_init_alloc_options(s);
+ mutex_init(&REISERFS_SB(s)->lock);
+ REISERFS_SB(s)->lock_depth = -1;
+
+ /*
+ * This function is called with the bkl, which was also the old
+ * locking used here.
+ * do_journal_begin() will soon check that we hold the lock (ie: the
+ * old bkl). That check probably exists because do_journal_begin()
+ * has several other callers; at this point it doesn't seem necessary
+ * to protect against anything here.
+ * Anyway, let's be conservative and take the lock for now.
+ */
+ reiserfs_write_lock(s);
+
jdev_name = NULL;
if (reiserfs_parse_options
(s, (char *)data, &(sbi->s_mount_opt), &blocks, &jdev_name,
@@ -1852,9 +1874,13 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
init_waitqueue_head(&(sbi->s_wait));
spin_lock_init(&sbi->bitmap_lock);
+ reiserfs_write_unlock(s);
+
return (0);
error:
+ reiserfs_write_unlock(s);
+error_alloc:
if (jinit_done) { /* kill the commit thread, free journal ram */
journal_release_error(NULL, s);
}
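
The error/error_alloc split fixes an unwind-ordering bug: the old single label would have dropped a write lock that the failing kzalloc() path never took. A self-contained userspace sketch of the two-label shape, with all names illustrative:

#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* One label per acquired resource, in reverse acquisition order, so
 * an early failure never releases a lock that was never taken. */
int fill_super_sketch(int fail_early, int fail_late)
{
	void *info = malloc(64);

	if (!info || fail_early)
		goto error_alloc;	/* lock not held yet: skip the unlock */

	pthread_mutex_lock(&big_lock);
	if (fail_late)
		goto error;		/* lock held: must drop it first */

	pthread_mutex_unlock(&big_lock);
	return 0;			/* on success 'info' stays live, as s_fs_info does */

error:
	pthread_mutex_unlock(&big_lock);
error_alloc:
	free(info);
	return -1;
}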
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 6925b835a43..58aa8e75f7f 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -975,7 +975,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
int err = 0;
/* If we don't have the privroot located yet - go find it */
- mutex_lock(&s->s_root->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
@@ -1004,14 +1004,14 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
goto error;
if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
- mutex_lock(&s->s_root->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
err = create_privroot(REISERFS_SB(s)->priv_root);
mutex_unlock(&s->s_root->d_inode->i_mutex);
}
if (privroot->d_inode) {
s->s_xattr = reiserfs_xattr_handlers;
- mutex_lock(&privroot->d_inode->i_mutex);
+ reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
dentry = lookup_one_len(XAROOT_NAME, privroot,
diff --git a/fs/splice.c b/fs/splice.c
index 7394e9e1753..39208663aaf 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -648,9 +648,11 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
ret = buf->ops->confirm(pipe, buf);
if (!ret) {
more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
-
- ret = file->f_op->sendpage(file, buf->page, buf->offset,
- sd->len, &pos, more);
+ if (file->f_op && file->f_op->sendpage)
+ ret = file->f_op->sendpage(file, buf->page, buf->offset,
+ sd->len, &pos, more);
+ else
+ ret = -EINVAL;
}
return ret;
@@ -1068,8 +1070,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
if (unlikely(ret < 0))
return ret;
- splice_write = out->f_op->splice_write;
- if (!splice_write)
+ if (out->f_op && out->f_op->splice_write)
+ splice_write = out->f_op->splice_write;
+ else
splice_write = default_file_splice_write;
return splice_write(pipe, out, ppos, len, flags);
@@ -1093,8 +1096,9 @@ static long do_splice_to(struct file *in, loff_t *ppos,
if (unlikely(ret < 0))
return ret;
- splice_read = in->f_op->splice_read;
- if (!splice_read)
+ if (in->f_op && in->f_op->splice_read)
+ splice_read = in->f_op->splice_read;
+ else
splice_read = default_file_splice_read;
return splice_read(in, ppos, pipe, len, flags);
@@ -1316,7 +1320,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (off_in)
return -ESPIPE;
if (off_out) {
- if (out->f_op->llseek == no_llseek)
+ if (!out->f_op || !out->f_op->llseek ||
+ out->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
@@ -1336,7 +1341,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (off_out)
return -ESPIPE;
if (off_in) {
- if (in->f_op->llseek == no_llseek)
+ if (!in->f_op || !in->f_op->llseek ||
+ in->f_op->llseek == no_llseek)
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
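
All of the splice hunks apply one defensive idiom: file->f_op itself may be NULL, and so may any method inside it, so both levels are checked before the indirect call, with a fallback or -EINVAL otherwise. A self-contained sketch of the same shape, with illustrative names:

#include <errno.h>
#include <stddef.h>

struct ops {
	int (*send)(int fd, const void *buf, size_t len);
};

struct handle {
	const struct ops *ops;	/* may legitimately be NULL */
	int fd;
};

/* Guard both the ops table and the method before calling through,
 * as the hunks above do for file->f_op and its members. */
int do_send(struct handle *h, const void *buf, size_t len)
{
	if (h->ops && h->ops->send)
		return h->ops->send(h->fd, buf, len);
	return -EINVAL;
}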
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index f94ddf7efba..868a55ee080 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -23,7 +23,7 @@
/*
* This file implements functions needed to recover from unclean un-mounts.
* When UBIFS is mounted, it checks a flag on the master node to determine if
- * an un-mount was completed sucessfully. If not, the process of mounting
+ * an un-mount was completed successfully. If not, the process of mounting
incorporates additional checking and fixing of on-flash data structures.
* UBIFS always cleans away all remnants of an unclean un-mount, so that
* errors do not accumulate. However UBIFS defers recovery if it is mounted
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
index c6ad7c7e3ee..05ac0fe9c4d 100644
--- a/fs/xattr_acl.c
+++ b/fs/xattr_acl.c
@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size)
if (count == 0)
return NULL;
- acl = posix_acl_alloc(count, GFP_KERNEL);
+ acl = posix_acl_alloc(count, GFP_NOFS);
if (!acl)
return ERR_PTR(-ENOMEM);
acl_e = acl->a_entries;
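
The GFP_NOFS switch matters because posix_acl_from_xattr() can run while the VM is reclaiming memory on behalf of a filesystem; a GFP_KERNEL allocation there could re-enter the filesystem and deadlock. The two masks differ only in __GFP_FS, as defined in include/linux/gfp.h of this era:

#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)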
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c2e30eea74d..70f989895d1 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -904,16 +904,9 @@ xfs_convert_page(
if (startio) {
if (count) {
- struct backing_dev_info *bdi;
-
- bdi = inode->i_mapping->backing_dev_info;
wbc->nr_to_write--;
- if (bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
- done = 1;
- } else if (wbc->nr_to_write <= 0) {
+ if (wbc->nr_to_write <= 0)
done = 1;
- }
}
xfs_start_page_writeback(page, !page_dirty, count);
}
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index c5bc67c4e3b..7bb5092d6ae 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -55,170 +55,140 @@ xfs_stats_clear_proc_handler(
static ctl_table xfs_table[] = {
{
- .ctl_name = XFS_SGID_INHERIT,
.procname = "irix_sgid_inherit",
.data = &xfs_params.sgid_inherit.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.sgid_inherit.min,
.extra2 = &xfs_params.sgid_inherit.max
},
{
- .ctl_name = XFS_SYMLINK_MODE,
.procname = "irix_symlink_mode",
.data = &xfs_params.symlink_mode.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.symlink_mode.min,
.extra2 = &xfs_params.symlink_mode.max
},
{
- .ctl_name = XFS_PANIC_MASK,
.procname = "panic_mask",
.data = &xfs_params.panic_mask.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.panic_mask.min,
.extra2 = &xfs_params.panic_mask.max
},
{
- .ctl_name = XFS_ERRLEVEL,
.procname = "error_level",
.data = &xfs_params.error_level.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.error_level.min,
.extra2 = &xfs_params.error_level.max
},
{
- .ctl_name = XFS_SYNCD_TIMER,
.procname = "xfssyncd_centisecs",
.data = &xfs_params.syncd_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.syncd_timer.min,
.extra2 = &xfs_params.syncd_timer.max
},
{
- .ctl_name = XFS_INHERIT_SYNC,
.procname = "inherit_sync",
.data = &xfs_params.inherit_sync.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_sync.min,
.extra2 = &xfs_params.inherit_sync.max
},
{
- .ctl_name = XFS_INHERIT_NODUMP,
.procname = "inherit_nodump",
.data = &xfs_params.inherit_nodump.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nodump.min,
.extra2 = &xfs_params.inherit_nodump.max
},
{
- .ctl_name = XFS_INHERIT_NOATIME,
.procname = "inherit_noatime",
.data = &xfs_params.inherit_noatim.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_noatim.min,
.extra2 = &xfs_params.inherit_noatim.max
},
{
- .ctl_name = XFS_BUF_TIMER,
.procname = "xfsbufd_centisecs",
.data = &xfs_params.xfs_buf_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.xfs_buf_timer.min,
.extra2 = &xfs_params.xfs_buf_timer.max
},
{
- .ctl_name = XFS_BUF_AGE,
.procname = "age_buffer_centisecs",
.data = &xfs_params.xfs_buf_age.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.xfs_buf_age.min,
.extra2 = &xfs_params.xfs_buf_age.max
},
{
- .ctl_name = XFS_INHERIT_NOSYM,
.procname = "inherit_nosymlinks",
.data = &xfs_params.inherit_nosym.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nosym.min,
.extra2 = &xfs_params.inherit_nosym.max
},
{
- .ctl_name = XFS_ROTORSTEP,
.procname = "rotorstep",
.data = &xfs_params.rotorstep.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.rotorstep.min,
.extra2 = &xfs_params.rotorstep.max
},
{
- .ctl_name = XFS_INHERIT_NODFRG,
.procname = "inherit_nodefrag",
.data = &xfs_params.inherit_nodfrg.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nodfrg.min,
.extra2 = &xfs_params.inherit_nodfrg.max
},
{
- .ctl_name = XFS_FILESTREAM_TIMER,
.procname = "filestream_centisecs",
.data = &xfs_params.fstrm_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .strategy = &sysctl_intvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.fstrm_timer.min,
.extra2 = &xfs_params.fstrm_timer.max,
},
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
{
- .ctl_name = XFS_STATS_CLEAR,
.procname = "stats_clear",
.data = &xfs_params.stats_clear.val,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &xfs_stats_clear_proc_handler,
- .strategy = &sysctl_intvec,
+ .proc_handler = xfs_stats_clear_proc_handler,
.extra1 = &xfs_params.stats_clear.min,
.extra2 = &xfs_params.stats_clear.max
},
@@ -229,7 +199,6 @@ static ctl_table xfs_table[] = {
static ctl_table xfs_dir_table[] = {
{
- .ctl_name = FS_XFS,
.procname = "xfs",
.mode = 0555,
.child = xfs_table
@@ -239,7 +208,6 @@ static ctl_table xfs_dir_table[] = {
static ctl_table xfs_root_table[] = {
{
- .ctl_name = CTL_FS,
.procname = "fs",
.mode = 0555,
.child = xfs_dir_table
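
These xfs_sysctl.c hunks are part of the tree-wide removal of binary sysctl numbers: entries are now matched by .procname alone, the .strategy callbacks are gone, and handlers are referenced as plain function designators. The resulting entry shape, with illustrative names (only proc_dointvec_minmax is a real kernel symbol here):

static ctl_table example_table[] = {
	{
		.procname	= "example_knob",	/* matched by name alone now */
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,	/* plain designator, not &fn */
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{}	/* all-zero sentinel terminates the table */
};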
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index 6533ead9b88..a2c16bcee90 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -98,7 +98,7 @@ typedef struct xfs_dquot {
#define dq_flags q_lists.dqm_flags
/*
- * Lock hierachy for q_qlock:
+ * Lock hierarchy for q_qlock:
* XFS_QLOCK_NORMAL is the implicit default,
* XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
*/