Diffstat (limited to 'fs'): 307 files changed, 7309 insertions, 5636 deletions
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296..7d6c2139891 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
 
 static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 {
-	char *options;
+	char *options, *tmp_options;
 	substring_t args[MAX_OPT_ARGS];
 	char *p;
 	int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 	if (!opts)
 		return 0;
 
-	options = kstrdup(opts, GFP_KERNEL);
-	if (!options)
+	tmp_options = kstrdup(opts, GFP_KERNEL);
+	if (!tmp_options) {
+		ret = -ENOMEM;
 		goto fail_option_alloc;
+	}
+	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 			break;
 		case Opt_cache:
 			s = match_strdup(&args[0]);
-			if (!s)
-				goto fail_option_alloc;
+			if (!s) {
+				ret = -ENOMEM;
+				P9_DPRINTK(P9_DEBUG_ERROR,
+					"problem allocating copy of cache arg\n");
+				goto free_and_return;
+			}
 
 			if (strcmp(s, "loose") == 0)
 				v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 
 		case Opt_access:
 			s = match_strdup(&args[0]);
-			if (!s)
-				goto fail_option_alloc;
+			if (!s) {
+				ret = -ENOMEM;
+				P9_DPRINTK(P9_DEBUG_ERROR,
+					"problem allocating copy of access arg\n");
+				goto free_and_return;
+			}
 
 			v9ses->flags &= ~V9FS_ACCESS_MASK;
 			if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 			continue;
 		}
 	}
-	kfree(options);
-	return ret;
 
+free_and_return:
+	kfree(tmp_options);
 fail_option_alloc:
-	P9_DPRINTK(P9_DEBUG_ERROR,
-		   "failed to allocate copy of option argument\n");
-	return -ENOMEM;
+	return ret;
 }
 
 /**
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e3586..ed835836e0d 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
 ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
+void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a08..74a0461a9ac 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
 	return total;
 }
 
+static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
+					int datasync)
+{
+	struct p9_fid *fid;
+	struct p9_wstat wstat;
+	int retval;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
+						dentry, datasync);
+
+	fid = filp->private_data;
+	v9fs_blank_wstat(&wstat);
+
+	retval = p9_client_wstat(fid, &wstat);
+	return retval;
+}
+
 static const struct file_operations v9fs_cached_file_operations = {
 	.llseek = generic_file_llseek,
 	.read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
 	.mmap = generic_file_readonly_mmap,
+	.fsync = v9fs_file_fsync,
 };
 
 const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
 	.mmap = generic_file_readonly_mmap,
+	.fsync = v9fs_file_fsync,
 };
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 18f74ec4dce..a407fa3388c 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
  *
  */
 
-static void
+void
 v9fs_blank_wstat(struct p9_wstat *wstat)
 {
 	wstat->type = ~0;
@@ -1001,44 +1001,6 @@ done:
 }
 
 /**
- * v9fs_vfs_readlink - read a symlink's location
- * @dentry: dentry for symlink
- * @buffer: buffer to load symlink location into
- * @buflen: length of buffer
- *
- */
-
-static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
-			     int buflen)
-{
-	int retval;
-	int ret;
-	char *link = __getname();
-
-	if (unlikely(!link))
-		return -ENOMEM;
-
-	if (buflen > PATH_MAX)
-		buflen = PATH_MAX;
-
-	P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
-		   dentry);
-
-	retval = v9fs_readlink(dentry, link, buflen);
-
-	if (retval > 0) {
-		if ((ret = copy_to_user(buffer, link, retval)) != 0) {
-			P9_DPRINTK(P9_DEBUG_ERROR,
-				   "problem copying to user: %d\n", ret);
-			retval = ret;
-		}
-	}
-
-	__putname(link);
-	return retval;
-}
-
-/**
  * v9fs_vfs_follow_link - follow a symlink path
  * @dentry: dentry for symlink
  * @nd: nameidata
@@ -1230,7 +1192,6 @@ static const struct inode_operations v9fs_dir_inode_operations_ext = {
 	.rmdir = v9fs_vfs_rmdir,
 	.mknod = v9fs_vfs_mknod,
 	.rename = v9fs_vfs_rename,
-	.readlink = v9fs_vfs_readlink,
 	.getattr = v9fs_vfs_getattr,
 	.setattr = v9fs_vfs_setattr,
 };
@@ -1253,7 +1214,7 @@ static const struct inode_operations v9fs_file_inode_operations = {
 };
 
 static const struct inode_operations v9fs_symlink_inode_operations = {
-	.readlink = v9fs_vfs_readlink,
+	.readlink = generic_readlink,
 	.follow_link = v9fs_vfs_follow_link,
 	.put_link = v9fs_vfs_put_link,
 	.getattr = v9fs_vfs_getattr,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 14a86448572..69357c0d989 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -188,7 +188,8 @@ static void v9fs_kill_super(struct super_block *s)
 
 	P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
 
-	v9fs_dentry_release(s->s_root);	/* clunk root */
+	if (s->s_root)
+		v9fs_dentry_release(s->s_root);	/* clunk root */
 
 	kill_anon_super(s);
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e511dc621a2..0e40caaba45 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -106,8 +106,8 @@ struct affs_sb_info {
 	u32 s_last_bmap;
 	struct buffer_head *s_bmap_bh;
 	char *s_prefix;			/* Prefix for volumes and assigns. */
-	int s_prefix_len;		/* Length of prefix. */
 	char s_volume[32];		/* Volume prefix for absolute symlinks. */
+	spinlock_t symlink_lock;	/* protects the previous two */
 };
 
 #define SF_INTL		0x0001		/* International filesystem. */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 960d336ec69..d70bbbac6b7 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -341,10 +341,13 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
 	p = (char *)AFFS_HEAD(bh)->table;
 	lc = '/';
 	if (*symname == '/') {
+		struct affs_sb_info *sbi = AFFS_SB(sb);
 		while (*symname == '/')
 			symname++;
-		while (AFFS_SB(sb)->s_volume[i])	/* Cannot overflow */
-			*p++ = AFFS_SB(sb)->s_volume[i++];
+		spin_lock(&sbi->symlink_lock);
+		while (sbi->s_volume[i])	/* Cannot overflow */
+			*p++ = sbi->s_volume[i++];
+		spin_unlock(&sbi->symlink_lock);
 	}
 	while (i < maxlen && (c = *symname++)) {
 		if (c == '.' && lc == '/' && *symname == '.'
&& symname[1] == '/') { diff --git a/fs/affs/super.c b/fs/affs/super.c index 104fdcb3a7f..d41e9673cd9 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s switch (token) { case Opt_bs: if (match_int(&args[0], &n)) - return -EINVAL; + return 0; if (n != 512 && n != 1024 && n != 2048 && n != 4096) { printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n"); @@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s break; case Opt_mode: if (match_octal(&args[0], &option)) - return 1; + return 0; *mode = option & 0777; *mount_opts |= SF_SETMODE; break; @@ -221,8 +221,6 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s *mount_opts |= SF_MUFS; break; case Opt_prefix: - /* Free any previous prefix */ - kfree(*prefix); *prefix = match_strdup(&args[0]); if (!*prefix) return 0; @@ -233,21 +231,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s break; case Opt_reserved: if (match_int(&args[0], reserved)) - return 1; + return 0; break; case Opt_root: if (match_int(&args[0], root)) - return 1; + return 0; break; case Opt_setgid: if (match_int(&args[0], &option)) - return 1; + return 0; *gid = option; *mount_opts |= SF_SETGID; break; case Opt_setuid: if (match_int(&args[0], &option)) - return -EINVAL; + return 0; *uid = option; *mount_opts |= SF_SETUID; break; @@ -311,11 +309,14 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; sb->s_fs_info = sbi; mutex_init(&sbi->s_bmlock); + spin_lock_init(&sbi->symlink_lock); if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block, &blocksize,&sbi->s_prefix, sbi->s_volume, &mount_flags)) { printk(KERN_ERR "AFFS: Error parsing options\n"); + kfree(sbi->s_prefix); + kfree(sbi); return -EINVAL; } /* N.B. after this point s_prefix must be released */ @@ -516,14 +517,18 @@ affs_remount(struct super_block *sb, int *flags, char *data) unsigned long mount_flags; int res = 0; char *new_opts = kstrdup(data, GFP_KERNEL); + char volume[32]; + char *prefix = NULL; pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data); *flags |= MS_NODIRATIME; + memcpy(volume, sbi->s_volume, 32); if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block, - &blocksize, &sbi->s_prefix, sbi->s_volume, + &blocksize, &prefix, volume, &mount_flags)) { + kfree(prefix); kfree(new_opts); return -EINVAL; } @@ -534,6 +539,14 @@ affs_remount(struct super_block *sb, int *flags, char *data) sbi->s_mode = mode; sbi->s_uid = uid; sbi->s_gid = gid; + /* protect against readers */ + spin_lock(&sbi->symlink_lock); + if (prefix) { + kfree(sbi->s_prefix); + sbi->s_prefix = prefix; + } + memcpy(sbi->s_volume, volume, 32); + spin_unlock(&sbi->symlink_lock); if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { unlock_kernel(); diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c index 41782539c90..ee00f08c4f5 100644 --- a/fs/affs/symlink.c +++ b/fs/affs/symlink.c @@ -20,7 +20,6 @@ static int affs_symlink_readpage(struct file *file, struct page *page) int i, j; char c; char lc; - char *pf; pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino); @@ -32,11 +31,15 @@ static int affs_symlink_readpage(struct file *file, struct page *page) j = 0; lf = (struct slink_front *)bh->b_data; lc = 0; - pf = AFFS_SB(inode->i_sb)->s_prefix ? 
AFFS_SB(inode->i_sb)->s_prefix : "/"; if (strchr(lf->symname,':')) { /* Handle assign or volume name */ + struct affs_sb_info *sbi = AFFS_SB(inode->i_sb); + char *pf; + spin_lock(&sbi->symlink_lock); + pf = sbi->s_prefix ? sbi->s_prefix : "/"; while (i < 1023 && (c = pf[i])) link[i++] = c; + spin_unlock(&sbi->symlink_lock); while (i < 1023 && lf->symname[j] != ':') link[i++] = lf->symname[j++]; if (i < 1023) diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 0118d67221b..3d283abf67d 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -60,11 +60,6 @@ do { \ current->pid, __func__, ##args); \ } while (0) -struct rehash_entry { - struct task_struct *task; - struct list_head list; -}; - /* Unified info structure. This is pointed to by both the dentry and inode structures. Each file in the filesystem has an instance of this structure. It holds a reference to the dentry, so dentries are never @@ -81,7 +76,6 @@ struct autofs_info { struct list_head active; int active_count; - struct list_head rehash_list; struct list_head expiring; @@ -104,7 +98,6 @@ struct autofs_info { #define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ #define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */ #define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ -#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */ struct autofs_wait_queue { wait_queue_head_t queue; diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 00bf8fcb245..c8a80dffb45 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -544,10 +544,9 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp, goto out; devid = new_encode_dev(path.mnt->mnt_sb->s_dev); err = 0; - if (path.dentry->d_inode && - path.mnt->mnt_root == path.dentry) { + if (path.mnt->mnt_root == path.dentry) { err = 1; - magic = path.dentry->d_inode->i_sb->s_magic; + magic = path.mnt->mnt_sb->s_magic; } } else { dev_t dev = sbi->sb->s_dev; @@ -560,10 +559,8 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp, err = have_submounts(path.dentry); - if (path.mnt->mnt_mountpoint != path.mnt->mnt_root) { - if (follow_down(&path)) - magic = path.mnt->mnt_sb->s_magic; - } + if (follow_down(&path)) + magic = path.mnt->mnt_sb->s_magic; } param->ismountpoint.out.devid = devid; diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 74bc9aa6df3..a796c9417fb 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -279,7 +279,6 @@ struct dentry *autofs4_expire_direct(struct super_block *sb, root->d_mounted--; } ino->flags |= AUTOFS_INF_EXPIRING; - autofs4_add_expiring(root); init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); return root; @@ -407,7 +406,6 @@ found: expired, (int)expired->d_name.len, expired->d_name.name); ino = autofs4_dentry_ino(expired); ino->flags |= AUTOFS_INF_EXPIRING; - autofs4_add_expiring(expired); init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); spin_lock(&dcache_lock); @@ -435,7 +433,7 @@ int autofs4_expire_wait(struct dentry *dentry) DPRINTK("expire done status=%d", status); - if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode)) + if (d_unhashed(dentry)) return -EAGAIN; return status; @@ -475,7 +473,6 @@ int autofs4_expire_run(struct super_block *sb, spin_lock(&sbi->fs_lock); ino = autofs4_dentry_ino(dentry); ino->flags &= ~AUTOFS_INF_EXPIRING; - autofs4_del_expiring(dentry); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); @@ -506,7 +503,6 @@ int autofs4_do_expire_multi(struct 
super_block *sb, struct vfsmount *mnt, ino->flags &= ~AUTOFS_INF_MOUNTPOINT; } ino->flags &= ~AUTOFS_INF_EXPIRING; - autofs4_del_expiring(dentry); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); dput(dentry); diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index d0a3de24745..821b2b955da 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -49,7 +49,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, ino->dentry = NULL; ino->size = 0; INIT_LIST_HEAD(&ino->active); - INIT_LIST_HEAD(&ino->rehash_list); ino->active_count = 0; INIT_LIST_HEAD(&ino->expiring); atomic_set(&ino->count, 0); @@ -97,63 +96,6 @@ void autofs4_free_ino(struct autofs_info *ino) kfree(ino); } -/* - * Deal with the infamous "Busy inodes after umount ..." message. - * - * Clean up the dentry tree. This happens with autofs if the user - * space program goes away due to a SIGKILL, SIGSEGV etc. - */ -static void autofs4_force_release(struct autofs_sb_info *sbi) -{ - struct dentry *this_parent = sbi->sb->s_root; - struct list_head *next; - - if (!sbi->sb->s_root) - return; - - spin_lock(&dcache_lock); -repeat: - next = this_parent->d_subdirs.next; -resume: - while (next != &this_parent->d_subdirs) { - struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child); - - /* Negative dentry - don`t care */ - if (!simple_positive(dentry)) { - next = next->next; - continue; - } - - if (!list_empty(&dentry->d_subdirs)) { - this_parent = dentry; - goto repeat; - } - - next = next->next; - spin_unlock(&dcache_lock); - - DPRINTK("dentry %p %.*s", - dentry, (int)dentry->d_name.len, dentry->d_name.name); - - dput(dentry); - spin_lock(&dcache_lock); - } - - if (this_parent != sbi->sb->s_root) { - struct dentry *dentry = this_parent; - - next = this_parent->d_u.d_child.next; - this_parent = this_parent->d_parent; - spin_unlock(&dcache_lock); - DPRINTK("parent dentry %p %.*s", - dentry, (int)dentry->d_name.len, dentry->d_name.name); - dput(dentry); - spin_lock(&dcache_lock); - goto resume; - } - spin_unlock(&dcache_lock); -} - void autofs4_kill_sb(struct super_block *sb) { struct autofs_sb_info *sbi = autofs4_sbi(sb); @@ -170,15 +112,12 @@ void autofs4_kill_sb(struct super_block *sb) /* Free wait queues, close pipe */ autofs4_catatonic_mode(sbi); - /* Clean up and release dangling references */ - autofs4_force_release(sbi); - sb->s_fs_info = NULL; kfree(sbi); out_kill_sb: DPRINTK("shutting down"); - kill_anon_super(sb); + kill_litter_super(sb); } static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 30cc9ddf4b7..a015b49891d 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -104,99 +104,6 @@ static void autofs4_del_active(struct dentry *dentry) return; } -static void autofs4_add_rehash_entry(struct autofs_info *ino, - struct rehash_entry *entry) -{ - entry->task = current; - INIT_LIST_HEAD(&entry->list); - list_add(&entry->list, &ino->rehash_list); - return; -} - -static void autofs4_remove_rehash_entry(struct autofs_info *ino) -{ - struct list_head *head = &ino->rehash_list; - struct rehash_entry *entry; - list_for_each_entry(entry, head, list) { - if (entry->task == current) { - list_del(&entry->list); - kfree(entry); - break; - } - } - return; -} - -static void autofs4_remove_rehash_entrys(struct autofs_info *ino) -{ - struct autofs_sb_info *sbi = ino->sbi; - struct rehash_entry *entry, *next; - struct list_head *head; - - spin_lock(&sbi->fs_lock); - spin_lock(&sbi->lookup_lock); - if (!(ino->flags & 
AUTOFS_INF_REHASH)) { - spin_unlock(&sbi->lookup_lock); - spin_unlock(&sbi->fs_lock); - return; - } - ino->flags &= ~AUTOFS_INF_REHASH; - head = &ino->rehash_list; - list_for_each_entry_safe(entry, next, head, list) { - list_del(&entry->list); - kfree(entry); - } - spin_unlock(&sbi->lookup_lock); - spin_unlock(&sbi->fs_lock); - dput(ino->dentry); - - return; -} - -static void autofs4_revalidate_drop(struct dentry *dentry, - struct rehash_entry *entry) -{ - struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); - struct autofs_info *ino = autofs4_dentry_ino(dentry); - /* - * Add to the active list so we can pick this up in - * ->lookup(). Also add an entry to a rehash list so - * we know when there are no dentrys in flight so we - * know when we can rehash the dentry. - */ - spin_lock(&sbi->lookup_lock); - if (list_empty(&ino->active)) - list_add(&ino->active, &sbi->active_list); - autofs4_add_rehash_entry(ino, entry); - spin_unlock(&sbi->lookup_lock); - if (!(ino->flags & AUTOFS_INF_REHASH)) { - ino->flags |= AUTOFS_INF_REHASH; - dget(dentry); - spin_lock(&dentry->d_lock); - __d_drop(dentry); - spin_unlock(&dentry->d_lock); - } - return; -} - -static void autofs4_revalidate_rehash(struct dentry *dentry) -{ - struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); - struct autofs_info *ino = autofs4_dentry_ino(dentry); - if (ino->flags & AUTOFS_INF_REHASH) { - spin_lock(&sbi->lookup_lock); - autofs4_remove_rehash_entry(ino); - if (list_empty(&ino->rehash_list)) { - spin_unlock(&sbi->lookup_lock); - ino->flags &= ~AUTOFS_INF_REHASH; - d_rehash(dentry); - dput(ino->dentry); - } else - spin_unlock(&sbi->lookup_lock); - } - return; -} - static unsigned int autofs4_need_mount(unsigned int flags) { unsigned int res = 0; @@ -236,7 +143,7 @@ out: return dcache_dir_open(inode, file); } -static int try_to_fill_dentry(struct dentry *dentry) +static int try_to_fill_dentry(struct dentry *dentry, int flags) { struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); struct autofs_info *ino = autofs4_dentry_ino(dentry); @@ -249,17 +156,55 @@ static int try_to_fill_dentry(struct dentry *dentry) * Wait for a pending mount, triggering one if there * isn't one already */ - DPRINTK("waiting for mount name=%.*s", - dentry->d_name.len, dentry->d_name.name); + if (dentry->d_inode == NULL) { + DPRINTK("waiting for mount name=%.*s", + dentry->d_name.len, dentry->d_name.name); - status = autofs4_wait(sbi, dentry, NFY_MOUNT); + status = autofs4_wait(sbi, dentry, NFY_MOUNT); - DPRINTK("mount done status=%d", status); + DPRINTK("mount done status=%d", status); - /* Update expiry counter */ - ino->last_used = jiffies; + /* Turn this into a real negative dentry? 
*/ + if (status == -ENOENT) { + spin_lock(&sbi->fs_lock); + ino->flags &= ~AUTOFS_INF_PENDING; + spin_unlock(&sbi->fs_lock); + return status; + } else if (status) { + /* Return a negative dentry, but leave it "pending" */ + return status; + } + /* Trigger mount for path component or follow link */ + } else if (ino->flags & AUTOFS_INF_PENDING || + autofs4_need_mount(flags) || + current->link_count) { + DPRINTK("waiting for mount name=%.*s", + dentry->d_name.len, dentry->d_name.name); - return status; + spin_lock(&sbi->fs_lock); + ino->flags |= AUTOFS_INF_PENDING; + spin_unlock(&sbi->fs_lock); + status = autofs4_wait(sbi, dentry, NFY_MOUNT); + + DPRINTK("mount done status=%d", status); + + if (status) { + spin_lock(&sbi->fs_lock); + ino->flags &= ~AUTOFS_INF_PENDING; + spin_unlock(&sbi->fs_lock); + return status; + } + } + + /* Initialize expiry counter after successful mount */ + if (ino) + ino->last_used = jiffies; + + spin_lock(&sbi->fs_lock); + ino->flags &= ~AUTOFS_INF_PENDING; + spin_unlock(&sbi->fs_lock); + + return 0; } /* For autofs direct mounts the follow link triggers the mount */ @@ -313,16 +258,10 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) */ if (ino->flags & AUTOFS_INF_PENDING || (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) { - ino->flags |= AUTOFS_INF_PENDING; spin_unlock(&dcache_lock); spin_unlock(&sbi->fs_lock); - status = try_to_fill_dentry(dentry); - - spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - + status = try_to_fill_dentry(dentry, 0); if (status) goto out_error; @@ -361,47 +300,18 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) { struct inode *dir = dentry->d_parent->d_inode; struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); - struct autofs_info *ino = autofs4_dentry_ino(dentry); - struct rehash_entry *entry; + int oz_mode = autofs4_oz_mode(sbi); int flags = nd ? nd->flags : 0; - unsigned int mutex_aquired; + int status = 1; - DPRINTK("name = %.*s oz_mode = %d", - dentry->d_name.len, dentry->d_name.name, oz_mode); - - /* Daemon never causes a mount to trigger */ - if (autofs4_oz_mode(sbi)) - return 1; - - entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - mutex_aquired = mutex_trylock(&dir->i_mutex); - - spin_lock(&sbi->fs_lock); - spin_lock(&dcache_lock); /* Pending dentry */ + spin_lock(&sbi->fs_lock); if (autofs4_ispending(dentry)) { - int status; - - /* - * We can only unhash and send this to ->lookup() if - * the directory mutex is held over d_revalidate() and - * ->lookup(). This prevents the VFS from incorrectly - * seeing the dentry as non-existent. - */ - ino->flags |= AUTOFS_INF_PENDING; - if (!mutex_aquired) { - autofs4_revalidate_drop(dentry, entry); - spin_unlock(&dcache_lock); - spin_unlock(&sbi->fs_lock); - return 0; - } - spin_unlock(&dcache_lock); + /* The daemon never causes a mount to trigger */ spin_unlock(&sbi->fs_lock); - mutex_unlock(&dir->i_mutex); - kfree(entry); + + if (oz_mode) + return 1; /* * If the directory has gone away due to an expire @@ -415,82 +325,45 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) * A zero status is success otherwise we have a * negative error code. 
*/ - status = try_to_fill_dentry(dentry); - - spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - + status = try_to_fill_dentry(dentry, flags); if (status == 0) return 1; return status; } + spin_unlock(&sbi->fs_lock); + + /* Negative dentry.. invalidate if "old" */ + if (dentry->d_inode == NULL) + return 0; /* Check for a non-mountpoint directory with no contents */ + spin_lock(&dcache_lock); if (S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { DPRINTK("dentry=%p %.*s, emptydir", dentry, dentry->d_name.len, dentry->d_name.name); + spin_unlock(&dcache_lock); - if (autofs4_need_mount(flags) || current->link_count) { - int status; - - /* - * We can only unhash and send this to ->lookup() if - * the directory mutex is held over d_revalidate() and - * ->lookup(). This prevents the VFS from incorrectly - * seeing the dentry as non-existent. - */ - ino->flags |= AUTOFS_INF_PENDING; - if (!mutex_aquired) { - autofs4_revalidate_drop(dentry, entry); - spin_unlock(&dcache_lock); - spin_unlock(&sbi->fs_lock); - return 0; - } - spin_unlock(&dcache_lock); - spin_unlock(&sbi->fs_lock); - mutex_unlock(&dir->i_mutex); - kfree(entry); - - /* - * A zero status is success otherwise we have a - * negative error code. - */ - status = try_to_fill_dentry(dentry); - - spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); + /* The daemon never causes a mount to trigger */ + if (oz_mode) + return 1; - if (status == 0) - return 1; + /* + * A zero status is success otherwise we have a + * negative error code. + */ + status = try_to_fill_dentry(dentry, flags); + if (status == 0) + return 1; - return status; - } + return status; } spin_unlock(&dcache_lock); - spin_unlock(&sbi->fs_lock); - - if (mutex_aquired) - mutex_unlock(&dir->i_mutex); - - kfree(entry); return 1; } -static void autofs4_free_rehash_entrys(struct autofs_info *inf) -{ - struct list_head *head = &inf->rehash_list; - struct rehash_entry *entry, *next; - list_for_each_entry_safe(entry, next, head, list) { - list_del(&entry->list); - kfree(entry); - } -} - void autofs4_dentry_release(struct dentry *de) { struct autofs_info *inf; @@ -509,8 +382,6 @@ void autofs4_dentry_release(struct dentry *de) list_del(&inf->active); if (!list_empty(&inf->expiring)) list_del(&inf->expiring); - if (!list_empty(&inf->rehash_list)) - autofs4_free_rehash_entrys(inf); spin_unlock(&sbi->lookup_lock); } @@ -543,7 +414,6 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry) const unsigned char *str = name->name; struct list_head *p, *head; -restart: spin_lock(&dcache_lock); spin_lock(&sbi->lookup_lock); head = &sbi->active_list; @@ -561,19 +431,6 @@ restart: if (atomic_read(&active->d_count) == 0) goto next; - if (active->d_inode && IS_DEADDIR(active->d_inode)) { - if (!list_empty(&ino->rehash_list)) { - dget(active); - spin_unlock(&active->d_lock); - spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); - autofs4_remove_rehash_entrys(ino); - dput(active); - goto restart; - } - goto next; - } - qstr = &active->d_name; if (active->d_name.hash != hash) @@ -586,11 +443,13 @@ restart: if (memcmp(qstr->name, str, len)) goto next; - dget(active); - spin_unlock(&active->d_lock); - spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); - return active; + if (d_unhashed(active)) { + dget(active); + spin_unlock(&active->d_lock); + spin_unlock(&sbi->lookup_lock); + spin_unlock(&dcache_lock); + return active; + } next: 
spin_unlock(&active->d_lock); } @@ -639,11 +498,13 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry) if (memcmp(qstr->name, str, len)) goto next; - dget(expiring); - spin_unlock(&expiring->d_lock); - spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); - return expiring; + if (d_unhashed(expiring)) { + dget(expiring); + spin_unlock(&expiring->d_lock); + spin_unlock(&sbi->lookup_lock); + spin_unlock(&dcache_lock); + return expiring; + } next: spin_unlock(&expiring->d_lock); } @@ -653,48 +514,6 @@ next: return NULL; } -static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi, - struct dentry *dentry, int oz_mode) -{ - struct autofs_info *ino; - - /* - * Mark the dentry incomplete but don't hash it. We do this - * to serialize our inode creation operations (symlink and - * mkdir) which prevents deadlock during the callback to - * the daemon. Subsequent user space lookups for the same - * dentry are placed on the wait queue while the daemon - * itself is allowed passage unresticted so the create - * operation itself can then hash the dentry. Finally, - * we check for the hashed dentry and return the newly - * hashed dentry. - */ - dentry->d_op = &autofs4_root_dentry_operations; - - /* - * And we need to ensure that the same dentry is used for - * all following lookup calls until it is hashed so that - * the dentry flags are persistent throughout the request. - */ - ino = autofs4_init_ino(NULL, sbi, 0555); - if (!ino) - return ERR_PTR(-ENOMEM); - - dentry->d_fsdata = ino; - ino->dentry = dentry; - - /* - * Only set the mount pending flag for new dentrys not created - * by the daemon. - */ - if (!oz_mode) - ino->flags |= AUTOFS_INF_PENDING; - - d_instantiate(dentry, NULL); - - return ino; -} - /* Lookups in the root directory */ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { @@ -702,7 +521,6 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s struct autofs_info *ino; struct dentry *expiring, *active; int oz_mode; - int status = 0; DPRINTK("name = %.*s", dentry->d_name.len, dentry->d_name.name); @@ -717,26 +535,44 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); - spin_lock(&sbi->fs_lock); active = autofs4_lookup_active(dentry); if (active) { dentry = active; ino = autofs4_dentry_ino(dentry); - /* If this came from revalidate, rehash it */ - autofs4_revalidate_rehash(dentry); - spin_unlock(&sbi->fs_lock); } else { - spin_unlock(&sbi->fs_lock); - ino = init_new_dentry(sbi, dentry, oz_mode); - if (IS_ERR(ino)) - return (struct dentry *) ino; - } + /* + * Mark the dentry incomplete but don't hash it. We do this + * to serialize our inode creation operations (symlink and + * mkdir) which prevents deadlock during the callback to + * the daemon. Subsequent user space lookups for the same + * dentry are placed on the wait queue while the daemon + * itself is allowed passage unresticted so the create + * operation itself can then hash the dentry. Finally, + * we check for the hashed dentry and return the newly + * hashed dentry. + */ + dentry->d_op = &autofs4_root_dentry_operations; + + /* + * And we need to ensure that the same dentry is used for + * all following lookup calls until it is hashed so that + * the dentry flags are persistent throughout the request. 
+ */ + ino = autofs4_init_ino(NULL, sbi, 0555); + if (!ino) + return ERR_PTR(-ENOMEM); - autofs4_add_active(dentry); + dentry->d_fsdata = ino; + ino->dentry = dentry; + + autofs4_add_active(dentry); + + d_instantiate(dentry, NULL); + } if (!oz_mode) { - expiring = autofs4_lookup_expiring(dentry); mutex_unlock(&dir->i_mutex); + expiring = autofs4_lookup_expiring(dentry); if (expiring) { /* * If we are racing with expire the request might not @@ -744,22 +580,23 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s * so it must have been successful, so just wait for it. */ autofs4_expire_wait(expiring); + autofs4_del_expiring(expiring); dput(expiring); } - status = try_to_fill_dentry(dentry); - mutex_lock(&dir->i_mutex); + spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; + ino->flags |= AUTOFS_INF_PENDING; spin_unlock(&sbi->fs_lock); + if (dentry->d_op && dentry->d_op->d_revalidate) + (dentry->d_op->d_revalidate)(dentry, nd); + mutex_lock(&dir->i_mutex); } - autofs4_del_active(dentry); - /* - * If we had a mount fail, check if we had to handle + * If we are still pending, check if we had to handle * a signal. If so we can force a restart.. */ - if (status) { + if (ino->flags & AUTOFS_INF_PENDING) { /* See if we were interrupted */ if (signal_pending(current)) { sigset_t *sigset = ¤t->pending.signal; @@ -771,46 +608,43 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s return ERR_PTR(-ERESTARTNOINTR); } } - } - - /* - * User space can (and has done in the past) remove and re-create - * this directory during the callback. This can leave us with an - * unhashed dentry, but a successful mount! So we need to - * perform another cached lookup in case the dentry now exists. - */ - if (!oz_mode && !have_submounts(dentry)) { - struct dentry *new; - new = d_lookup(dentry->d_parent, &dentry->d_name); - if (new) { - if (active) - dput(active); - return new; - } else { - if (!status) - status = -ENOENT; + if (!oz_mode) { + spin_lock(&sbi->fs_lock); + ino->flags &= ~AUTOFS_INF_PENDING; + spin_unlock(&sbi->fs_lock); } } /* - * If we had a mount failure, return status to user space. - * If the mount succeeded and we used a dentry from the active queue - * return it. + * If this dentry is unhashed, then we shouldn't honour this + * lookup. Returning ENOENT here doesn't do the right thing + * for all system calls, but it should be OK for the operations + * we permit from an autofs. */ - if (status) { - dentry = ERR_PTR(status); - if (active) - dput(active); - return dentry; - } else { + if (!oz_mode && d_unhashed(dentry)) { /* - * Valid successful mount, return active dentry or NULL - * for a new dentry. + * A user space application can (and has done in the past) + * remove and re-create this directory during the callback. + * This can leave us with an unhashed dentry, but a + * successful mount! So we need to perform another + * cached lookup in case the dentry now exists. 
*/ + struct dentry *parent = dentry->d_parent; + struct dentry *new = d_lookup(parent, &dentry->d_name); + if (new != NULL) + dentry = new; + else + dentry = ERR_PTR(-ENOENT); + if (active) - return active; + dput(active); + + return dentry; } + if (active) + return active; + return NULL; } @@ -834,6 +668,8 @@ static int autofs4_dir_symlink(struct inode *dir, if (!ino) return -ENOMEM; + autofs4_del_active(dentry); + ino->size = strlen(symname); cp = kmalloc(ino->size + 1, GFP_KERNEL); if (!cp) { @@ -910,6 +746,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) dir->i_mtime = CURRENT_TIME; spin_lock(&dcache_lock); + autofs4_add_expiring(dentry); spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); @@ -935,6 +772,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) spin_unlock(&dcache_lock); return -ENOTEMPTY; } + autofs4_add_expiring(dentry); spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); @@ -972,6 +810,8 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (!ino) return -ENOMEM; + autofs4_del_active(dentry); + inode = autofs4_get_inode(dir->i_sb, ino); if (!inode) { if (!dentry->d_fsdata) diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 33baf27fac7..34ddda888e6 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent) brelse(bh); unacquire_priv_sbp: + kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); unacquire_none: diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 6f60336c662..8f3d9fd8960 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) struct inode *inode; unsigned i, imap_len; struct bfs_sb_info *info; - long ret = -EINVAL; + int ret = -EINVAL; unsigned long i_sblock, i_eblock, i_eoff, s_size; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; + mutex_init(&info->bfs_lock); s->s_fs_info = info; sb_set_blocksize(s, BFS_BSIZE); - bh = sb_bread(s, 0); - if(!bh) + info->si_sbh = sb_bread(s, 0); + if (!info->si_sbh) goto out; - bfs_sb = (struct bfs_super_block *)bh->b_data; + bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data; if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) { if (!silent) printf("No BFS filesystem on %s (magic=%08x)\n", s->s_id, le32_to_cpu(bfs_sb->s_magic)); - goto out; + goto out1; } if (BFS_UNCLEAN(bfs_sb, s) && !silent) printf("%s is unclean, continuing\n", s->s_id); s->s_magic = BFS_MAGIC; - info->si_sbh = bh; if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) { printf("Superblock is corrupted\n"); - goto out; + goto out1; } info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / @@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) imap_len = (info->si_lasti / 8) + 1; info->si_imap = kzalloc(imap_len, GFP_KERNEL); if (!info->si_imap) - goto out; + goto out1; for (i = 0; i < BFS_ROOT_INO; i++) set_bit(i, info->si_imap); @@ -398,15 +398,13 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) inode = bfs_iget(s, BFS_ROOT_INO); if (IS_ERR(inode)) { ret = PTR_ERR(inode); - kfree(info->si_imap); - goto out; + goto out2; } s->s_root = d_alloc_root(inode); if (!s->s_root) { iput(inode); ret = -ENOMEM; - kfree(info->si_imap); - goto out; + goto out2; } info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS; @@ -419,10 +417,8 @@ static 
int bfs_fill_super(struct super_block *s, void *data, int silent) bh = sb_bread(s, info->si_blocks - 1); if (!bh) { printf("Last block not available: %lu\n", info->si_blocks - 1); - iput(inode); ret = -EIO; - kfree(info->si_imap); - goto out; + goto out3; } brelse(bh); @@ -459,11 +455,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) printf("Inode 0x%08x corrupted\n", i); brelse(bh); - s->s_root = NULL; - kfree(info->si_imap); - kfree(info); - s->s_fs_info = NULL; - return -EIO; + ret = -EIO; + goto out3; } if (!di->i_ino) { @@ -483,11 +476,17 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) s->s_dirt = 1; } dump_imap("read_super", s); - mutex_init(&info->bfs_lock); return 0; +out3: + dput(s->s_root); + s->s_root = NULL; +out2: + kfree(info->si_imap); +out1: + brelse(info->si_sbh); out: - brelse(bh); + mutex_destroy(&info->bfs_lock); kfree(info); s->s_fs_info = NULL; return ret; diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 346b6940536..fdd39709917 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -264,6 +264,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) #else set_personality(PER_LINUX); #endif + setup_new_exec(bprm); current->mm->end_code = ex.a_text + (current->mm->start_code = N_TXTADDR(ex)); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index edd90c49003..fd5b2ea5d29 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -662,27 +662,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') goto out_free_interp; - /* - * The early SET_PERSONALITY here is so that the lookup - * for the interpreter happens in the namespace of the - * to-be-execed image. SET_PERSONALITY can select an - * alternate root. - * - * However, SET_PERSONALITY is NOT allowed to switch - * this task into the new images's memory mapping - * policy - that is, TASK_SIZE must still evaluate to - * that which is appropriate to the execing application. - * This is because exit_mmap() needs to have TASK_SIZE - * evaluate to the size of the old image. - * - * So if (say) a 64-bit application is execing a 32-bit - * application it is the architecture's responsibility - * to defer changing the value of TASK_SIZE until the - * switch really is going to happen - do this in - * flush_thread(). - akpm - */ - SET_PERSONALITY(loc->elf_ex); - interpreter = open_exec(elf_interpreter); retval = PTR_ERR(interpreter); if (IS_ERR(interpreter)) @@ -730,9 +709,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* Verify the interpreter has a valid arch */ if (!elf_check_arch(&loc->interp_elf_ex)) goto out_free_dentry; - } else { - /* Executables without an interpreter also need a personality */ - SET_PERSONALITY(loc->elf_ex); } /* Flush all traces of the currently running executable */ @@ -752,7 +728,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current->flags |= PF_RANDOMIZE; - arch_pick_mmap_layout(current->mm); + + setup_new_exec(bprm); /* Do this so that we can load the interpreter, if need be. 
We will change some of these later */ diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index c57d9ce5ff7..18d77297ccc 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -321,6 +321,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, set_personality(PER_LINUX_FDPIC); if (elf_read_implies_exec(&exec_params.hdr, executable_stack)) current->personality |= READ_IMPLIES_EXEC; + + setup_new_exec(bprm); + set_binfmt(&elf_fdpic_format); current->mm->start_code = 0; diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index d4a00ea1054..42c6b4a5444 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -519,6 +519,7 @@ static int load_flat_file(struct linux_binprm * bprm, /* OK, This is the point of no return */ set_personality(PER_LINUX_32BIT); + setup_new_exec(bprm); } /* diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index 2a9b5330cc5..cc8560f6c9b 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c @@ -227,6 +227,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) /* OK, This is the point of no return */ current->flags &= ~PF_FORKNOEXEC; current->personality = PER_HPUX; + setup_new_exec(bprm); /* Set the task size for HP-UX processes such that * the gateway page is outside the address space. diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 49a34e7f730..a16f29e888c 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr) static inline int use_bip_pool(unsigned int idx) { - if (idx == BIOVEC_NR_POOLS) + if (idx == BIOVEC_MAX_IDX) return 1; return 0; @@ -95,6 +95,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio, /* Use mempool if lower order alloc failed or max vecs were requested */ if (bip == NULL) { + idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */ bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); if (unlikely(bip == NULL)) { @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) i = 0; while (i < bio_slab_nr) { - struct bio_slab *bslab = &bio_slabs[i]; + bslab = &bio_slabs[i]; if (!bslab->slab && entry == -1) entry = i; @@ -507,10 +507,8 @@ int bio_get_nr_vecs(struct block_device *bdev) int nr_pages; nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; - if (nr_pages > queue_max_phys_segments(q)) - nr_pages = queue_max_phys_segments(q); - if (nr_pages > queue_max_hw_segments(q)) - nr_pages = queue_max_hw_segments(q); + if (nr_pages > queue_max_segments(q)) + nr_pages = queue_max_segments(q); return nr_pages; } @@ -542,13 +540,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page if (page == prev->bv_page && offset == prev->bv_offset + prev->bv_len) { + unsigned int prev_bv_len = prev->bv_len; prev->bv_len += len; if (q->merge_bvec_fn) { struct bvec_merge_data bvm = { + /* prev_bvec is already charged in + bi_size, discharge it in order to + simulate merging updated prev_bvec + as new bvec. */ .bi_bdev = bio->bi_bdev, .bi_sector = bio->bi_sector, - .bi_size = bio->bi_size, + .bi_size = bio->bi_size - prev_bv_len, .bi_rw = bio->bi_rw, }; @@ -570,8 +573,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page * make this too complex. 
*/ - while (bio->bi_phys_segments >= queue_max_phys_segments(q) - || bio->bi_phys_segments >= queue_max_hw_segments(q)) { + while (bio->bi_phys_segments >= queue_max_segments(q)) { if (retried_segments) return 0; diff --git a/fs/block_dev.c b/fs/block_dev.c index 73d6a735b8f..d11d0289f3d 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev) if (!sb) goto out; if (sb->s_flags & MS_RDONLY) { - deactivate_locked_super(sb); + sb->s_frozen = SB_FREEZE_TRANS; + up_write(&sb->s_umount); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; } @@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) BUG_ON(sb->s_bdev != bdev); down_write(&sb->s_umount); if (sb->s_flags & MS_RDONLY) - goto out_deactivate; + goto out_unfrozen; if (sb->s_op->unfreeze_fs) { error = sb->s_op->unfreeze_fs(sb); @@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) } } +out_unfrozen: sb->s_frozen = SB_UNFROZEN; smp_wmb(); wake_up(&sb->s_wait_unfrozen); -out_deactivate: if (sb) deactivate_locked_super(sb); out_unlock: diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 2e9e69987a8..6df6d6ed74f 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -112,12 +112,14 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans, switch (type) { case ACL_TYPE_ACCESS: mode = inode->i_mode; - ret = posix_acl_equiv_mode(acl, &mode); - if (ret < 0) - return ret; - ret = 0; - inode->i_mode = mode; name = POSIX_ACL_XATTR_ACCESS; + if (acl) { + ret = posix_acl_equiv_mode(acl, &mode); + if (ret < 0) + return ret; + inode->i_mode = mode; + } + ret = 0; break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) @@ -242,6 +244,7 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans, ACL_TYPE_ACCESS); } } + posix_acl_release(clone); } failed: posix_acl_release(acl); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9f806dd04c2..2aa8ec6a098 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1161,6 +1161,7 @@ struct btrfs_root { #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) #define BTRFS_MOUNT_NOSSD (1 << 9) #define BTRFS_MOUNT_DISCARD (1 << 10) +#define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 009e3bd18f2..2b59201b955 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!(sb->s_flags & MS_RDONLY)) { ret = btrfs_recover_relocation(tree_root); - BUG_ON(ret); + if (ret < 0) { + printk(KERN_WARNING + "btrfs: failed to recover relocation\n"); + err = -EINVAL; + goto fail_trans_kthread; + } } location.objectid = BTRFS_FS_TREE_OBJECTID; @@ -1993,6 +1998,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!fs_info->fs_root) goto fail_trans_kthread; + if (!(sb->s_flags & MS_RDONLY)) { + down_read(&fs_info->cleanup_work_sem); + btrfs_orphan_cleanup(fs_info->fs_root); + up_read(&fs_info->cleanup_work_sem); + } + return tree_root; fail_trans_kthread: diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 56e50137d0e..559f72489b3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -83,6 +83,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) return (cache->flags & bits) == bits; } +void btrfs_get_block_group(struct btrfs_block_group_cache *cache) +{ + atomic_inc(&cache->count); +} + +void 
btrfs_put_block_group(struct btrfs_block_group_cache *cache) +{ + if (atomic_dec_and_test(&cache->count)) + kfree(cache); +} + /* * this adds the block group to the fs_info rb tree for the block group * cache @@ -156,7 +167,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, } } if (ret) - atomic_inc(&ret->count); + btrfs_get_block_group(ret); spin_unlock(&info->block_group_cache_lock); return ret; @@ -407,6 +418,8 @@ err: put_caching_control(caching_ctl); atomic_dec(&block_group->space_info->caching_threads); + btrfs_put_block_group(block_group); + return 0; } @@ -447,6 +460,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache) up_write(&fs_info->extent_commit_sem); atomic_inc(&cache->space_info->caching_threads); + btrfs_get_block_group(cache); tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n", cache->key.objectid); @@ -486,12 +500,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group( return cache; } -void btrfs_put_block_group(struct btrfs_block_group_cache *cache) -{ - if (atomic_dec_and_test(&cache->count)) - kfree(cache); -} - static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, u64 flags) { @@ -2582,7 +2590,7 @@ next_block_group(struct btrfs_root *root, if (node) { cache = rb_entry(node, struct btrfs_block_group_cache, cache_node); - atomic_inc(&cache->count); + btrfs_get_block_group(cache); } else cache = NULL; spin_unlock(&root->fs_info->block_group_cache_lock); @@ -4227,7 +4235,7 @@ search: u64 offset; int cached; - atomic_inc(&block_group->count); + btrfs_get_block_group(block_group); search_start = block_group->key.objectid; have_block_group: @@ -4315,7 +4323,7 @@ have_block_group: btrfs_put_block_group(block_group); block_group = last_ptr->block_group; - atomic_inc(&block_group->count); + btrfs_get_block_group(block_group); spin_unlock(&last_ptr->lock); spin_unlock(&last_ptr->refill_lock); @@ -5394,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, int ret; while (level >= 0) { - if (path->slots[level] >= - btrfs_header_nritems(path->nodes[level])) - break; - ret = walk_down_proc(trans, root, path, wc, lookup_info); if (ret > 0) break; @@ -5405,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans, if (level == 0) break; + if (path->slots[level] >= + btrfs_header_nritems(path->nodes[level])) + break; + ret = do_walk_down(trans, root, path, wc, &lookup_info); if (ret > 0) { path->slots[level]++; @@ -7395,9 +7403,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) wait_block_group_cache_done(block_group); btrfs_remove_free_space_cache(block_group); - - WARN_ON(atomic_read(&block_group->count) != 1); - kfree(block_group); + btrfs_put_block_group(block_group); spin_lock(&info->block_group_cache_lock); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 96577e8bf9f..b177ed31961 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, spin_unlock(&tree->buffer_lock); goto free_eb; } - spin_unlock(&tree->buffer_lock); - /* add one reference for the tree */ atomic_inc(&eb->refs); + spin_unlock(&tree->buffer_lock); return eb; free_eb: diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 46bea0f4dc7..428fcac45f9 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -155,20 +155,6 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, return NULL; } -/* - * look for an offset in 
the tree, and if it can't be found, return - * the first offset we can find smaller than 'offset'. - */ -static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) -{ - struct rb_node *prev; - struct rb_node *ret; - ret = __tree_search(root, offset, &prev, NULL); - if (!ret) - return prev; - return ret; -} - /* check to see if two extent_map structs are adjacent and safe to merge */ static int mergable_maps(struct extent_map *prev, struct extent_map *next) { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index feaa13b105d..6ed434ac037 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -506,7 +506,8 @@ next_slot: } static int extent_mergeable(struct extent_buffer *leaf, int slot, - u64 objectid, u64 bytenr, u64 *start, u64 *end) + u64 objectid, u64 bytenr, u64 orig_offset, + u64 *start, u64 *end) { struct btrfs_file_extent_item *fi; struct btrfs_key key; @@ -522,6 +523,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot, fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG || btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr || + btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset || btrfs_file_extent_compression(leaf, fi) || btrfs_file_extent_encryption(leaf, fi) || btrfs_file_extent_other_encoding(leaf, fi)) @@ -561,6 +563,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, u64 split; int del_nr = 0; int del_slot = 0; + int recow; int ret; btrfs_drop_extent_cache(inode, start, end - 1, 0); @@ -568,6 +571,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); BUG_ON(!path); again: + recow = 0; split = start; key.objectid = inode->i_ino; key.type = BTRFS_EXTENT_DATA_KEY; @@ -591,12 +595,60 @@ again: bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi); + memcpy(&new_key, &key, sizeof(new_key)); + + if (start == key.offset && end < extent_end) { + other_start = 0; + other_end = start; + if (extent_mergeable(leaf, path->slots[0] - 1, + inode->i_ino, bytenr, orig_offset, + &other_start, &other_end)) { + new_key.offset = end; + btrfs_set_item_key_safe(trans, root, path, &new_key); + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_num_bytes(leaf, fi, + extent_end - end); + btrfs_set_file_extent_offset(leaf, fi, + end - orig_offset); + fi = btrfs_item_ptr(leaf, path->slots[0] - 1, + struct btrfs_file_extent_item); + btrfs_set_file_extent_num_bytes(leaf, fi, + end - other_start); + btrfs_mark_buffer_dirty(leaf); + goto out; + } + } + + if (start > key.offset && end == extent_end) { + other_start = end; + other_end = 0; + if (extent_mergeable(leaf, path->slots[0] + 1, + inode->i_ino, bytenr, orig_offset, + &other_start, &other_end)) { + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_num_bytes(leaf, fi, + start - key.offset); + path->slots[0]++; + new_key.offset = start; + btrfs_set_item_key_safe(trans, root, path, &new_key); + + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_num_bytes(leaf, fi, + other_end - start); + btrfs_set_file_extent_offset(leaf, fi, + start - orig_offset); + btrfs_mark_buffer_dirty(leaf); + goto out; + } + } while (start > key.offset || end < extent_end) { if (key.offset == start) split = end; - memcpy(&new_key, &key, 
sizeof(new_key)); new_key.offset = split; ret = btrfs_duplicate_item(trans, root, path, &new_key); if (ret == -EAGAIN) { @@ -631,15 +683,18 @@ again: path->slots[0]--; extent_end = end; } + recow = 1; } - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - other_start = end; other_end = 0; - if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, - bytenr, &other_start, &other_end)) { + if (extent_mergeable(leaf, path->slots[0] + 1, + inode->i_ino, bytenr, orig_offset, + &other_start, &other_end)) { + if (recow) { + btrfs_release_path(root, path); + goto again; + } extent_end = other_end; del_slot = path->slots[0] + 1; del_nr++; @@ -650,8 +705,13 @@ again: } other_start = 0; other_end = start; - if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino, - bytenr, &other_start, &other_end)) { + if (extent_mergeable(leaf, path->slots[0] - 1, + inode->i_ino, bytenr, orig_offset, + &other_start, &other_end)) { + if (recow) { + btrfs_release_path(root, path); + goto again; + } key.offset = other_start; del_slot = path->slots[0]; del_nr++; @@ -661,21 +721,23 @@ again: BUG_ON(ret); } if (del_nr == 0) { + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); btrfs_mark_buffer_dirty(leaf); - goto out; - } - - fi = btrfs_item_ptr(leaf, del_slot - 1, - struct btrfs_file_extent_item); - btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); - btrfs_set_file_extent_num_bytes(leaf, fi, - extent_end - key.offset); - btrfs_mark_buffer_dirty(leaf); + } else { + fi = btrfs_item_ptr(leaf, del_slot - 1, + struct btrfs_file_extent_item); + btrfs_set_file_extent_type(leaf, fi, + BTRFS_FILE_EXTENT_REG); + btrfs_set_file_extent_num_bytes(leaf, fi, + extent_end - key.offset); + btrfs_mark_buffer_dirty(leaf); - ret = btrfs_del_items(trans, root, path, del_slot, del_nr); - BUG_ON(ret); + ret = btrfs_del_items(trans, root, path, del_slot, del_nr); + BUG_ON(ret); + } out: btrfs_free_path(path); return 0; @@ -1073,7 +1135,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) } mutex_lock(&dentry->d_inode->i_mutex); out: - return ret > 0 ? EIO : ret; + return ret > 0 ? -EIO : ret; } static const struct vm_operations_struct btrfs_file_vm_ops = { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5440bab2363..4deb280f896 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -483,7 +483,8 @@ again: nr_pages_ret = 0; /* flag the file so we don't compress in the future */ - BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; + if (!btrfs_test_opt(root, FORCE_COMPRESS)) + BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; } if (will_compress) { *num_added += 1; @@ -1680,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, * before we start the transaction. It limits the amount of btree * reads required while inside the transaction. 
*/ -static noinline void reada_csum(struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_ordered_extent *ordered_extent) -{ - struct btrfs_ordered_sum *sum; - u64 bytenr; - - sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum, - list); - bytenr = sum->sums[0].bytenr; - - /* - * we don't care about the results, the point of this search is - * just to get the btree leaves into ram - */ - btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0); -} - /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1708,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct btrfs_path *path; int compressed = 0; int ret; @@ -1716,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (!ret) return 0; - /* - * before we join the transaction, try to do some of our IO. - * This will limit the amount of IO that we have to do with - * the transaction running. We're unlikely to need to do any - * IO if the file extents are new, the disk_i_size checks - * covers the most common case. - */ - if (start < BTRFS_I(inode)->disk_i_size) { - path = btrfs_alloc_path(); - if (path) { - ret = btrfs_lookup_file_extent(NULL, root, path, - inode->i_ino, - start, 0); - ordered_extent = btrfs_lookup_ordered_extent(inode, - start); - if (!list_empty(&ordered_extent->list)) { - btrfs_release_path(root, path); - reada_csum(root, path, ordered_extent); - } - btrfs_free_path(path); - } - } - - if (!ordered_extent) - ordered_extent = btrfs_lookup_ordered_extent(inode, start); + ordered_extent = btrfs_lookup_ordered_extent(inode, start); BUG_ON(!ordered_extent); + if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); @@ -3995,7 +3954,11 @@ skip: /* Reached end of directory/root. Bump pos past the last item. */ if (key_type == BTRFS_DIR_INDEX_KEY) - filp->f_pos = INT_LIMIT(off_t); + /* + * 32-bit glibc will use getdents64, but then strtol - + * so the last number we can serve is this. 
+ */ + filp->f_pos = 0x7fffffff; else filp->f_pos++; nopos: @@ -5789,7 +5752,7 @@ out_fail: } static int prealloc_file_range(struct inode *inode, u64 start, u64 end, - u64 alloc_hint, int mode) + u64 alloc_hint, int mode, loff_t actual_len) { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -5798,6 +5761,7 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end, u64 cur_offset = start; u64 num_bytes = end - start; int ret = 0; + u64 i_size; while (num_bytes > 0) { alloc_size = min(num_bytes, root->fs_info->max_extent); @@ -5835,9 +5799,15 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end, inode->i_ctime = CURRENT_TIME; BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; if (!(mode & FALLOC_FL_KEEP_SIZE) && - cur_offset > inode->i_size) { - i_size_write(inode, cur_offset); - btrfs_ordered_update_i_size(inode, cur_offset, NULL); + (actual_len > inode->i_size) && + (cur_offset > inode->i_size)) { + + if (cur_offset > actual_len) + i_size = actual_len; + else + i_size = cur_offset; + i_size_write(inode, i_size); + btrfs_ordered_update_i_size(inode, i_size, NULL); } ret = btrfs_update_inode(trans, root, inode); @@ -5930,7 +5900,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { ret = prealloc_file_range(inode, cur_offset, last_byte, - alloc_hint, mode); + alloc_hint, mode, offset+len); if (ret < 0) { free_extent_map(em); break; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b10a49d4bc6..5c2a9e78a94 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -626,6 +626,8 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, if (ordered) offset = entry_end(ordered); + else + offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); mutex_lock(&tree->mutex); disk_i_size = BTRFS_I(inode)->disk_i_size; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index a9728680eca..ab7ab531874 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3281,8 +3281,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) return -ENOMEM; path = btrfs_alloc_path(); - if (!path) + if (!path) { + kfree(cluster); return -ENOMEM; + } rc->extents_found = 0; rc->extents_skipped = 0; @@ -3762,7 +3764,8 @@ out: BTRFS_DATA_RELOC_TREE_OBJECTID); if (IS_ERR(fs_root)) err = PTR_ERR(fs_root); - btrfs_orphan_cleanup(fs_root); + else + btrfs_orphan_cleanup(fs_root); } return err; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3f9b45704fc..8a1ea6e6457 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -66,7 +66,8 @@ enum { Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, - Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit, + Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, + Opt_flushoncommit, Opt_discard, Opt_err, }; @@ -82,6 +83,7 @@ static match_table_t tokens = { {Opt_alloc_start, "alloc_start=%s"}, {Opt_thread_pool, "thread_pool=%d"}, {Opt_compress, "compress"}, + {Opt_compress_force, "compress-force"}, {Opt_ssd, "ssd"}, {Opt_ssd_spread, "ssd_spread"}, {Opt_nossd, "nossd"}, @@ -173,6 +175,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) printk(KERN_INFO "btrfs: use compression\n"); btrfs_set_opt(info->mount_opt, COMPRESS); break; + case Opt_compress_force: + printk(KERN_INFO "btrfs: forcing compression\n"); 
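The two btrfs_set_opt() calls that follow set both FORCE_COMPRESS and COMPRESS, and the inode.c hunk earlier in this series skips the NOCOMPRESS inode flag when FORCE_COMPRESS is set, so a file that once compressed poorly keeps being retried. btrfs_set_opt()/btrfs_test_opt() are bitmask helpers over the mount_opt word; a sketch of the pattern, with hypothetical flag names and bit values:

#include <stdio.h>

#define MOUNT_COMPRESS		(1UL << 0)	/* hypothetical bits */
#define MOUNT_FORCE_COMPRESS	(1UL << 1)

#define opt_set(opts, bit)	((opts) |= (bit))
#define opt_test(opts, bit)	(!!((opts) & (bit)))

int main(void)
{
	unsigned long mount_opt = 0;

	/* "compress-force" implies plain "compress" as well */
	opt_set(mount_opt, MOUNT_FORCE_COMPRESS);
	opt_set(mount_opt, MOUNT_COMPRESS);

	if (opt_test(mount_opt, MOUNT_FORCE_COMPRESS))
		printf("keep compressing even after a bad ratio\n");
	else
		printf("would mark the inode NOCOMPRESS\n");
	return 0;
}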
+ btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); + btrfs_set_opt(info->mount_opt, COMPRESS); + break; case Opt_ssd: printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); btrfs_set_opt(info->mount_opt, SSD); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 198cff28766..41ecbb2347f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1135,7 +1135,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) root->fs_info->avail_metadata_alloc_bits; if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && - root->fs_info->fs_devices->rw_devices <= 4) { + root->fs_info->fs_devices->num_devices <= 4) { printk(KERN_ERR "btrfs: unable to go below four devices " "on raid10\n"); ret = -EINVAL; @@ -1143,7 +1143,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) } if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && - root->fs_info->fs_devices->rw_devices <= 2) { + root->fs_info->fs_devices->num_devices <= 2) { printk(KERN_ERR "btrfs: unable to go below two " "devices on raid1\n"); ret = -EINVAL; @@ -1434,8 +1434,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) return -EINVAL; bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder); - if (!bdev) - return -EIO; + if (IS_ERR(bdev)) + return PTR_ERR(bdev); if (root->fs_info->fs_devices->seeding) { seeding_dev = 1; @@ -2538,6 +2538,11 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) if (!em) return 1; + if (btrfs_test_opt(root, DEGRADED)) { + free_extent_map(em); + return 0; + } + map = (struct map_lookup *)em->bdev; for (i = 0; i < map->num_stripes; i++) { if (!map->stripes[i].dev->writeable) { @@ -2649,8 +2654,10 @@ again: em = lookup_extent_mapping(em_tree, logical, *length); read_unlock(&em_tree->lock); - if (!em && unplug_page) + if (!em && unplug_page) { + kfree(multi); return 0; + } if (!em) { printk(KERN_CRIT "unable to find logical %llu len %llu\n", diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 14ac4806e29..eeb4986ea7d 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -348,7 +348,17 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, dir = dget_parent(object->dentry); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); - ret = cachefiles_bury_object(cache, dir, object->dentry); + + /* we need to check that our parent is _still_ our parent - it may have + * been renamed */ + if (dir == object->dentry->d_parent) { + ret = cachefiles_bury_object(cache, dir, object->dentry); + } else { + /* it got moved, presumably by cachefilesd culling it, so it's + * no longer in the key path and we can ignore it */ + mutex_unlock(&dir->d_inode->i_mutex); + ret = 0; + } dput(dir); _leave(" = %d", ret); diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 7b2600b380d..bc0025cdd1c 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -1,3 +1,8 @@ +Version 1.62 +------------ +Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened +to more strictly handle corrupt frames. 
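The sockopt=TCP_NODELAY mount option announced here is wired through to kernel_setsockopt() on the transport socket in the connect.c hunks further down. For readers unfamiliar with the option, a minimal userspace equivalent; disabling Nagle's algorithm makes small request/response frames go out immediately instead of being coalesced:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* disable Nagle: do not batch small writes waiting for ACKs */
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)))
		perror("setsockopt(TCP_NODELAY)");
	return 0;
}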
+ Version 1.61 ------------ Fix append problem to Samba servers (files opened with O_APPEND could diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index fea9e898c4b..b44ce0a0711 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -269,7 +269,7 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd, int err; mntget(newmnt); - err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags, mntlist); + err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags | MNT_SHRINKABLE, mntlist); switch (err) { case 0: path_put(&nd->path); @@ -371,7 +371,6 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) if (IS_ERR(mnt)) goto out_err; - nd->path.mnt->mnt_flags |= MNT_SHRINKABLE; rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list); out: diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index ac2b24c192f..78c1b86d55f 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.61" +#define CIFS_VERSION "1.62" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 4b35f7ec058..a1c817eb291 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -149,6 +149,7 @@ struct TCP_Server_Info { bool svlocal:1; /* local server or remote */ bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ + bool tcp_nodelay; atomic_t inFlight; /* number of requests on the wire to server */ #ifdef CONFIG_CIFS_STATS2 atomic_t inSend; /* requests trying to send */ @@ -204,7 +205,7 @@ struct cifsUidInfo { struct cifsSesInfo { struct list_head smb_ses_list; struct list_head tcon_list; - struct semaphore sesSem; + struct mutex session_mutex; #if 0 struct cifsUidInfo *uidInfo; /* pointer to user info */ #endif diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 3877737f96a..14d036d8db1 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -415,10 +415,10 @@ struct smb_hdr { __u8 WordCount; } __attribute__((packed)); /* given a pointer to an smb_hdr retrieve the value of byte count */ -#define BCC(smb_var) (*(__u16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount))) -#define BCC_LE(smb_var) (*(__le16 *)((char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount))) +#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount))) +#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount))) /* given a pointer to an smb_hdr retrieve the pointer to the byte area */ -#define pByteArea(smb_var) ((unsigned char *)smb_var + sizeof(struct smb_hdr) + (2 * smb_var->WordCount) + 2) +#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2) /* * Computer Name Length (since Netbios name was length 16 with last byte 0x20) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 5646727e33f..88e2bc44ac5 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -363,13 +363,10 @@ extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, __u32 filter, struct file *file, int multishot, const struct nls_table *nls_codepage); extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, - const unsigned char *searchName, char *EAData, + const unsigned char *searchName, + const unsigned char 
*ea_name, char *EAData, size_t bufsize, const struct nls_table *nls_codepage, int remap_special_chars); -extern ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon, - const unsigned char *searchName, const unsigned char *ea_name, - unsigned char *ea_value, size_t buf_size, - const struct nls_table *nls_codepage, int remap_special_chars); extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName, const char *ea_name, const void *ea_value, const __u16 ea_value_len, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 941441d3e38..9d17df3e076 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -170,19 +170,19 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) * need to prevent multiple threads trying to simultaneously * reconnect the same SMB session */ - down(&ses->sesSem); + mutex_lock(&ses->session_mutex); if (ses->need_reconnect) rc = cifs_setup_session(0, ses, nls_codepage); /* do we need to reconnect tcon? */ if (rc || !tcon->need_reconnect) { - up(&ses->sesSem); + mutex_unlock(&ses->session_mutex); goto out; } mark_open_files_invalid(tcon); rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage); - up(&ses->sesSem); + mutex_unlock(&ses->session_mutex); cFYI(1, ("reconnect tcon rc = %d", rc)); if (rc) @@ -700,13 +700,13 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) if (!ses || !ses->server) return -EIO; - down(&ses->sesSem); + mutex_lock(&ses->session_mutex); if (ses->need_reconnect) goto session_already_dead; /* no need to send SMBlogoff if uid already closed due to reconnect */ rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB); if (rc) { - up(&ses->sesSem); + mutex_unlock(&ses->session_mutex); return rc; } @@ -721,7 +721,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) pSMB->AndXCommand = 0xFF; rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); session_already_dead: - up(&ses->sesSem); + mutex_unlock(&ses->session_mutex); /* if session dead then we do not need to do ulogoff, since server closed smb session, no sense reporting @@ -5269,22 +5269,34 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, cifs_buf_release(pSMB); return rc; } + #ifdef CONFIG_CIFS_XATTR +/* + * Do a path-based QUERY_ALL_EAS call and parse the result. This is a common + * function used by listxattr and getxattr type calls. When ea_name is set, + * it looks for that attribute name and stuffs that value into the EAData + * buffer. When ea_name is NULL, it stuffs a list of attribute names into the + * buffer. In both cases, the return value is either the length of the + * resulting data or a negative error code. If EAData is a NULL pointer then + * the data isn't copied to it, but the length is returned. 
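The rewritten parser below is the hardening referred to in the 1.62 changelog entry: every length field read off the wire (list_len, name_len, value_len) is checked against the bytes actually present in the SMB before it is trusted. A simplified userspace sketch of the same defensive walk over a length-prefixed list; the wire layout here is hypothetical, not the real FEA format:

#include <stdint.h>
#include <stdio.h>

/* hypothetical entry layout: u8 name_len, u16 value_len (LE),
 * then name bytes, a NUL, then value bytes */
static int parse_ea_list(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off < buf_len) {
		size_t name_len, value_len;

		if (buf_len - off < 3)	/* room for both length fields? */
			return -1;
		name_len = buf[off];
		value_len = buf[off + 1] | ((size_t)buf[off + 2] << 8);
		off += 3;

		/* the entry must fit in what remains of the list */
		if (buf_len - off < name_len + 1 + value_len)
			return -1;
		printf("EA %.*s (%zu byte value)\n",
		       (int)name_len, (const char *)(buf + off), value_len);
		off += name_len + 1 + value_len;
	}
	return 0;
}

int main(void)
{
	const uint8_t sample[] = { 3, 2, 0, 'f', 'o', 'o', 0, 'h', 'i' };

	return parse_ea_list(sample, sizeof(sample)) ? 1 : 0;
}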
+ */ ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, - const unsigned char *searchName, - char *EAData, size_t buf_size, - const struct nls_table *nls_codepage, int remap) + const unsigned char *searchName, const unsigned char *ea_name, + char *EAData, size_t buf_size, + const struct nls_table *nls_codepage, int remap) { /* BB assumes one setup word */ TRANSACTION2_QPI_REQ *pSMB = NULL; TRANSACTION2_QPI_RSP *pSMBr = NULL; int rc = 0; int bytes_returned; - int name_len; + int list_len; + struct fealist *ea_response_data; struct fea *temp_fea; char *temp_ptr; - __u16 params, byte_count; + char *end_of_smb; + __u16 params, byte_count, data_offset; cFYI(1, ("In Query All EAs path %s", searchName)); QAllEAsRetry: @@ -5294,22 +5306,22 @@ QAllEAsRetry: return rc; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { - name_len = + list_len = cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, PATH_MAX, nls_codepage, remap); - name_len++; /* trailing null */ - name_len *= 2; + list_len++; /* trailing null */ + list_len *= 2; } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(searchName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, searchName, name_len); + list_len = strnlen(searchName, PATH_MAX); + list_len++; /* trailing null */ + strncpy(pSMB->FileName, searchName, list_len); } - params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; + params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; pSMB->TotalDataCount = 0; pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ - pSMB->MaxDataCount = cpu_to_le16(4000); + pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize); pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; pSMB->Flags = 0; @@ -5334,237 +5346,117 @@ QAllEAsRetry: (struct smb_hdr *) pSMBr, &bytes_returned, 0); if (rc) { cFYI(1, ("Send error in QueryAllEAs = %d", rc)); - } else { /* decode response */ - rc = validate_t2((struct smb_t2_rsp *)pSMBr); + goto QAllEAsOut; + } - /* BB also check enough total bytes returned */ - /* BB we need to improve the validity checking - of these trans2 responses */ - if (rc || (pSMBr->ByteCount < 4)) - rc = -EIO; /* bad smb */ - /* else if (pFindData){ - memcpy((char *) pFindData, - (char *) &pSMBr->hdr.Protocol + - data_offset, kl); - }*/ else { - /* check that length of list is not more than bcc */ - /* check that each entry does not go beyond length - of list */ - /* check that each element of each entry does not - go beyond end of list */ - __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); - struct fealist *ea_response_data; - rc = 0; - /* validate_trans2_offsets() */ - /* BB check if start of smb + data_offset > &bcc+ bcc */ - ea_response_data = (struct fealist *) - (((char *) &pSMBr->hdr.Protocol) + - data_offset); - name_len = le32_to_cpu(ea_response_data->list_len); - cFYI(1, ("ea length %d", name_len)); - if (name_len <= 8) { - /* returned EA size zeroed at top of function */ - cFYI(1, ("empty EA list returned from server")); - } else { - /* account for ea list len */ - name_len -= 4; - temp_fea = ea_response_data->list; - temp_ptr = (char *)temp_fea; - while (name_len > 0) { - __u16 value_len; - name_len -= 4; - temp_ptr += 4; - rc += temp_fea->name_len; - /* account for prefix user. 
and trailing null */ - rc = rc + 5 + 1; - if (rc < (int)buf_size) { - memcpy(EAData, "user.", 5); - EAData += 5; - memcpy(EAData, temp_ptr, - temp_fea->name_len); - EAData += temp_fea->name_len; - /* null terminate name */ - *EAData = 0; - EAData = EAData + 1; - } else if (buf_size == 0) { - /* skip copy - calc size only */ - } else { - /* stop before overrun buffer */ - rc = -ERANGE; - break; - } - name_len -= temp_fea->name_len; - temp_ptr += temp_fea->name_len; - /* account for trailing null */ - name_len--; - temp_ptr++; - value_len = - le16_to_cpu(temp_fea->value_len); - name_len -= value_len; - temp_ptr += value_len; - /* BB check that temp_ptr is still - within the SMB BB*/ - - /* no trailing null to account for - in value len */ - /* go on to next EA */ - temp_fea = (struct fea *)temp_ptr; - } - } - } + + /* BB also check enough total bytes returned */ + /* BB we need to improve the validity checking + of these trans2 responses */ + + rc = validate_t2((struct smb_t2_rsp *)pSMBr); + if (rc || (pSMBr->ByteCount < 4)) { + rc = -EIO; /* bad smb */ + goto QAllEAsOut; } - cifs_buf_release(pSMB); - if (rc == -EAGAIN) - goto QAllEAsRetry; - return (ssize_t)rc; -} + /* check that length of list is not more than bcc */ + /* check that each entry does not go beyond length + of list */ + /* check that each element of each entry does not + go beyond end of list */ + /* validate_trans2_offsets() */ + /* BB check if start of smb + data_offset > &bcc+ bcc */ -ssize_t CIFSSMBQueryEA(const int xid, struct cifsTconInfo *tcon, - const unsigned char *searchName, const unsigned char *ea_name, - unsigned char *ea_value, size_t buf_size, - const struct nls_table *nls_codepage, int remap) -{ - TRANSACTION2_QPI_REQ *pSMB = NULL; - TRANSACTION2_QPI_RSP *pSMBr = NULL; - int rc = 0; - int bytes_returned; - int name_len; - struct fea *temp_fea; - char *temp_ptr; - __u16 params, byte_count; + data_offset = le16_to_cpu(pSMBr->t2.DataOffset); + ea_response_data = (struct fealist *) + (((char *) &pSMBr->hdr.Protocol) + data_offset); - cFYI(1, ("In Query EA path %s", searchName)); -QEARetry: - rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, - (void **) &pSMBr); - if (rc) - return rc; + list_len = le32_to_cpu(ea_response_data->list_len); + cFYI(1, ("ea length %d", list_len)); + if (list_len <= 8) { + cFYI(1, ("empty EA list returned from server")); + goto QAllEAsOut; + } - if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { - name_len = - cifsConvertToUCS((__le16 *) pSMB->FileName, searchName, - PATH_MAX, nls_codepage, remap); - name_len++; /* trailing null */ - name_len *= 2; - } else { /* BB improve the check for buffer overruns BB */ - name_len = strnlen(searchName, PATH_MAX); - name_len++; /* trailing null */ - strncpy(pSMB->FileName, searchName, name_len); + /* make sure list_len doesn't go past end of SMB */ + end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr); + if ((char *)ea_response_data + list_len > end_of_smb) { + cFYI(1, ("EA list appears to go beyond SMB")); + rc = -EIO; + goto QAllEAsOut; } - params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; - pSMB->TotalDataCount = 0; - pSMB->MaxParameterCount = cpu_to_le16(2); - /* BB find exact max SMB PDU from sess structure BB */ - pSMB->MaxDataCount = cpu_to_le16(4000); - pSMB->MaxSetupCount = 0; - pSMB->Reserved = 0; - pSMB->Flags = 0; - pSMB->Timeout = 0; - pSMB->Reserved2 = 0; - pSMB->ParameterOffset = cpu_to_le16(offsetof( - struct smb_com_transaction2_qpi_req, InformationLevel) - 4); - pSMB->DataCount = 0; - 
pSMB->DataOffset = 0; - pSMB->SetupCount = 1; - pSMB->Reserved3 = 0; - pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_PATH_INFORMATION); - byte_count = params + 1 /* pad */ ; - pSMB->TotalParameterCount = cpu_to_le16(params); - pSMB->ParameterCount = pSMB->TotalParameterCount; - pSMB->InformationLevel = cpu_to_le16(SMB_INFO_QUERY_ALL_EAS); - pSMB->Reserved4 = 0; - pSMB->hdr.smb_buf_length += byte_count; - pSMB->ByteCount = cpu_to_le16(byte_count); + /* account for ea list len */ + list_len -= 4; + temp_fea = ea_response_data->list; + temp_ptr = (char *)temp_fea; + while (list_len > 0) { + unsigned int name_len; + __u16 value_len; + + list_len -= 4; + temp_ptr += 4; + /* make sure we can read name_len and value_len */ + if (list_len < 0) { + cFYI(1, ("EA entry goes beyond length of list")); + rc = -EIO; + goto QAllEAsOut; + } - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, - (struct smb_hdr *) pSMBr, &bytes_returned, 0); - if (rc) { - cFYI(1, ("Send error in Query EA = %d", rc)); - } else { /* decode response */ - rc = validate_t2((struct smb_t2_rsp *)pSMBr); + name_len = temp_fea->name_len; + value_len = le16_to_cpu(temp_fea->value_len); + list_len -= name_len + 1 + value_len; + if (list_len < 0) { + cFYI(1, ("EA entry goes beyond length of list")); + rc = -EIO; + goto QAllEAsOut; + } - /* BB also check enough total bytes returned */ - /* BB we need to improve the validity checking - of these trans2 responses */ - if (rc || (pSMBr->ByteCount < 4)) - rc = -EIO; /* bad smb */ - /* else if (pFindData){ - memcpy((char *) pFindData, - (char *) &pSMBr->hdr.Protocol + - data_offset, kl); - }*/ else { - /* check that length of list is not more than bcc */ - /* check that each entry does not go beyond length - of list */ - /* check that each element of each entry does not - go beyond end of list */ - __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); - struct fealist *ea_response_data; - rc = -ENODATA; - /* validate_trans2_offsets() */ - /* BB check if start of smb + data_offset > &bcc+ bcc*/ - ea_response_data = (struct fealist *) - (((char *) &pSMBr->hdr.Protocol) + - data_offset); - name_len = le32_to_cpu(ea_response_data->list_len); - cFYI(1, ("ea length %d", name_len)); - if (name_len <= 8) { - /* returned EA size zeroed at top of function */ - cFYI(1, ("empty EA list returned from server")); - } else { - /* account for ea list len */ - name_len -= 4; - temp_fea = ea_response_data->list; - temp_ptr = (char *)temp_fea; - /* loop through checking if we have a matching - name and then return the associated value */ - while (name_len > 0) { - __u16 value_len; - name_len -= 4; - temp_ptr += 4; - value_len = - le16_to_cpu(temp_fea->value_len); - /* BB validate that value_len falls within SMB, - even though maximum for name_len is 255 */ - if (memcmp(temp_fea->name, ea_name, - temp_fea->name_len) == 0) { - /* found a match */ - rc = value_len; - /* account for prefix user. and trailing null */ - if (rc <= (int)buf_size) { - memcpy(ea_value, - temp_fea->name+temp_fea->name_len+1, - rc); - /* ea values, unlike ea - names, are not null - terminated */ - } else if (buf_size == 0) { - /* skip copy - calc size only */ - } else { - /* stop before overrun buffer */ - rc = -ERANGE; - } - break; - } - name_len -= temp_fea->name_len; - temp_ptr += temp_fea->name_len; - /* account for trailing null */ - name_len--; - temp_ptr++; - name_len -= value_len; - temp_ptr += value_len; - /* No trailing null to account for in - value_len. 
Go on to next EA */ - temp_fea = (struct fea *)temp_ptr; + if (ea_name) { + if (strncmp(ea_name, temp_ptr, name_len) == 0) { + temp_ptr += name_len + 1; + rc = value_len; + if (buf_size == 0) + goto QAllEAsOut; + if ((size_t)value_len > buf_size) { + rc = -ERANGE; + goto QAllEAsOut; } + memcpy(EAData, temp_ptr, value_len); + goto QAllEAsOut; + } + } else { + /* account for prefix user. and trailing null */ + rc += (5 + 1 + name_len); + if (rc < (int) buf_size) { + memcpy(EAData, "user.", 5); + EAData += 5; + memcpy(EAData, temp_ptr, name_len); + EAData += name_len; + /* null terminate name */ + *EAData = 0; + ++EAData; + } else if (buf_size == 0) { + /* skip copy - calc size only */ + } else { + /* stop before overrun buffer */ + rc = -ERANGE; + break; } } + temp_ptr += name_len + 1 + value_len; + temp_fea = (struct fea *)temp_ptr; } + + /* didn't find the named attribute */ + if (ea_name) + rc = -ENODATA; + +QAllEAsOut: cifs_buf_release(pSMB); if (rc == -EAGAIN) - goto QEARetry; + goto QAllEAsRetry; return (ssize_t)rc; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 3bbcaa716b3..45eb6cba793 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -98,7 +98,7 @@ struct smb_vol { bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ unsigned int rsize; unsigned int wsize; - unsigned int sockopt; + bool sockopt_tcp_nodelay:1; unsigned short int port; char *prepath; }; @@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname, simple_strtoul(value, &value, 0); } } else if (strnicmp(data, "sockopt", 5) == 0) { - if (value && *value) { - vol->sockopt = - simple_strtoul(value, &value, 0); + if (!value || !*value) { + cERROR(1, ("no socket option specified")); + continue; + } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) { + vol->sockopt_tcp_nodelay = 1; } } else if (strnicmp(data, "netbiosname", 4) == 0) { if (!value || !*value || (*value == ' ')) { @@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; + tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; atomic_set(&tcp_ses->inFlight, 0); init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); @@ -1764,6 +1767,7 @@ static int ipv4_connect(struct TCP_Server_Info *server) { int rc = 0; + int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server) socket->sk->sk_rcvbuf = 140 * 1024; } + if (server->tcp_nodelay) { + val = 1; + rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, + (char *)&val, sizeof(val)); + if (rc) + cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); + } + cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); @@ -1916,6 +1928,7 @@ static int ipv6_connect(struct TCP_Server_Info *server) { int rc = 0; + int val; bool connected = false; __be16 orig_port = 0; struct socket *socket = server->ssocket; @@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server) */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; + + if (server->tcp_nodelay) { + val = 1; + rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, + (char *)&val, sizeof(val)); + if (rc) + cFYI(1, ("set TCP_NODELAY socket option error %d", rc)); + } + server->ssocket = socket; return rc; @@ -2366,13 +2388,13 @@ try_mount_again: */ cifs_put_tcp_session(srvTcp); - 
down(&pSesInfo->sesSem); + mutex_lock(&pSesInfo->session_mutex); if (pSesInfo->need_reconnect) { cFYI(1, ("Session needs reconnect")); rc = cifs_setup_session(xid, pSesInfo, cifs_sb->local_nls); } - up(&pSesInfo->sesSem); + mutex_unlock(&pSesInfo->session_mutex); } else if (!rc) { cFYI(1, ("Existing smb sess not found")); pSesInfo = sesInfoAlloc(); @@ -2415,12 +2437,12 @@ try_mount_again: } pSesInfo->linux_uid = volume_info->linux_uid; pSesInfo->overrideSecFlg = volume_info->secFlg; - down(&pSesInfo->sesSem); + mutex_lock(&pSesInfo->session_mutex); /* BB FIXME need to pass vol->secFlgs BB */ rc = cifs_setup_session(xid, pSesInfo, cifs_sb->local_nls); - up(&pSesInfo->sesSem); + mutex_unlock(&pSesInfo->session_mutex); } /* search for existing tcon to this server share */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 057e1dae12a..3d8f8a96f5a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2289,9 +2289,9 @@ cifs_oplock_break(struct slow_work *work) if (inode && S_ISREG(inode->i_mode)) { #ifdef CONFIG_CIFS_EXPERIMENTAL if (cinode->clientCanCacheAll == 0) - break_lease(inode, FMODE_READ); + break_lease(inode, O_RDONLY); else if (cinode->clientCanCacheRead == 0) - break_lease(inode, FMODE_WRITE); + break_lease(inode, O_WRONLY); #endif rc = filemap_fdatawrite(inode->i_mapping); if (cinode->clientCanCacheRead == 0) { diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index cf18ee76559..8bdbc818164 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -111,6 +111,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; + cifs_i->server_eof = fattr->cf_eof; /* * Can't safely change the file size here if the client is writing to * it due to potential races. @@ -366,7 +367,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, char ea_value[4]; __u32 mode; - rc = CIFSSMBQueryEA(xid, cifs_sb->tcon, path, "SETFILEBITS", + rc = CIFSSMBQAllEAs(xid, cifs_sb->tcon, path, "SETFILEBITS", ea_value, 4 /* size of buf */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); @@ -1762,8 +1763,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) CIFS_MOUNT_MAP_SPECIAL_CHR); } - if (!rc) + if (!rc) { rc = inode_setattr(inode, attrs); + + /* force revalidate when any of these times are set since some + of the fs types (eg ext3, fat) do not have fine enough + time granularity to match protocol, and we do not have a + a way (yet) to query the server fs's time granularity (and + whether it rounds times down). + */ + if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))) + cifsInode->time = 0; + } out: kfree(args); kfree(full_path); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index d27d4ec6579..d1474996a81 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -79,7 +79,7 @@ sesInfoAlloc(void) ++ret_buf->ses_count; INIT_LIST_HEAD(&ret_buf->smb_ses_list); INIT_LIST_HEAD(&ret_buf->tcon_list); - init_MUTEX(&ret_buf->sesSem); + mutex_init(&ret_buf->session_mutex); } return ret_buf; } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index f84062f9a98..c343b14ba2d 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, cFYI(1, ("For %s", name->name)); + if (parent->d_op && parent->d_op->d_hash) + parent->d_op->d_hash(parent, name); + else + name->hash = full_name_hash(name->name, name->len); + dentry = d_lookup(parent, name); if (dentry) { /* FIXME: check for inode number changes? 
*/ @@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, min(len, max_len), nlt, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + pqst->len -= nls_nullsize(nlt); } else { pqst->name = filename; pqst->len = len; } - pqst->hash = full_name_hash(pqst->name, pqst->len); -/* cFYI(1, ("filldir on %s",pqst->name)); */ return rc; } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 7085a6275c4..aaa9c1c5a5b 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* null user mount */ *bcc_ptr = 0; *(bcc_ptr+1) = 0; - } else { /* 300 should be long enough for any conceivable user name */ + } else { bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, - 300, nls_cp); + MAX_USERNAME_SIZE, nls_cp); } bcc_ptr += 2 * bytes_ret; bcc_ptr += 2; /* account for null termination */ @@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, /* copy user */ if (ses->userName == NULL) { /* BB what about null user mounts - check that we do this BB */ - } else { /* 300 should be long enough for any conceivable user name */ - strncpy(bcc_ptr, ses->userName, 300); + } else { + strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE); } - /* BB improve check for overflow */ - bcc_ptr += strnlen(ses->userName, 300); + bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE); *bcc_ptr = 0; bcc_ptr++; /* account for null termination */ diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index a75afa3dd9e..3e2ef0de120 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -244,7 +244,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, /* revalidate/getattr then populate from inode */ } /* BB add else when above is implemented */ ea_name += 5; /* skip past user. prefix */ - rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value, + rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value, buf_size, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) { @@ -252,7 +252,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, goto get_ea_exit; ea_name += 4; /* skip past os2. 
prefix */ - rc = CIFSSMBQueryEA(xid, pTcon, full_path, ea_name, ea_value, + rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value, buf_size, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS, @@ -364,8 +364,8 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) /* if proc/fs/cifs/streamstoxattr is set then search server for EAs or streams to returns as xattrs */ - rc = CIFSSMBQAllEAs(xid, pTcon, full_path, data, buf_size, - cifs_sb->local_nls, + rc = CIFSSMBQAllEAs(xid, pTcon, full_path, NULL, data, + buf_size, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 332dd00f089..0ca9ec4a79c 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, u32 data; void __user *dxferp; int err; + int interface_id; + + if (get_user(interface_id, &sgio32->interface_id)) + return -EFAULT; + if (interface_id != 'S') + return sys_ioctl(fd, cmd, (unsigned long)sgio32); if (get_user(iovec_count, &sgio32->iovec_count)) return -EFAULT; @@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF) COMPATIBLE_IOCTL(TIOCLINUX) COMPATIBLE_IOCTL(TIOCSBRK) COMPATIBLE_IOCTL(TIOCCBRK) +COMPATIBLE_IOCTL(TIOCGSID) COMPATIBLE_IOCTL(TIOCGICOUNT) /* Little t */ COMPATIBLE_IOCTL(TIOCGETD) @@ -1005,6 +1012,9 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND) COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST) COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI) #endif +/* Big V (don't complain on serial console) */ +IGNORE_IOCTL(VT_OPENQRY) +IGNORE_IOCTL(VT_GETMODE) /* Little p (/dev/rtc, /dev/envctrl, etc.) */ COMPATIBLE_IOCTL(RTC_AIE_ON) COMPATIBLE_IOCTL(RTC_AIE_OFF) @@ -1035,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE) #ifdef CONFIG_BLOCK /* loop */ IGNORE_IOCTL(LOOP_CLR_FD) +/* md calls this on random blockdevs */ +IGNORE_IOCTL(RAID_VERSION) /* SG stuff */ COMPATIBLE_IOCTL(SG_SET_TIMEOUT) COMPATIBLE_IOCTL(SG_GET_TIMEOUT) diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index c8afa6b1d91..32a5f46b115 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -121,8 +121,10 @@ static int get_target(const char *symname, struct path *path, ret = -ENOENT; path_put(path); } - } else + } else { ret = -EPERM; + path_put(path); + } } return ret; diff --git a/fs/dcache.c b/fs/dcache.c index 953173a293a..f1358e5c3a5 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -257,6 +257,7 @@ kill_it: if (dentry) goto repeat; } +EXPORT_SYMBOL(dput); /** * d_invalidate - invalidate a dentry @@ -314,6 +315,7 @@ int d_invalidate(struct dentry * dentry) spin_unlock(&dcache_lock); return 0; } +EXPORT_SYMBOL(d_invalidate); /* This should be called _only_ with dcache_lock held */ @@ -328,6 +330,7 @@ struct dentry * dget_locked(struct dentry *dentry) { return __dget_locked(dentry); } +EXPORT_SYMBOL(dget_locked); /** * d_find_alias - grab a hashed alias of inode @@ -384,6 +387,7 @@ struct dentry * d_find_alias(struct inode *inode) } return de; } +EXPORT_SYMBOL(d_find_alias); /* * Try to kill dentries associated with this inode. @@ -408,6 +412,7 @@ restart: } spin_unlock(&dcache_lock); } +EXPORT_SYMBOL(d_prune_aliases); /* * Throw away a dentry - free the inode, dput the parent. 
This requires that @@ -610,6 +615,7 @@ void shrink_dcache_sb(struct super_block * sb) { __shrink_dcache_sb(sb, NULL, 0); } +EXPORT_SYMBOL(shrink_dcache_sb); /* * destroy a single subtree of dentries for unmount @@ -792,6 +798,7 @@ positive: spin_unlock(&dcache_lock); return 1; } +EXPORT_SYMBOL(have_submounts); /* * Search the dentry child list for the specified parent, @@ -876,6 +883,7 @@ void shrink_dcache_parent(struct dentry * parent) while ((found = select_parent(parent)) != 0) __shrink_dcache_sb(sb, &found, 0); } +EXPORT_SYMBOL(shrink_dcache_parent); /* * Scan `nr' dentries and return the number which remain. @@ -968,6 +976,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) return dentry; } +EXPORT_SYMBOL(d_alloc); struct dentry *d_alloc_name(struct dentry *parent, const char *name) { @@ -1012,6 +1021,7 @@ void d_instantiate(struct dentry *entry, struct inode * inode) spin_unlock(&dcache_lock); security_d_instantiate(entry, inode); } +EXPORT_SYMBOL(d_instantiate); /** * d_instantiate_unique - instantiate a non-aliased dentry @@ -1108,6 +1118,7 @@ struct dentry * d_alloc_root(struct inode * root_inode) } return res; } +EXPORT_SYMBOL(d_alloc_root); static inline struct hlist_head *d_hash(struct dentry *parent, unsigned long hash) @@ -1211,7 +1222,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); spin_unlock(&dcache_lock); security_d_instantiate(new, inode); - d_rehash(dentry); d_move(new, dentry); iput(inode); } else { @@ -1225,6 +1235,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) d_add(dentry, inode); return new; } +EXPORT_SYMBOL(d_splice_alias); /** * d_add_ci - lookup or allocate new dentry with case-exact name @@ -1314,6 +1325,7 @@ err_out: iput(inode); return ERR_PTR(error); } +EXPORT_SYMBOL(d_add_ci); /** * d_lookup - search for a dentry @@ -1357,6 +1369,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name) } while (read_seqretry(&rename_lock, seq)); return dentry; } +EXPORT_SYMBOL(d_lookup); struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) { @@ -1483,6 +1496,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent) out: return 0; } +EXPORT_SYMBOL(d_validate); /* * When a file is deleted, we have two options: @@ -1528,6 +1542,7 @@ void d_delete(struct dentry * dentry) fsnotify_nameremove(dentry, isdir); } +EXPORT_SYMBOL(d_delete); static void __d_rehash(struct dentry * entry, struct hlist_head *list) { @@ -1556,6 +1571,7 @@ void d_rehash(struct dentry * entry) spin_unlock(&entry->d_lock); spin_unlock(&dcache_lock); } +EXPORT_SYMBOL(d_rehash); /* * When switching names, the actual string doesn't strictly have to @@ -1702,6 +1718,7 @@ void d_move(struct dentry * dentry, struct dentry * target) d_move_locked(dentry, target); spin_unlock(&dcache_lock); } +EXPORT_SYMBOL(d_move); /** * d_ancestor - search for an ancestor @@ -1868,6 +1885,7 @@ shouldnt_be_hashed: spin_unlock(&dcache_lock); BUG(); } +EXPORT_SYMBOL_GPL(d_materialise_unique); static int prepend(char **buffer, int *buflen, const char *str, int namelen) { @@ -2005,6 +2023,7 @@ char *d_path(const struct path *path, char *buf, int buflen) path_put(&root); return res; } +EXPORT_SYMBOL(d_path); /* * Helper function for dentry_operations.d_dname() members @@ -2171,6 +2190,30 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry) return result; } +int path_is_under(struct path *path1, struct path *path2) +{ + struct vfsmount *mnt = 
path1->mnt; + struct dentry *dentry = path1->dentry; + int res; + spin_lock(&vfsmount_lock); + if (mnt != path2->mnt) { + for (;;) { + if (mnt->mnt_parent == mnt) { + spin_unlock(&vfsmount_lock); + return 0; + } + if (mnt->mnt_parent == path2->mnt) + break; + mnt = mnt->mnt_parent; + } + dentry = mnt->mnt_mountpoint; + } + res = is_subdir(dentry, path2->dentry); + spin_unlock(&vfsmount_lock); + return res; +} +EXPORT_SYMBOL(path_is_under); + void d_genocide(struct dentry *root) { struct dentry *this_parent = root; @@ -2228,6 +2271,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name) } return ino; } +EXPORT_SYMBOL(find_inode_number); static __initdata unsigned long dhash_entries; static int __init set_dhash_entries(char *str) @@ -2297,6 +2341,7 @@ static void __init dcache_init(void) /* SLAB cache for __getname() consumers */ struct kmem_cache *names_cachep __read_mostly; +EXPORT_SYMBOL(names_cachep); EXPORT_SYMBOL(d_genocide); @@ -2326,26 +2371,3 @@ void __init vfs_caches_init(unsigned long mempages) bdev_cache_init(); chrdev_init(); } - -EXPORT_SYMBOL(d_alloc); -EXPORT_SYMBOL(d_alloc_root); -EXPORT_SYMBOL(d_delete); -EXPORT_SYMBOL(d_find_alias); -EXPORT_SYMBOL(d_instantiate); -EXPORT_SYMBOL(d_invalidate); -EXPORT_SYMBOL(d_lookup); -EXPORT_SYMBOL(d_move); -EXPORT_SYMBOL_GPL(d_materialise_unique); -EXPORT_SYMBOL(d_path); -EXPORT_SYMBOL(d_prune_aliases); -EXPORT_SYMBOL(d_rehash); -EXPORT_SYMBOL(d_splice_alias); -EXPORT_SYMBOL(d_add_ci); -EXPORT_SYMBOL(d_validate); -EXPORT_SYMBOL(dget_locked); -EXPORT_SYMBOL(dput); -EXPORT_SYMBOL(find_inode_number); -EXPORT_SYMBOL(have_submounts); -EXPORT_SYMBOL(names_cachep); -EXPORT_SYMBOL(shrink_dcache_parent); -EXPORT_SYMBOL(shrink_dcache_sb); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index b486169f42b..049d6c36da0 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -160,15 +160,8 @@ static int debugfs_create_by_name(const char *name, mode_t mode, * block. A pointer to that is in the struct vfsmount that we * have around. */ - if (!parent) { - if (debugfs_mount && debugfs_mount->mnt_sb) { - parent = debugfs_mount->mnt_sb->s_root; - } - } - if (!parent) { - pr_debug("debugfs: Ah! can not find a parent!\n"); - return -EFAULT; - } + if (!parent) + parent = debugfs_mount->mnt_sb->s_root; *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); @@ -503,7 +496,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, } d_move(old_dentry, dentry); fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, - old_dentry->d_name.name, S_ISDIR(old_dentry->d_inode->i_mode), + S_ISDIR(old_dentry->d_inode->i_mode), NULL, old_dentry); fsnotify_oldname_free(old_name); unlock_rename(new_dir, old_dir); diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index dc2ad6008b2..4314f0d48d8 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. -** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. 
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -33,10 +33,10 @@ void dlm_del_ast(struct dlm_lkb *lkb) spin_unlock(&ast_queue_lock); } -void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode) +void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode) { if (lkb->lkb_flags & DLM_IFL_USER) { - dlm_user_add_ast(lkb, type, bastmode); + dlm_user_add_ast(lkb, type, mode); return; } @@ -44,10 +44,21 @@ void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode) if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) { kref_get(&lkb->lkb_ref); list_add_tail(&lkb->lkb_astqueue, &ast_queue); + lkb->lkb_ast_first = type; } + + /* sanity check, this should not happen */ + + if ((type == AST_COMP) && (lkb->lkb_ast_type & AST_COMP)) + log_print("repeat cast %d castmode %d lock %x %s", + mode, lkb->lkb_castmode, + lkb->lkb_id, lkb->lkb_resource->res_name); + lkb->lkb_ast_type |= type; - if (bastmode) - lkb->lkb_bastmode = bastmode; + if (type == AST_BAST) + lkb->lkb_bastmode = mode; + else + lkb->lkb_castmode = mode; spin_unlock(&ast_queue_lock); set_bit(WAKE_ASTS, &astd_wakeflags); @@ -59,9 +70,9 @@ static void process_asts(void) struct dlm_ls *ls = NULL; struct dlm_rsb *r = NULL; struct dlm_lkb *lkb; - void (*cast) (void *astparam); - void (*bast) (void *astparam, int mode); - int type = 0, bastmode; + void (*castfn) (void *astparam); + void (*bastfn) (void *astparam, int mode); + int type, first, bastmode, castmode, do_bast, do_cast, last_castmode; repeat: spin_lock(&ast_queue_lock); @@ -75,17 +86,48 @@ repeat: list_del(&lkb->lkb_astqueue); type = lkb->lkb_ast_type; lkb->lkb_ast_type = 0; + first = lkb->lkb_ast_first; + lkb->lkb_ast_first = 0; bastmode = lkb->lkb_bastmode; - + castmode = lkb->lkb_castmode; + castfn = lkb->lkb_astfn; + bastfn = lkb->lkb_bastfn; spin_unlock(&ast_queue_lock); - cast = lkb->lkb_astfn; - bast = lkb->lkb_bastfn; - - if ((type & AST_COMP) && cast) - cast(lkb->lkb_astparam); - if ((type & AST_BAST) && bast) - bast(lkb->lkb_astparam, bastmode); + do_cast = (type & AST_COMP) && castfn; + do_bast = (type & AST_BAST) && bastfn; + + /* Skip a bast if its blocking mode is compatible with the + granted mode of the preceding cast. */ + + if (do_bast) { + if (first == AST_COMP) + last_castmode = castmode; + else + last_castmode = lkb->lkb_castmode_done; + if (dlm_modes_compat(bastmode, last_castmode)) + do_bast = 0; + } + + if (first == AST_COMP) { + if (do_cast) + castfn(lkb->lkb_astparam); + if (do_bast) + bastfn(lkb->lkb_astparam, bastmode); + } else if (first == AST_BAST) { + if (do_bast) + bastfn(lkb->lkb_astparam, bastmode); + if (do_cast) + castfn(lkb->lkb_astparam); + } else { + log_error(ls, "bad ast_first %d ast_type %d", + first, type); + } + + if (do_cast) + lkb->lkb_castmode_done = castmode; + if (do_bast) + lkb->lkb_bastmode_done = bastmode; /* this removes the reference added by dlm_add_ast and may result in the lkb being freed */ diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h index 1b5fc5f428f..bcb1aaba519 100644 --- a/fs/dlm/ast.h +++ b/fs/dlm/ast.h @@ -1,7 +1,7 @@ /****************************************************************************** ******************************************************************************* ** -** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. +** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved. 
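The process_asts() rewrite above delivers the completion and blocking callbacks in the order they were queued (tracked by lkb_ast_first) and drops a bast whose blocking mode is compatible with the granted mode of the preceding cast, since the holder has nothing to give up. A simplified illustration of that compatibility test; the real dlm_modes_compat() covers all six DLM modes, this table only three:

#include <stdio.h>

enum { MODE_NL, MODE_PR, MODE_EX };	/* simplified mode set */

/* compat[granted][requested]: can both modes be held at once? */
static const int compat[3][3] = {
	/*         NL PR EX */
	/* NL */ {  1, 1, 1 },
	/* PR */ {  1, 1, 0 },
	/* EX */ {  1, 0, 0 },
};

/* skip the bast if the blocking request does not actually conflict
 * with the mode the holder was just granted */
static int bast_needed(int bastmode, int last_castmode)
{
	return !compat[last_castmode][bastmode];
}

int main(void)
{
	printf("EX request vs PR holder: bast %s\n",
	       bast_needed(MODE_EX, MODE_PR) ? "needed" : "skipped");
	printf("PR request vs PR holder: bast %s\n",
	       bast_needed(MODE_PR, MODE_PR) ? "needed" : "skipped");
	return 0;
}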
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -13,7 +13,7 @@ #ifndef __ASTD_DOT_H__ #define __ASTD_DOT_H__ -void dlm_add_ast(struct dlm_lkb *lkb, int type, int bastmode); +void dlm_add_ast(struct dlm_lkb *lkb, int type, int mode); void dlm_del_ast(struct dlm_lkb *lkb); void dlm_astd_wake(void); diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 375a2359b3b..29d6139c35f 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -256,7 +256,7 @@ static int print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb, lkb->lkb_status, lkb->lkb_grmode, lkb->lkb_rqmode, - lkb->lkb_highbast, + lkb->lkb_bastmode, rsb_lookup, lkb->lkb_wait_type, lkb->lkb_lvbseq, diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 826d3dc6e0a..f632b58cd22 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. -** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -232,11 +232,17 @@ struct dlm_lkb { int8_t lkb_status; /* granted, waiting, convert */ int8_t lkb_rqmode; /* requested lock mode */ int8_t lkb_grmode; /* granted lock mode */ - int8_t lkb_bastmode; /* requested mode */ int8_t lkb_highbast; /* highest mode bast sent for */ + int8_t lkb_wait_type; /* type of reply waiting for */ int8_t lkb_wait_count; int8_t lkb_ast_type; /* type of ast queued for */ + int8_t lkb_ast_first; /* type of first ast queued */ + + int8_t lkb_bastmode; /* req mode of queued bast */ + int8_t lkb_castmode; /* gr mode of queued cast */ + int8_t lkb_bastmode_done; /* last delivered bastmode */ + int8_t lkb_castmode_done; /* last delivered castmode */ struct list_head lkb_idtbl_list; /* lockspace lkbtbl */ struct list_head lkb_statequeue; /* rsb g/c/w list */ diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 9c0c1db1e10..46ffd3eeaaf 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1,7 +1,7 @@ /****************************************************************************** ******************************************************************************* ** -** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. +** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved. 
** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -307,7 +307,7 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) lkb->lkb_lksb->sb_status = rv; lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags; - dlm_add_ast(lkb, AST_COMP, 0); + dlm_add_ast(lkb, AST_COMP, lkb->lkb_grmode); } static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb) @@ -320,10 +320,12 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) { lkb->lkb_time_bast = ktime_get(); - if (is_master_copy(lkb)) + if (is_master_copy(lkb)) { + lkb->lkb_bastmode = rqmode; /* printed by debugfs */ send_bast(r, lkb, rqmode); - else + } else { dlm_add_ast(lkb, AST_BAST, rqmode); + } } /* @@ -2280,20 +2282,30 @@ static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb) if (can_be_queued(lkb)) { error = -EINPROGRESS; add_lkb(r, lkb, DLM_LKSTS_WAITING); - send_blocking_asts(r, lkb); add_timeout(lkb); goto out; } error = -EAGAIN; - if (force_blocking_asts(lkb)) - send_blocking_asts_all(r, lkb); queue_cast(r, lkb, -EAGAIN); - out: return error; } +static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, + int error) +{ + switch (error) { + case -EAGAIN: + if (force_blocking_asts(lkb)) + send_blocking_asts_all(r, lkb); + break; + case -EINPROGRESS: + send_blocking_asts(r, lkb); + break; + } +} + static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error = 0; @@ -2304,7 +2316,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) if (can_be_granted(r, lkb, 1, &deadlk)) { grant_lock(r, lkb); queue_cast(r, lkb, 0); - grant_pending_locks(r); goto out; } @@ -2334,7 +2345,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) if (_can_be_granted(r, lkb, 1)) { grant_lock(r, lkb); queue_cast(r, lkb, 0); - grant_pending_locks(r); goto out; } /* else fall through and move to convert queue */ @@ -2344,28 +2354,47 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) error = -EINPROGRESS; del_lkb(r, lkb); add_lkb(r, lkb, DLM_LKSTS_CONVERT); - send_blocking_asts(r, lkb); add_timeout(lkb); goto out; } error = -EAGAIN; - if (force_blocking_asts(lkb)) - send_blocking_asts_all(r, lkb); queue_cast(r, lkb, -EAGAIN); - out: return error; } +static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, + int error) +{ + switch (error) { + case 0: + grant_pending_locks(r); + /* grant_pending_locks also sends basts */ + break; + case -EAGAIN: + if (force_blocking_asts(lkb)) + send_blocking_asts_all(r, lkb); + break; + case -EINPROGRESS: + send_blocking_asts(r, lkb); + break; + } +} + static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb) { remove_lock(r, lkb); queue_cast(r, lkb, -DLM_EUNLOCK); - grant_pending_locks(r); return -DLM_EUNLOCK; } +static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, + int error) +{ + grant_pending_locks(r); +} + /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */ static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb) @@ -2375,12 +2404,18 @@ static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb) error = revert_lock(r, lkb); if (error) { queue_cast(r, lkb, -DLM_ECANCEL); - grant_pending_locks(r); return -DLM_ECANCEL; } return 0; } +static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb, + int error) +{ + if (error) + grant_pending_locks(r); +} + /* * Four stage 3 varieties: * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock() @@ -2402,11 
+2437,15 @@ static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) goto out; } - if (is_remote(r)) + if (is_remote(r)) { /* receive_request() calls do_request() on remote node */ error = send_request(r, lkb); - else + } else { error = do_request(r, lkb); + /* for remote locks the request_reply is sent + between do_request and do_request_effects */ + do_request_effects(r, lkb, error); + } out: return error; } @@ -2417,11 +2456,15 @@ static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; - if (is_remote(r)) + if (is_remote(r)) { /* receive_convert() calls do_convert() on remote node */ error = send_convert(r, lkb); - else + } else { error = do_convert(r, lkb); + /* for remote locks the convert_reply is sent + between do_convert and do_convert_effects */ + do_convert_effects(r, lkb, error); + } return error; } @@ -2432,11 +2475,15 @@ static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; - if (is_remote(r)) + if (is_remote(r)) { /* receive_unlock() calls do_unlock() on remote node */ error = send_unlock(r, lkb); - else + } else { error = do_unlock(r, lkb); + /* for remote locks the unlock_reply is sent + between do_unlock and do_unlock_effects */ + do_unlock_effects(r, lkb, error); + } return error; } @@ -2447,11 +2494,15 @@ static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) { int error; - if (is_remote(r)) + if (is_remote(r)) { /* receive_cancel() calls do_cancel() on remote node */ error = send_cancel(r, lkb); - else + } else { error = do_cancel(r, lkb); + /* for remote locks the cancel_reply is sent + between do_cancel and do_cancel_effects */ + do_cancel_effects(r, lkb, error); + } return error; } @@ -3191,6 +3242,7 @@ static void receive_request(struct dlm_ls *ls, struct dlm_message *ms) attach_lkb(r, lkb); error = do_request(r, lkb); send_request_reply(r, lkb, error); + do_request_effects(r, lkb, error); unlock_rsb(r); put_rsb(r); @@ -3226,15 +3278,19 @@ static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms) goto out; receive_flags(lkb, ms); + error = receive_convert_args(ls, lkb, ms); - if (error) - goto out_reply; + if (error) { + send_convert_reply(r, lkb, error); + goto out; + } + reply = !down_conversion(lkb); error = do_convert(r, lkb); - out_reply: if (reply) send_convert_reply(r, lkb, error); + do_convert_effects(r, lkb, error); out: unlock_rsb(r); put_rsb(r); @@ -3266,13 +3322,16 @@ static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) goto out; receive_flags(lkb, ms); + error = receive_unlock_args(ls, lkb, ms); - if (error) - goto out_reply; + if (error) { + send_unlock_reply(r, lkb, error); + goto out; + } error = do_unlock(r, lkb); - out_reply: send_unlock_reply(r, lkb, error); + do_unlock_effects(r, lkb, error); out: unlock_rsb(r); put_rsb(r); @@ -3307,6 +3366,7 @@ static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) error = do_cancel(r, lkb); send_cancel_reply(r, lkb, error); + do_cancel_effects(r, lkb, error); out: unlock_rsb(r); put_rsb(r); diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index c010ecfc0d2..26a8bd40400 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -191,6 +191,18 @@ static int do_uevent(struct dlm_ls *ls, int in) return error; } +static int dlm_uevent(struct kset *kset, struct kobject *kobj, + struct kobj_uevent_env *env) +{ + struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); + + add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name); + return 0; +} + +static struct kset_uevent_ops dlm_uevent_ops = { + .uevent = 
dlm_uevent, +}; int __init dlm_lockspace_init(void) { @@ -199,7 +211,7 @@ int __init dlm_lockspace_init(void) INIT_LIST_HEAD(&lslist); spin_lock_init(&lslist_lock); - dlm_kset = kset_create_and_add("dlm", NULL, kernel_kobj); + dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj); if (!dlm_kset) { printk(KERN_WARNING "%s: can not create kset\n", __func__); return -ENOMEM; diff --git a/fs/dlm/user.c b/fs/dlm/user.c index e73a4bb572a..a4bfd31ac45 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. + * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions @@ -173,7 +173,7 @@ static int lkb_is_endoflife(struct dlm_lkb *lkb, int sb_status, int type) /* we could possibly check if the cancel of an orphan has resulted in the lkb being removed and then remove that lkb from the orphans list and free it */ -void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode) +void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode) { struct dlm_ls *ls; struct dlm_user_args *ua; @@ -206,8 +206,10 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode) ast_type = lkb->lkb_ast_type; lkb->lkb_ast_type |= type; - if (bastmode) - lkb->lkb_bastmode = bastmode; + if (type == AST_BAST) + lkb->lkb_bastmode = mode; + else + lkb->lkb_castmode = mode; if (!ast_type) { kref_get(&lkb->lkb_ref); diff --git a/fs/dlm/user.h b/fs/dlm/user.h index 1c968649228..f196091dd7f 100644 --- a/fs/dlm/user.h +++ b/fs/dlm/user.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. + * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved. 
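
Worth a note before leaving the dlm changes: after this series an lkb carries two callback modes instead of one. A minimal caller-side sketch, assuming the dlm types used above (the two dlm_add_ast() calls are taken verbatim from the lock.c hunks; the wrapper functions themselves are illustrative and not part of the patch):

	/* Completion ASTs record the granted mode in lkb_castmode;
	 * blocking ASTs record the requested mode in lkb_bastmode. */
	static void cast_example(struct dlm_lkb *lkb, int rv)
	{
		lkb->lkb_lksb->sb_status = rv;
		lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
		dlm_add_ast(lkb, AST_COMP, lkb->lkb_grmode);
	}

	static void bast_example(struct dlm_lkb *lkb, int rqmode)
	{
		dlm_add_ast(lkb, AST_BAST, rqmode);
	}
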
* * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions @@ -9,7 +9,7 @@ #ifndef __USER_DOT_H__ #define __USER_DOT_H__ -void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int bastmode); +void dlm_user_add_ast(struct dlm_lkb *lkb, int type, int mode); int dlm_user_init(void); void dlm_user_exit(void); int dlm_device_deregister(struct dlm_ls *ls); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index fbb6e5eed69..7cb0a59f4b9 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, char *cipher_name, size_t *key_size) { char dummy_key[ECRYPTFS_MAX_KEY_BYTES]; - char *full_alg_name; + char *full_alg_name = NULL; int rc; *key_tfm = NULL; @@ -1763,7 +1763,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, if (rc) goto out; *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC); - kfree(full_alg_name); if (IS_ERR(*key_tfm)) { rc = PTR_ERR(*key_tfm); printk(KERN_ERR "Unable to allocate crypto cipher with name " @@ -1786,6 +1785,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, goto out; } out: + kfree(full_alg_name); return rc; } diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 9e944057001..678172b61be 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -158,7 +158,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file) struct dentry *ecryptfs_dentry = file->f_path.dentry; /* Private value of ecryptfs_dentry allocated in * ecryptfs_lookup() */ - struct dentry *lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); + struct dentry *lower_dentry; struct ecryptfs_file_info *file_info; mount_crypt_stat = &ecryptfs_superblock_to_private( @@ -191,13 +191,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file) | ECRYPTFS_ENCRYPTED); } mutex_unlock(&crypt_stat->cs_mutex); - if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) - && !(file->f_flags & O_RDONLY)) { - rc = -EPERM; - printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " - "file must hence be opened RO\n", __func__); - goto out; - } if (!ecryptfs_inode_to_private(inode)->lower_file) { rc = ecryptfs_init_persistent_file(ecryptfs_dentry); if (rc) { @@ -208,6 +201,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file) goto out; } } + if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) + && !(file->f_flags & O_RDONLY)) { + rc = -EPERM; + printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " + "file must hence be opened RO\n", __func__); + goto out; + } ecryptfs_set_file_lower( file, ecryptfs_inode_to_private(inode)->lower_file); if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { @@ -299,7 +299,6 @@ static int ecryptfs_ioctl(struct inode *inode, struct file *file, const struct file_operations ecryptfs_dir_fops = { .readdir = ecryptfs_readdir, .ioctl = ecryptfs_ioctl, - .mmap = generic_file_mmap, .open = ecryptfs_open, .flush = ecryptfs_flush, .release = ecryptfs_release, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 429ca0b3ba0..4a430ab4115 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -282,7 +282,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, goto out; } rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry, - ecryptfs_dir_inode->i_sb, 1); + ecryptfs_dir_inode->i_sb, + ECRYPTFS_INTERPOSE_FLAG_D_ADD); if (rc) { printk(KERN_ERR "%s: Error 
interposing; rc = [%d]\n", __func__, rc); @@ -463,9 +464,6 @@ out_lock: unlock_dir(lower_dir_dentry); dput(lower_new_dentry); dput(lower_old_dentry); - d_drop(lower_old_dentry); - d_drop(new_dentry); - d_drop(old_dentry); return rc; } @@ -614,6 +612,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct dentry *lower_new_dentry; struct dentry *lower_old_dir_dentry; struct dentry *lower_new_dir_dentry; + struct dentry *trap = NULL; lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); @@ -621,7 +620,17 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, dget(lower_new_dentry); lower_old_dir_dentry = dget_parent(lower_old_dentry); lower_new_dir_dentry = dget_parent(lower_new_dentry); - lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); + trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); + /* source should not be ancestor of target */ + if (trap == lower_old_dentry) { + rc = -EINVAL; + goto out_lock; + } + /* target should not be ancestor of source */ + if (trap == lower_new_dentry) { + rc = -ENOTEMPTY; + goto out_lock; + } rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry, lower_new_dir_dentry->d_inode, lower_new_dentry); if (rc) @@ -715,31 +724,31 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) /* Released in ecryptfs_put_link(); only release here on error */ buf = kmalloc(len, GFP_KERNEL); if (!buf) { - rc = -ENOMEM; + buf = ERR_PTR(-ENOMEM); goto out; } old_fs = get_fs(); set_fs(get_ds()); rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); set_fs(old_fs); - if (rc < 0) - goto out_free; - else + if (rc < 0) { + kfree(buf); + buf = ERR_PTR(rc); + } else buf[rc] = '\0'; - rc = 0; - nd_set_link(nd, buf); - goto out; -out_free: - kfree(buf); out: - return ERR_PTR(rc); + nd_set_link(nd, buf); + return NULL; } static void ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr) { - /* Free the char* */ - kfree(nd_get_link(nd)); + char *buf = nd_get_link(nd); + if (!IS_ERR(buf)) { + /* Free the char* */ + kfree(buf); + } } /** @@ -772,18 +781,23 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat, } /** - * ecryptfs_truncate + * truncate_upper * @dentry: The ecryptfs layer dentry - * @new_length: The length to expand the file to + * @ia: Address of the ecryptfs inode's attributes + * @lower_ia: Address of the lower inode's attributes * * Function to handle truncations modifying the size of the file. Note * that the file sizes are interpolated. When expanding, we are simply - * writing strings of 0's out. When truncating, we need to modify the - * underlying file size according to the page index interpolations. + * writing strings of 0's out. When truncating, we truncate the upper + * inode and update the lower_ia according to the page index + * interpolations. If ATTR_SIZE is set in lower_ia->ia_valid upon return, + * the caller must use lower_ia in a call to notify_change() to perform + * the truncation of the lower inode. 
* * Returns zero on success; non-zero otherwise */ -int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) +static int truncate_upper(struct dentry *dentry, struct iattr *ia, + struct iattr *lower_ia) { int rc = 0; struct inode *inode = dentry->d_inode; @@ -794,8 +808,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) loff_t lower_size_before_truncate; loff_t lower_size_after_truncate; - if (unlikely((new_length == i_size))) + if (unlikely((ia->ia_size == i_size))) { + lower_ia->ia_valid &= ~ATTR_SIZE; goto out; + } crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; /* Set up a fake ecryptfs file, this is used to interface with * the file in the underlying filesystem so that the @@ -815,28 +831,30 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) &fake_ecryptfs_file, ecryptfs_inode_to_private(dentry->d_inode)->lower_file); /* Switch on growing or shrinking file */ - if (new_length > i_size) { + if (ia->ia_size > i_size) { char zero[] = { 0x00 }; + lower_ia->ia_valid &= ~ATTR_SIZE; /* Write a single 0 at the last position of the file; * this triggers code that will fill in 0's throughout * the intermediate portion of the previous end of the * file and the new and of the file */ rc = ecryptfs_write(&fake_ecryptfs_file, zero, - (new_length - 1), 1); - } else { /* new_length < i_size_read(inode) */ - /* We're chopping off all the pages down do the page - * in which new_length is located. Fill in the end of - * that page from (new_length & ~PAGE_CACHE_MASK) to + (ia->ia_size - 1), 1); + } else { /* ia->ia_size < i_size_read(inode) */ + /* We're chopping off all the pages down to the page + * in which ia->ia_size is located. Fill in the end of + * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to * PAGE_CACHE_SIZE with zeros. 
*/ size_t num_zeros = (PAGE_CACHE_SIZE - - (new_length & ~PAGE_CACHE_MASK)); + - (ia->ia_size & ~PAGE_CACHE_MASK)); if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { - rc = vmtruncate(inode, new_length); + rc = vmtruncate(inode, ia->ia_size); if (rc) goto out_free; - rc = vmtruncate(lower_dentry->d_inode, new_length); + lower_ia->ia_size = ia->ia_size; + lower_ia->ia_valid |= ATTR_SIZE; goto out_free; } if (num_zeros) { @@ -848,7 +866,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) goto out_free; } rc = ecryptfs_write(&fake_ecryptfs_file, zeros_virt, - new_length, num_zeros); + ia->ia_size, num_zeros); kfree(zeros_virt); if (rc) { printk(KERN_ERR "Error attempting to zero out " @@ -857,7 +875,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) goto out_free; } } - vmtruncate(inode, new_length); + vmtruncate(inode, ia->ia_size); rc = ecryptfs_write_inode_size_to_metadata(inode); if (rc) { printk(KERN_ERR "Problem with " @@ -870,10 +888,12 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) lower_size_before_truncate = upper_size_to_lower_size(crypt_stat, i_size); lower_size_after_truncate = - upper_size_to_lower_size(crypt_stat, new_length); - if (lower_size_after_truncate < lower_size_before_truncate) - vmtruncate(lower_dentry->d_inode, - lower_size_after_truncate); + upper_size_to_lower_size(crypt_stat, ia->ia_size); + if (lower_size_after_truncate < lower_size_before_truncate) { + lower_ia->ia_size = lower_size_after_truncate; + lower_ia->ia_valid |= ATTR_SIZE; + } else + lower_ia->ia_valid &= ~ATTR_SIZE; } out_free: if (ecryptfs_file_to_private(&fake_ecryptfs_file)) @@ -883,6 +903,33 @@ out: return rc; } +/** + * ecryptfs_truncate + * @dentry: The ecryptfs layer dentry + * @new_length: The length to expand the file to + * + * Simple function that handles the truncation of an eCryptfs inode and + * its corresponding lower inode. 
+ * + * Returns zero on success; non-zero otherwise + */ +int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) +{ + struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length }; + struct iattr lower_ia = { .ia_valid = 0 }; + int rc; + + rc = truncate_upper(dentry, &ia, &lower_ia); + if (!rc && lower_ia.ia_valid & ATTR_SIZE) { + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); + + mutex_lock(&lower_dentry->d_inode->i_mutex); + rc = notify_change(lower_dentry, &lower_ia); + mutex_unlock(&lower_dentry->d_inode->i_mutex); + } + return rc; +} + static int ecryptfs_permission(struct inode *inode, int mask) { @@ -905,6 +952,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) { int rc = 0; struct dentry *lower_dentry; + struct iattr lower_ia; struct inode *inode; struct inode *lower_inode; struct ecryptfs_crypt_stat *crypt_stat; @@ -943,15 +991,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) } } mutex_unlock(&crypt_stat->cs_mutex); + memcpy(&lower_ia, ia, sizeof(lower_ia)); + if (ia->ia_valid & ATTR_FILE) + lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file); if (ia->ia_valid & ATTR_SIZE) { - ecryptfs_printk(KERN_DEBUG, - "ia->ia_valid = [0x%x] ATTR_SIZE" " = [0x%x]\n", - ia->ia_valid, ATTR_SIZE); - rc = ecryptfs_truncate(dentry, ia->ia_size); - /* ecryptfs_truncate handles resizing of the lower file */ - ia->ia_valid &= ~ATTR_SIZE; - ecryptfs_printk(KERN_DEBUG, "ia->ia_valid = [%x]\n", - ia->ia_valid); + rc = truncate_upper(dentry, ia, &lower_ia); if (rc < 0) goto out; } @@ -960,17 +1004,32 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) * mode change is for clearing setuid/setgid bits. Allow lower fs * to interpret this in its own way. */ - if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) - ia->ia_valid &= ~ATTR_MODE; + if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) + lower_ia.ia_valid &= ~ATTR_MODE; mutex_lock(&lower_dentry->d_inode->i_mutex); - rc = notify_change(lower_dentry, ia); + rc = notify_change(lower_dentry, &lower_ia); mutex_unlock(&lower_dentry->d_inode->i_mutex); out: fsstack_copy_attr_all(inode, lower_inode); return rc; } +int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct kstat lower_stat; + int rc; + + rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), + ecryptfs_dentry_to_lower(dentry), &lower_stat); + if (!rc) { + generic_fillattr(dentry->d_inode, stat); + stat->blocks = lower_stat.blocks; + } + return rc; +} + int ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) @@ -1100,6 +1159,7 @@ const struct inode_operations ecryptfs_dir_iops = { const struct inode_operations ecryptfs_main_iops = { .permission = ecryptfs_permission, .setattr = ecryptfs_setattr, + .getattr = ecryptfs_getattr, .setxattr = ecryptfs_setxattr, .getxattr = ecryptfs_getxattr, .listxattr = ecryptfs_listxattr, diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 567bc4b9f70..ea2f92101df 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -585,8 +585,8 @@ out: * with as much information as it can before needing * the lower filesystem. 
* ecryptfs_read_super(): this accesses the lower filesystem and uses - * ecryptfs_interpolate to perform most of the linking - * ecryptfs_interpolate(): links the lower filesystem into ecryptfs + * ecryptfs_interpose to perform most of the linking + * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c) */ static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data, diff --git a/fs/eventfd.c b/fs/eventfd.c index d26402ff06e..7758cc382ef 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -135,26 +135,71 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait) return events; } -static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) +static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) +{ + *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count; + ctx->count -= *cnt; +} + +/** + * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. + * @ctx: [in] Pointer to eventfd context. + * @wait: [in] Wait queue to be removed. + * @cnt: [out] Pointer to the 64bit conter value. + * + * Returns zero if successful, or the following error codes: + * + * -EAGAIN : The operation would have blocked. + * + * This is used to atomically remove a wait queue entry from the eventfd wait + * queue head, and read/reset the counter value. + */ +int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, + __u64 *cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&ctx->wqh.lock, flags); + eventfd_ctx_do_read(ctx, cnt); + __remove_wait_queue(&ctx->wqh, wait); + if (*cnt != 0 && waitqueue_active(&ctx->wqh)) + wake_up_locked_poll(&ctx->wqh, POLLOUT); + spin_unlock_irqrestore(&ctx->wqh.lock, flags); + + return *cnt != 0 ? 0 : -EAGAIN; +} +EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); + +/** + * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. + * @ctx: [in] Pointer to eventfd context. + * @no_wait: [in] Different from zero if the operation should not block. + * @cnt: [out] Pointer to the 64bit conter value. + * + * Returns zero if successful, or the following error codes: + * + * -EAGAIN : The operation would have blocked but @no_wait was nonzero. + * -ERESTARTSYS : A signal interrupted the wait operation. + * + * If @no_wait is zero, the function might sleep until the eventfd internal + * counter becomes greater than zero. + */ +ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt) { - struct eventfd_ctx *ctx = file->private_data; ssize_t res; - __u64 ucnt = 0; DECLARE_WAITQUEUE(wait, current); - if (count < sizeof(ucnt)) - return -EINVAL; spin_lock_irq(&ctx->wqh.lock); + *cnt = 0; res = -EAGAIN; if (ctx->count > 0) - res = sizeof(ucnt); - else if (!(file->f_flags & O_NONBLOCK)) { + res = 0; + else if (!no_wait) { __add_wait_queue(&ctx->wqh, &wait); - for (res = 0;;) { + for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (ctx->count > 0) { - res = sizeof(ucnt); + res = 0; break; } if (signal_pending(current)) { @@ -168,18 +213,32 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, __remove_wait_queue(&ctx->wqh, &wait); __set_current_state(TASK_RUNNING); } - if (likely(res > 0)) { - ucnt = (ctx->flags & EFD_SEMAPHORE) ? 
1 : ctx->count; - ctx->count -= ucnt; + if (likely(res == 0)) { + eventfd_ctx_do_read(ctx, cnt); if (waitqueue_active(&ctx->wqh)) wake_up_locked_poll(&ctx->wqh, POLLOUT); } spin_unlock_irq(&ctx->wqh.lock); - if (res > 0 && put_user(ucnt, (__u64 __user *) buf)) - return -EFAULT; return res; } +EXPORT_SYMBOL_GPL(eventfd_ctx_read); + +static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + struct eventfd_ctx *ctx = file->private_data; + ssize_t res; + __u64 cnt; + + if (count < sizeof(cnt)) + return -EINVAL; + res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt); + if (res < 0) + return res; + + return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt); +} static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) diff --git a/fs/exec.c b/fs/exec.c index 632b02e34ec..cce6bbdbdbb 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm, struct vm_area_struct *prev = NULL; unsigned long vm_flags; unsigned long stack_base; + unsigned long stack_size; + unsigned long stack_expand; + unsigned long rlim_stack; #ifdef CONFIG_STACK_GROWSUP /* Limit stack size to 1GB */ @@ -627,10 +630,23 @@ int setup_arg_pages(struct linux_binprm *bprm, goto out_unlock; } + stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE; + stack_size = vma->vm_end - vma->vm_start; + /* + * Align this down to a page boundary as expand_stack + * will align it up. + */ + rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK; #ifdef CONFIG_STACK_GROWSUP - stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; + if (stack_size + stack_expand > rlim_stack) + stack_base = vma->vm_start + rlim_stack; + else + stack_base = vma->vm_end + stack_expand; #else - stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; + if (stack_size + stack_expand > rlim_stack) + stack_base = vma->vm_end - rlim_stack; + else + stack_base = vma->vm_start - stack_expand; #endif ret = expand_stack(vma, stack_base); if (ret) @@ -941,9 +957,7 @@ void set_task_comm(struct task_struct *tsk, char *buf) int flush_old_exec(struct linux_binprm * bprm) { - char * name; - int i, ch, retval; - char tcomm[sizeof(current->comm)]; + int retval; /* * Make sure we have a private signal table and that @@ -964,6 +978,25 @@ int flush_old_exec(struct linux_binprm * bprm) bprm->mm = NULL; /* We're using it now */ + current->flags &= ~PF_RANDOMIZE; + flush_thread(); + current->personality &= ~bprm->per_clear; + + return 0; + +out: + return retval; +} +EXPORT_SYMBOL(flush_old_exec); + +void setup_new_exec(struct linux_binprm * bprm) +{ + int i, ch; + char * name; + char tcomm[sizeof(current->comm)]; + + arch_pick_mmap_layout(current->mm); + /* This is the point of no return */ current->sas_ss_sp = current->sas_ss_size = 0; @@ -985,9 +1018,6 @@ int flush_old_exec(struct linux_binprm * bprm) tcomm[i] = '\0'; set_task_comm(current, tcomm); - current->flags &= ~PF_RANDOMIZE; - flush_thread(); - /* Set the new mm task size. 
We have to do that late because it may * depend on TIF_32BIT which is only updated in flush_thread() on * some architectures like powerpc @@ -1003,8 +1033,6 @@ int flush_old_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); } - current->personality &= ~bprm->per_clear; - /* * Flush performance counters when crossing a * security domain: @@ -1019,14 +1047,8 @@ int flush_old_exec(struct linux_binprm * bprm) flush_signal_handlers(current, 0); flush_old_files(current->files); - - return 0; - -out: - return retval; } - -EXPORT_SYMBOL(flush_old_exec); +EXPORT_SYMBOL(setup_new_exec); /* * Prepare credentials and lock ->cred_guard_mutex. diff --git a/fs/exofs/common.h b/fs/exofs/common.h index b1b178e6171..f0d520312d8 100644 --- a/fs/exofs/common.h +++ b/fs/exofs/common.h @@ -55,6 +55,8 @@ /* exofs Application specific page/attribute */ # define EXOFS_APAGE_FS_DATA (OSD_APAGE_APP_DEFINED_FIRST + 3) # define EXOFS_ATTR_INODE_DATA 1 +# define EXOFS_ATTR_INODE_FILE_LAYOUT 2 +# define EXOFS_ATTR_INODE_DIR_LAYOUT 3 /* * The maximum number of files we can have is limited by the size of the @@ -206,4 +208,41 @@ enum { (((name_len) + offsetof(struct exofs_dir_entry, name) + \ EXOFS_DIR_ROUND) & ~EXOFS_DIR_ROUND) +/* + * The on-disk (optional) layout structure. + * sits in an EXOFS_ATTR_INODE_FILE_LAYOUT or EXOFS_ATTR_INODE_DIR_LAYOUT + * attribute, attached to any inode, usually to a directory. + */ + +enum exofs_inode_layout_gen_functions { + LAYOUT_MOVING_WINDOW = 0, + LAYOUT_IMPLICT = 1, +}; + +struct exofs_on_disk_inode_layout { + __le16 gen_func; /* One of enum exofs_inode_layout_gen_functions */ + __le16 pad; + union { + /* gen_func == LAYOUT_MOVING_WINDOW (default) */ + struct exofs_layout_sliding_window { + __le32 num_devices; /* first n devices in global-table*/ + } sliding_window __packed; + + /* gen_func == LAYOUT_IMPLICT */ + struct exofs_layout_implict_list { + struct exofs_dt_data_map data_map; + /* Variable array of size data_map.cb_num_comps. 
These + * are device indexes of the devices in the global table + */ + __le32 dev_indexes[]; + } implict __packed; + }; +} __packed; + +static inline size_t exofs_on_disk_inode_layout_size(unsigned max_devs) +{ + return sizeof(struct exofs_on_disk_inode_layout) + + max_devs * sizeof(__le32); +} + #endif /*ifndef __EXOFS_COM_H__*/ diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h index c35fd462398..59b8bf2825c 100644 --- a/fs/exofs/exofs.h +++ b/fs/exofs/exofs.h @@ -55,12 +55,28 @@ /* u64 has problems with printk this will cast it to unsigned long long */ #define _LLU(x) (unsigned long long)(x) +struct exofs_layout { + osd_id s_pid; /* partition ID of file system*/ + + /* Our way of looking at the data_map */ + unsigned stripe_unit; + unsigned mirrors_p1; + + unsigned group_width; + u64 group_depth; + unsigned group_count; + + enum exofs_inode_layout_gen_functions lay_func; + + unsigned s_numdevs; /* Num of devices in array */ + struct osd_dev *s_ods[0]; /* Variable length */ +}; + /* * our extension to the in-memory superblock */ struct exofs_sb_info { struct exofs_fscb s_fscb; /* Written often, pre-allocate*/ - osd_id s_pid; /* partition ID of file system*/ int s_timeout; /* timeout for OSD operations */ uint64_t s_nextid; /* highest object ID used */ uint32_t s_numfiles; /* number of files on fs */ @@ -69,22 +85,27 @@ struct exofs_sb_info { atomic_t s_curr_pending; /* number of pending commands */ uint8_t s_cred[OSD_CAP_LEN]; /* credential for the fscb */ - struct pnfs_osd_data_map data_map; /* Default raid to use */ - unsigned s_numdevs; /* Num of devices in array */ - struct osd_dev *s_ods[1]; /* Variable length, minimum 1 */ + struct pnfs_osd_data_map data_map; /* Default raid to use + * FIXME: Needed ? + */ +/* struct exofs_layout dir_layout;*/ /* Default dir layout */ + struct exofs_layout layout; /* Default files layout, + * contains the variable osd_dev + * array. 
Keep last */ + struct osd_dev *_min_one_dev[1]; /* Place holder for one dev */ }; /* * our extension to the in-memory inode */ struct exofs_i_info { + struct inode vfs_inode; /* normal in-memory inode */ + wait_queue_head_t i_wq; /* wait queue for inode */ unsigned long i_flags; /* various atomic flags */ uint32_t i_data[EXOFS_IDATA];/*short symlink names and device #s*/ uint32_t i_dir_start_lookup; /* which page to start lookup */ - wait_queue_head_t i_wq; /* wait queue for inode */ uint64_t i_commit_size; /* the object's written length */ uint8_t i_cred[OSD_CAP_LEN];/* all-powerful credential */ - struct inode vfs_inode; /* normal in-memory inode */ }; static inline osd_id exofs_oi_objno(struct exofs_i_info *oi) @@ -101,7 +122,7 @@ struct exofs_io_state { void *private; exofs_io_done_fn done; - struct exofs_sb_info *sbi; + struct exofs_layout *layout; struct osd_obj_id obj; u8 *cred; @@ -109,7 +130,11 @@ struct exofs_io_state { loff_t offset; unsigned long length; void *kern_buff; - struct bio *bio; + + struct page **pages; + unsigned nr_pages; + unsigned pgbase; + unsigned pages_consumed; /* Attributes */ unsigned in_attr_len; @@ -122,6 +147,9 @@ struct exofs_io_state { struct exofs_per_dev_state { struct osd_request *or; struct bio *bio; + loff_t offset; + unsigned length; + unsigned dev; } per_dev[]; }; @@ -175,6 +203,12 @@ static inline struct exofs_i_info *exofs_i(struct inode *inode) } /* + * Given a layout, object_number and stripe_index return the associated global + * dev_index + */ +unsigned exofs_layout_od_id(struct exofs_layout *layout, + osd_id obj_no, unsigned layout_index); +/* * Maximum count of links to a file */ #define EXOFS_LINK_MAX 32000 @@ -189,7 +223,8 @@ void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj, u64 offset, void *p, unsigned length); -int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** ios); +int exofs_get_io_state(struct exofs_layout *layout, + struct exofs_io_state **ios); void exofs_put_io_state(struct exofs_io_state *ios); int exofs_check_io(struct exofs_io_state *ios, u64 *resid); diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 2afbcebeda7..5514f3c2c2f 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -41,16 +41,18 @@ enum { BIO_MAX_PAGES_KMALLOC = (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec), + MAX_PAGES_KMALLOC = + PAGE_SIZE / sizeof(struct page *), }; struct page_collect { struct exofs_sb_info *sbi; - struct request_queue *req_q; struct inode *inode; unsigned expected_pages; struct exofs_io_state *ios; - struct bio *bio; + struct page **pages; + unsigned alloc_pages; unsigned nr_pages; unsigned long length; loff_t pg_first; /* keep 64bit also in 32-arches */ @@ -62,15 +64,12 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages, struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; pcol->sbi = sbi; - /* Create master bios on first Q, later on cloning, each clone will be - * allocated on it's destination Q - */ - pcol->req_q = osd_request_queue(sbi->s_ods[0]); pcol->inode = inode; pcol->expected_pages = expected_pages; pcol->ios = NULL; - pcol->bio = NULL; + pcol->pages = NULL; + pcol->alloc_pages = 0; pcol->nr_pages = 0; pcol->length = 0; pcol->pg_first = -1; @@ -80,7 +79,8 @@ static void _pcol_reset(struct page_collect *pcol) { pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages); - pcol->bio = NULL; + pcol->pages = NULL; + pcol->alloc_pages = 0; pcol->nr_pages = 0; pcol->length = 0; 
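	/* Added note: a page_collect now carries a bare pages[] array rather
	 * than a pre-built bio; per-device bios are assembled later, in
	 * ios.c, once the striping code knows which device each page
	 * belongs to. */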
pcol->pg_first = -1; @@ -90,38 +90,43 @@ static void _pcol_reset(struct page_collect *pcol) * it might not end here. don't be left with nothing */ if (!pcol->expected_pages) - pcol->expected_pages = BIO_MAX_PAGES_KMALLOC; + pcol->expected_pages = MAX_PAGES_KMALLOC; } static int pcol_try_alloc(struct page_collect *pcol) { - int pages = min_t(unsigned, pcol->expected_pages, - BIO_MAX_PAGES_KMALLOC); + unsigned pages = min_t(unsigned, pcol->expected_pages, + MAX_PAGES_KMALLOC); if (!pcol->ios) { /* First time allocate io_state */ - int ret = exofs_get_io_state(pcol->sbi, &pcol->ios); + int ret = exofs_get_io_state(&pcol->sbi->layout, &pcol->ios); if (ret) return ret; } + /* TODO: easily support bio chaining */ + pages = min_t(unsigned, pages, + pcol->sbi->layout.group_width * BIO_MAX_PAGES_KMALLOC); + for (; pages; pages >>= 1) { - pcol->bio = bio_kmalloc(GFP_KERNEL, pages); - if (likely(pcol->bio)) + pcol->pages = kmalloc(pages * sizeof(struct page *), + GFP_KERNEL); + if (likely(pcol->pages)) { + pcol->alloc_pages = pages; return 0; + } } - EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n", + EXOFS_ERR("Failed to kmalloc expected_pages=%u\n", pcol->expected_pages); return -ENOMEM; } static void pcol_free(struct page_collect *pcol) { - if (pcol->bio) { - bio_put(pcol->bio); - pcol->bio = NULL; - } + kfree(pcol->pages); + pcol->pages = NULL; if (pcol->ios) { exofs_put_io_state(pcol->ios); @@ -132,11 +137,10 @@ static void pcol_free(struct page_collect *pcol) static int pcol_add_page(struct page_collect *pcol, struct page *page, unsigned len) { - int added_len = bio_add_pc_page(pcol->req_q, pcol->bio, page, len, 0); - if (unlikely(len != added_len)) + if (unlikely(pcol->nr_pages >= pcol->alloc_pages)) return -ENOMEM; - ++pcol->nr_pages; + pcol->pages[pcol->nr_pages++] = page; pcol->length += len; return 0; } @@ -181,7 +185,6 @@ static void update_write_page(struct page *page, int ret) */ static int __readpages_done(struct page_collect *pcol, bool do_unlock) { - struct bio_vec *bvec; int i; u64 resid; u64 good_bytes; @@ -193,13 +196,13 @@ static int __readpages_done(struct page_collect *pcol, bool do_unlock) else good_bytes = pcol->length - resid; - EXOFS_DBGMSG("readpages_done(0x%lx) good_bytes=0x%llx" + EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx" " length=0x%lx nr_pages=%u\n", pcol->inode->i_ino, _LLU(good_bytes), pcol->length, pcol->nr_pages); - __bio_for_each_segment(bvec, pcol->bio, i, 0) { - struct page *page = bvec->bv_page; + for (i = 0; i < pcol->nr_pages; i++) { + struct page *page = pcol->pages[i]; struct inode *inode = page->mapping->host; int page_stat; @@ -218,11 +221,11 @@ static int __readpages_done(struct page_collect *pcol, bool do_unlock) ret = update_read_page(page, page_stat); if (do_unlock) unlock_page(page); - length += bvec->bv_len; + length += PAGE_SIZE; } pcol_free(pcol); - EXOFS_DBGMSG("readpages_done END\n"); + EXOFS_DBGMSG2("readpages_done END\n"); return ret; } @@ -238,11 +241,10 @@ static void readpages_done(struct exofs_io_state *ios, void *p) static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw) { - struct bio_vec *bvec; int i; - __bio_for_each_segment(bvec, pcol->bio, i, 0) { - struct page *page = bvec->bv_page; + for (i = 0; i < pcol->nr_pages; i++) { + struct page *page = pcol->pages[i]; if (rw == READ) update_read_page(page, ret); @@ -260,13 +262,14 @@ static int read_exec(struct page_collect *pcol, bool is_sync) struct page_collect *pcol_copy = NULL; int ret; - if (!pcol->bio) + if (!pcol->pages) return 0; /* see comment 
in _readpage() about sync reads */ WARN_ON(is_sync && (pcol->nr_pages != 1)); - ios->bio = pcol->bio; + ios->pages = pcol->pages; + ios->nr_pages = pcol->nr_pages; ios->length = pcol->length; ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT; @@ -290,7 +293,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync) atomic_inc(&pcol->sbi->s_curr_pending); - EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n", + EXOFS_DBGMSG2("read_exec obj=0x%llx start=0x%llx length=0x%lx\n", ios->obj.id, _LLU(ios->offset), pcol->length); /* pages ownership was passed to pcol_copy */ @@ -366,7 +369,7 @@ try_again: goto try_again; } - if (!pcol->bio) { + if (!pcol->pages) { ret = pcol_try_alloc(pcol); if (unlikely(ret)) goto fail; @@ -448,7 +451,6 @@ static int exofs_readpage(struct file *file, struct page *page) static void writepages_done(struct exofs_io_state *ios, void *p) { struct page_collect *pcol = p; - struct bio_vec *bvec; int i; u64 resid; u64 good_bytes; @@ -462,13 +464,13 @@ static void writepages_done(struct exofs_io_state *ios, void *p) else good_bytes = pcol->length - resid; - EXOFS_DBGMSG("writepages_done(0x%lx) good_bytes=0x%llx" + EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx" " length=0x%lx nr_pages=%u\n", pcol->inode->i_ino, _LLU(good_bytes), pcol->length, pcol->nr_pages); - __bio_for_each_segment(bvec, pcol->bio, i, 0) { - struct page *page = bvec->bv_page; + for (i = 0; i < pcol->nr_pages; i++) { + struct page *page = pcol->pages[i]; struct inode *inode = page->mapping->host; int page_stat; @@ -485,12 +487,12 @@ static void writepages_done(struct exofs_io_state *ios, void *p) EXOFS_DBGMSG2(" writepages_done(0x%lx, 0x%lx) status=%d\n", inode->i_ino, page->index, page_stat); - length += bvec->bv_len; + length += PAGE_SIZE; } pcol_free(pcol); kfree(pcol); - EXOFS_DBGMSG("writepages_done END\n"); + EXOFS_DBGMSG2("writepages_done END\n"); } static int write_exec(struct page_collect *pcol) @@ -500,7 +502,7 @@ static int write_exec(struct page_collect *pcol) struct page_collect *pcol_copy = NULL; int ret; - if (!pcol->bio) + if (!pcol->pages) return 0; pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL); @@ -512,9 +514,8 @@ static int write_exec(struct page_collect *pcol) *pcol_copy = *pcol; - pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ - - ios->bio = pcol_copy->bio; + ios->pages = pcol_copy->pages; + ios->nr_pages = pcol_copy->nr_pages; ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT; ios->length = pcol_copy->length; ios->done = writepages_done; @@ -527,7 +528,7 @@ static int write_exec(struct page_collect *pcol) } atomic_inc(&pcol->sbi->s_curr_pending); - EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n", + EXOFS_DBGMSG2("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n", pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset), pcol->length); /* pages ownership was passed to pcol_copy */ @@ -605,7 +606,7 @@ try_again: goto try_again; } - if (!pcol->bio) { + if (!pcol->pages) { ret = pcol_try_alloc(pcol); if (unlikely(ret)) goto fail; @@ -616,7 +617,7 @@ try_again: ret = pcol_add_page(pcol, page, len); if (unlikely(ret)) { - EXOFS_DBGMSG("Failed pcol_add_page " + EXOFS_DBGMSG2("Failed pcol_add_page " "nr_pages=%u total_length=0x%lx\n", pcol->nr_pages, pcol->length); @@ -663,7 +664,7 @@ static int exofs_writepages(struct address_space *mapping, if (expected_pages < 32L) expected_pages = 32L; - EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx " + EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx 
wbc->end=0x%llx " "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n", mapping->host->i_ino, wbc->range_start, wbc->range_end, mapping->nrpages, start, end, expected_pages); @@ -859,20 +860,33 @@ int exofs_setattr(struct dentry *dentry, struct iattr *iattr) return error; } +static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF( + EXOFS_APAGE_FS_DATA, + EXOFS_ATTR_INODE_FILE_LAYOUT, + 0); +static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF( + EXOFS_APAGE_FS_DATA, + EXOFS_ATTR_INODE_DIR_LAYOUT, + 0); + /* - * Read an inode from the OSD, and return it as is. We also return the size - * attribute in the 'obj_size' argument. + * Read the Linux inode info from the OSD, and return it as is. In exofs the + * inode info is in an application specific page/attribute of the osd-object. */ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi, - struct exofs_fcb *inode, uint64_t *obj_size) + struct exofs_fcb *inode) { struct exofs_sb_info *sbi = sb->s_fs_info; - struct osd_attr attrs[2]; + struct osd_attr attrs[] = { + [0] = g_attr_inode_data, + [1] = g_attr_inode_file_layout, + [2] = g_attr_inode_dir_layout, + }; struct exofs_io_state *ios; + struct exofs_on_disk_inode_layout *layout; int ret; - *obj_size = ~0; - ret = exofs_get_io_state(sbi, &ios); + ret = exofs_get_io_state(&sbi->layout, &ios); if (unlikely(ret)) { EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__); return ret; @@ -882,14 +896,25 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi, exofs_make_credential(oi->i_cred, &ios->obj); ios->cred = oi->i_cred; - attrs[0] = g_attr_inode_data; - attrs[1] = g_attr_logical_length; + attrs[1].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs); + attrs[2].len = exofs_on_disk_inode_layout_size(sbi->layout.s_numdevs); + ios->in_attr = attrs; ios->in_attr_len = ARRAY_SIZE(attrs); ret = exofs_sbi_read(ios); - if (ret) + if (unlikely(ret)) { + EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n", + _LLU(ios->obj.id), ret); + memset(inode, 0, sizeof(*inode)); + inode->i_mode = 0040000 | (0777 & ~022); + /* If object is lost on target we might as well enable it's + * delete. 
+ */ + if ((ret == -ENOENT) || (ret == -EINVAL)) + ret = 0; goto out; + } ret = extract_attr_from_ios(ios, &attrs[0]); if (ret) { @@ -901,11 +926,33 @@ static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi, ret = extract_attr_from_ios(ios, &attrs[1]); if (ret) { - EXOFS_ERR("%s: extract_attr of logical_length failed\n", - __func__); + EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__); + goto out; + } + if (attrs[1].len) { + layout = attrs[1].val_ptr; + if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) { + EXOFS_ERR("%s: unsupported files layout %d\n", + __func__, layout->gen_func); + ret = -ENOTSUPP; + goto out; + } + } + + ret = extract_attr_from_ios(ios, &attrs[2]); + if (ret) { + EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__); goto out; } - *obj_size = get_unaligned_be64(attrs[1].val_ptr); + if (attrs[2].len) { + layout = attrs[2].val_ptr; + if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) { + EXOFS_ERR("%s: unsupported meta-data layout %d\n", + __func__, layout->gen_func); + ret = -ENOTSUPP; + goto out; + } + } out: exofs_put_io_state(ios); @@ -925,7 +972,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino) struct exofs_i_info *oi; struct exofs_fcb fcb; struct inode *inode; - uint64_t obj_size; int ret; inode = iget_locked(sb, ino); @@ -937,7 +983,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino) __oi_init(oi); /* read the inode from the osd */ - ret = exofs_get_inode(sb, oi, &fcb, &obj_size); + ret = exofs_get_inode(sb, oi, &fcb); if (ret) goto bad_inode; @@ -958,13 +1004,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino) inode->i_blkbits = EXOFS_BLKSHIFT; inode->i_generation = le32_to_cpu(fcb.i_generation); - if ((inode->i_size != obj_size) && - (!exofs_inode_is_fast_symlink(inode))) { - EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n", - inode->i_size, _LLU(obj_size)); - /* FIXME: call exofs_inode_recovery() */ - } - oi->i_dir_start_lookup = 0; if ((inode->i_nlink == 0) && (inode->i_mode == 0)) { @@ -1043,7 +1082,7 @@ static void create_done(struct exofs_io_state *ios, void *p) if (unlikely(ret)) { EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx", - _LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid)); + _LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid)); /*TODO: When FS is corrupted creation can fail, object already * exist. Get rid of this asynchronous creation, if exist * increment the obj counter and try the next object. 
Until we @@ -1104,7 +1143,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode) mark_inode_dirty(inode); - ret = exofs_get_io_state(sbi, &ios); + ret = exofs_get_io_state(&sbi->layout, &ios); if (unlikely(ret)) { EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n"); return ERR_PTR(ret); @@ -1170,8 +1209,10 @@ static int exofs_update_inode(struct inode *inode, int do_sync) int ret; args = kzalloc(sizeof(*args), GFP_KERNEL); - if (!args) + if (!args) { + EXOFS_DBGMSG("Faild kzalloc of args\n"); return -ENOMEM; + } fcb = &args->fcb; @@ -1200,7 +1241,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync) } else memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data)); - ret = exofs_get_io_state(sbi, &ios); + ret = exofs_get_io_state(&sbi->layout, &ios); if (unlikely(ret)) { EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__); goto free_args; @@ -1234,7 +1275,8 @@ static int exofs_update_inode(struct inode *inode, int do_sync) free_args: kfree(args); out: - EXOFS_DBGMSG("ret=>%d\n", ret); + EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n", + inode->i_ino, do_sync, ret); return ret; } @@ -1283,7 +1325,7 @@ void exofs_delete_inode(struct inode *inode) clear_inode(inode); - ret = exofs_get_io_state(sbi, &ios); + ret = exofs_get_io_state(&sbi->layout, &ios); if (unlikely(ret)) { EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__); return; diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c index 5bad01fa1f9..5293bc411d1 100644 --- a/fs/exofs/ios.c +++ b/fs/exofs/ios.c @@ -23,9 +23,13 @@ */ #include <scsi/scsi_device.h> +#include <asm/div64.h> #include "exofs.h" +#define EXOFS_DBGMSG2(M...) do {} while (0) +/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */ + void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj) { osd_sec_init_nosec_doall_caps(cred_a, obj, false, true); @@ -64,21 +68,24 @@ out: return ret; } -int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** pios) +int exofs_get_io_state(struct exofs_layout *layout, + struct exofs_io_state **pios) { struct exofs_io_state *ios; /*TODO: Maybe use kmem_cach per sbi of size - * exofs_io_state_size(sbi->s_numdevs) + * exofs_io_state_size(layout->s_numdevs) */ - ios = kzalloc(exofs_io_state_size(sbi->s_numdevs), GFP_KERNEL); + ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL); if (unlikely(!ios)) { + EXOFS_DBGMSG("Faild kzalloc bytes=%d\n", + exofs_io_state_size(layout->s_numdevs)); *pios = NULL; return -ENOMEM; } - ios->sbi = sbi; - ios->obj.partition = sbi->s_pid; + ios->layout = layout; + ios->obj.partition = layout->s_pid; *pios = ios; return 0; } @@ -101,6 +108,29 @@ void exofs_put_io_state(struct exofs_io_state *ios) } } +unsigned exofs_layout_od_id(struct exofs_layout *layout, + osd_id obj_no, unsigned layout_index) +{ +/* switch (layout->lay_func) { + case LAYOUT_MOVING_WINDOW: + {*/ + unsigned dev_mod = obj_no; + + return (layout_index + dev_mod * layout->mirrors_p1) % + layout->s_numdevs; +/* } + case LAYOUT_FUNC_IMPLICT: + return layout->devs[layout_index]; + }*/ +} + +static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios, + unsigned layout_index) +{ + return ios->layout->s_ods[ + exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)]; +} + static void _sync_done(struct exofs_io_state *ios, void *p) { struct completion *waiting = p; @@ -168,6 +198,21 @@ static int exofs_io_execute(struct exofs_io_state *ios) return ret; } +static void _clear_bio(struct bio *bio) +{ + struct bio_vec *bv; + unsigned i; + + __bio_for_each_segment(bv, bio, i, 0) { + 
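		/* Added note: each segment of a short READ is zeroed in
		 * place -- whole pages via clear_highpage(), a partial page
		 * via zero_user() -- so a read that starts past the end of
		 * the object returns zeros rather than stale data. */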
unsigned this_count = bv->bv_len;
+
+		if (likely(PAGE_SIZE == this_count))
+			clear_highpage(bv->bv_page);
+		else
+			zero_user(bv->bv_page, bv->bv_offset, this_count);
+	}
+}
+
 int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
 {
	enum osd_err_priority acumulated_osd_err = 0;
@@ -176,16 +221,25 @@
	for (i = 0; i < ios->numdevs; i++) {
		struct osd_sense_info osi;
-		int ret = osd_req_decode_sense(ios->per_dev[i].or, &osi);
+		struct osd_request *or = ios->per_dev[i].or;
+		int ret;
+
+		if (unlikely(!or))
+			continue;
+		ret = osd_req_decode_sense(or, &osi);
		if (likely(!ret))
			continue;
-		if (unlikely(ret == -EFAULT)) {
-			EXOFS_DBGMSG("%s: EFAULT Need page clear\n", __func__);
-			/*FIXME: All the pages in this device range should:
-			 *	clear_highpage(page);
-			 */
+		if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
+			/* start read offset passed endof file */
+			_clear_bio(ios->per_dev[i].bio);
+			EXOFS_DBGMSG("start read offset passed end of file "
+				"offset=0x%llx, length=0x%llx\n",
+				_LLU(ios->per_dev[i].offset),
+				_LLU(ios->per_dev[i].length));
+
+			continue; /* we recovered */
		}
		if (osi.osd_err_pri >= acumulated_osd_err) {
@@ -205,14 +259,259 @@
	return acumulated_lin_err;
 }
+/*
+ * L - logical offset into the file
+ *
+ * U - The number of bytes in a stripe within a group
+ *
+ *	U = stripe_unit * group_width
+ *
+ * T - The number of bytes striped within a group of component objects
+ *     (before advancing to the next group)
+ *
+ *	T = stripe_unit * group_width * group_depth
+ *
+ * S - The number of bytes striped across all component objects
+ *     before the pattern repeats
+ *
+ *	S = stripe_unit * group_width * group_depth * group_count
+ *
+ * M - The "major" (i.e., across all components) stripe number
+ *
+ *	M = L / S
+ *
+ * G - Counts the groups from the beginning of the major stripe
+ *
+ *	G = (L - (M * S)) / T [or (L % S) / T]
+ *
+ * H - The byte offset within the group
+ *
+ *	H = (L - (M * S)) % T [or (L % S) % T]
+ *
+ * N - The "minor" (i.e., across the group) stripe number
+ *
+ *	N = H / U
+ *
+ * C - The component index coresponding to L
+ *
+ *	C = (H - (N * U)) / stripe_unit + G * group_width
+ *	[or (L % U) / stripe_unit + G * group_width]
+ *
+ * O - The component offset coresponding to L
+ *
+ *	O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
+ */
+struct _striping_info {
+	u64 obj_offset;
+	u64 group_length;
+	u64 total_group_length;
+	u64 Major;
+	unsigned dev;
+	unsigned unit_off;
+};
+
+static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
+			      struct _striping_info *si)
+{
+	u32 stripe_unit = ios->layout->stripe_unit;
+	u32 group_width = ios->layout->group_width;
+	u64 group_depth = ios->layout->group_depth;
+
+	u32 U = stripe_unit * group_width;
+	u64 T = U * group_depth;
+	u64 S = T * ios->layout->group_count;
+	u64 M = div64_u64(file_offset, S);
+
+	/*
+	G = (L - (M * S)) / T
+	H = (L - (M * S)) % T
+	*/
+	u64 LmodS = file_offset - M * S;
+	u32 G = div64_u64(LmodS, T);
+	u64 H = LmodS - G * T;
+
+	u32 N = div_u64(H, U);
+
+	/* "H - (N * U)" is just "H % U" so it's bound to u32 */
+	si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
+	si->dev *= ios->layout->mirrors_p1;
+
+	div_u64_rem(file_offset, stripe_unit, &si->unit_off);
+
+	si->obj_offset = si->unit_off + (N * stripe_unit) +
+				(M * group_depth * stripe_unit);
+
+	si->group_length = T - H;
+	si->total_group_length = T;
+	si->Major = M;
+}
+
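A quick numeric check of the mapping above (an editorial sketch, not code from this patch; all parameter values are assumed): with stripe_unit = 64 KiB, group_width = 4, group_depth = 2, group_count = 2 and no mirrors (mirrors_p1 = 1), the same arithmetic in plain C gives L = 640 KiB -> component 6 at object offset 0, and L = 1 MiB -> component 0 at object offset 128 KiB:

	#include <stdint.h>

	struct si { uint64_t obj_offset; unsigned dev, unit_off; };

	/* Same formulas as _calc_stripe_info(), but with plain 64-bit
	 * division (the kernel uses div64_u64()/div_u64() because 64-bit
	 * '/' is not available on every 32-bit architecture). */
	static void calc(uint64_t L, unsigned su, unsigned gw, uint64_t gd,
			 unsigned gc, unsigned mp1, struct si *si)
	{
		uint64_t U = (uint64_t)su * gw;	/* bytes per group stripe */
		uint64_t T = U * gd;		/* bytes per group */
		uint64_t S = T * gc;		/* bytes per major stripe */
		uint64_t M = L / S;
		uint64_t G = (L % S) / T;
		uint64_t H = (L % S) % T;
		uint64_t N = H / U;

		si->unit_off = L % su;
		si->dev = ((H - N * U) / su + G * gw) * mp1;
		si->obj_offset = si->unit_off + N * su + M * gd * su;
	}
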
+static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg, + unsigned pgbase, struct exofs_per_dev_state *per_dev, + int cur_len) +{ + unsigned pg = *cur_pg; + struct request_queue *q = + osd_request_queue(exofs_ios_od(ios, per_dev->dev)); + + per_dev->length += cur_len; + + if (per_dev->bio == NULL) { + unsigned pages_in_stripe = ios->layout->group_width * + (ios->layout->stripe_unit / PAGE_SIZE); + unsigned bio_size = (ios->nr_pages + pages_in_stripe) / + ios->layout->group_width; + + per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size); + if (unlikely(!per_dev->bio)) { + EXOFS_DBGMSG("Faild to allocate BIO size=%u\n", + bio_size); + return -ENOMEM; + } + } + + while (cur_len > 0) { + unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len); + unsigned added_len; + + BUG_ON(ios->nr_pages <= pg); + cur_len -= pglen; + + added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg], + pglen, pgbase); + if (unlikely(pglen != added_len)) + return -ENOMEM; + pgbase = 0; + ++pg; + } + BUG_ON(cur_len); + + *cur_pg = pg; + return 0; +} + +static int _prepare_one_group(struct exofs_io_state *ios, u64 length, + struct _striping_info *si, unsigned first_comp) +{ + unsigned stripe_unit = ios->layout->stripe_unit; + unsigned mirrors_p1 = ios->layout->mirrors_p1; + unsigned devs_in_group = ios->layout->group_width * mirrors_p1; + unsigned dev = si->dev; + unsigned first_dev = dev - (dev % devs_in_group); + unsigned comp = first_comp + (dev - first_dev); + unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0; + unsigned cur_pg = ios->pages_consumed; + int ret = 0; + + while (length) { + struct exofs_per_dev_state *per_dev = &ios->per_dev[comp]; + unsigned cur_len, page_off = 0; + + if (!per_dev->length) { + per_dev->dev = dev; + if (dev < si->dev) { + per_dev->offset = si->obj_offset + stripe_unit - + si->unit_off; + cur_len = stripe_unit; + } else if (dev == si->dev) { + per_dev->offset = si->obj_offset; + cur_len = stripe_unit - si->unit_off; + page_off = si->unit_off & ~PAGE_MASK; + BUG_ON(page_off && (page_off != ios->pgbase)); + } else { /* dev > si->dev */ + per_dev->offset = si->obj_offset - si->unit_off; + cur_len = stripe_unit; + } + + if (max_comp < comp) + max_comp = comp; + + dev += mirrors_p1; + dev = (dev % devs_in_group) + first_dev; + } else { + cur_len = stripe_unit; + } + if (cur_len >= length) + cur_len = length; + + ret = _add_stripe_unit(ios, &cur_pg, page_off , per_dev, + cur_len); + if (unlikely(ret)) + goto out; + + comp += mirrors_p1; + comp = (comp % devs_in_group) + first_comp; + + length -= cur_len; + } +out: + ios->numdevs = max_comp + mirrors_p1; + ios->pages_consumed = cur_pg; + return ret; +} + +static int _prepare_for_striping(struct exofs_io_state *ios) +{ + u64 length = ios->length; + struct _striping_info si; + unsigned devs_in_group = ios->layout->group_width * + ios->layout->mirrors_p1; + unsigned first_comp = 0; + int ret = 0; + + _calc_stripe_info(ios, ios->offset, &si); + + if (!ios->pages) { + if (ios->kern_buff) { + struct exofs_per_dev_state *per_dev = &ios->per_dev[0]; + + per_dev->offset = si.obj_offset; + per_dev->dev = si.dev; + + /* no cross device without page array */ + BUG_ON((ios->layout->group_width > 1) && + (si.unit_off + ios->length > + ios->layout->stripe_unit)); + } + ios->numdevs = ios->layout->mirrors_p1; + return 0; + } + + while (length) { + if (length < si.group_length) + si.group_length = length; + + ret = _prepare_one_group(ios, si.group_length, &si, first_comp); + if (unlikely(ret)) + goto out; + + length -= 
si.group_length; + + si.group_length = si.total_group_length; + si.unit_off = 0; + ++si.Major; + si.obj_offset = si.Major * ios->layout->stripe_unit * + ios->layout->group_depth; + + si.dev = (si.dev - (si.dev % devs_in_group)) + devs_in_group; + si.dev %= ios->layout->s_numdevs; + + first_comp += devs_in_group; + first_comp %= ios->layout->s_numdevs; + } + +out: + return ret; +} + int exofs_sbi_create(struct exofs_io_state *ios) { int i, ret; - for (i = 0; i < ios->sbi->s_numdevs; i++) { + for (i = 0; i < ios->layout->s_numdevs; i++) { struct osd_request *or; - or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL); + or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL); if (unlikely(!or)) { EXOFS_ERR("%s: osd_start_request failed\n", __func__); ret = -ENOMEM; @@ -233,10 +532,10 @@ int exofs_sbi_remove(struct exofs_io_state *ios) { int i, ret; - for (i = 0; i < ios->sbi->s_numdevs; i++) { + for (i = 0; i < ios->layout->s_numdevs; i++) { struct osd_request *or; - or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL); + or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL); if (unlikely(!or)) { EXOFS_ERR("%s: osd_start_request failed\n", __func__); ret = -ENOMEM; @@ -253,51 +552,74 @@ out: return ret; } -int exofs_sbi_write(struct exofs_io_state *ios) +static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp) { - int i, ret; + struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp]; + unsigned dev = ios->per_dev[cur_comp].dev; + unsigned last_comp = cur_comp + ios->layout->mirrors_p1; + int ret = 0; - for (i = 0; i < ios->sbi->s_numdevs; i++) { + if (ios->pages && !master_dev->length) + return 0; /* Just an empty slot */ + + for (; cur_comp < last_comp; ++cur_comp, ++dev) { + struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp]; struct osd_request *or; - or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL); + or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL); if (unlikely(!or)) { EXOFS_ERR("%s: osd_start_request failed\n", __func__); ret = -ENOMEM; goto out; } - ios->per_dev[i].or = or; - ios->numdevs++; + per_dev->or = or; + per_dev->offset = master_dev->offset; - if (ios->bio) { + if (ios->pages) { struct bio *bio; - if (i != 0) { + if (per_dev != master_dev) { bio = bio_kmalloc(GFP_KERNEL, - ios->bio->bi_max_vecs); + master_dev->bio->bi_max_vecs); if (unlikely(!bio)) { + EXOFS_DBGMSG( + "Faild to allocate BIO size=%u\n", + master_dev->bio->bi_max_vecs); ret = -ENOMEM; goto out; } - __bio_clone(bio, ios->bio); + __bio_clone(bio, master_dev->bio); bio->bi_bdev = NULL; bio->bi_next = NULL; - ios->per_dev[i].bio = bio; + per_dev->length = master_dev->length; + per_dev->bio = bio; + per_dev->dev = dev; } else { - bio = ios->bio; + bio = master_dev->bio; + /* FIXME: bio_set_dir() */ + bio->bi_rw |= (1 << BIO_RW); } - osd_req_write(or, &ios->obj, ios->offset, bio, - ios->length); -/* EXOFS_DBGMSG("write sync=%d\n", sync);*/ + osd_req_write(or, &ios->obj, per_dev->offset, bio, + per_dev->length); + EXOFS_DBGMSG("write(0x%llx) offset=0x%llx " + "length=0x%llx dev=%d\n", + _LLU(ios->obj.id), _LLU(per_dev->offset), + _LLU(per_dev->length), dev); } else if (ios->kern_buff) { - osd_req_write_kern(or, &ios->obj, ios->offset, + ret = osd_req_write_kern(or, &ios->obj, per_dev->offset, ios->kern_buff, ios->length); -/* EXOFS_DBGMSG("write_kern sync=%d\n", sync);*/ + if (unlikely(ret)) + goto out; + EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx " + "length=0x%llx dev=%d\n", + _LLU(ios->obj.id), _LLU(per_dev->offset), + _LLU(ios->length), dev); } else { 
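			/* Added note: with neither ios->pages nor
			 * ios->kern_buff set, the request degenerates to a
			 * pure attribute round-trip via the
			 * osd_req_set_attributes() call below, as when
			 * exofs_update_inode() writes the inode attribute. */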
osd_req_set_attributes(or, &ios->obj); -/* EXOFS_DBGMSG("set_attributes sync=%d\n", sync);*/ + EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n", + _LLU(ios->obj.id), ios->out_attr_len, dev); } if (ios->out_attr) @@ -308,54 +630,93 @@ int exofs_sbi_write(struct exofs_io_state *ios) osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len); } - ret = exofs_io_execute(ios); out: return ret; } -int exofs_sbi_read(struct exofs_io_state *ios) +int exofs_sbi_write(struct exofs_io_state *ios) { - int i, ret; + int i; + int ret; - for (i = 0; i < 1; i++) { - struct osd_request *or; - unsigned first_dev = (unsigned)ios->obj.id; + ret = _prepare_for_striping(ios); + if (unlikely(ret)) + return ret; - first_dev %= ios->sbi->s_numdevs; - or = osd_start_request(ios->sbi->s_ods[first_dev], GFP_KERNEL); - if (unlikely(!or)) { - EXOFS_ERR("%s: osd_start_request failed\n", __func__); - ret = -ENOMEM; - goto out; - } - ios->per_dev[i].or = or; - ios->numdevs++; + for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) { + ret = _sbi_write_mirror(ios, i); + if (unlikely(ret)) + return ret; + } - if (ios->bio) { - osd_req_read(or, &ios->obj, ios->offset, ios->bio, - ios->length); -/* EXOFS_DBGMSG("read sync=%d\n", sync);*/ - } else if (ios->kern_buff) { - osd_req_read_kern(or, &ios->obj, ios->offset, - ios->kern_buff, ios->length); -/* EXOFS_DBGMSG("read_kern sync=%d\n", sync);*/ - } else { - osd_req_get_attributes(or, &ios->obj); -/* EXOFS_DBGMSG("get_attributes sync=%d\n", sync);*/ - } + ret = exofs_io_execute(ios); + return ret; +} - if (ios->out_attr) - osd_req_add_set_attr_list(or, ios->out_attr, - ios->out_attr_len); +static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp) +{ + struct osd_request *or; + struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp]; + unsigned first_dev = (unsigned)ios->obj.id; - if (ios->in_attr) - osd_req_add_get_attr_list(or, ios->in_attr, - ios->in_attr_len); + if (ios->pages && !per_dev->length) + return 0; /* Just an empty slot */ + + first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1; + or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL); + if (unlikely(!or)) { + EXOFS_ERR("%s: osd_start_request failed\n", __func__); + return -ENOMEM; } - ret = exofs_io_execute(ios); + per_dev->or = or; + + if (ios->pages) { + osd_req_read(or, &ios->obj, per_dev->offset, + per_dev->bio, per_dev->length); + EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx" + " dev=%d\n", _LLU(ios->obj.id), + _LLU(per_dev->offset), _LLU(per_dev->length), + first_dev); + } else if (ios->kern_buff) { + int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset, + ios->kern_buff, ios->length); + EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx " + "length=0x%llx dev=%d ret=>%d\n", + _LLU(ios->obj.id), _LLU(per_dev->offset), + _LLU(ios->length), first_dev, ret); + if (unlikely(ret)) + return ret; + } else { + osd_req_get_attributes(or, &ios->obj); + EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n", + _LLU(ios->obj.id), ios->in_attr_len, first_dev); + } + if (ios->out_attr) + osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len); -out: + if (ios->in_attr) + osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len); + + return 0; +} + +int exofs_sbi_read(struct exofs_io_state *ios) +{ + int i; + int ret; + + ret = _prepare_for_striping(ios); + if (unlikely(ret)) + return ret; + + for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) { + ret = _sbi_read_mirror(ios, i); + if (unlikely(ret)) + return ret; + } + + ret = 
exofs_io_execute(ios); return ret; } @@ -380,42 +741,82 @@ int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr) return -EIO; } +static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp, + struct osd_attr *attr) +{ + int last_comp = cur_comp + ios->layout->mirrors_p1; + + for (; cur_comp < last_comp; ++cur_comp) { + struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp]; + struct osd_request *or; + + or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL); + if (unlikely(!or)) { + EXOFS_ERR("%s: osd_start_request failed\n", __func__); + return -ENOMEM; + } + per_dev->or = or; + + osd_req_set_attributes(or, &ios->obj); + osd_req_add_set_attr_list(or, attr, 1); + } + + return 0; +} + int exofs_oi_truncate(struct exofs_i_info *oi, u64 size) { struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info; struct exofs_io_state *ios; - struct osd_attr attr; - __be64 newsize; + struct exofs_trunc_attr { + struct osd_attr attr; + __be64 newsize; + } *size_attrs; + struct _striping_info si; int i, ret; - if (exofs_get_io_state(sbi, &ios)) - return -ENOMEM; + ret = exofs_get_io_state(&sbi->layout, &ios); + if (unlikely(ret)) + return ret; + + size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs), + GFP_KERNEL); + if (unlikely(!size_attrs)) { + ret = -ENOMEM; + goto out; + } ios->obj.id = exofs_oi_objno(oi); ios->cred = oi->i_cred; - newsize = cpu_to_be64(size); - attr = g_attr_logical_length; - attr.val_ptr = &newsize; + ios->numdevs = ios->layout->s_numdevs; + _calc_stripe_info(ios, size, &si); - for (i = 0; i < sbi->s_numdevs; i++) { - struct osd_request *or; + for (i = 0; i < ios->layout->group_width; ++i) { + struct exofs_trunc_attr *size_attr = &size_attrs[i]; + u64 obj_size; - or = osd_start_request(sbi->s_ods[i], GFP_KERNEL); - if (unlikely(!or)) { - EXOFS_ERR("%s: osd_start_request failed\n", __func__); - ret = -ENOMEM; - goto out; - } - ios->per_dev[i].or = or; - ios->numdevs++; + if (i < si.dev) + obj_size = si.obj_offset + + ios->layout->stripe_unit - si.unit_off; + else if (i == si.dev) + obj_size = si.obj_offset; + else /* i > si.dev */ + obj_size = si.obj_offset - si.unit_off; - osd_req_set_attributes(or, &ios->obj); - osd_req_add_set_attr_list(or, &attr, 1); + size_attr->newsize = cpu_to_be64(obj_size); + size_attr->attr = g_attr_logical_length; + size_attr->attr.val_ptr = &size_attr->newsize; + + ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1, + &size_attr->attr); + if (unlikely(ret)) + goto out; } ret = exofs_io_execute(ios); out: + kfree(size_attrs); exofs_put_io_state(ios); return ret; } diff --git a/fs/exofs/super.c b/fs/exofs/super.c index a1d1e77b12e..6cf5e4e84d6 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -210,7 +210,7 @@ int exofs_sync_fs(struct super_block *sb, int wait) sbi = sb->s_fs_info; fscb = &sbi->s_fscb; - ret = exofs_get_io_state(sbi, &ios); + ret = exofs_get_io_state(&sbi->layout, &ios); if (ret) goto out; @@ -264,12 +264,12 @@ static void _exofs_print_device(const char *msg, const char *dev_path, void exofs_free_sbi(struct exofs_sb_info *sbi) { - while (sbi->s_numdevs) { - int i = --sbi->s_numdevs; - struct osd_dev *od = sbi->s_ods[i]; + while (sbi->layout.s_numdevs) { + int i = --sbi->layout.s_numdevs; + struct osd_dev *od = sbi->layout.s_ods[i]; if (od) { - sbi->s_ods[i] = NULL; + sbi->layout.s_ods[i] = NULL; osduld_put_device(od); } } @@ -298,7 +298,8 @@ static void exofs_put_super(struct super_block *sb) msecs_to_jiffies(100)); } - _exofs_print_device("Unmounting", NULL, 
sbi->s_ods[0], sbi->s_pid); + _exofs_print_device("Unmounting", NULL, sbi->layout.s_ods[0], + sbi->layout.s_pid); exofs_free_sbi(sbi); sb->s_fs_info = NULL; @@ -307,6 +308,8 @@ static void exofs_put_super(struct super_block *sb) static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs, struct exofs_device_table *dt) { + u64 stripe_length; + sbi->data_map.odm_num_comps = le32_to_cpu(dt->dt_data_map.cb_num_comps); sbi->data_map.odm_stripe_unit = @@ -320,14 +323,63 @@ static int _read_and_match_data_map(struct exofs_sb_info *sbi, unsigned numdevs, sbi->data_map.odm_raid_algorithm = le32_to_cpu(dt->dt_data_map.cb_raid_algorithm); -/* FIXME: Hard coded mirror only for now. if not so do not mount */ - if ((sbi->data_map.odm_num_comps != numdevs) || - (sbi->data_map.odm_stripe_unit != EXOFS_BLKSIZE) || - (sbi->data_map.odm_raid_algorithm != PNFS_OSD_RAID_0) || - (sbi->data_map.odm_mirror_cnt != (numdevs - 1))) +/* FIXME: Only raid0 for now. if not so, do not mount */ + if (sbi->data_map.odm_num_comps != numdevs) { + EXOFS_ERR("odm_num_comps(%u) != numdevs(%u)\n", + sbi->data_map.odm_num_comps, numdevs); return -EINVAL; - else - return 0; + } + if (sbi->data_map.odm_raid_algorithm != PNFS_OSD_RAID_0) { + EXOFS_ERR("Only RAID_0 for now\n"); + return -EINVAL; + } + if (0 != (numdevs % (sbi->data_map.odm_mirror_cnt + 1))) { + EXOFS_ERR("Data Map wrong, numdevs=%d mirrors=%d\n", + numdevs, sbi->data_map.odm_mirror_cnt); + return -EINVAL; + } + + if (0 != (sbi->data_map.odm_stripe_unit & ~PAGE_MASK)) { + EXOFS_ERR("Stripe Unit(0x%llx)" + " must be Multples of PAGE_SIZE(0x%lx)\n", + _LLU(sbi->data_map.odm_stripe_unit), PAGE_SIZE); + return -EINVAL; + } + + sbi->layout.stripe_unit = sbi->data_map.odm_stripe_unit; + sbi->layout.mirrors_p1 = sbi->data_map.odm_mirror_cnt + 1; + + if (sbi->data_map.odm_group_width) { + sbi->layout.group_width = sbi->data_map.odm_group_width; + sbi->layout.group_depth = sbi->data_map.odm_group_depth; + if (!sbi->layout.group_depth) { + EXOFS_ERR("group_depth == 0 && group_width != 0\n"); + return -EINVAL; + } + sbi->layout.group_count = sbi->data_map.odm_num_comps / + sbi->layout.mirrors_p1 / + sbi->data_map.odm_group_width; + } else { + if (sbi->data_map.odm_group_depth) { + printk(KERN_NOTICE "Warning: group_depth ignored " + "group_width == 0 && group_depth == %d\n", + sbi->data_map.odm_group_depth); + sbi->data_map.odm_group_depth = 0; + } + sbi->layout.group_width = sbi->data_map.odm_num_comps / + sbi->layout.mirrors_p1; + sbi->layout.group_depth = -1; + sbi->layout.group_count = 1; + } + + stripe_length = (u64)sbi->layout.group_width * sbi->layout.stripe_unit; + if (stripe_length >= (1ULL << 32)) { + EXOFS_ERR("Total Stripe length(0x%llx)" + " >= 32bit is not supported\n", _LLU(stripe_length)); + return -EINVAL; + } + + return 0; } /* @odi is valid only as long as @fscb_dev is valid */ @@ -361,7 +413,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi, { struct exofs_sb_info *sbi = *psbi; struct osd_dev *fscb_od; - struct osd_obj_id obj = {.partition = sbi->s_pid, + struct osd_obj_id obj = {.partition = sbi->layout.s_pid, .id = EXOFS_DEVTABLE_ID}; struct exofs_device_table *dt; unsigned table_bytes = table_count * sizeof(dt->dt_dev_table[0]) + @@ -376,9 +428,9 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi, return -ENOMEM; } - fscb_od = sbi->s_ods[0]; - sbi->s_ods[0] = NULL; - sbi->s_numdevs = 0; + fscb_od = sbi->layout.s_ods[0]; + sbi->layout.s_ods[0] = NULL; + sbi->layout.s_numdevs = 0; ret = 
exofs_read_kern(fscb_od, sbi->s_cred, &obj, 0, dt, table_bytes); if (unlikely(ret)) { EXOFS_ERR("ERROR: reading device table\n"); @@ -397,14 +449,15 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi, goto out; if (likely(numdevs > 1)) { - unsigned size = numdevs * sizeof(sbi->s_ods[0]); + unsigned size = numdevs * sizeof(sbi->layout.s_ods[0]); sbi = krealloc(sbi, sizeof(*sbi) + size, GFP_KERNEL); if (unlikely(!sbi)) { ret = -ENOMEM; goto out; } - memset(&sbi->s_ods[1], 0, size - sizeof(sbi->s_ods[0])); + memset(&sbi->layout.s_ods[1], 0, + size - sizeof(sbi->layout.s_ods[0])); *psbi = sbi; } @@ -427,8 +480,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi, * line. We always keep them in device-table order. */ if (fscb_od && osduld_device_same(fscb_od, &odi)) { - sbi->s_ods[i] = fscb_od; - ++sbi->s_numdevs; + sbi->layout.s_ods[i] = fscb_od; + ++sbi->layout.s_numdevs; fscb_od = NULL; continue; } @@ -441,8 +494,8 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info **psbi, goto out; } - sbi->s_ods[i] = od; - ++sbi->s_numdevs; + sbi->layout.s_ods[i] = od; + ++sbi->layout.s_numdevs; /* Read the fscb of the other devices to make sure the FS * partition is there. @@ -499,9 +552,15 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) goto free_sbi; } - sbi->s_ods[0] = od; - sbi->s_numdevs = 1; - sbi->s_pid = opts->pid; + /* Default layout in case we do not have a device-table */ + sbi->layout.stripe_unit = PAGE_SIZE; + sbi->layout.mirrors_p1 = 1; + sbi->layout.group_width = 1; + sbi->layout.group_depth = -1; + sbi->layout.group_count = 1; + sbi->layout.s_ods[0] = od; + sbi->layout.s_numdevs = 1; + sbi->layout.s_pid = opts->pid; sbi->s_timeout = opts->timeout; /* fill in some other data by hand */ @@ -514,7 +573,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) sb->s_bdev = NULL; sb->s_dev = 0; - obj.partition = sbi->s_pid; + obj.partition = sbi->layout.s_pid; obj.id = EXOFS_SUPER_ID; exofs_make_credential(sbi->s_cred, &obj); @@ -578,13 +637,13 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) goto free_sbi; } - _exofs_print_device("Mounting", opts->dev_name, sbi->s_ods[0], - sbi->s_pid); + _exofs_print_device("Mounting", opts->dev_name, sbi->layout.s_ods[0], + sbi->layout.s_pid); return 0; free_sbi: EXOFS_ERR("Unable to mount exofs on %s pid=0x%llx err=%d\n", - opts->dev_name, sbi->s_pid, ret); + opts->dev_name, sbi->layout.s_pid, ret); exofs_free_sbi(sbi); return ret; } @@ -627,7 +686,7 @@ static int exofs_statfs(struct dentry *dentry, struct kstatfs *buf) uint8_t cred_a[OSD_CAP_LEN]; int ret; - ret = exofs_get_io_state(sbi, &ios); + ret = exofs_get_io_state(&sbi->layout, &ios); if (ret) { EXOFS_DBGMSG("exofs_get_io_state failed.\n"); return ret; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index af7b62699ea..4cedc91ec59 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -361,14 +361,11 @@ struct ext4_new_group_data { so set the magic i_delalloc_reserve_flag after taking the inode allocation semaphore for */ #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 - /* Call ext4_da_update_reserve_space() after successfully - allocating the blocks */ -#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008 /* caller is from the direct IO path, request to creation of an unitialized extents if not allocated, split the uninitialized extent if blocks has been preallocated already*/ -#define EXT4_GET_BLOCKS_DIO 0x0010 -#define EXT4_GET_BLOCKS_CONVERT 0x0020 +#define 
EXT4_GET_BLOCKS_DIO 0x0008 +#define EXT4_GET_BLOCKS_CONVERT 0x0010 #define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\ EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) /* Convert extent to initialized after direct IO complete */ @@ -1017,7 +1014,7 @@ struct ext4_sb_info { atomic_t s_lock_busy; /* locality groups */ - struct ext4_locality_group *s_locality_groups; + struct ext4_locality_group __percpu *s_locality_groups; /* for write statistics */ unsigned long s_sectors_written_start; @@ -1443,6 +1440,8 @@ extern int ext4_block_truncate_page(handle_t *handle, extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); extern qsize_t *ext4_get_reserved_space(struct inode *inode); extern int flush_aio_dio_completed_IO(struct inode *inode); +extern void ext4_da_update_reserve_space(struct inode *inode, + int used, int quota_claim); /* ioctl.c */ extern long ext4_ioctl(struct file *, unsigned int, unsigned long); extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 7d7b74e9468..765a4826b11 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3132,7 +3132,19 @@ out: unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, newblock + max_blocks, allocated - max_blocks); + allocated = max_blocks; } + + /* + * If we have done fallocate with the offset that is already + * delayed allocated, we would have block reservation + * and quota reservation done in the delayed write path. + * But fallocate would have already updated quota and block + * count for this offset. So cancel these reservation + */ + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) + ext4_da_update_reserve_space(inode, allocated, 0); + map_out: set_buffer_mapped(bh_result); out1: @@ -3368,9 +3380,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, /* previous routine could use block we allocated */ newblock = ext_pblock(&newex); allocated = ext4_ext_get_actual_len(&newex); + if (allocated > max_blocks) + allocated = max_blocks; set_buffer_new(bh_result); /* + * Update reserved blocks/metadata blocks after successful + * block allocation which had been deferred till now. + */ + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) + ext4_da_update_reserve_space(inode, allocated, 1); + + /* * Cache the extent and update transaction to commit on fdatasync only * when it is _not_ an uninitialized extent. */ diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 9630583cef2..56eee3d796c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -116,11 +116,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp) * devices or filesystem images. */ memset(buf, 0, sizeof(buf)); - path.mnt = mnt->mnt_parent; - path.dentry = mnt->mnt_mountpoint; - path_get(&path); + path.mnt = mnt; + path.dentry = mnt->mnt_root; cp = d_path(&path, buf, sizeof(buf)); - path_put(&path); if (!IS_ERR(cp)) { memcpy(sbi->s_es->s_last_mounted, cp, sizeof(sbi->s_es->s_last_mounted)); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c818972c830..e11952404e0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1053,11 +1053,12 @@ static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock) * Called with i_data_sem down, which is important since we can call * ext4_discard_preallocations() from here. 
*/ -static void ext4_da_update_reserve_space(struct inode *inode, int used) +void ext4_da_update_reserve_space(struct inode *inode, + int used, int quota_claim) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); - int mdb_free = 0; + int mdb_free = 0, allocated_meta_blocks = 0; spin_lock(&ei->i_block_reservation_lock); if (unlikely(used > ei->i_reserved_data_blocks)) { @@ -1073,6 +1074,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) ei->i_reserved_data_blocks -= used; used += ei->i_allocated_meta_blocks; ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; + allocated_meta_blocks = ei->i_allocated_meta_blocks; ei->i_allocated_meta_blocks = 0; percpu_counter_sub(&sbi->s_dirtyblocks_counter, used); @@ -1090,9 +1092,23 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); /* Update quota subsystem */ - vfs_dq_claim_block(inode, used); - if (mdb_free) - vfs_dq_release_reservation_block(inode, mdb_free); + if (quota_claim) { + vfs_dq_claim_block(inode, used); + if (mdb_free) + vfs_dq_release_reservation_block(inode, mdb_free); + } else { + /* + * We did fallocate with an offset that is already delayed + * allocated. So on delayed allocated writeback we should + * not update the quota for allocated blocks. But then + * converting an fallocate region to initialized region would + * have caused a metadata allocation. So claim quota for + * that + */ + if (allocated_meta_blocks) + vfs_dq_claim_block(inode, allocated_meta_blocks); + vfs_dq_release_reservation_block(inode, mdb_free + used); + } /* * If we have done all the pending block allocations and if @@ -1292,18 +1308,20 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, */ EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; } - } + /* + * Update reserved blocks/metadata blocks after successful + * block allocation which had been deferred till now. We don't + * support fallocate for non extent files. So we can update + * reserve space here. + */ + if ((retval > 0) && + (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) + ext4_da_update_reserve_space(inode, retval, 1); + } if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) EXT4_I(inode)->i_delalloc_reserved_flag = 0; - /* - * Update reserved blocks/metadata blocks after successful - * block allocation which had been deferred till now. - */ - if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE)) - ext4_da_update_reserve_space(inode, retval); - up_write((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && buffer_mapped(bh)) { int ret = check_block_validity(inode, "file system " @@ -1835,24 +1853,12 @@ repeat: * later. Real quota accounting is done at pages writeout * time. */ - if (vfs_dq_reserve_block(inode, md_needed + 1)) { - /* - * We tend to badly over-estimate the amount of - * metadata blocks which are needed, so if we have - * reserved any metadata blocks, try to force out the - * inode and see if we have any better luck. 
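
The quota_claim parameter threaded through ext4_da_update_reserve_space() selects between two accounting paths: ordinary delayed-allocation writeback claims the whole reservation, while writeback over an already-fallocated range claims only the metadata newly allocated during extent conversion and releases the rest. A small decision-table sketch of the two branches, in plain C with printf standing in for the vfs_dq_* calls:

#include <stdio.h>

/* Models only the branch structure of ext4_da_update_reserve_space();
 * as in the kernel code, 'used' already includes the allocated
 * metadata blocks by the time the quota calls are made. */
static void quota_update(int used, int mdb_free, int allocated_meta_blocks,
                         int quota_claim)
{
    if (quota_claim) {
        printf("claim %d block(s)\n", used);
        if (mdb_free)
            printf("release %d reserved metadata block(s)\n", mdb_free);
    } else {
        /* fallocate already charged the data blocks to quota */
        if (allocated_meta_blocks)
            printf("claim %d newly allocated metadata block(s)\n",
                   allocated_meta_blocks);
        printf("release %d reserved block(s)\n", mdb_free + used);
    }
}

int main(void)
{
    quota_update(8, 1, 2, 1);   /* normal delalloc writeback */
    quota_update(8, 1, 2, 0);   /* writeback over a fallocated range */
    return 0;
}
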
- */ - if (md_reserved && retries++ <= 3) - goto retry; + if (vfs_dq_reserve_block(inode, md_needed + 1)) return -EDQUOT; - } if (ext4_claim_free_blocks(sbi, md_needed + 1)) { vfs_dq_release_reservation_block(inode, md_needed + 1); if (ext4_should_retry_alloc(inode->i_sb, &retries)) { - retry: - if (md_reserved) - write_inode_now(inode, (retries == 3)); yield(); goto repeat; } @@ -2213,10 +2219,10 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) * variables are updated after the blocks have been allocated. */ new.b_state = 0; - get_blocks_flags = (EXT4_GET_BLOCKS_CREATE | - EXT4_GET_BLOCKS_DELALLOC_RESERVE); + get_blocks_flags = EXT4_GET_BLOCKS_CREATE; if (mpd->b_state & (1 << BH_Delay)) - get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE; + get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; + blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, &new, get_blocks_flags); if (blks < 0) { @@ -3032,7 +3038,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - int ret, retries = 0; + int ret, retries = 0, quota_retries = 0; struct page *page; pgoff_t index; unsigned from, to; @@ -3091,6 +3097,22 @@ retry: if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; + + if ((ret == -EDQUOT) && + EXT4_I(inode)->i_reserved_meta_blocks && + (quota_retries++ < 3)) { + /* + * Since we often over-estimate the number of meta + * data blocks required, we may sometimes get a + * spurios out of quota error even though there would + * be enough space once we write the data blocks and + * find out how many meta data blocks were _really_ + * required. So try forcing the inode write to see if + * that helps. + */ + write_inode_now(inode, (quota_retries == 3)); + goto retry; + } out: return ret; } diff --git a/fs/fcntl.c b/fs/fcntl.c index 2cf93ec40a6..97e01dc0d95 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -618,60 +618,90 @@ static DEFINE_RWLOCK(fasync_lock); static struct kmem_cache *fasync_cache __read_mostly; /* - * fasync_helper() is used by almost all character device drivers - * to set up the fasync queue. It returns negative on error, 0 if it did - * no changes and positive if it added/deleted the entry. + * Remove a fasync entry. If successfully removed, return + * positive and clear the FASYNC flag. If no entry exists, + * do nothing and return 0. + * + * NOTE! It is very important that the FASYNC flag always + * match the state "is the filp on a fasync list". + * + * We always take the 'filp->f_lock', in since fasync_lock + * needs to be irq-safe. */ -int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) +static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp) { struct fasync_struct *fa, **fp; - struct fasync_struct *new = NULL; int result = 0; - if (on) { - new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); - if (!new) - return -ENOMEM; + spin_lock(&filp->f_lock); + write_lock_irq(&fasync_lock); + for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { + if (fa->fa_file != filp) + continue; + *fp = fa->fa_next; + kmem_cache_free(fasync_cache, fa); + filp->f_flags &= ~FASYNC; + result = 1; + break; } + write_unlock_irq(&fasync_lock); + spin_unlock(&filp->f_lock); + return result; +} + +/* + * Add a fasync entry. Return negative on error, positive if + * added, and zero if did nothing but change an existing one. + * + * NOTE! 
It is very important that the FASYNC flag always + * match the state "is the filp on a fasync list". + */ +static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp) +{ + struct fasync_struct *new, *fa, **fp; + int result = 0; + + new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); + if (!new) + return -ENOMEM; - /* - * We need to take f_lock first since it's not an IRQ-safe - * lock. - */ spin_lock(&filp->f_lock); write_lock_irq(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { - if (fa->fa_file == filp) { - if(on) { - fa->fa_fd = fd; - kmem_cache_free(fasync_cache, new); - } else { - *fp = fa->fa_next; - kmem_cache_free(fasync_cache, fa); - result = 1; - } - goto out; - } + if (fa->fa_file != filp) + continue; + fa->fa_fd = fd; + kmem_cache_free(fasync_cache, new); + goto out; } - if (on) { - new->magic = FASYNC_MAGIC; - new->fa_file = filp; - new->fa_fd = fd; - new->fa_next = *fapp; - *fapp = new; - result = 1; - } + new->magic = FASYNC_MAGIC; + new->fa_file = filp; + new->fa_fd = fd; + new->fa_next = *fapp; + *fapp = new; + result = 1; + filp->f_flags |= FASYNC; + out: - if (on) - filp->f_flags |= FASYNC; - else - filp->f_flags &= ~FASYNC; write_unlock_irq(&fasync_lock); spin_unlock(&filp->f_lock); return result; } +/* + * fasync_helper() is used by almost all character device drivers + * to set up the fasync queue, and for regular files by the file + * lease code. It returns negative on error, 0 if it did no changes + * and positive if it added/deleted the entry. + */ +int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) +{ + if (!on) + return fasync_remove_entry(filp, fapp); + return fasync_add_entry(fd, filp, fapp); +} + EXPORT_SYMBOL(fasync_helper); void __kill_fasync(struct fasync_struct *fa, int sig, int band) diff --git a/fs/file.c b/fs/file.c index 87e129030ab..38039af6766 100644 --- a/fs/file.c +++ b/fs/file.c @@ -478,7 +478,7 @@ repeat: error = fd; #if 1 /* Sanity check */ - if (rcu_dereference(fdt->fd[fd]) != NULL) { + if (rcu_dereference_raw(fdt->fd[fd]) != NULL) { printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd); rcu_assign_pointer(fdt->fd[fd], NULL); } diff --git a/fs/file_table.c b/fs/file_table.c index 69652c5bd5f..b98404b5438 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -253,6 +253,7 @@ void __fput(struct file *file) if (file->f_op && file->f_op->release) file->f_op->release(inode, file); security_file_free(file); + ima_file_free(file); if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) cdev_put(inode->i_cdev); fops_put(file->f_op); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 51d9e33d634..eb7e9423691 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -865,13 +865,10 @@ static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, down_read(&fc->killsb); err = -ENOENT; - if (!fc->sb) - goto err_unlock; - - err = fuse_reverse_inval_inode(fc->sb, outarg.ino, - outarg.off, outarg.len); - -err_unlock: + if (fc->sb) { + err = fuse_reverse_inval_inode(fc->sb, outarg.ino, + outarg.off, outarg.len); + } up_read(&fc->killsb); return err; @@ -884,10 +881,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_inval_entry_out outarg; - int err = -EINVAL; - char buf[FUSE_NAME_MAX+1]; + int err = -ENOMEM; + char *buf; struct qstr name; + buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); + if (!buf) + goto err; + + err = -EINVAL; if (size < sizeof(outarg)) goto err; @@ -910,16 +912,14 @@ static 
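
The fasync rework splits fasync_helper() into fasync_remove_entry() and fasync_add_entry(); both scan the list through a pointer-to-pointer so a match can be unlinked without a separate 'prev' variable, and both keep the FASYNC flag in lockstep with list membership. A self-contained sketch of that double-pointer idiom, using generic names rather than the kernel types:

#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

/* Unlink the first node matching key, mirroring the loop shape of
 * fasync_remove_entry(): fp always points at the link to rewrite. */
static int remove_entry(struct node **head, int key)
{
    struct node *n, **fp;

    for (fp = head; (n = *fp) != NULL; fp = &n->next) {
        if (n->key != key)
            continue;
        *fp = n->next;   /* splice out without tracking 'prev' */
        free(n);
        return 1;        /* positive: an entry was removed */
    }
    return 0;            /* nothing to do */
}

static int add_entry(struct node **head, int key)
{
    struct node *n = malloc(sizeof(*n));
    if (!n)
        return -1;       /* negative on error, as in fasync_add_entry() */
    n->key = key;
    n->next = *head;
    *head = n;           /* push at the head, like *fapp = new */
    return 1;
}

int main(void)
{
    struct node *list = NULL;
    add_entry(&list, 42);
    printf("removed=%d removed_again=%d\n",
           remove_entry(&list, 42), remove_entry(&list, 42));
    return 0;
}

The same shape appears verbatim in the kernel loops above: for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next).
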
int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, down_read(&fc->killsb); err = -ENOENT; - if (!fc->sb) - goto err_unlock; - - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name); - -err_unlock: + if (fc->sb) + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name); up_read(&fc->killsb); + kfree(buf); return err; err: + kfree(buf); fuse_copy_finish(cs); return err; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c18913a777a..a9f5e137f1d 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -828,6 +828,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, if (!page) break; + if (mapping_writably_mapped(mapping)) + flush_dcache_page(page); + pagefault_disable(); tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); pagefault_enable(); diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 7b8da941526..0c1d0b82dcf 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -1061,8 +1061,8 @@ out: int gfs2_releasepage(struct page *page, gfp_t gfp_mask) { - struct inode *aspace = page->mapping->host; - struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info; + struct address_space *mapping = page->mapping; + struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct buffer_head *bh, *head; struct gfs2_bufdata *bd; diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 6d47379e794..583e823307a 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock, *ptr++ = cpu_to_be64(bn++); break; } - } while (state != ALLOC_DATA); + } while ((state != ALLOC_DATA) || !dblock); ip->i_height = height; gfs2_add_inode_blocks(&ip->i_inode, alloced); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index f455a03a09e..454d4b4eb36 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -19,7 +19,6 @@ #include <linux/list.h> #include <linux/wait.h> #include <linux/module.h> -#include <linux/rwsem.h> #include <asm/uaccess.h> #include <linux/seq_file.h> #include <linux/debugfs.h> @@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl); #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0) static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); -static DECLARE_RWSEM(gfs2_umount_flush_sem); static struct dentry *gfs2_root; static struct workqueue_struct *glock_workqueue; struct workqueue_struct *gfs2_delete_workqueue; @@ -154,12 +152,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp, static void glock_free(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_sbd; - struct inode *aspace = gl->gl_aspace; + struct address_space *mapping = gfs2_glock2aspace(gl); + struct kmem_cache *cachep = gfs2_glock_cachep; - if (aspace) - gfs2_aspace_put(aspace); + GLOCK_BUG_ON(gl, mapping && mapping->nrpages); trace_gfs2_glock_put(gl); - sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl); + if (mapping) + cachep = gfs2_glock_aspace_cachep; + sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl); } /** @@ -712,7 +712,6 @@ static void glock_work_func(struct work_struct *work) finish_xmote(gl, gl->gl_reply); drop_ref = 1; } - down_read(&gfs2_umount_flush_sem); spin_lock(&gl->gl_spin); if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && gl->gl_state != LM_ST_UNLOCKED && @@ -725,7 +724,6 @@ static void glock_work_func(struct work_struct *work) } run_queue(gl, 0); spin_unlock(&gl->gl_spin); - up_read(&gfs2_umount_flush_sem); if (!delay || queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 
0) gfs2_glock_put(gl); @@ -750,10 +748,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, const struct gfs2_glock_operations *glops, int create, struct gfs2_glock **glp) { + struct super_block *s = sdp->sd_vfs; struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type }; struct gfs2_glock *gl, *tmp; unsigned int hash = gl_hash(sdp, &name); - int error; + struct address_space *mapping; read_lock(gl_lock_addr(hash)); gl = search_bucket(hash, sdp, &name); @@ -765,10 +764,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, if (!create) return -ENOENT; - gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL); + if (glops->go_flags & GLOF_ASPACE) + gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL); + else + gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL); if (!gl) return -ENOMEM; + atomic_inc(&sdp->sd_glock_disposal); gl->gl_flags = 0; gl->gl_name = name; atomic_set(&gl->gl_ref, 1); @@ -783,18 +786,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, gl->gl_tchange = jiffies; gl->gl_object = NULL; gl->gl_sbd = sdp; - gl->gl_aspace = NULL; INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); INIT_WORK(&gl->gl_delete, delete_work_func); - /* If this glock protects actual on-disk data or metadata blocks, - create a VFS inode to manage the pages/buffers holding them. */ - if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) { - gl->gl_aspace = gfs2_aspace_get(sdp); - if (!gl->gl_aspace) { - error = -ENOMEM; - goto fail; - } + mapping = gfs2_glock2aspace(gl); + if (mapping) { + mapping->a_ops = &gfs2_meta_aops; + mapping->host = s->s_bdev->bd_inode; + mapping->flags = 0; + mapping_set_gfp_mask(mapping, GFP_NOFS); + mapping->assoc_mapping = NULL; + mapping->backing_dev_info = s->s_bdi; + mapping->writeback_index = 0; } write_lock(gl_lock_addr(hash)); @@ -811,10 +814,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, *glp = gl; return 0; - -fail: - kmem_cache_free(gfs2_glock_cachep, gl); - return error; } /** @@ -1509,35 +1508,13 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp) void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) { - unsigned long t; unsigned int x; - int cont; - t = jiffies; - - for (;;) { - cont = 0; - for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { - if (examine_bucket(clear_glock, sdp, x)) - cont = 1; - } - - if (!cont) - break; - - if (time_after_eq(jiffies, - t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) { - fs_warn(sdp, "Unmount seems to be stalled. 
" - "Dumping lock state...\n"); - gfs2_dump_lockstate(sdp); - t = jiffies; - } - - down_write(&gfs2_umount_flush_sem); - invalidate_inodes(sdp->sd_vfs); - up_write(&gfs2_umount_flush_sem); - msleep(10); - } + for (x = 0; x < GFS2_GL_HASH_SIZE; x++) + examine_bucket(clear_glock, sdp, x); + flush_workqueue(glock_workqueue); + wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0); + gfs2_dump_lockstate(sdp); } void gfs2_glock_finish_truncate(struct gfs2_inode *ip) @@ -1681,7 +1658,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) dtime *= 1000000/HZ; /* demote time in uSec */ if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) dtime = 0; - gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n", + gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n", state2str(gl->gl_state), gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 13f0bd22813..2bda1911b15 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -123,7 +123,7 @@ struct lm_lockops { int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname); void (*lm_unmount) (struct gfs2_sbd *sdp); void (*lm_withdraw) (struct gfs2_sbd *sdp); - void (*lm_put_lock) (struct kmem_cache *cachep, void *gl); + void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl); unsigned int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state, unsigned int flags); void (*lm_cancel) (struct gfs2_glock *gl); @@ -180,6 +180,13 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl) return gl->gl_state == LM_ST_SHARED; } +static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl) +{ + if (gl->gl_ops->go_flags & GLOF_ASPACE) + return (struct address_space *)(gl + 1); + return NULL; +} + int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, const struct gfs2_glock_operations *glops, int create, struct gfs2_glock **glp); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 78554acc060..38e3749d476 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -87,7 +87,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) static void rgrp_go_sync(struct gfs2_glock *gl) { - struct address_space *metamapping = gl->gl_aspace->i_mapping; + struct address_space *metamapping = gfs2_glock2aspace(gl); int error; if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) @@ -113,7 +113,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl) static void rgrp_go_inval(struct gfs2_glock *gl, int flags) { - struct address_space *mapping = gl->gl_aspace->i_mapping; + struct address_space *mapping = gfs2_glock2aspace(gl); BUG_ON(!(flags & DIO_METADATA)); gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); @@ -134,7 +134,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags) static void inode_go_sync(struct gfs2_glock *gl) { struct gfs2_inode *ip = gl->gl_object; - struct address_space *metamapping = gl->gl_aspace->i_mapping; + struct address_space *metamapping = gfs2_glock2aspace(gl); int error; if (ip && !S_ISREG(ip->i_inode.i_mode)) @@ -183,7 +183,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); if (flags & DIO_METADATA) { - struct address_space *mapping = gl->gl_aspace->i_mapping; + struct address_space *mapping = gfs2_glock2aspace(gl); truncate_inode_pages(mapping, 0); if (ip) { set_bit(GIF_INVALID, &ip->i_flags); @@ -282,7 +282,8 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) 
static int rgrp_go_demote_ok(const struct gfs2_glock *gl) { - return !gl->gl_aspace->i_mapping->nrpages; + const struct address_space *mapping = (const struct address_space *)(gl + 1); + return !mapping->nrpages; } /** @@ -387,8 +388,7 @@ static void iopen_go_callback(struct gfs2_glock *gl) struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; if (gl->gl_demote_state == LM_ST_UNLOCKED && - gl->gl_state == LM_ST_SHARED && - ip && test_bit(GIF_USER, &ip->i_flags)) { + gl->gl_state == LM_ST_SHARED && ip) { gfs2_glock_hold(gl); if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) gfs2_glock_put_nolock(gl); @@ -407,6 +407,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = { .go_dump = inode_go_dump, .go_type = LM_TYPE_INODE, .go_min_hold_time = HZ / 5, + .go_flags = GLOF_ASPACE, }; const struct gfs2_glock_operations gfs2_rgrp_glops = { @@ -418,6 +419,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = { .go_dump = gfs2_rgrp_dump, .go_type = LM_TYPE_RGRP, .go_min_hold_time = HZ / 5, + .go_flags = GLOF_ASPACE, }; const struct gfs2_glock_operations gfs2_trans_glops = { diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 4792200978c..b8025e51cab 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -162,6 +162,8 @@ struct gfs2_glock_operations { void (*go_callback) (struct gfs2_glock *gl); const int go_type; const unsigned long go_min_hold_time; + const unsigned long go_flags; +#define GLOF_ASPACE 1 }; enum { @@ -225,7 +227,6 @@ struct gfs2_glock { struct gfs2_sbd *gl_sbd; - struct inode *gl_aspace; struct list_head gl_ail_list; atomic_t gl_ail_count; struct delayed_work gl_work; @@ -258,7 +259,6 @@ enum { GIF_INVALID = 0, GIF_QD_LOCKED = 1, GIF_SW_PAGED = 3, - GIF_USER = 4, /* user inode, not metadata addr space */ }; @@ -451,7 +451,6 @@ struct gfs2_tune { unsigned int gt_quota_quantum; /* Secs between syncs to quota file */ unsigned int gt_new_files_jdata; unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ - unsigned int gt_stall_secs; /* Detects trouble! 
*/ unsigned int gt_complain_secs; unsigned int gt_statfs_quantum; unsigned int gt_statfs_slow; @@ -544,6 +543,8 @@ struct gfs2_sbd { struct gfs2_holder sd_live_gh; struct gfs2_glock *sd_rename_gl; struct gfs2_glock *sd_trans_gl; + wait_queue_head_t sd_glock_wait; + atomic_t sd_glock_disposal; /* Inode Stuff */ diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 6e220f4eee7..b1bf2694fb2 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -45,7 +45,7 @@ static int iget_test(struct inode *inode, void *opaque) struct gfs2_inode *ip = GFS2_I(inode); u64 *no_addr = opaque; - if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags)) + if (ip->i_no_addr == *no_addr) return 1; return 0; @@ -58,7 +58,6 @@ static int iget_set(struct inode *inode, void *opaque) inode->i_ino = (unsigned long)*no_addr; ip->i_no_addr = *no_addr; - set_bit(GIF_USER, &ip->i_flags); return 0; } @@ -84,7 +83,7 @@ static int iget_skip_test(struct inode *inode, void *opaque) struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_skip_data *data = opaque; - if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){ + if (ip->i_no_addr == data->no_addr) { if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){ data->skipped = 1; return 0; @@ -103,7 +102,6 @@ static int iget_skip_set(struct inode *inode, void *opaque) return 1; inode->i_ino = (unsigned long)(data->no_addr); ip->i_no_addr = data->no_addr; - set_bit(GIF_USER, &ip->i_flags); return 0; } diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 46df988323b..569b46240f6 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -21,6 +21,7 @@ static void gdlm_ast(void *arg) { struct gfs2_glock *gl = arg; unsigned ret = gl->gl_state; + struct gfs2_sbd *sdp = gl->gl_sbd; BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); @@ -29,7 +30,12 @@ static void gdlm_ast(void *arg) switch (gl->gl_lksb.sb_status) { case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ - kmem_cache_free(gfs2_glock_cachep, gl); + if (gl->gl_ops->go_flags & GLOF_ASPACE) + kmem_cache_free(gfs2_glock_aspace_cachep, gl); + else + kmem_cache_free(gfs2_glock_cachep, gl); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); return; case -DLM_ECANCEL: /* Cancel while getting lock */ ret |= LM_OUT_CANCELED; @@ -164,14 +170,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl, return LM_OUT_ASYNC; } -static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr) +static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl) { - struct gfs2_glock *gl = ptr; - struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; + struct gfs2_sbd *sdp = gl->gl_sbd; + struct lm_lockstruct *ls = &sdp->sd_lockstruct; int error; if (gl->gl_lksb.sb_lkid == 0) { kmem_cache_free(cachep, gl); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); return; } diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index de97632ba32..adc260fbea9 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -528,9 +528,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) gfs2_pin(sdp, bd->bd_bh); tr->tr_num_databuf_new++; sdp->sd_log_num_databuf++; - list_add(&le->le_list, &sdp->sd_log_le_databuf); + list_add_tail(&le->le_list, &sdp->sd_log_le_databuf); } else { - list_add(&le->le_list, &sdp->sd_log_le_ordered); + list_add_tail(&le->le_list, &sdp->sd_log_le_ordered); } out: gfs2_log_unlock(sdp); diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 5b31f7741a8..a88fadc704b 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -52,6 
+52,22 @@ static void gfs2_init_glock_once(void *foo) atomic_set(&gl->gl_ail_count, 0); } +static void gfs2_init_gl_aspace_once(void *foo) +{ + struct gfs2_glock *gl = foo; + struct address_space *mapping = (struct address_space *)(gl + 1); + + gfs2_init_glock_once(gl); + memset(mapping, 0, sizeof(*mapping)); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + spin_lock_init(&mapping->tree_lock); + spin_lock_init(&mapping->i_mmap_lock); + INIT_LIST_HEAD(&mapping->private_list); + spin_lock_init(&mapping->private_lock); + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); +} + /** * init_gfs2_fs - Register GFS2 as a filesystem * @@ -78,6 +94,14 @@ static int __init init_gfs2_fs(void) if (!gfs2_glock_cachep) goto fail; + gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)", + sizeof(struct gfs2_glock) + + sizeof(struct address_space), + 0, 0, gfs2_init_gl_aspace_once); + + if (!gfs2_glock_aspace_cachep) + goto fail; + gfs2_inode_cachep = kmem_cache_create("gfs2_inode", sizeof(struct gfs2_inode), 0, SLAB_RECLAIM_ACCOUNT| @@ -144,6 +168,9 @@ fail: if (gfs2_inode_cachep) kmem_cache_destroy(gfs2_inode_cachep); + if (gfs2_glock_aspace_cachep) + kmem_cache_destroy(gfs2_glock_aspace_cachep); + if (gfs2_glock_cachep) kmem_cache_destroy(gfs2_glock_cachep); @@ -169,6 +196,7 @@ static void __exit exit_gfs2_fs(void) kmem_cache_destroy(gfs2_rgrpd_cachep); kmem_cache_destroy(gfs2_bufdata_cachep); kmem_cache_destroy(gfs2_inode_cachep); + kmem_cache_destroy(gfs2_glock_aspace_cachep); kmem_cache_destroy(gfs2_glock_cachep); gfs2_sys_uninit(); diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 6f68a5f18eb..0bb12c80937 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -93,49 +93,13 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb return err; } -static const struct address_space_operations aspace_aops = { +const struct address_space_operations gfs2_meta_aops = { .writepage = gfs2_aspace_writepage, .releasepage = gfs2_releasepage, .sync_page = block_sync_page, }; /** - * gfs2_aspace_get - Create and initialize a struct inode structure - * @sdp: the filesystem the aspace is in - * - * Right now a struct inode is just a struct inode. Maybe Linux - * will supply a more lightweight address space construct (that works) - * in the future. - * - * Make sure pages/buffers in this aspace aren't in high memory. 
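
The initializer above depends on the new slab layout: each object in the "gfs2_glock (aspace)" cache is a glock followed immediately by its address_space, so (gl + 1) addresses the mapping without an extra pointer in the glock. The inverse cast appears in meta_io.h further down, where gfs2_mapping2sbd() recovers the glock as ((struct gfs2_glock *)mapping) - 1. A standalone illustration of the adjacent-struct trick, with toy types in place of the kernel ones:

#include <stdio.h>
#include <stdlib.h>

struct lock { int id; };          /* stand-in for struct gfs2_glock */
struct space { long nrpages; };   /* stand-in for struct address_space */

/* One allocation holds both structs back to back, like the
 * "gfs2_glock (aspace)" slab. */
static struct lock *lock_alloc(void)
{
    return malloc(sizeof(struct lock) + sizeof(struct space));
}

static struct space *lock2space(struct lock *l)
{
    return (struct space *)(l + 1);      /* cf. gfs2_glock2aspace() */
}

static struct lock *space2lock(struct space *s)
{
    return ((struct lock *)s) - 1;       /* cf. gfs2_mapping2sbd()'s cast */
}

int main(void)
{
    struct lock *l = lock_alloc();
    struct space *s = lock2space(l);

    l->id = 7;
    s->nrpages = 0;
    printf("recovered id=%d\n", space2lock(s)->id);
    free(l);
    return 0;
}

The payoff is one allocation and one pointer saved per glock; it is safe only because the allocator's alignment of the first struct also satisfies the second, which holds for the slab objects here.
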
- * - * Returns: the aspace - */ - -struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp) -{ - struct inode *aspace; - struct gfs2_inode *ip; - - aspace = new_inode(sdp->sd_vfs); - if (aspace) { - mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS); - aspace->i_mapping->a_ops = &aspace_aops; - aspace->i_size = MAX_LFS_FILESIZE; - ip = GFS2_I(aspace); - clear_bit(GIF_USER, &ip->i_flags); - insert_inode_hash(aspace); - } - return aspace; -} - -void gfs2_aspace_put(struct inode *aspace) -{ - remove_inode_hash(aspace); - iput(aspace); -} - -/** * gfs2_meta_sync - Sync all buffers associated with a glock * @gl: The glock * @@ -143,7 +107,7 @@ void gfs2_aspace_put(struct inode *aspace) void gfs2_meta_sync(struct gfs2_glock *gl) { - struct address_space *mapping = gl->gl_aspace->i_mapping; + struct address_space *mapping = gfs2_glock2aspace(gl); int error; filemap_fdatawrite(mapping); @@ -164,7 +128,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl) struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) { - struct address_space *mapping = gl->gl_aspace->i_mapping; + struct address_space *mapping = gfs2_glock2aspace(gl); struct gfs2_sbd *sdp = gl->gl_sbd; struct page *page; struct buffer_head *bh; @@ -344,8 +308,10 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta) { - struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host); + struct address_space *mapping = bh->b_page->mapping; + struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct gfs2_bufdata *bd = bh->b_private; + if (test_clear_buffer_pinned(bh)) { list_del_init(&bd->bd_le.le_list); if (meta) { diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index de270c2f9b6..6a1d9ba1641 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h @@ -37,8 +37,16 @@ static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh, 0, from_head - to_head); } -struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp); -void gfs2_aspace_put(struct inode *aspace); +extern const struct address_space_operations gfs2_meta_aops; + +static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) +{ + struct inode *inode = mapping->host; + if (mapping->a_ops == &gfs2_meta_aops) + return (((struct gfs2_glock *)mapping) - 1)->gl_sbd; + else + return inode->i_sb->s_fs_info; +} void gfs2_meta_sync(struct gfs2_glock *gl); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index edfee24f363..a054b526dc0 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -65,7 +65,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt) gt->gt_quota_scale_den = 1; gt->gt_new_files_jdata = 0; gt->gt_max_readahead = 1 << 18; - gt->gt_stall_secs = 600; gt->gt_complain_secs = 10; } @@ -82,6 +81,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) gfs2_tune_init(&sdp->sd_tune); + init_waitqueue_head(&sdp->sd_glock_wait); + atomic_set(&sdp->sd_glock_disposal, 0); spin_lock_init(&sdp->sd_statfs_spin); spin_lock_init(&sdp->sd_rindex_spin); @@ -723,7 +724,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) goto fail; } - error = -EINVAL; + error = -EUSERS; if (!gfs2_jindex_size(sdp)) { fs_err(sdp, "no journals!\n"); goto fail_jindex; @@ -983,9 +984,17 @@ static const match_table_t nolock_tokens = { { Opt_err, NULL }, }; +static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl) +{ + struct gfs2_sbd *sdp = gl->gl_sbd; + kmem_cache_free(cachep, gl); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + 
wake_up(&sdp->sd_glock_wait); +} + static const struct lm_lockops nolock_ops = { .lm_proto_name = "lock_nolock", - .lm_put_lock = kmem_cache_free, + .lm_put_lock = nolock_put_lock, .lm_tokens = &nolock_tokens, }; @@ -1231,10 +1240,9 @@ fail_sb: fail_locking: init_locking(sdp, &mount_gh, UNDO); fail_lm: + invalidate_inodes(sb); gfs2_gl_hash_clear(sdp); gfs2_lm_unmount(sdp); - while (invalidate_inodes(sb)) - yield(); fail_sys: gfs2_sys_fs_del(sdp); fail: diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 78f73ca1ef3..4e64352d49d 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -976,121 +976,62 @@ out: } /** - * gfs2_readlinki - return the contents of a symlink - * @ip: the symlink's inode - * @buf: a pointer to the buffer to be filled - * @len: a pointer to the length of @buf + * gfs2_follow_link - Follow a symbolic link + * @dentry: The dentry of the link + * @nd: Data that we pass to vfs_follow_link() * - * If @buf is too small, a piece of memory is kmalloc()ed and needs - * to be freed by the caller. + * This can handle symlinks of any size. * - * Returns: errno + * Returns: 0 on success or error code */ -static int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len) +static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd) { + struct gfs2_inode *ip = GFS2_I(dentry->d_inode); struct gfs2_holder i_gh; struct buffer_head *dibh; unsigned int x; + char *buf; int error; gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); error = gfs2_glock_nq(&i_gh); if (error) { gfs2_holder_uninit(&i_gh); - return error; + nd_set_link(nd, ERR_PTR(error)); + return NULL; } if (!ip->i_disksize) { gfs2_consist_inode(ip); - error = -EIO; + buf = ERR_PTR(-EIO); goto out; } error = gfs2_meta_inode_buffer(ip, &dibh); - if (error) + if (error) { + buf = ERR_PTR(error); goto out; - - x = ip->i_disksize + 1; - if (x > *len) { - *buf = kmalloc(x, GFP_NOFS); - if (!*buf) { - error = -ENOMEM; - goto out_brelse; - } } - memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x); - *len = x; - -out_brelse: + x = ip->i_disksize + 1; + buf = kmalloc(x, GFP_NOFS); + if (!buf) + buf = ERR_PTR(-ENOMEM); + else + memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), x); brelse(dibh); out: gfs2_glock_dq_uninit(&i_gh); - return error; -} - -/** - * gfs2_readlink - Read the value of a symlink - * @dentry: the symlink - * @buf: the buffer to read the symlink data into - * @size: the size of the buffer - * - * Returns: errno - */ - -static int gfs2_readlink(struct dentry *dentry, char __user *user_buf, - int user_size) -{ - struct gfs2_inode *ip = GFS2_I(dentry->d_inode); - char array[GFS2_FAST_NAME_SIZE], *buf = array; - unsigned int len = GFS2_FAST_NAME_SIZE; - int error; - - error = gfs2_readlinki(ip, &buf, &len); - if (error) - return error; - - if (user_size > len - 1) - user_size = len - 1; - - if (copy_to_user(user_buf, buf, user_size)) - error = -EFAULT; - else - error = user_size; - - if (buf != array) - kfree(buf); - - return error; + nd_set_link(nd, buf); + return NULL; } -/** - * gfs2_follow_link - Follow a symbolic link - * @dentry: The dentry of the link - * @nd: Data that we pass to vfs_follow_link() - * - * This can handle symlinks of any size. It is optimised for symlinks - * under GFS2_FAST_NAME_SIZE. 
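
In the rewritten gfs2_follow_link() above, nd_set_link() receives either a kmalloc'ed buffer or an errno encoded with ERR_PTR(), and the paired gfs2_put_link() in the next hunk frees only real buffers. For readers unfamiliar with the convention, here is a userspace imitation of the ERR_PTR/IS_ERR encoding; it is simplified, but like the kernel it reserves the top 4095 pointer values for error codes:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
    /* error values occupy the last page of the address space */
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static char *read_link(int fail)
{
    if (fail)
        return ERR_PTR(-EIO);   /* like buf = ERR_PTR(-EIO) above */
    return strdup("target/path");
}

static void put_link(char *s)
{
    if (!IS_ERR(s))             /* like gfs2_put_link() */
        free(s);
}

int main(void)
{
    char *s = read_link(0);

    if (IS_ERR(s))
        printf("error %ld\n", -PTR_ERR(s));
    else
        printf("link -> %s\n", s);
    put_link(s);
    return 0;
}
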
- * - * Returns: 0 on success or error code - */ - -static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd) +static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p) { - struct gfs2_inode *ip = GFS2_I(dentry->d_inode); - char array[GFS2_FAST_NAME_SIZE], *buf = array; - unsigned int len = GFS2_FAST_NAME_SIZE; - int error; - - error = gfs2_readlinki(ip, &buf, &len); - if (!error) { - error = vfs_follow_link(nd, buf); - if (buf != array) - kfree(buf); - } - - return ERR_PTR(error); + char *s = nd_get_link(nd); + if (!IS_ERR(s)) + kfree(s); } /** @@ -1425,8 +1366,9 @@ const struct inode_operations gfs2_dir_iops = { }; const struct inode_operations gfs2_symlink_iops = { - .readlink = gfs2_readlink, + .readlink = generic_readlink, .follow_link = gfs2_follow_link, + .put_link = gfs2_put_link, .permission = gfs2_permission, .setattr = gfs2_setattr, .getattr = gfs2_getattr, diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 0608f490c29..503b842f3ba 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -591,11 +591,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip) u64 rgrp_count = ip->i_disksize; int error; - if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) { - gfs2_consist_inode(ip); - return -EIO; - } - + do_div(rgrp_count, sizeof(struct gfs2_rindex)); clear_rgrpdi(sdp); file_ra_state_init(&ra_state, inode->i_mapping); @@ -915,7 +911,7 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd) struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip) { BUG_ON(ip->i_alloc != NULL); - ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_KERNEL); + ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS); return ip->i_alloc; } diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index c282ad41f3d..e5e22629da6 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -21,6 +21,7 @@ #include <linux/gfs2_ondisk.h> #include <linux/crc32.h> #include <linux/time.h> +#include <linux/wait.h> #include "gfs2.h" #include "incore.h" @@ -721,8 +722,7 @@ static int gfs2_write_inode(struct inode *inode, int sync) int ret = 0; /* Check this is a "normal" inode, etc */ - if (!test_bit(GIF_USER, &ip->i_flags) || - (current->flags & PF_MEMALLOC)) + if (current->flags & PF_MEMALLOC) return 0; ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); if (ret) @@ -859,6 +859,7 @@ restart: gfs2_clear_rgrpd(sdp); gfs2_jindex_free(sdp); /* Take apart glock structures and buffer lists */ + invalidate_inodes(sdp->sd_vfs); gfs2_gl_hash_clear(sdp); /* Unmount the locking protocol */ gfs2_lm_unmount(sdp); @@ -1193,7 +1194,7 @@ static void gfs2_drop_inode(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); - if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) { + if (inode->i_nlink) { struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags)) clear_nlink(inode); @@ -1211,18 +1212,12 @@ static void gfs2_clear_inode(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); - /* This tells us its a "real" inode and not one which only - * serves to contain an address space (see rgrp.c, meta_io.c) - * which therefore doesn't have its own glocks. 
- */ - if (test_bit(GIF_USER, &ip->i_flags)) { - ip->i_gl->gl_object = NULL; - gfs2_glock_put(ip->i_gl); - ip->i_gl = NULL; - if (ip->i_iopen_gh.gh_gl) { - ip->i_iopen_gh.gh_gl->gl_object = NULL; - gfs2_glock_dq_uninit(&ip->i_iopen_gh); - } + ip->i_gl->gl_object = NULL; + gfs2_glock_put(ip->i_gl); + ip->i_gl = NULL; + if (ip->i_iopen_gh.gh_gl) { + ip->i_iopen_gh.gh_gl->gl_object = NULL; + gfs2_glock_dq_uninit(&ip->i_iopen_gh); } } @@ -1357,9 +1352,6 @@ static void gfs2_delete_inode(struct inode *inode) struct gfs2_holder gh; int error; - if (!test_bit(GIF_USER, &ip->i_flags)) - goto out; - error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); if (unlikely(error)) { gfs2_glock_dq_uninit(&ip->i_iopen_gh); diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 0dc34621f6a..a0db1c94317 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -478,7 +478,6 @@ TUNE_ATTR(complain_secs, 0); TUNE_ATTR(statfs_slow, 0); TUNE_ATTR(new_files_jdata, 0); TUNE_ATTR(quota_simul_sync, 1); -TUNE_ATTR(stall_secs, 1); TUNE_ATTR(statfs_quantum, 1); TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store); @@ -491,7 +490,6 @@ static struct attribute *tune_attrs[] = { &tune_attr_complain_secs.attr, &tune_attr_statfs_slow.attr, &tune_attr_quota_simul_sync.attr, - &tune_attr_stall_secs.attr, &tune_attr_statfs_quantum.attr, &tune_attr_quota_scale.attr, &tune_attr_new_files_jdata.attr, diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index f6a7efa34eb..226f2bfbf16 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -21,6 +21,7 @@ #include "util.h" struct kmem_cache *gfs2_glock_cachep __read_mostly; +struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly; struct kmem_cache *gfs2_inode_cachep __read_mostly; struct kmem_cache *gfs2_bufdata_cachep __read_mostly; struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h index 33e96b0ce9a..b432e04600d 100644 --- a/fs/gfs2/util.h +++ b/fs/gfs2/util.h @@ -145,6 +145,7 @@ gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__); extern struct kmem_cache *gfs2_glock_cachep; +extern struct kmem_cache *gfs2_glock_aspace_cachep; extern struct kmem_cache *gfs2_inode_cachep; extern struct kmem_cache *gfs2_bufdata_cachep; extern struct kmem_cache *gfs2_rgrpd_cachep; diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c index 1aa88c4e096..6a2f04bf3df 100644 --- a/fs/hpfs/anode.c +++ b/fs/hpfs/anode.c @@ -353,7 +353,7 @@ int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos, } int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos, - unsigned len, char *buf) + unsigned len, const char *buf) { struct buffer_head *bh; char *data; diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c index 940d6d150be..67d9d36b3d5 100644 --- a/fs/hpfs/dentry.c +++ b/fs/hpfs/dentry.c @@ -20,8 +20,8 @@ static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr) if (l == 1) if (qstr->name[0]=='.') goto x; if (l == 2) if (qstr->name[0]=='.' 
|| qstr->name[1]=='.') goto x; - hpfs_adjust_length((char *)qstr->name, &l); - /*if (hpfs_chk_name((char *)qstr->name,&l))*/ + hpfs_adjust_length(qstr->name, &l); + /*if (hpfs_chk_name(qstr->name,&l))*/ /*return -ENAMETOOLONG;*/ /*return -ENOENT;*/ x: @@ -38,14 +38,16 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst { unsigned al=a->len; unsigned bl=b->len; - hpfs_adjust_length((char *)a->name, &al); - /*hpfs_adjust_length((char *)b->name, &bl);*/ + hpfs_adjust_length(a->name, &al); + /*hpfs_adjust_length(b->name, &bl);*/ /* 'a' is the qstr of an already existing dentry, so the name * must be valid. 'b' must be validated first. */ - if (hpfs_chk_name((char *)b->name, &bl)) return 1; - if (hpfs_compare_names(dentry->d_sb, (char *)a->name, al, (char *)b->name, bl, 0)) return 1; + if (hpfs_chk_name(b->name, &bl)) + return 1; + if (hpfs_compare_names(dentry->d_sb, a->name, al, b->name, bl, 0)) + return 1; return 0; } diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 8865c94f55f..26e3964a4b8 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -59,7 +59,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) struct hpfs_dirent *de; int lc; long old_pos; - char *tempname; + unsigned char *tempname; int c1, c2 = 0; int ret = 0; @@ -158,11 +158,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) { filp->f_pos = old_pos; - if (tempname != (char *)de->name) kfree(tempname); + if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); goto out; } - if (tempname != (char *)de->name) kfree(tempname); + if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); } out: @@ -187,7 +187,7 @@ out: struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; @@ -197,7 +197,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name struct hpfs_inode_info *hpfs_result; lock_kernel(); - if ((err = hpfs_chk_name((char *)name, &len))) { + if ((err = hpfs_chk_name(name, &len))) { if (err == -ENAMETOOLONG) { unlock_kernel(); return ERR_PTR(-ENAMETOOLONG); @@ -209,7 +209,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name * '.' and '..' will never be passed here. */ - de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *) name, len, NULL, &qbh); + de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh); /* * This is not really a bailout, just means file not found. @@ -250,7 +250,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name hpfs_result = hpfs_i(result); if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; - hpfs_decide_conv(result, (char *)name, len); + hpfs_decide_conv(result, name, len); if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. 
Send me some info on these structures"); diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c index fe83c2b7d2d..9b2ffadfc8c 100644 --- a/fs/hpfs/dnode.c +++ b/fs/hpfs/dnode.c @@ -158,7 +158,8 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno /* Add an entry to dnode and don't care if it grows over 2048 bytes */ -struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, unsigned char *name, +struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, + const unsigned char *name, unsigned namelen, secno down_ptr) { struct hpfs_dirent *de; @@ -223,7 +224,7 @@ static void fix_up_ptrs(struct super_block *s, struct dnode *d) /* Add an entry to dnode and do dnode splitting if required */ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, - unsigned char *name, unsigned namelen, + const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de, dnode_secno down_ptr) { struct quad_buffer_head qbh, qbh1, qbh2; @@ -231,7 +232,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, dnode_secno adno, rdno; struct hpfs_dirent *de; struct hpfs_dirent nde; - char *nname; + unsigned char *nname; int h; int pos; struct buffer_head *bh; @@ -305,7 +306,9 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, pos++; } copy_de(new_de = &nde, de); - memcpy(name = nname, de->name, namelen = de->namelen); + memcpy(nname, de->name, de->namelen); + name = nname; + namelen = de->namelen; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0); @@ -368,7 +371,8 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, * I hope, now it's finally bug-free. */ -int hpfs_add_dirent(struct inode *i, unsigned char *name, unsigned namelen, +int hpfs_add_dirent(struct inode *i, + const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de, int cdepth) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); @@ -897,7 +901,8 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, /* Find a dirent in tree */ -struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, char *name, unsigned len, +struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, + const unsigned char *name, unsigned len, dnode_secno *dd, struct quad_buffer_head *qbh) { struct dnode *dnode; @@ -988,8 +993,8 @@ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno) struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, struct fnode *f, struct quad_buffer_head *qbh) { - char *name1; - char *name2; + unsigned char *name1; + unsigned char *name2; int name1len, name2len; struct dnode *d; dnode_secno dno, downd; diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c index 547a8384571..45e53d972b4 100644 --- a/fs/hpfs/ea.c +++ b/fs/hpfs/ea.c @@ -62,8 +62,8 @@ static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size) return ret; } -static void set_indirect_ea(struct super_block *s, int ano, secno a, char *data, - int size) +static void set_indirect_ea(struct super_block *s, int ano, secno a, + const char *data, int size) { hpfs_ea_write(s, a, ano, 0, size, data); } @@ -186,7 +186,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si * This driver can't change sizes of eas ('cause I just don't need it). 
*/ -void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data, int size) +void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key, + const char *data, int size) { fnode_secno fno = inode->i_ino; struct super_block *s = inode->i_sb; diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 701ca54c086..97bf738cd5d 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -215,7 +215,7 @@ secno hpfs_bplus_lookup(struct super_block *, struct inode *, struct bplus_heade secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned); void hpfs_remove_btree(struct super_block *, struct bplus_header *); int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *); -int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, char *); +int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, const char *); void hpfs_ea_remove(struct super_block *, secno, int, unsigned); void hpfs_truncate_btree(struct super_block *, secno, int, unsigned); void hpfs_remove_fnode(struct super_block *, fnode_secno fno); @@ -244,13 +244,17 @@ extern const struct file_operations hpfs_dir_ops; void hpfs_add_pos(struct inode *, loff_t *); void hpfs_del_pos(struct inode *, loff_t *); -struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, unsigned char *, unsigned, secno); -int hpfs_add_dirent(struct inode *, unsigned char *, unsigned, struct hpfs_dirent *, int); +struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, + const unsigned char *, unsigned, secno); +int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned, + struct hpfs_dirent *, int); int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int); void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *); dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno); struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *); -struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, char *, unsigned, dnode_secno *, struct quad_buffer_head *); +struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, + const unsigned char *, unsigned, dnode_secno *, + struct quad_buffer_head *); void hpfs_remove_dtree(struct super_block *, dnode_secno); struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *); @@ -259,7 +263,8 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct f void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned); int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int); char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *); -void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int); +void hpfs_set_ea(struct inode *, struct fnode *, const char *, + const char *, int); /* file.c */ @@ -282,7 +287,7 @@ void hpfs_delete_inode(struct inode *); unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *); unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *); -char *hpfs_load_code_page(struct super_block *, secno); +unsigned char *hpfs_load_code_page(struct super_block *, secno); secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp); struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **); struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct 
buffer_head **); @@ -292,12 +297,13 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino); /* name.c */ unsigned char hpfs_upcase(unsigned char *, unsigned char); -int hpfs_chk_name(unsigned char *, unsigned *); -char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int); -int hpfs_compare_names(struct super_block *, unsigned char *, unsigned, unsigned char *, unsigned, int); -int hpfs_is_name_long(unsigned char *, unsigned); -void hpfs_adjust_length(unsigned char *, unsigned *); -void hpfs_decide_conv(struct inode *, unsigned char *, unsigned); +int hpfs_chk_name(const unsigned char *, unsigned *); +unsigned char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int); +int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned, + const unsigned char *, unsigned, int); +int hpfs_is_name_long(const unsigned char *, unsigned); +void hpfs_adjust_length(const unsigned char *, unsigned *); +void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned); /* namei.c */ diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index fe703ae46bc..ff90affb94e 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -46,7 +46,7 @@ void hpfs_read_inode(struct inode *i) struct fnode *fnode; struct super_block *sb = i->i_sb; struct hpfs_inode_info *hpfs_inode = hpfs_i(i); - unsigned char *ea; + void *ea; int ea_size; if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) { @@ -112,7 +112,7 @@ void hpfs_read_inode(struct inode *i) } } if (fnode->dirflag) { - unsigned n_dnodes, n_subdirs; + int n_dnodes, n_subdirs; i->i_mode |= S_IFDIR; i->i_op = &hpfs_dir_iops; i->i_fop = &hpfs_dir_ops; diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c index c4724589b2e..840d033ecee 100644 --- a/fs/hpfs/map.c +++ b/fs/hpfs/map.c @@ -35,7 +35,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, * lowercasing table */ -char *hpfs_load_code_page(struct super_block *s, secno cps) +unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) { struct buffer_head *bh; secno cpds; @@ -71,7 +71,7 @@ char *hpfs_load_code_page(struct super_block *s, secno cps) brelse(bh); return NULL; } - ptr = (char *)cpd + cpd->offs[cpi] + 6; + ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6; if (!(cp_table = kmalloc(256, GFP_KERNEL))) { printk("HPFS: out of memory for code page table\n"); brelse(bh); @@ -217,7 +217,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno, if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD))) if (hpfs_sb(s)->sb_chk) { unsigned p, pp = 0; - unsigned char *d = (char *)dnode; + unsigned char *d = (unsigned char *)dnode; int b = 0; if (dnode->magic != DNODE_MAGIC) { hpfs_error(s, "bad magic on dnode %08x", secno); diff --git a/fs/hpfs/name.c b/fs/hpfs/name.c index 1f4a964384e..f24736d7a43 100644 --- a/fs/hpfs/name.c +++ b/fs/hpfs/name.c @@ -8,16 +8,16 @@ #include "hpfs_fn.h" -static char *text_postfix[]={ +static const char *text_postfix[]={ ".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF", ".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS", ".RC", ".TEX", ".TXT", ".Y", ""}; -static char *text_prefix[]={ +static const char *text_prefix[]={ "AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ", "MAKEFILE", "READ.ME", "README", "TERMCAP", ""}; -void hpfs_decide_conv(struct inode *inode, unsigned char *name, unsigned len) +void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len) { struct 
hpfs_inode_info *hpfs_inode = hpfs_i(inode); int i; @@ -71,7 +71,7 @@ static inline unsigned char locase(unsigned char *dir, unsigned char a) return dir[a]; } -int hpfs_chk_name(unsigned char *name, unsigned *len) +int hpfs_chk_name(const unsigned char *name, unsigned *len) { int i; if (*len > 254) return -ENAMETOOLONG; @@ -83,10 +83,10 @@ int hpfs_chk_name(unsigned char *name, unsigned *len) return 0; } -char *hpfs_translate_name(struct super_block *s, unsigned char *from, +unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from, unsigned len, int lc, int lng) { - char *to; + unsigned char *to; int i; if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) { printk("HPFS: Long name flag mismatch - name "); @@ -103,8 +103,9 @@ char *hpfs_translate_name(struct super_block *s, unsigned char *from, return to; } -int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1, - unsigned char *n2, unsigned l2, int last) +int hpfs_compare_names(struct super_block *s, + const unsigned char *n1, unsigned l1, + const unsigned char *n2, unsigned l2, int last) { unsigned l = l1 < l2 ? l1 : l2; unsigned i; @@ -120,7 +121,7 @@ int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1, return 0; } -int hpfs_is_name_long(unsigned char *name, unsigned len) +int hpfs_is_name_long(const unsigned char *name, unsigned len) { int i,j; for (i = 0; i < len && name[i] != '.'; i++) @@ -134,7 +135,7 @@ int hpfs_is_name_long(unsigned char *name, unsigned len) /* OS/2 clears dots and spaces at the end of file name, so we have to */ -void hpfs_adjust_length(unsigned char *name, unsigned *len) +void hpfs_adjust_length(const unsigned char *name, unsigned *len) { if (!*len) return; if (*len == 1 && name[0] == '.') return; diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 82b9c4ba9ed..11c2b4080f6 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -11,7 +11,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh0; struct buffer_head *bh; @@ -24,7 +24,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) int r; struct hpfs_dirent dee; int err; - if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; + if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err; lock_kernel(); err = -ENOSPC; fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); @@ -62,7 +62,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) result->i_mode &= ~0222; mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail3; if (r == -1) { @@ -121,7 +121,7 @@ bail: static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct inode *result = NULL; struct buffer_head *bh; @@ -130,7 +130,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc int r; struct hpfs_dirent dee; int err; - if ((err = hpfs_chk_name((char *)name, &len))) + if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? 
-EINVAL : err; lock_kernel(); err = -ENOSPC; @@ -155,7 +155,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_op = &hpfs_file_iops; result->i_fop = &hpfs_file_ops; result->i_nlink = 1; - hpfs_decide_conv(result, (char *)name, len); + hpfs_decide_conv(result, name, len); hpfs_i(result)->i_parent_dir = dir->i_ino; result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); result->i_ctime.tv_nsec = 0; @@ -170,7 +170,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc hpfs_i(result)->mmu_private = 0; mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail2; if (r == -1) { @@ -211,7 +211,7 @@ bail: static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct buffer_head *bh; struct fnode *fnode; @@ -220,7 +220,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t struct hpfs_dirent dee; struct inode *result = NULL; int err; - if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; + if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err; if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM; if (!new_valid_dev(rdev)) return -EINVAL; @@ -256,7 +256,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t init_special_inode(result, mode, rdev); mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail2; if (r == -1) { @@ -289,7 +289,7 @@ bail: static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink) { - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct buffer_head *bh; struct fnode *fnode; @@ -298,7 +298,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy struct hpfs_dirent dee; struct inode *result; int err; - if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; + if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err; lock_kernel(); if (hpfs_sb(dir->i_sb)->sb_eas < 2) { unlock_kernel(); @@ -335,7 +335,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_data.a_ops = &hpfs_symlink_aops; mutex_lock(&hpfs_i(dir)->i_mutex); - r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); + r = hpfs_add_dirent(dir, name, len, &dee, 0); if (r == 1) goto bail2; if (r == -1) { @@ -345,7 +345,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy fnode->len = len; memcpy(fnode->name, name, len > 15 ? 
15 : len); fnode->up = dir->i_ino; - hpfs_set_ea(result, fnode, "SYMLINK", (char *)symlink, strlen(symlink)); + hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink)); mark_buffer_dirty(bh); brelse(bh); @@ -369,7 +369,7 @@ bail: static int hpfs_unlink(struct inode *dir, struct dentry *dentry) { - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; @@ -381,12 +381,12 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) int err; lock_kernel(); - hpfs_adjust_length((char *)name, &len); + hpfs_adjust_length(name, &len); again: mutex_lock(&hpfs_i(inode)->i_parent_mutex); mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; - de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); + de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) goto out; @@ -413,22 +413,25 @@ again: mutex_unlock(&hpfs_i(dir)->i_mutex); mutex_unlock(&hpfs_i(inode)->i_parent_mutex); - d_drop(dentry); - spin_lock(&dentry->d_lock); - if (atomic_read(&dentry->d_count) > 1 || - generic_permission(inode, MAY_WRITE, NULL) || + dentry_unhash(dentry); + if (!d_unhashed(dentry)) { + dput(dentry); + unlock_kernel(); + return -ENOSPC; + } + if (generic_permission(inode, MAY_WRITE, NULL) || !S_ISREG(inode->i_mode) || get_write_access(inode)) { - spin_unlock(&dentry->d_lock); d_rehash(dentry); + dput(dentry); } else { struct iattr newattrs; - spin_unlock(&dentry->d_lock); /*printk("HPFS: truncating file before delete.\n");*/ newattrs.ia_size = 0; newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; err = notify_change(dentry, &newattrs); put_write_access(inode); + dput(dentry); if (!err) goto again; } @@ -451,7 +454,7 @@ out: static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) { - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; @@ -462,12 +465,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) int err; int r; - hpfs_adjust_length((char *)name, &len); + hpfs_adjust_length(name, &len); lock_kernel(); mutex_lock(&hpfs_i(inode)->i_parent_mutex); mutex_lock(&hpfs_i(dir)->i_mutex); err = -ENOENT; - de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); + de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); if (!de) goto out; @@ -546,10 +549,10 @@ const struct address_space_operations hpfs_symlink_aops = { static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - char *old_name = (char *)old_dentry->d_name.name; - int old_len = old_dentry->d_name.len; - char *new_name = (char *)new_dentry->d_name.name; - int new_len = new_dentry->d_name.len; + const unsigned char *old_name = old_dentry->d_name.name; + unsigned old_len = old_dentry->d_name.len; + const unsigned char *new_name = new_dentry->d_name.name; + unsigned new_len = new_dentry->d_name.len; struct inode *i = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct quad_buffer_head qbh, qbh1; @@ -560,9 +563,9 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct buffer_head *bh; struct fnode *fnode; int err; - if ((err = hpfs_chk_name((char *)new_name, &new_len))) return err; + if ((err = hpfs_chk_name(new_name, &new_len))) return err; err = 0; - hpfs_adjust_length((char *)old_name, &old_len); + 
hpfs_adjust_length(old_name, &old_len); lock_kernel(); /* order doesn't matter, due to VFS exclusion */ @@ -579,7 +582,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto end1; } - if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) { + if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { hpfs_error(i->i_sb, "lookup succeeded but map dirent failed"); err = -ENOENT; goto end1; @@ -590,7 +593,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_inode) { int r; if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) { - if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, (char *)new_name, new_len, NULL, &qbh1))) { + if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, new_name, new_len, NULL, &qbh1))) { clear_nlink(new_inode); copy_de(nde, &de); memcpy(nde->name, new_name, new_len); @@ -618,7 +621,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, } if (new_dir == old_dir) - if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) { + if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) { hpfs_unlock_creation(i->i_sb); hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); err = -ENOENT; @@ -648,7 +651,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, brelse(bh); } hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv; - hpfs_decide_conv(i, (char *)new_name, new_len); + hpfs_decide_conv(i, new_name, new_len); end1: if (old_dir != new_dir) mutex_unlock(&hpfs_i(new_dir)->i_mutex); diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index a5089a6dd67..2e4dfa8593d 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -646,22 +646,27 @@ static const struct super_operations hppfs_sbops = { static int hppfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { - struct dentry *proc_dentry; - - proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; + struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer, buflen); } static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct dentry *proc_dentry; - - proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; + struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd); } +static void hppfs_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) +{ + struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; + + if (proc_dentry->d_inode->i_op->put_link) + proc_dentry->d_inode->i_op->put_link(proc_dentry, nd, cookie); +} + static const struct inode_operations hppfs_dir_iops = { .lookup = hppfs_lookup, }; @@ -669,6 +674,7 @@ static const struct inode_operations hppfs_dir_iops = { static const struct inode_operations hppfs_link_iops = { .readlink = hppfs_readlink, .follow_link = hppfs_follow_link, + .put_link = hppfs_put_link, }; static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) @@ -712,7 +718,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent) struct vfsmount *proc_mnt; int err = -ENOENT; - proc_mnt = do_kern_mount("proc", 0, "proc", NULL); + proc_mnt = mntget(current->nsproxy->pid_ns->proc_mnt); if (IS_ERR(proc_mnt)) goto out; diff --git a/fs/internal.h b/fs/internal.h index e96a1667d74..8a03a5447bd 100644 --- 
a/fs/internal.h +++ b/fs/internal.h @@ -70,6 +70,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); extern void __init mnt_init(void); +extern spinlock_t vfsmount_lock; + /* * fs_struct.c */ diff --git a/fs/libfs.c b/fs/libfs.c index 6e8d17e1dc4..9e50bcf5585 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -338,28 +338,14 @@ int simple_readpage(struct file *file, struct page *page) return 0; } -int simple_prepare_write(struct file *file, struct page *page, - unsigned from, unsigned to) -{ - if (!PageUptodate(page)) { - if (to - from != PAGE_CACHE_SIZE) - zero_user_segments(page, - 0, from, - to, PAGE_CACHE_SIZE); - } - return 0; -} - int simple_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct page *page; pgoff_t index; - unsigned from; index = pos >> PAGE_CACHE_SHIFT; - from = pos & (PAGE_CACHE_SIZE - 1); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -367,43 +353,59 @@ int simple_write_begin(struct file *file, struct address_space *mapping, *pagep = page; - return simple_prepare_write(file, page, from, from+len); -} - -static int simple_commit_write(struct file *file, struct page *page, - unsigned from, unsigned to) -{ - struct inode *inode = page->mapping->host; - loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; + if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { + unsigned from = pos & (PAGE_CACHE_SIZE - 1); - if (!PageUptodate(page)) - SetPageUptodate(page); - /* - * No need to use i_size_read() here, the i_size - * cannot change under us because we hold the i_mutex. - */ - if (pos > inode->i_size) - i_size_write(inode, pos); - set_page_dirty(page); + zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE); + } return 0; } +/** + * simple_write_end - .write_end helper for non-block-device FSes + * @available: See .write_end of address_space_operations + * @file: " + * @mapping: " + * @pos: " + * @len: " + * @copied: " + * @page: " + * @fsdata: " + * + * simple_write_end does the minimum needed for updating a page after writing is + * done. It has the same API signature as the .write_end of + * address_space_operations vector. So it can just be set onto .write_end for + * FSes that don't need any other processing. i_mutex is assumed to be held. + * Block based filesystems should use generic_write_end(). + * NOTE: Even though i_size might get updated by this function, mark_inode_dirty + * is not called, so a filesystem that actually does store data in .write_inode + * should extend on what's done here with a call to mark_inode_dirty() in the + * case that i_size has changed. + */ int simple_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + struct inode *inode = page->mapping->host; + loff_t last_pos = pos + copied; /* zero the stale part of the page if we did a short copy */ if (copied < len) { - void *kaddr = kmap_atomic(page, KM_USER0); - memset(kaddr + from + copied, 0, len - copied); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + + zero_user(page, from + copied, len - copied); } - simple_commit_write(file, page, from, from+copied); + if (!PageUptodate(page)) + SetPageUptodate(page); + /* + * No need to use i_size_read() here, the i_size + * cannot change under us because we hold the i_mutex. 
+ */ + if (last_pos > inode->i_size) + i_size_write(inode, last_pos); + set_page_dirty(page); unlock_page(page); page_cache_release(page); @@ -853,7 +855,6 @@ EXPORT_SYMBOL(simple_getattr); EXPORT_SYMBOL(simple_link); EXPORT_SYMBOL(simple_lookup); EXPORT_SYMBOL(simple_pin_fs); -EXPORT_UNUSED_SYMBOL(simple_prepare_write); EXPORT_SYMBOL(simple_readpage); EXPORT_SYMBOL(simple_release_fs); EXPORT_SYMBOL(simple_rename); diff --git a/fs/locks.c b/fs/locks.c index a8794f233bc..ae9ded026b7 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1182,8 +1182,9 @@ int __break_lease(struct inode *inode, unsigned int mode) struct file_lock *fl; unsigned long break_time; int i_have_this_lease = 0; + int want_write = (mode & O_ACCMODE) != O_RDONLY; - new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK); + new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK); lock_kernel(); @@ -1197,7 +1198,7 @@ int __break_lease(struct inode *inode, unsigned int mode) if (fl->fl_owner == current->files) i_have_this_lease = 1; - if (mode & FMODE_WRITE) { + if (want_write) { /* If we want write access, we have to revoke any lease. */ future = F_UNLCK | F_INPROGRESS; } else if (flock->fl_type & F_INPROGRESS) { diff --git a/fs/namei.c b/fs/namei.c index b55440baf7a..0741c69b331 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -561,6 +561,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata dget(dentry); } mntget(path->mnt); + nd->last_type = LAST_BIND; cookie = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(cookie); if (!IS_ERR(cookie)) { @@ -688,33 +689,20 @@ static __always_inline void follow_dotdot(struct nameidata *nd) set_root(nd); while(1) { - struct vfsmount *parent; struct dentry *old = nd->path.dentry; if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { break; } - spin_lock(&dcache_lock); if (nd->path.dentry != nd->path.mnt->mnt_root) { - nd->path.dentry = dget(nd->path.dentry->d_parent); - spin_unlock(&dcache_lock); + /* rare case of legitimate dget_parent()... */ + nd->path.dentry = dget_parent(nd->path.dentry); dput(old); break; } - spin_unlock(&dcache_lock); - spin_lock(&vfsmount_lock); - parent = nd->path.mnt->mnt_parent; - if (parent == nd->path.mnt) { - spin_unlock(&vfsmount_lock); + if (!follow_up(&nd->path)) break; - } - mntget(parent); - nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint); - spin_unlock(&vfsmount_lock); - dput(old); - mntput(nd->path.mnt); - nd->path.mnt = parent; } follow_mount(&nd->path); } @@ -822,6 +810,17 @@ fail: } /* + * This is a temporary kludge to deal with "automount" symlinks; proper + * solution is to trigger them on follow_mount(), so that do_lookup() + * would DTRT. To be killed before 2.6.34-final. + */ +static inline int follow_on_final(struct inode *inode, unsigned lookup_flags) +{ + return inode && unlikely(inode->i_op->follow_link) && + ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode)); +} + +/* * Name resolution. * This is the basic name resolution function, turning a pathname into * the final dentry. We expect 'base' to be positive and a directory. 
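The follow_dotdot() hunk above replaces the open-coded dcache_lock/vfsmount_lock walk with two existing helpers. For readability, here is the loop as it reads once the patch is applied, reassembled from the +/- lines (a sketch of the function body, not a standalone compile unit):

static void follow_dotdot(struct nameidata *nd)
{
	set_root(nd);

	while (1) {
		struct dentry *old = nd->path.dentry;

		/* ".." from the process root stays at the root */
		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt)
			break;
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* rare case of legitimate dget_parent()... */
			nd->path.dentry = dget_parent(nd->path.dentry);
			dput(old);
			break;
		}
		/* at a mount root: move to the mountpoint in the parent
		 * mount and loop, to cope with stacked mounts;
		 * follow_up() returns 0 once there is no parent left */
		if (!follow_up(&nd->path))
			break;
	}
	follow_mount(&nd->path);
}

dget_parent() and follow_up() take dcache_lock and vfsmount_lock internally, which is why the explicit locking and the manual mntget()/dget() reference juggling can disappear from this function.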
@@ -941,8 +940,7 @@ last_component: if (err) break; inode = next.dentry->d_inode; - if ((lookup_flags & LOOKUP_FOLLOW) - && inode && inode->i_op->follow_link) { + if (follow_on_final(inode, lookup_flags)) { err = do_follow_link(&next, nd); if (err) goto return_err; @@ -1336,7 +1334,7 @@ static int may_delete(struct inode *dir,struct dentry *victim,int isdir) return -ENOENT; BUG_ON(victim->d_parent->d_inode != dir); - audit_inode_child(victim->d_name.name, victim, dir); + audit_inode_child(victim, dir); error = inode_permission(dir, MAY_WRITE | MAY_EXEC); if (error) @@ -1492,7 +1490,7 @@ int may_open(struct path *path, int acc_mode, int flag) * An append-only file must be opened in append mode for writing. */ if (IS_APPEND(inode)) { - if ((flag & FMODE_WRITE) && !(flag & O_APPEND)) + if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND)) return -EPERM; if (flag & O_TRUNC) return -EPERM; @@ -1536,7 +1534,7 @@ static int handle_truncate(struct path *path) * what get passed to sys_open(). */ static int __open_namei_create(struct nameidata *nd, struct path *path, - int flag, int mode) + int open_flag, int mode) { int error; struct dentry *dir = nd->path.dentry; @@ -1554,7 +1552,7 @@ out_unlock: if (error) return error; /* Don't check for write permission, don't truncate */ - return may_open(&nd->path, 0, flag & ~O_TRUNC); + return may_open(&nd->path, 0, open_flag & ~O_TRUNC); } /* @@ -1603,11 +1601,12 @@ struct file *do_filp_open(int dfd, const char *pathname, struct file *filp; struct nameidata nd; int error; - struct path path, save; + struct path path; struct dentry *dir; int count = 0; int will_truncate; int flag = open_to_namei_flags(open_flag); + int force_reval = 0; /* * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only @@ -1619,7 +1618,7 @@ struct file *do_filp_open(int dfd, const char *pathname, open_flag |= O_DSYNC; if (!acc_mode) - acc_mode = MAY_OPEN | ACC_MODE(flag); + acc_mode = MAY_OPEN | ACC_MODE(open_flag); /* O_TRUNC implies we need access checks for write permissions */ if (flag & O_TRUNC) @@ -1659,9 +1658,12 @@ struct file *do_filp_open(int dfd, const char *pathname, /* * Create - we need to know the parent. 
*/ +reval: error = path_init(dfd, pathname, LOOKUP_PARENT, &nd); if (error) return ERR_PTR(error); + if (force_reval) + nd.flags |= LOOKUP_REVAL; error = path_walk(pathname, &nd); if (error) { if (nd.root.mnt) @@ -1721,7 +1723,7 @@ do_last: error = mnt_want_write(nd.path.mnt); if (error) goto exit_mutex_unlock; - error = __open_namei_create(&nd, &path, flag, mode); + error = __open_namei_create(&nd, &path, open_flag, mode); if (error) { mnt_drop_write(nd.path.mnt); goto exit; @@ -1731,8 +1733,7 @@ do_last: if (nd.root.mnt) path_put(&nd.root); if (!IS_ERR(filp)) { - error = ima_path_check(&filp->f_path, filp->f_mode & - (MAY_READ | MAY_WRITE | MAY_EXEC)); + error = ima_file_check(filp, acc_mode); if (error) { fput(filp); filp = ERR_PTR(error); @@ -1784,7 +1785,7 @@ ok: if (error) goto exit; } - error = may_open(&nd.path, acc_mode, flag); + error = may_open(&nd.path, acc_mode, open_flag); if (error) { if (will_truncate) mnt_drop_write(nd.path.mnt); @@ -1792,8 +1793,7 @@ ok: } filp = nameidata_to_filp(&nd); if (!IS_ERR(filp)) { - error = ima_path_check(&filp->f_path, filp->f_mode & - (MAY_READ | MAY_WRITE | MAY_EXEC)); + error = ima_file_check(filp, acc_mode); if (error) { fput(filp); filp = ERR_PTR(error); @@ -1853,17 +1853,7 @@ do_link: error = security_inode_follow_link(path.dentry, &nd); if (error) goto exit_dput; - save = nd.path; - path_get(&save); error = __do_follow_link(&path, &nd); - if (error == -ESTALE) { - /* nd.path had been dropped */ - nd.path = save; - path_get(&nd.path); - nd.flags |= LOOKUP_REVAL; - error = __do_follow_link(&path, &nd); - } - path_put(&save); path_put(&path); if (error) { /* Does someone understand code flow here? Or it is only @@ -1873,6 +1863,10 @@ do_link: release_open_intent(&nd); if (nd.root.mnt) path_put(&nd.root); + if (error == -ESTALE && !force_reval) { + force_reval = 1; + goto reval; + } return ERR_PTR(error); } nd.flags &= ~LOOKUP_PARENT; @@ -2268,8 +2262,11 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) error = -EBUSY; else { error = security_inode_unlink(dir, dentry); - if (!error) + if (!error) { error = dir->i_op->unlink(dir, dentry); + if (!error) + dentry->d_inode->i_flags |= S_DEAD; + } } mutex_unlock(&dentry->d_inode->i_mutex); @@ -2622,6 +2619,8 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, else error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); if (!error) { + if (target) + target->i_flags |= S_DEAD; if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) d_move(old_dentry, new_dentry); } @@ -2664,11 +2663,9 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry); else error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry); - if (!error) { - const char *new_name = old_dentry->d_name.name; - fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir, + if (!error) + fsnotify_move(old_dir, new_dir, old_name, is_dir, new_dentry->d_inode, old_dentry); - } fsnotify_oldname_free(old_name); return error; diff --git a/fs/namespace.c b/fs/namespace.c index 7d70d63ceb2..8174c8ab5c7 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -573,7 +573,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root, mnt->mnt_master = old; CLEAR_MNT_SHARED(mnt); } else if (!(flag & CL_PRIVATE)) { - if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old)) + if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) list_add(&mnt->mnt_share, &old->mnt_share); if (IS_MNT_SLAVE(old)) 
list_add(&mnt->mnt_slave, &old->mnt_slave); @@ -737,6 +737,21 @@ static void m_stop(struct seq_file *m, void *v) up_read(&namespace_sem); } +int mnt_had_events(struct proc_mounts *p) +{ + struct mnt_namespace *ns = p->ns; + int res = 0; + + spin_lock(&vfsmount_lock); + if (p->event != ns->event) { + p->event = ns->event; + res = 1; + } + spin_unlock(&vfsmount_lock); + + return res; +} + struct proc_fs_info { int flag; const char *str; @@ -965,10 +980,12 @@ EXPORT_SYMBOL(may_umount_tree); int may_umount(struct vfsmount *mnt) { int ret = 1; + down_read(&namespace_sem); spin_lock(&vfsmount_lock); if (propagate_mount_busy(mnt, 2)) ret = 0; spin_unlock(&vfsmount_lock); + up_read(&namespace_sem); return ret; } @@ -1119,8 +1136,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) { struct path path; int retval; + int lookup_flags = 0; + + if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) + return -EINVAL; + + if (!(flags & UMOUNT_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; - retval = user_path(name, &path); + retval = user_path_at(AT_FDCWD, name, lookup_flags, &path); if (retval) goto out; retval = -EINVAL; @@ -1244,6 +1268,21 @@ void drop_collected_mounts(struct vfsmount *mnt) release_mounts(&umount_list); } +int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, + struct vfsmount *root) +{ + struct vfsmount *mnt; + int res = f(root, arg); + if (res) + return res; + list_for_each_entry(mnt, &root->mnt_list, mnt_list) { + res = f(mnt, arg); + if (res) + return res; + } + return 0; +} + static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end) { struct vfsmount *p; @@ -1352,12 +1391,12 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt, if (err) goto out_cleanup_ids; + spin_lock(&vfsmount_lock); + if (IS_MNT_SHARED(dest_mnt)) { for (p = source_mnt; p; p = next_mnt(p, source_mnt)) set_mnt_shared(p); } - - spin_lock(&vfsmount_lock); if (parent_path) { detach_mnt(source_mnt, parent_path); attach_mnt(source_mnt, path); @@ -1534,8 +1573,12 @@ static int do_remount(struct path *path, int flags, int mnt_flags, err = change_mount_flags(path->mnt, flags); else err = do_remount_sb(sb, flags, data, 0); - if (!err) + if (!err) { + spin_lock(&vfsmount_lock); + mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK; path->mnt->mnt_flags = mnt_flags; + spin_unlock(&vfsmount_lock); + } up_write(&sb->s_umount); if (!err) { security_sb_post_remount(path->mnt, flags, data); @@ -1665,6 +1708,8 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path, { int err; + mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL); + down_write(&namespace_sem); /* Something was mounted here while we slept */ while (d_mountpoint(path->dentry) && @@ -2306,17 +2351,13 @@ void __init mnt_init(void) void put_mnt_ns(struct mnt_namespace *ns) { - struct vfsmount *root; LIST_HEAD(umount_list); - if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock)) + if (!atomic_dec_and_test(&ns->count)) return; - root = ns->root; - ns->root = NULL; - spin_unlock(&vfsmount_lock); down_write(&namespace_sem); spin_lock(&vfsmount_lock); - umount_tree(root, 0, &umount_list); + umount_tree(ns->root, 0, &umount_list); spin_unlock(&vfsmount_lock); up_write(&namespace_sem); release_mounts(&umount_list); diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 59e5673b459..a43d07e7b92 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -95,8 +95,7 @@ config ROOT_NFS Most people say N here. 
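Two user-visible changes fall out of the umount(2) hunk above: unknown flag bits now fail with -EINVAL instead of being silently ignored, and the new UMOUNT_NOFOLLOW flag keeps the final lookup from following a trailing symlink. A minimal userspace illustration (this assumes the glibc umount2() wrapper; the 0x8 value matches the define this series adds to include/linux/fs.h, the fallback is only needed when building against older headers, and /mnt/link is a hypothetical symlink to a real mountpoint):

#include <stdio.h>
#include <sys/mount.h>

#ifndef UMOUNT_NOFOLLOW
#define UMOUNT_NOFOLLOW	0x00000008	/* Don't follow symlink on umount */
#endif

int main(void)
{
	/* With this patch the call fails (the symlink is not followed,
	 * so the looked-up dentry is not a mount root); before it, the
	 * unknown bit was ignored and the link's target got unmounted. */
	if (umount2("/mnt/link", UMOUNT_NOFOLLOW) != 0)
		perror("umount2");
	return 0;
}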
config NFS_FSCACHE - bool "Provide NFS client caching support (EXPERIMENTAL)" - depends on EXPERIMENTAL + bool "Provide NFS client caching support" depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y help Say Y here if you want NFS data to be cached locally on disc through diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index e1d415e9784..0d289823e85 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, data->res.fattr = &data->fattr; data->res.eof = 0; data->res.count = bytes; + nfs_fattr_init(&data->fattr); msg.rpc_argp = &data->args; msg.rpc_resp = &data->res; @@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) data->res.count = 0; data->res.fattr = &data->fattr; data->res.verf = &data->verf; + nfs_fattr_init(&data->fattr); NFS_PROTO(data->inode)->commit_setup(data, &msg); @@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, data->res.fattr = &data->fattr; data->res.count = bytes; data->res.verf = &data->verf; + nfs_fattr_init(&data->fattr); task_setup_data.task = &data->task; task_setup_data.callback_data = data; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 6b891328f33..63f2071d644 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -486,6 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp) { dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); + if (gfp & __GFP_WAIT) + nfs_wb_page(page->mapping->host, page); /* If PagePrivate() is set, then the page is not freeable */ if (PagePrivate(page)) return 0; diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index fa588006588..237874f1af2 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode) */ int nfs_fscache_release_page(struct page *page, gfp_t gfp) { - struct nfs_inode *nfsi = NFS_I(page->mapping->host); - struct fscache_cookie *cookie = nfsi->fscache; - - BUG_ON(!cookie); - if (PageFsCache(page)) { + struct nfs_inode *nfsi = NFS_I(page->mapping->host); + struct fscache_cookie *cookie = nfsi->fscache; + + BUG_ON(!cookie); dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", cookie, page, nfsi); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index faa091865ad..7570573bdb3 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -574,14 +574,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync) nfs_revalidate_inode(server, inode); } -static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred) +static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred) { struct nfs_open_context *ctx; ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (ctx != NULL) { - ctx->path.dentry = dget(dentry); - ctx->path.mnt = mntget(mnt); + ctx->path = *path; + path_get(&ctx->path); ctx->cred = get_rpccred(cred); ctx->state = NULL; ctx->lockowner = current->files; @@ -686,7 +686,7 @@ int nfs_open(struct inode *inode, struct file *filp) cred = rpc_lookup_cred(); if (IS_ERR(cred)) return PTR_ERR(cred); - ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred); + ctx = alloc_nfs_open_context(&filp->f_path, cred); put_rpccred(cred); if (ctx == NULL) return -ENOMEM; @@ -1261,8 +1261,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (fattr->valid & NFS_ATTR_FATTR_MODE) { if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) { + 
umode_t newmode = inode->i_mode & S_IFMT; + newmode |= fattr->mode & S_IALLUGO; + inode->i_mode = newmode; invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; - inode->i_mode = fattr->mode; } } else if (server->caps & NFS_CAP_MODE) invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h index 46d779abafd..1d8d5c813b0 100644 --- a/fs/nfs/iostat.h +++ b/fs/nfs/iostat.h @@ -57,12 +57,12 @@ static inline void nfs_add_fscache_stats(struct inode *inode, } #endif -static inline struct nfs_iostats *nfs_alloc_iostats(void) +static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void) { return alloc_percpu(struct nfs_iostats); } -static inline void nfs_free_iostats(struct nfs_iostats *stats) +static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats) { if (stats != NULL) free_percpu(stats); diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 0adefc40cc8..59047f8d7d7 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -120,7 +120,7 @@ static struct { { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, - { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, + { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, }, }; struct mountres { diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 5e078b222b4..7bc2da8efd4 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -699,7 +699,7 @@ static struct { { NFSERR_BAD_COOKIE, -EBADCOOKIE }, { NFSERR_NOTSUPP, -ENOTSUPP }, { NFSERR_TOOSMALL, -ETOOSMALL }, - { NFSERR_SERVERFAULT, -ESERVERFAULT }, + { NFSERR_SERVERFAULT, -EREMOTEIO }, { NFSERR_BADTYPE, -EBADTYPE }, { NFSERR_JUKEBOX, -EJUKEBOX }, { -1, -EIO } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 865265bdca0..0c6fda33d66 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -146,6 +146,7 @@ enum { NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ + NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */ }; struct nfs4_state { @@ -277,6 +278,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); extern void nfs4_schedule_state_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); +extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state); extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 198d51d17c1..84d83be25a9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -249,19 +249,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, if (state == NULL) break; nfs4_state_mark_reclaim_nograce(clp, state); - case -NFS4ERR_STALE_CLIENTID: + goto do_state_recovery; case -NFS4ERR_STALE_STATEID: - case -NFS4ERR_EXPIRED: - nfs4_schedule_state_recovery(clp); - ret = nfs4_wait_clnt_recover(clp); - if (ret == 0) - exception->retry = 1; -#if !defined(CONFIG_NFS_V4_1) - break; -#else /* !defined(CONFIG_NFS_V4_1) */ - if (!nfs4_has_session(server->nfs_client)) + if (state == NULL) break; - /* FALLTHROUGH */ + 
nfs4_state_mark_reclaim_reboot(clp, state); + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_EXPIRED: + goto do_state_recovery; +#if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: @@ -274,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, nfs4_schedule_state_recovery(clp); exception->retry = 1; break; -#endif /* !defined(CONFIG_NFS_V4_1) */ +#endif /* defined(CONFIG_NFS_V4_1) */ case -NFS4ERR_FILE_OPEN: if (exception->timeout > HZ) { /* We have retried a decent amount, time to @@ -293,6 +289,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, } /* We failed to handle the error */ return nfs4_map_errors(ret); +do_state_recovery: + nfs4_schedule_state_recovery(clp); + ret = nfs4_wait_clnt_recover(clp); + if (ret == 0) + exception->retry = 1; + return ret; } @@ -722,8 +724,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); if (p->o_arg.seqid == NULL) goto err_free; - p->path.mnt = mntget(path->mnt); - p->path.dentry = dget(path->dentry); + path_get(path); + p->path = *path; p->dir = parent; p->owner = sp; atomic_inc(&sp->so_count); @@ -1658,6 +1660,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in status = PTR_ERR(state); if (IS_ERR(state)) goto err_opendata_put; + if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) + set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); *res = state; @@ -1940,8 +1944,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait) calldata->res.seqid = calldata->arg.seqid; calldata->res.server = server; calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; - calldata->path.mnt = mntget(path->mnt); - calldata->path.dentry = dget(path->dentry); + path_get(path); + calldata->path = *path; msg.rpc_argp = &calldata->arg, msg.rpc_resp = &calldata->res, @@ -3422,15 +3426,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, if (state == NULL) break; nfs4_state_mark_reclaim_nograce(clp, state); - case -NFS4ERR_STALE_CLIENTID: + goto do_state_recovery; case -NFS4ERR_STALE_STATEID: + if (state == NULL) + break; + nfs4_state_mark_reclaim_reboot(clp, state); + case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_EXPIRED: - rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); - nfs4_schedule_state_recovery(clp); - if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) - rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); - task->tk_status = 0; - return -EAGAIN; + goto do_state_recovery; #if defined(CONFIG_NFS_V4_1) case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: @@ -3458,6 +3461,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, } task->tk_status = nfs4_map_errors(task->tk_status); return 0; +do_state_recovery: + rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); + nfs4_schedule_state_recovery(clp); + if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) + rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); + task->tk_status = 0; + return -EAGAIN; } static int @@ -4088,6 +4098,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = { .rpc_release = nfs4_lock_release, }; +static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) +{ + struct nfs_client *clp = server->nfs_client; + struct nfs4_state *state = lsp->ls_state; + + switch 
(error) { + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_EXPIRED: + if (new_lock_owner != 0 || + (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) + nfs4_state_mark_reclaim_nograce(clp, state); + lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; + break; + case -NFS4ERR_STALE_STATEID: + if (new_lock_owner != 0 || + (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) + nfs4_state_mark_reclaim_reboot(clp, state); + lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; + }; +} + static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) { struct nfs4_lockdata *data; @@ -4126,6 +4158,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f ret = nfs4_wait_for_completion_rpc_task(task); if (ret == 0) { ret = data->rpc_status; + if (ret) + nfs4_handle_setlk_error(data->server, data->lsp, + data->arg.new_lock_owner, ret); } else data->cancelled = 1; rpc_put_task(task); @@ -4181,8 +4216,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock { struct nfs_inode *nfsi = NFS_I(state->inode); unsigned char fl_flags = request->fl_flags; - int status; + int status = -ENOLCK; + if ((fl_flags & FL_POSIX) && + !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) + goto out; /* Is this a delegated open? */ status = nfs4_set_lock_state(state, request); if (status != 0) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6d263ed79e9..c1e2733f4fa 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } -static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) +int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) { set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e437fd6a819..5cd5184b56d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr, * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes. */ - status = -ESERVERFAULT; + status = -EREMOTEIO; if (memcmp(id.data, res->sr_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { @@ -5774,7 +5774,7 @@ static struct { { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, { NFS4ERR_NOTSUPP, -ENOTSUPP }, { NFS4ERR_TOOSMALL, -ETOOSMALL }, - { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, + { NFS4ERR_SERVERFAULT, -EREMOTEIO }, { NFS4ERR_BADTYPE, -EBADTYPE }, { NFS4ERR_LOCKED, -EAGAIN }, { NFS4ERR_SYMLINK, -ELOOP }, @@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat) } if (stat <= 10000 || stat > 10100) { /* The server is looney tunes. */ - return -ESERVERFAULT; + return -EREMOTEIO; } /* If we cannot translate the error, the recovery routines should * handle it. diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index e2975939126..a12c45b65dd 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req) kref_put(&req->wb_kref, nfs_free_request); } +static int nfs_wait_bit_uninterruptible(void *word) +{ + io_schedule(); + return 0; +} + /** * nfs_wait_on_request - Wait for a request to complete. * @req: request to wait upon. 
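The two pagelist.c hunks here convert the PG_BUSY wait from killable to uninterruptible; joined into one place for reference (taken from the hunks, minus the diff markup). wait_on_bit() re-tests the bit whenever the action callback returns 0 and aborts the wait when it returns nonzero, so an action that only calls io_schedule() and returns 0, combined with TASK_UNINTERRUPTIBLE, yields a wait that no signal, fatal or otherwise, can cut short:

static int nfs_wait_bit_uninterruptible(void *word)
{
	io_schedule();	/* sleep, accounted as I/O wait */
	return 0;	/* 0 = not interrupted, re-test PG_BUSY */
}

int nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit(&req->wb_flags, PG_BUSY,
			   nfs_wait_bit_uninterruptible,
			   TASK_UNINTERRUPTIBLE);
}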
@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req) int nfs_wait_on_request(struct nfs_page *req) { - int ret = 0; - - if (!test_bit(PG_BUSY, &req->wb_flags)) - goto out; - ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY, - nfs_wait_bit_killable, TASK_KILLABLE); -out: - return ret; + return wait_on_bit(&req->wb_flags, PG_BUSY, + nfs_wait_bit_uninterruptible, + TASK_UNINTERRUPTIBLE); } /** diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ce907efc550..f1afee4eea7 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -243,6 +243,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *); static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); +static void nfs_put_super(struct super_block *); static void nfs_kill_super(struct super_block *); static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); @@ -266,6 +267,7 @@ static const struct super_operations nfs_sops = { .alloc_inode = nfs_alloc_inode, .destroy_inode = nfs_destroy_inode, .write_inode = nfs_write_inode, + .put_super = nfs_put_super, .statfs = nfs_statfs, .clear_inode = nfs_clear_inode, .umount_begin = nfs_umount_begin, @@ -335,6 +337,7 @@ static const struct super_operations nfs4_sops = { .alloc_inode = nfs_alloc_inode, .destroy_inode = nfs_destroy_inode, .write_inode = nfs_write_inode, + .put_super = nfs_put_super, .statfs = nfs_statfs, .clear_inode = nfs4_clear_inode, .umount_begin = nfs_umount_begin, @@ -2258,6 +2261,17 @@ error_splat_super: } /* + * Ensure that we unregister the bdi before kill_anon_super + * releases the device name + */ +static void nfs_put_super(struct super_block *s) +{ + struct nfs_server *server = NFS_SB(s); + + bdi_unregister(&server->backing_dev_info); +} + +/* * Destroy an NFS2/3 superblock */ static void nfs_kill_super(struct super_block *s) @@ -2265,7 +2279,6 @@ static void nfs_kill_super(struct super_block *s) struct nfs_server *server = NFS_SB(s); kill_anon_super(s); - bdi_unregister(&server->backing_dev_info); nfs_fscache_release_super_cookie(s); nfs_free_server(server); } diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index 70e1fbbaaea..ad4d2e787b2 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -15,8 +15,10 @@ #include "callback.h" +#ifdef CONFIG_NFS_V4 static const int nfs_set_port_min = 0; static const int nfs_set_port_max = 65535; +#endif static struct ctl_table_header *nfs_callback_sysctl_table; static ctl_table nfs_cb_sysctls[] = { diff --git a/fs/nfs/write.c b/fs/nfs/write.c index d171696017f..d63d964a039 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1233,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) -void nfs_commitdata_release(void *data) +static void nfs_commitdata_release(void *data) { struct nfs_write_data *wdata = data; @@ -1541,6 +1541,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) break; } ret = nfs_wait_on_request(req); + nfs_release_request(req); if (ret < 0) goto out; } @@ -1597,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, struct nfs_page *req; int ret; - if (PageFsCache(page)) - nfs_fscache_release_page(page, GFP_KERNEL); + nfs_fscache_release_page(page, GFP_KERNEL); req = nfs_find_and_lock_request(page); ret = PTR_ERR(req); diff --git a/fs/nfsctl.c b/fs/nfsctl.c index 
d3854d94b7c..bf9cbd242dd 100644 --- a/fs/nfsctl.c +++ b/fs/nfsctl.c @@ -36,10 +36,9 @@ static struct file *do_open(char *name, int flags) return ERR_PTR(error); if (flags == O_RDWR) - error = may_open(&nd.path, MAY_READ|MAY_WRITE, - FMODE_READ|FMODE_WRITE); + error = may_open(&nd.path, MAY_READ|MAY_WRITE, flags); else - error = may_open(&nd.path, MAY_WRITE, FMODE_WRITE); + error = may_open(&nd.path, MAY_WRITE, flags); if (!error) return dentry_open(nd.path.dentry, nd.path.mnt, flags, diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index c487810a236..a0c4016413f 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path) static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) { - struct svc_export *exp; u32 fsidv[2]; mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); - exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); - /* - * We shouldn't have accepting an nfsv4 request at all if we - * don't have a pseudoexport!: - */ - if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT) - exp = ERR_PTR(-ESERVERFAULT); - return exp; + return rqst_exp_find(rqstp, FSID_NUM, fsidv); } /* diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index a8587e90fd5..bbf72d8f9fc 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2121,9 +2121,15 @@ out_acl: * and this is the root of a cross-mounted filesystem. */ if (ignore_crossmnt == 0 && - exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) { - err = vfs_getattr(exp->ex_path.mnt->mnt_parent, - exp->ex_path.mnt->mnt_mountpoint, &stat); + dentry == exp->ex_path.mnt->mnt_root) { + struct path path = exp->ex_path; + path_get(&path); + while (follow_up(&path)) { + if (path.dentry != path.mnt->mnt_root) + break; + } + err = vfs_getattr(path.mnt, path.dentry, &stat); + path_put(&path); if (err) goto out_nfserr; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index c194793b642..15dc2deaac5 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -361,7 +361,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, * If we are changing the size of the file, then * we need to break all leases. */ - host_err = break_lease(inode, FMODE_WRITE | O_NONBLOCK); + host_err = break_lease(inode, O_WRONLY | O_NONBLOCK); if (host_err == -EWOULDBLOCK) host_err = -ETIMEDOUT; if (host_err) /* ENOMEM or EWOULDBLOCK */ @@ -734,7 +734,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, * Check to see if there are any leases on this file. * This may block while leases are broken. */ - host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? FMODE_WRITE : 0)); + host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0)); if (host_err == -EWOULDBLOCK) host_err = -ETIMEDOUT; if (host_err) /* NOMEM or WOULDBLOCK */ @@ -752,6 +752,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, flags, current_cred()); if (IS_ERR(*filp)) host_err = PTR_ERR(*filp); + else + host_err = ima_file_check(*filp, access); out_nfserr: err = nfserrno(host_err); out: @@ -2127,7 +2129,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, */ path.mnt = exp->ex_path.mnt; path.dentry = dentry; - err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC)); nfsd_out: return err? 
nfserrno(err) : 0; } diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 187dd07ba86..9d1e5de91af 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -388,8 +388,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) ret = -ENOENT; goto out; } - if (blocknrp != NULL) - *blocknrp = blocknr; + *blocknrp = blocknr; out: kunmap_atomic(kaddr, KM_USER0); diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 76d803e060a..0092840492e 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -224,7 +224,7 @@ fail: * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller. */ static int -nilfs_match(int len, const char * const name, struct nilfs_dir_entry *de) +nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de) { if (len != de->name_len) return 0; @@ -349,11 +349,11 @@ done: * Entry is guaranteed to be valid. */ struct nilfs_dir_entry * -nilfs_find_entry(struct inode *dir, struct dentry *dentry, +nilfs_find_entry(struct inode *dir, const struct qstr *qstr, struct page **res_page) { - const char *name = dentry->d_name.name; - int namelen = dentry->d_name.len; + const unsigned char *name = qstr->name; + int namelen = qstr->len; unsigned reclen = NILFS_DIR_REC_LEN(namelen); unsigned long start, n; unsigned long npages = dir_pages(dir); @@ -424,13 +424,13 @@ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p) return de; } -ino_t nilfs_inode_by_name(struct inode *dir, struct dentry *dentry) +ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr) { ino_t res = 0; struct nilfs_dir_entry *de; struct page *page; - de = nilfs_find_entry(dir, dentry, &page); + de = nilfs_find_entry(dir, qstr, &page); if (de) { res = le64_to_cpu(de->inode); kunmap(page); @@ -465,7 +465,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, int nilfs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned chunk_size = nilfs_chunk_size(dir); unsigned reclen = NILFS_DIR_REC_LEN(namelen); diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index d6b2b83de36..313d0a21da4 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -26,6 +26,7 @@ #include <linux/capability.h> /* capable() */ #include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */ #include <linux/vmalloc.h> +#include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */ #include <linux/nilfs2_fs.h> #include "nilfs.h" #include "segment.h" @@ -107,20 +108,28 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, if (!capable(CAP_SYS_ADMIN)) return -EPERM; + + ret = mnt_want_write(filp->f_path.mnt); + if (ret) + return ret; + + ret = -EFAULT; if (copy_from_user(&cpmode, argp, sizeof(cpmode))) - return -EFAULT; + goto out; mutex_lock(&nilfs->ns_mount_mutex); + nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_cpfile_change_cpmode( cpfile, cpmode.cm_cno, cpmode.cm_mode); - if (unlikely(ret < 0)) { + if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); - mutex_unlock(&nilfs->ns_mount_mutex); - return ret; - } - nilfs_transaction_commit(inode->i_sb); /* never fails */ + else + nilfs_transaction_commit(inode->i_sb); /* never fails */ + mutex_unlock(&nilfs->ns_mount_mutex); +out: + mnt_drop_write(filp->f_path.mnt); return ret; } @@ -135,16 +144,23 @@ nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, if 
(!capable(CAP_SYS_ADMIN)) return -EPERM; + + ret = mnt_want_write(filp->f_path.mnt); + if (ret) + return ret; + + ret = -EFAULT; if (copy_from_user(&cno, argp, sizeof(cno))) - return -EFAULT; + goto out; nilfs_transaction_begin(inode->i_sb, &ti, 0); ret = nilfs_cpfile_delete_checkpoint(cpfile, cno); - if (unlikely(ret < 0)) { + if (unlikely(ret < 0)) nilfs_transaction_abort(inode->i_sb); - return ret; - } - nilfs_transaction_commit(inode->i_sb); /* never fails */ + else + nilfs_transaction_commit(inode->i_sb); /* never fails */ +out: + mnt_drop_write(filp->f_path.mnt); return ret; } @@ -496,12 +512,19 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, if (!capable(CAP_SYS_ADMIN)) return -EPERM; + ret = mnt_want_write(filp->f_path.mnt); + if (ret) + return ret; + + ret = -EFAULT; if (copy_from_user(argv, argp, sizeof(argv))) - return -EFAULT; + goto out; + ret = -EINVAL; nsegs = argv[4].v_nmembs; if (argv[4].v_size != argsz[4]) - return -EINVAL; + goto out; + /* * argv[4] points to segment numbers this ioctl cleans. We * use kmalloc() for its buffer because memory used for the @@ -509,9 +532,10 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, */ kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base, nsegs * sizeof(__u64)); - if (IS_ERR(kbufs[4])) - return PTR_ERR(kbufs[4]); - + if (IS_ERR(kbufs[4])) { + ret = PTR_ERR(kbufs[4]); + goto out; + } nilfs = NILFS_SB(inode->i_sb)->s_nilfs; for (n = 0; n < 4; n++) { @@ -563,10 +587,12 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, nilfs_remove_all_gcinode(nilfs); clear_nilfs_gc_running(nilfs); - out_free: +out_free: while (--n >= 0) vfree(kbufs[n]); kfree(kbufs[4]); +out: + mnt_drop_write(filp->f_path.mnt); return ret; } @@ -575,13 +601,17 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, { __u64 cno; int ret; + struct the_nilfs *nilfs; ret = nilfs_construct_segment(inode->i_sb); if (ret < 0) return ret; if (argp != NULL) { - cno = NILFS_SB(inode->i_sb)->s_nilfs->ns_cno - 1; + nilfs = NILFS_SB(inode->i_sb)->s_nilfs; + down_read(&nilfs->ns_segctor_sem); + cno = nilfs->ns_cno - 1; + up_read(&nilfs->ns_segctor_sem); if (copy_to_user(argp, &cno, sizeof(cno))) return -EFAULT; } diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 07ba838ef08..ad6ed2cf19b 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -67,7 +67,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) if (dentry->d_name.len > NILFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); - ino = nilfs_inode_by_name(dir, dentry); + ino = nilfs_inode_by_name(dir, &dentry->d_name); inode = NULL; if (ino) { inode = nilfs_iget(dir->i_sb, ino); @@ -81,10 +81,7 @@ struct dentry *nilfs_get_parent(struct dentry *child) { unsigned long ino; struct inode *inode; - struct dentry dotdot; - - dotdot.d_name.name = ".."; - dotdot.d_name.len = 2; + struct qstr dotdot = {.name = "..", .len = 2}; ino = nilfs_inode_by_name(child->d_inode, &dotdot); if (!ino) @@ -296,7 +293,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry) int err; err = -ENOENT; - de = nilfs_find_entry(dir, dentry, &page); + de = nilfs_find_entry(dir, &dentry->d_name, &page); if (!de) goto out; @@ -389,7 +386,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, return err; err = -ENOENT; - old_de = nilfs_find_entry(old_dir, old_dentry, &old_page); + old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_de) goto out; 
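The nilfs2 ioctl hunks above all take the same shape: grab mnt_want_write() before doing anything, preload a pessimistic error code, and route every failure through one exit label so mnt_drop_write() always runs. A stand-alone sketch of that shape, with stub guards in place of the kernel calls:

#include <errno.h>
#include <stdio.h>

static int want_write(void)  { puts("mnt_want_write"); return 0; }
static void drop_write(void) { puts("mnt_drop_write"); }

static int sample_ioctl(int bad_input)
{
        int ret = want_write();
        if (ret)
                return ret;     /* guard not taken, nothing to undo */

        ret = -EFAULT;          /* pessimistic default, as in the patch */
        if (bad_input)
                goto out;       /* every exit still drops the guard */

        ret = 0;                /* the real work would happen here */
out:
        drop_write();
        return ret;
}

int main(void)
{
        int a = sample_ioctl(1);
        int b = sample_ioctl(0);
        printf("%d %d\n", a, b);        /* -14 0 */
        return 0;
}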
@@ -409,7 +406,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto out_dir; err = -ENOENT; - new_de = nilfs_find_entry(new_dir, new_dentry, &new_page); + new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; inc_nlink(old_inode); diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 4da6f67e9a9..8723e5bfd07 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -217,10 +217,10 @@ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir) /* dir.c */ extern int nilfs_add_link(struct dentry *, struct inode *); -extern ino_t nilfs_inode_by_name(struct inode *, struct dentry *); +extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *); extern int nilfs_make_empty(struct inode *, struct inode *); extern struct nilfs_dir_entry * -nilfs_find_entry(struct inode *, struct dentry *, struct page **); +nilfs_find_entry(struct inode *, const struct qstr *, struct page **); extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *); extern int nilfs_empty_dir(struct inode *); extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **); diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index c9c96c7825d..017bedc761a 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -39,7 +39,6 @@ enum { NILFS_SEG_FAIL_IO, NILFS_SEG_FAIL_MAGIC, NILFS_SEG_FAIL_SEQ, - NILFS_SEG_FAIL_CHECKSUM_SEGSUM, NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT, NILFS_SEG_FAIL_CHECKSUM_FULL, NILFS_SEG_FAIL_CONSISTENCY, @@ -71,10 +70,6 @@ static int nilfs_warn_segment_error(int err) printk(KERN_WARNING "NILFS warning: Sequence number mismatch\n"); break; - case NILFS_SEG_FAIL_CHECKSUM_SEGSUM: - printk(KERN_WARNING - "NILFS warning: Checksum error in segment summary\n"); - break; case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT: printk(KERN_WARNING "NILFS warning: Checksum error in super root\n"); @@ -206,19 +201,15 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block, * @pseg_start: start disk block number of partial segment * @seg_seq: sequence number requested * @ssi: pointer to nilfs_segsum_info struct to store information - * @full_check: full check flag - * (0: only checks segment summary CRC, 1: data CRC) */ static int load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, - u64 seg_seq, struct nilfs_segsum_info *ssi, - int full_check) + u64 seg_seq, struct nilfs_segsum_info *ssi) { struct buffer_head *bh_sum; struct nilfs_segment_summary *sum; - unsigned long offset, nblock; - u64 check_bytes; - u32 crc, crc_sum; + unsigned long nblock; + u32 crc; int ret = NILFS_SEG_FAIL_IO; bh_sum = sb_bread(sbi->s_super, pseg_start); @@ -237,34 +228,24 @@ load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, ret = NILFS_SEG_FAIL_SEQ; goto failed; } - if (full_check) { - offset = sizeof(sum->ss_datasum); - check_bytes = - ((u64)ssi->nblocks << sbi->s_super->s_blocksize_bits); - nblock = ssi->nblocks; - crc_sum = le32_to_cpu(sum->ss_datasum); - ret = NILFS_SEG_FAIL_CHECKSUM_FULL; - } else { /* only checks segment summary */ - offset = sizeof(sum->ss_datasum) + sizeof(sum->ss_sumsum); - check_bytes = ssi->sumbytes; - nblock = ssi->nsumblk; - crc_sum = le32_to_cpu(sum->ss_sumsum); - ret = NILFS_SEG_FAIL_CHECKSUM_SEGSUM; - } + nblock = ssi->nblocks; if (unlikely(nblock == 0 || nblock > sbi->s_nilfs->ns_blocks_per_segment)) { /* This limits the number of blocks read in the CRC check */ ret = NILFS_SEG_FAIL_CONSISTENCY; goto failed; } - if (calc_crc_cont(sbi, bh_sum, 
&crc, offset, check_bytes, + if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum), + ((u64)nblock << sbi->s_super->s_blocksize_bits), pseg_start, nblock)) { ret = NILFS_SEG_FAIL_IO; goto failed; } - if (crc == crc_sum) + if (crc == le32_to_cpu(sum->ss_datasum)) ret = 0; + else + ret = NILFS_SEG_FAIL_CHECKSUM_FULL; failed: brelse(bh_sum); out: @@ -598,7 +579,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) { - ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1); + ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi); if (ret) { if (ret == NILFS_SEG_FAIL_IO) { err = -EIO; @@ -821,7 +802,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, for (;;) { /* Load segment summary */ - ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1); + ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi); if (ret) { if (ret == NILFS_SEG_FAIL_IO) goto failed; diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 645c78656aa..ab56fe44e37 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -40,6 +40,11 @@ struct nilfs_write_info { }; +static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, + struct the_nilfs *nilfs); +static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf); + + static struct kmem_cache *nilfs_segbuf_cachep; static void nilfs_segbuf_init_once(void *obj) @@ -302,6 +307,19 @@ void nilfs_truncate_logs(struct list_head *logs, } } +int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs) +{ + struct nilfs_segment_buffer *segbuf; + int ret = 0; + + list_for_each_entry(segbuf, logs, sb_list) { + ret = nilfs_segbuf_write(segbuf, nilfs); + if (ret) + break; + } + return ret; +} + int nilfs_wait_on_logs(struct list_head *logs) { struct nilfs_segment_buffer *segbuf; diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h index 6af1630fb40..94dfd3517bc 100644 --- a/fs/nilfs2/segbuf.h +++ b/fs/nilfs2/segbuf.h @@ -166,13 +166,10 @@ nilfs_segbuf_add_file_buffer(struct nilfs_segment_buffer *segbuf, segbuf->sb_sum.nfileblk++; } -int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, - struct the_nilfs *nilfs); -int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf); - void nilfs_clear_logs(struct list_head *logs); void nilfs_truncate_logs(struct list_head *logs, struct nilfs_segment_buffer *last); +int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs); int nilfs_wait_on_logs(struct list_head *logs); static inline void nilfs_destroy_logs(struct list_head *logs) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 17584c52448..ada2f1b947a 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1764,14 +1764,9 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci, static int nilfs_segctor_write(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) { - struct nilfs_segment_buffer *segbuf; - int ret = 0; + int ret; - list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { - ret = nilfs_segbuf_write(segbuf, nilfs); - if (ret) - break; - } + ret = nilfs_write_logs(&sci->sc_segbufs, nilfs); list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs); return ret; } @@ -1937,8 +1932,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf; struct page *bd_page = NULL, *fs_page = NULL; - struct nilfs_sb_info *sbi = sci->sc_sbi; - struct the_nilfs *nilfs = sbi->s_nilfs; + struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs; int 
update_sr = (sci->sc_super_root != NULL); list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) { @@ -2020,7 +2014,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) if (update_sr) { nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, segbuf->sb_sum.seg_seq, nilfs->ns_cno++); - sbi->s_super->s_dirt = 1; + set_nilfs_sb_dirty(nilfs); clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); @@ -2425,43 +2419,43 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, return err; } -struct nilfs_segctor_req { - int mode; - __u32 seq_accepted; - int sc_err; /* construction failure */ - int sb_err; /* super block writeback failure */ -}; - #define FLUSH_FILE_BIT (0x1) /* data file only */ #define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */ -static void nilfs_segctor_accept(struct nilfs_sc_info *sci, - struct nilfs_segctor_req *req) +/** + * nilfs_segctor_accept - record accepted sequence count of log-write requests + * @sci: segment constructor object + */ +static void nilfs_segctor_accept(struct nilfs_sc_info *sci) { - req->sc_err = req->sb_err = 0; spin_lock(&sci->sc_state_lock); - req->seq_accepted = sci->sc_seq_request; + sci->sc_seq_accepted = sci->sc_seq_request; spin_unlock(&sci->sc_state_lock); if (sci->sc_timer) del_timer_sync(sci->sc_timer); } -static void nilfs_segctor_notify(struct nilfs_sc_info *sci, - struct nilfs_segctor_req *req) +/** + * nilfs_segctor_notify - notify the result of request to caller threads + * @sci: segment constructor object + * @mode: mode of log forming + * @err: error code to be notified + */ +static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) { /* Clear requests (even when the construction failed) */ spin_lock(&sci->sc_state_lock); - if (req->mode == SC_LSEG_SR) { + if (mode == SC_LSEG_SR) { sci->sc_state &= ~NILFS_SEGCTOR_COMMIT; - sci->sc_seq_done = req->seq_accepted; - nilfs_segctor_wakeup(sci, req->sc_err ? 
: req->sb_err); + sci->sc_seq_done = sci->sc_seq_accepted; + nilfs_segctor_wakeup(sci, err); sci->sc_flush_request = 0; } else { - if (req->mode == SC_FLUSH_FILE) + if (mode == SC_FLUSH_FILE) sci->sc_flush_request &= ~FLUSH_FILE_BIT; - else if (req->mode == SC_FLUSH_DAT) + else if (mode == SC_FLUSH_DAT) sci->sc_flush_request &= ~FLUSH_DAT_BIT; /* re-enable timer if checkpoint creation was not done */ @@ -2472,30 +2466,37 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, spin_unlock(&sci->sc_state_lock); } -static int nilfs_segctor_construct(struct nilfs_sc_info *sci, - struct nilfs_segctor_req *req) +/** + * nilfs_segctor_construct - form logs and write them to disk + * @sci: segment constructor object + * @mode: mode of log forming + */ +static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) { struct nilfs_sb_info *sbi = sci->sc_sbi; struct the_nilfs *nilfs = sbi->s_nilfs; int err = 0; + nilfs_segctor_accept(sci); + if (nilfs_discontinued(nilfs)) - req->mode = SC_LSEG_SR; - if (!nilfs_segctor_confirm(sci)) { - err = nilfs_segctor_do_construct(sci, req->mode); - req->sc_err = err; - } + mode = SC_LSEG_SR; + if (!nilfs_segctor_confirm(sci)) + err = nilfs_segctor_do_construct(sci, mode); + if (likely(!err)) { - if (req->mode != SC_FLUSH_DAT) + if (mode != SC_FLUSH_DAT) atomic_set(&nilfs->ns_ndirtyblks, 0); if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && nilfs_discontinued(nilfs)) { down_write(&nilfs->ns_sem); - req->sb_err = nilfs_commit_super(sbi, - nilfs_altsb_need_update(nilfs)); + err = nilfs_commit_super( + sbi, nilfs_altsb_need_update(nilfs)); up_write(&nilfs->ns_sem); } } + + nilfs_segctor_notify(sci, mode, err); return err; } @@ -2526,7 +2527,6 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, struct nilfs_sc_info *sci = NILFS_SC(sbi); struct the_nilfs *nilfs = sbi->s_nilfs; struct nilfs_transaction_info ti; - struct nilfs_segctor_req req = { .mode = SC_LSEG_SR }; int err; if (unlikely(!sci)) @@ -2547,10 +2547,8 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes); for (;;) { - nilfs_segctor_accept(sci, &req); - err = nilfs_segctor_construct(sci, &req); + err = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes); - nilfs_segctor_notify(sci, &req); if (likely(!err)) break; @@ -2560,6 +2558,16 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(sci->sc_interval); } + if (nilfs_test_opt(sbi, DISCARD)) { + int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, + sci->sc_nfreesegs); + if (ret) { + printk(KERN_WARNING + "NILFS warning: error %d on discard request, " + "turning discards off for the device\n", ret); + nilfs_clear_opt(sbi, DISCARD); + } + } out_unlock: sci->sc_freesegs = NULL; @@ -2573,13 +2581,9 @@ static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode) { struct nilfs_sb_info *sbi = sci->sc_sbi; struct nilfs_transaction_info ti; - struct nilfs_segctor_req req = { .mode = mode }; nilfs_transaction_lock(sbi, &ti, 0); - - nilfs_segctor_accept(sci, &req); - nilfs_segctor_construct(sci, &req); - nilfs_segctor_notify(sci, &req); + nilfs_segctor_construct(sci, mode); /* * Unclosed segment should be retried. We do this using sc_timer. 
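The segctor rework above folds nilfs_segctor_accept() and nilfs_segctor_notify() into nilfs_segctor_construct(), built around three sequence counters. A toy model of that request-coalescing idea (locking omitted; the sc_seq_* field names follow the patch, everything else is invented):

#include <stdio.h>

struct segctor {
        unsigned int sc_seq_request;    /* bumped by submitters */
        unsigned int sc_seq_accepted;   /* snapshot taken by the worker */
        unsigned int sc_seq_done;       /* published after one pass */
};

static void submit(struct segctor *s)     { s->sc_seq_request++; }
static void accept_all(struct segctor *s) { s->sc_seq_accepted = s->sc_seq_request; }
static void complete(struct segctor *s)   { s->sc_seq_done = s->sc_seq_accepted; }

static int pending(const struct segctor *s)
{
        return s->sc_seq_request != s->sc_seq_done;
}

int main(void)
{
        struct segctor s = { 0 };

        submit(&s); submit(&s); submit(&s);
        accept_all(&s);         /* one construction pass covers all three */
        submit(&s);             /* arrives too late for this pass */
        complete(&s);
        printf("pending=%d\n", pending(&s));    /* 1: the late request */
        return 0;
}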
@@ -2635,6 +2639,7 @@ static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci) static int nilfs_segctor_thread(void *arg) { struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; + struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs; struct timer_list timer; int timeout = 0; @@ -2680,7 +2685,6 @@ static int nilfs_segctor_thread(void *arg) } else { DEFINE_WAIT(wait); int should_sleep = 1; - struct the_nilfs *nilfs; prepare_to_wait(&sci->sc_wait_daemon, &wait, TASK_INTERRUPTIBLE); @@ -2701,8 +2705,8 @@ static int nilfs_segctor_thread(void *arg) finish_wait(&sci->sc_wait_daemon, &wait); timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && time_after_eq(jiffies, sci->sc_timer->expires)); - nilfs = sci->sc_sbi->s_nilfs; - if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs)) + + if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs)) set_nilfs_discontinued(nilfs); } goto loop; @@ -2797,12 +2801,9 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) do { struct nilfs_sb_info *sbi = sci->sc_sbi; struct nilfs_transaction_info ti; - struct nilfs_segctor_req req = { .mode = SC_LSEG_SR }; nilfs_transaction_lock(sbi, &ti, 0); - nilfs_segctor_accept(sci, &req); - ret = nilfs_segctor_construct(sci, &req); - nilfs_segctor_notify(sci, &req); + ret = nilfs_segctor_construct(sci, SC_LSEG_SR); nilfs_transaction_unlock(sbi); } while (ret && retrycount-- > 0); @@ -2829,7 +2830,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) || sci->sc_seq_request != sci->sc_seq_done); spin_unlock(&sci->sc_state_lock); - if (flag || nilfs_segctor_confirm(sci)) + if (flag || !nilfs_segctor_confirm(sci)) nilfs_segctor_write_out(sci); WARN_ON(!list_empty(&sci->sc_copied_buffers)); @@ -2865,8 +2866,15 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi) struct the_nilfs *nilfs = sbi->s_nilfs; int err; - /* Each field of nilfs_segctor is cleared through the initialization - of super-block info */ + if (NILFS_SC(sbi)) { + /* + * This happens if the filesystem was remounted + * read/write after nilfs_error degenerated it into a + * read-only mount. 
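The attach path in the hunk just below no longer assumes a zeroed slot: a constructor left over from a mount that nilfs_error() forced read-only is detached first. The guard reduces to this re-initialization idiom (userspace sketch; calloc/free stand in for nilfs_segctor_new() and the detach call):

#include <stdlib.h>

struct sc_info { int state; };

static struct sc_info *sc_slot;         /* stands in for sbi->s_sc_info */

static void detach(void)
{
        free(sc_slot);
        sc_slot = NULL;
}

static int attach(void)
{
        if (sc_slot)                    /* leftover from an earlier mount state */
                detach();
        sc_slot = calloc(1, sizeof(*sc_slot));
        return sc_slot ? 0 : -1;        /* -ENOMEM in the kernel version */
}

int main(void)
{
        return attach() || attach();    /* safe to call twice */
}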
+ */ + nilfs_detach_segment_constructor(sbi); + } + sbi->s_sc_info = nilfs_segctor_new(sbi); if (!sbi->s_sc_info) return -ENOMEM; diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 3d3ab2f9864..3155e0c7f41 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -116,6 +116,7 @@ struct nilfs_segsum_pointer { * @sc_wait_daemon: Daemon wait queue * @sc_wait_task: Start/end wait queue to control segctord task * @sc_seq_request: Request counter + * @sc_seq_accept: Accepted request count * @sc_seq_done: Completion counter * @sc_sync: Request of explicit sync operation * @sc_interval: Timeout value of background construction @@ -169,6 +170,7 @@ struct nilfs_sc_info { wait_queue_head_t sc_wait_task; __u32 sc_seq_request; + __u32 sc_seq_accepted; __u32 sc_seq_done; int sc_sync; diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 8173faee31e..92579cc4c93 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -96,9 +96,6 @@ void nilfs_error(struct super_block *sb, const char *function, if (!(sb->s_flags & MS_RDONLY)) { struct the_nilfs *nilfs = sbi->s_nilfs; - if (!nilfs_test_opt(sbi, ERRORS_CONT)) - nilfs_detach_segment_constructor(sbi); - down_write(&nilfs->ns_sem); if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { nilfs->ns_mount_state |= NILFS_ERROR_FS; @@ -301,7 +298,7 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); nilfs->ns_sbwtime[1] = t; } - sbi->s_super->s_dirt = 0; + clear_nilfs_sb_dirty(nilfs); return nilfs_sync_super(sbi, dupsb); } @@ -345,7 +342,7 @@ static int nilfs_sync_fs(struct super_block *sb, int wait) err = nilfs_construct_segment(sb); down_write(&nilfs->ns_sem); - if (sb->s_dirt) + if (nilfs_sb_dirty(nilfs)) nilfs_commit_super(sbi, 1); up_write(&nilfs->ns_sem); @@ -481,6 +478,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_printf(seq, ",order=strict"); if (nilfs_test_opt(sbi, NORECOVERY)) seq_printf(seq, ",norecovery"); + if (nilfs_test_opt(sbi, DISCARD)) + seq_printf(seq, ",discard"); return 0; } @@ -550,7 +549,7 @@ static const struct export_operations nilfs_export_ops = { enum { Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, - Opt_err, + Opt_discard, Opt_err, }; static match_table_t tokens = { @@ -561,6 +560,7 @@ static match_table_t tokens = { {Opt_snapshot, "cp=%u"}, {Opt_order, "order=%s"}, {Opt_norecovery, "norecovery"}, + {Opt_discard, "discard"}, {Opt_err, NULL} }; @@ -614,6 +614,9 @@ static int parse_options(char *options, struct super_block *sb) case Opt_norecovery: nilfs_set_opt(sbi, NORECOVERY); break; + case Opt_discard: + nilfs_set_opt(sbi, DISCARD); + break; default: printk(KERN_ERR "NILFS: Unrecognized mount option \"%s\"\n", p); diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 6241e1722ef..92733d5651d 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -646,6 +646,44 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) goto out; } +int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, + size_t nsegs) +{ + sector_t seg_start, seg_end; + sector_t start = 0, nblocks = 0; + unsigned int sects_per_block; + __u64 *sn; + int ret = 0; + + sects_per_block = (1 << nilfs->ns_blocksize_bits) / + bdev_logical_block_size(nilfs->ns_bdev); + for (sn = segnump; sn < segnump + nsegs; sn++) { + nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end); + + if (!nblocks) { + start = seg_start; + nblocks = seg_end - seg_start + 1; + } else if 
(start + nblocks == seg_start) { + nblocks += seg_end - seg_start + 1; + } else { + ret = blkdev_issue_discard(nilfs->ns_bdev, + start * sects_per_block, + nblocks * sects_per_block, + GFP_NOFS, + DISCARD_FL_BARRIER); + if (ret < 0) + return ret; + nblocks = 0; + } + } + if (nblocks) + ret = blkdev_issue_discard(nilfs->ns_bdev, + start * sects_per_block, + nblocks * sects_per_block, + GFP_NOFS, DISCARD_FL_BARRIER); + return ret; +} + int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) { struct inode *dat = nilfs_dat_inode(nilfs); diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 589786e3346..e9795f1724d 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -38,6 +38,7 @@ enum { the latest checkpoint was loaded */ THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ THE_NILFS_GC_RUNNING, /* gc process is running */ + THE_NILFS_SB_DIRTY, /* super block is dirty */ }; /** @@ -197,6 +198,7 @@ THE_NILFS_FNS(INIT, init) THE_NILFS_FNS(LOADED, loaded) THE_NILFS_FNS(DISCONTINUED, discontinued) THE_NILFS_FNS(GC_RUNNING, gc_running) +THE_NILFS_FNS(SB_DIRTY, sb_dirty) /* Minimum interval of periodical update of superblocks (in seconds) */ #define NILFS_SB_FREQ 10 @@ -221,6 +223,7 @@ struct the_nilfs *find_or_create_nilfs(struct block_device *); void put_nilfs(struct the_nilfs *); int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *); int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *); +int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t); int nilfs_count_free_blocks(struct the_nilfs *, sector_t *); struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64); int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int); diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index c9ee67b442e..1afb0a10229 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -121,7 +121,7 @@ static int idr_callback(int id, void *p, void *data) if (warned) return 0; - warned = false; + warned = true; entry = p; ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 8271cf05c95..472cdf29ef8 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -29,14 +29,12 @@ #include <linux/init.h> /* module_init */ #include <linux/inotify.h> #include <linux/kernel.h> /* roundup() */ -#include <linux/magic.h> /* superblock magic number */ -#include <linux/mount.h> /* mntget */ #include <linux/namei.h> /* LOOKUP_FOLLOW */ -#include <linux/path.h> /* struct path */ #include <linux/sched.h> /* struct user */ #include <linux/slab.h> /* struct kmem_cache */ #include <linux/syscalls.h> #include <linux/types.h> +#include <linux/anon_inodes.h> #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/wait.h> @@ -45,8 +43,6 @@ #include <asm/ioctls.h> -static struct vfsmount *inotify_mnt __read_mostly; - /* these are configurable via /proc/sys/fs/inotify/ */ static int inotify_max_user_instances __read_mostly; static int inotify_max_queued_events __read_mostly; @@ -552,7 +548,7 @@ retry: spin_lock(&group->inotify_data.idr_lock); ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, - group->inotify_data.last_wd, + group->inotify_data.last_wd+1, &tmp_ientry->wd); spin_unlock(&group->inotify_data.idr_lock); if (ret) { @@ -632,7 +628,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct 
*user, unsign spin_lock_init(&group->inotify_data.idr_lock); idr_init(&group->inotify_data.idr); - group->inotify_data.last_wd = 1; + group->inotify_data.last_wd = 0; group->inotify_data.user = user; group->inotify_data.fa = NULL; @@ -645,9 +641,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags) { struct fsnotify_group *group; struct user_struct *user; - struct file *filp; - struct path path; - int fd, ret; + int ret; /* Check the IN_* constants for consistency. */ BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); @@ -656,10 +650,6 @@ if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) return -EINVAL; - fd = get_unused_fd_flags(flags & O_CLOEXEC); - if (fd < 0) - return fd; - user = get_current_user(); if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_instances)) { @@ -676,27 +666,14 @@ atomic_inc(&user->inotify_devs); - path.mnt = inotify_mnt; - path.dentry = inotify_mnt->mnt_root; - path_get(&path); - filp = alloc_file(&path, FMODE_READ, &inotify_fops); - if (!filp) - goto Enfile; + ret = anon_inode_getfd("inotify", &inotify_fops, group, + O_RDONLY | flags); + if (ret >= 0) + return ret; - filp->f_flags = O_RDONLY | (flags & O_NONBLOCK); - filp->private_data = group; - - fd_install(fd, filp); - - return fd; - -Enfile: - ret = -ENFILE; - path_put(&path); atomic_dec(&user->inotify_devs); out_free_uid: free_uid(user); - put_unused_fd(fd); return ret; } @@ -783,20 +760,6 @@ out: return ret; } -static int -inotify_get_sb(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, struct vfsmount *mnt) -{ - return get_sb_pseudo(fs_type, "inotify", NULL, - INOTIFYFS_SUPER_MAGIC, mnt); -} - -static struct file_system_type inotify_fs_type = { - .name = "inotifyfs", - .get_sb = inotify_get_sb, - .kill_sb = kill_anon_super, -}; - /* * inotify_user_setup - Our initialization function. Note that we cannot return error because we have compiled-in VFS hooks. 
So an (unlikely) failure here @@ -804,16 +767,6 @@ static struct file_system_type inotify_fs_type = { */ static int __init inotify_user_setup(void) { - int ret; - - ret = register_filesystem(&inotify_fs_type); - if (unlikely(ret)) - panic("inotify: register_filesystem returned %d!\n", ret); - - inotify_mnt = kern_mount(&inotify_fs_type); - if (IS_ERR(inotify_mnt)) - panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); - inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC); event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC); diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 600d2d2ade1..791c0886c06 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -46,6 +46,7 @@ ocfs2_stackglue-objs := stackglue.o ocfs2_stack_o2cb-objs := stack_o2cb.o ocfs2_stack_user-objs := stack_user.o +obj-$(CONFIG_OCFS2_FS) += dlmfs/ # cluster/ is always needed when OCFS2_FS for masklog support obj-$(CONFIG_OCFS2_FS) += cluster/ obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/ diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index d17bdc718f7..2bbe1ecc08c 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1050,7 +1050,8 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle, strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); eb->h_blkno = cpu_to_le64(first_blkno); eb->h_fs_generation = cpu_to_le32(osb->fs_generation); - eb->h_suballoc_slot = cpu_to_le16(osb->slot_num); + eb->h_suballoc_slot = + cpu_to_le16(meta_ac->ac_alloc_slot); eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); eb->h_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb)); @@ -6037,7 +6038,7 @@ static void ocfs2_truncate_log_worker(struct work_struct *work) if (status < 0) mlog_errno(status); else - ocfs2_init_inode_steal_slot(osb); + ocfs2_init_steal_slots(osb); mlog_exit(status); } diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 3dae4a13f6e..4c2a6d282c4 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -577,8 +577,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, goto bail; } - /* We should already CoW the refcounted extent. */ - BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); + /* We should already CoW the refcounted extent in case of create. */ + BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED)); + /* * get_more_blocks() expects us to describe a hole by clearing * the mapped bit on bh_result(). @@ -599,7 +600,7 @@ bail: return ret; } -/* +/* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. Like the core uses * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from @@ -670,7 +671,7 @@ static ssize_t ocfs2_direct_IO(int rw, ret = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, - nr_segs, + nr_segs, ocfs2_direct_IO_get_blocks, ocfs2_dio_end_io); diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index d43d34a1dd3..21c808f752d 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, } ocfs2_metadata_cache_io_unlock(ci); - mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", + mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", (unsigned long long)block, nr, ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? 
"no" : "yes", flags); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index eda5b8bcddd..5c989000670 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type); unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; -/* Only sets a new threshold if there are no active regions. +/* Only sets a new threshold if there are no active regions. * * No locking or otherwise interesting code is required for reading * o2hb_dead_threshold as it can't change once regions are active and @@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work) mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " "milliseconds\n", reg->hr_dev_name, - jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); + jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); o2quo_disk_timeout(); } @@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg, "seq %llu last %llu changed %u equal %u\n", slot->ds_node_num, (long long)slot->ds_last_generation, le32_to_cpu(hb_block->hb_cksum), - (unsigned long long)le64_to_cpu(hb_block->hb_seq), + (unsigned long long)le64_to_cpu(hb_block->hb_seq), (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, slot->ds_equal_samples); diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c index 1cd2934de61..b39da877b12 100644 --- a/fs/ocfs2/cluster/masklog.c +++ b/fs/ocfs2/cluster/masklog.c @@ -112,6 +112,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { define_mask(XATTR), define_mask(QUOTA), define_mask(REFCOUNT), + define_mask(BASTS), define_mask(ERROR), define_mask(NOTICE), define_mask(KTHREAD), diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index 9b4d11726cf..3dfddbec32f 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -114,6 +114,7 @@ #define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ #define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ #define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */ +#define ML_BASTS 0x0000001000000000ULL /* dlmglue asts and basts */ /* bits that are infrequently given and frequently matched in the high word */ #define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */ #define ML_NOTICE 0x0000000200000000ULL /* setn to KERN_NOTICE */ @@ -194,9 +195,9 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits; * previous token if args expands to nothing. */ #define __mlog_printk(level, fmt, args...) \ - printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current), \ - __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \ - ##args) + printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \ + task_pid_nr(current), __mlog_cpu_guess, \ + __PRETTY_FUNCTION__, __LINE__ , ##args) #define mlog(mask, fmt, args...) 
do { \ u64 __m = MLOG_MASK_PREFIX | (mask); \ diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 334f231a422..d8d0c65ac03 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, } if (was_valid && !valid) { - printk(KERN_INFO "o2net: no longer connected to " + printk(KERN_NOTICE "o2net: no longer connected to " SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); o2net_complete_nodes_nsw(nn); } @@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, if (!was_valid && valid) { o2quo_conn_up(o2net_num_from_nn(nn)); cancel_delayed_work(&nn->nn_connect_expired); - printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", + printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", o2nm_this_node() > sc->sc_node->nd_num ? "connected to" : "accepted connection from", SC_NODEF_ARGS(sc)); @@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc, cond_resched(); continue; } - mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT + mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); o2net_ensure_shutdown(nn, sc, 0); break; @@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data) do_gettimeofday(&now); - printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " + printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); mlog(ML_NOTICE, "here are some times that might help debug the " "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", - sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, + sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, now.tv_sec, (long) now.tv_usec, sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, sc->sc_tv_advance_start.tv_sec, diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 8d58cfe410b..96fa7ebc530 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -32,10 +32,10 @@ * on their number */ #define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) -/* +/* * This version number represents quite a lot, unfortunately. It not * only represents the raw network message protocol on the wire but also - * locking semantics of the file system using the protocol. It should + * locking semantics of the file system using the protocol. It should * be somewhere else, I'm sure, but right now it isn't. * * With version 11, we separate out the filesystem locking portion. 
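The masklog hunks above add an ML_BASTS class bit and teach __mlog_printk() to prefix current->comm. The underlying mechanism is a mask-gated variadic macro; a compilable reduction of it (relies on the GNU ##__VA_ARGS__ extension, and every name here is invented):

#include <stdint.h>
#include <stdio.h>

#define ML_ERROR 0x1ULL
#define ML_BASTS 0x2ULL         /* the new class, as in the patch */

static uint64_t mlog_mask = ML_ERROR | ML_BASTS;

/* Gate on the class bit, then prefix context much the way the patched
 * __mlog_printk() prefixes comm/pid/function/line. */
#define mlog(mask, fmt, ...)                                            \
        do {                                                            \
                if (mlog_mask & (mask))                                 \
                        printf("(%s:%d) " fmt, __func__, __LINE__,      \
                               ##__VA_ARGS__);                          \
        } while (0)

int main(void)
{
        mlog(ML_BASTS, "bast for lock %s\n", "M0001");  /* shown */
        mlog(0x4ULL, "filtered out\n");                 /* not shown */
        return 0;
}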
The diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 28c3ec23879..765d66c7098 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -2439,7 +2439,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb, dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; memset(dx_root, 0, osb->sb->s_blocksize); strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE); - dx_root->dr_suballoc_slot = cpu_to_le16(osb->slot_num); + dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit); dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation); dx_root->dr_blkno = cpu_to_le64(dr_blkno); diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile index 19036137570..dcebf0d920f 100644 --- a/fs/ocfs2/dlm/Makefile +++ b/fs/ocfs2/dlm/Makefile @@ -1,8 +1,7 @@ EXTRA_CFLAGS += -Ifs/ocfs2 -obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o +obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \ dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o -ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index b5786a787fa..3cfa114aa39 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err); mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ } while (0) -#define DLM_LKSB_UNUSED1 0x01 +#define DLM_LKSB_UNUSED1 0x01 #define DLM_LKSB_PUT_LVB 0x02 #define DLM_LKSB_GET_LVB 0x04 #define DLM_LKSB_UNUSED2 0x08 diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 01cf8cc3d28..dccc439fa08 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_lock_put(lock); /* free up the reserved bast that we are cancelling. * guaranteed that this will not be the last reserved - * ast because *both* an ast and a bast were reserved + * ast because *both* an ast and a bast were reserved * to get to this point. the res->spinlock will not be * taken here */ dlm_lockres_release_ast(dlm, res); diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index ca96bce50e1..f283bce776b 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, /* instead of logging the same network error over * and over, sleep here and wait for the heartbeat * to notice the node is dead. times out after 5s. 
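dlmconvert's remote path above deliberately sleeps in dlm_wait_for_node_death() instead of relogging the same network error, letting the heartbeat layer declare the peer dead. A bounded poll-for-condition model of that wait (POSIX userspace; the timings and the liveness oracle are made up):

#include <stdio.h>
#include <unistd.h>

#define DEATH_WAIT_MAX_MS 5000
#define POLL_MS 100

static int node_is_dead(int node)
{
        return node == 7;       /* stub heartbeat verdict */
}

static void wait_for_node_death(int node, int max_ms)
{
        for (int waited = 0; waited < max_ms && !node_is_dead(node);
             waited += POLL_MS)
                usleep(POLL_MS * 1000);
}

int main(void)
{
        wait_for_node_death(7, DEATH_WAIT_MAX_MS);      /* returns at once */
        puts("returning DLM_RECOVERING");       /* caller retries the convert */
        return 0;
}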
*/ - dlm_wait_for_node_death(dlm, res->owner, + dlm_wait_for_node_death(dlm, res->owner, DLM_NODE_DEATH_WAIT_MAX); ret = DLM_RECOVERING; mlog(0, "node %u died so returning DLM_RECOVERING " diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 42b0bad7a61..0cd24cf5439 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) assert_spin_locked(&res->spinlock); stringify_lockname(res->lockname.name, res->lockname.len, - buf, sizeof(buf) - 1); + buf, sizeof(buf)); printk("lockres: %s, owner=%u, state=%u\n", buf, res->owner, res->state); printk(" last used: %lu, refcnt: %u, on purge list: %s\n", diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 0334000676d..988c9055fd4 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, } /* Once the dlm ctxt is marked as leaving then we don't want - * to be put in someone's domain map. + * to be put in someone's domain map. * Also, explicitly disallow joining at certain troublesome * times (ie. during recovery). */ if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 437698e9465..73333777267 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, } dlm_revert_pending_lock(res, lock); dlm_lock_put(lock); - } else if (dlm_is_recovery_lock(res->lockname.name, + } else if (dlm_is_recovery_lock(res->lockname.name, res->lockname.len)) { /* special case for the $RECOVERY lock. * there will never be an AST delivered to put diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 03ccf9a7b1f..a659606dcb9 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) struct dlm_master_list_entry *mle; assert_spin_locked(&dlm->spinlock); - + list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { if (node_up) dlm_mle_node_up(dlm, mle, NULL, idx); @@ -833,7 +833,7 @@ lookup: __dlm_insert_mle(dlm, mle); /* still holding the dlm spinlock, check the recovery map - * to see if there are any nodes that still need to be + * to see if there are any nodes that still need to be * considered. these will not appear in the mle nodemap * but they might own this lockres. wait on them. 
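The recovery-map walk that follows relies on find_next_bit() to visit every node that may still own the lockres. A single-word userspace equivalent of that scan loop (the real map spans O2NM_MAX_NODES bits across several words):

#include <stdio.h>

#define MAX_NODES 64            /* one word, for brevity */

static int next_bit(unsigned long long map, int from)
{
        for (int i = from; i < MAX_NODES; i++)
                if (map & (1ULL << i))
                        return i;
        return MAX_NODES;       /* "no more bits" sentinel */
}

int main(void)
{
        unsigned long long recovery_map = (1ULL << 3) | (1ULL << 17);

        for (int bit = next_bit(recovery_map, 0); bit < MAX_NODES;
             bit = next_bit(recovery_map, bit + 1))
                printf("node %d may still own this lockres\n", bit);
        return 0;
}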
*/ bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); @@ -883,7 +883,7 @@ redo_request: msleep(500); } continue; - } + } dlm_kick_recovery_thread(dlm); msleep(1000); @@ -939,8 +939,8 @@ wait: res->lockname.name, blocked); if (++tries > 20) { mlog(ML_ERROR, "%s:%.*s: spinning on " - "dlm_wait_for_lock_mastery, blocked=%d\n", - dlm->name, res->lockname.len, + "dlm_wait_for_lock_mastery, blocked=%d\n", + dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); dlm_print_one_mle(mle); @@ -1029,7 +1029,7 @@ recheck: ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); b = (mle->type == DLM_MLE_BLOCK); if ((*blocked && !b) || (!*blocked && b)) { - mlog(0, "%s:%.*s: status change: old=%d new=%d\n", + mlog(0, "%s:%.*s: status change: old=%d new=%d\n", dlm->name, res->lockname.len, res->lockname.name, *blocked, b); *blocked = b; @@ -1602,7 +1602,7 @@ send_response: } mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", dlm->node_num, res->lockname.len, res->lockname.name); - ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, + ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, DLM_ASSERT_MASTER_MLE_CLEANUP); if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); @@ -1701,7 +1701,7 @@ again: if (r & DLM_ASSERT_RESPONSE_REASSERT) { mlog(0, "%.*s: node %u create mles on other " - "nodes and requests a re-assert\n", + "nodes and requests a re-assert\n", namelen, lockname, to); reassert = 1; } @@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&dlm->master_lock); spin_unlock(&dlm->spinlock); goto done; - } + } } } spin_unlock(&dlm->master_lock); @@ -1883,7 +1883,7 @@ ok: int extra_ref = 0; int nn = -1; int rr, err = 0; - + spin_lock(&mle->spinlock); if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) extra_ref = 1; @@ -1891,7 +1891,7 @@ ok: /* MASTER mle: if any bits set in the response map * then the calling node needs to re-assert to clear * up nodes that this node contacted */ - while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, + while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, nn+1)) < O2NM_MAX_NODES) { if (nn != dlm->node_num && nn != assert->node_idx) master_request = 1; @@ -2002,7 +2002,7 @@ kill: __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); - *ret_data = (void *)res; + *ret_data = (void *)res; dlm_put(dlm); return -EINVAL; } @@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, item->u.am.request_from = request_from; item->u.am.flags = flags; - if (ignore_higher) - mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, + if (ignore_higher) + mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, res->lockname.name); - + spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); @@ -2133,7 +2133,7 @@ put: * think that $RECOVERY is currently mastered by a dead node. If so, * we wait a short time to allow that node to get notified by its own * heartbeat stack, then check again. All $RECOVERY lock resources - * mastered by dead nodes are purged when the hearbeat callback is + * mastered by dead nodes are purged when the hearbeat callback is * fired, so we can know for sure that it is safe to continue once * the node returns a live node or no node. 
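The dlm_prepare_lvb_for_migration() helper introduced a little further down replaces one nested conditional with guard clauses: skip blocked locks, skip non-EX/PR grants, copy the first valid lvb, and treat any later mismatch as fatal. A shape-only userspace rendering (constants and types invented for the sketch):

#include <stdio.h>
#include <string.h>

#define LVB_LEN 64
enum { BLOCKED_LIST = 2, EXMODE = 5, PRMODE = 3 };

struct lock { int queue, type; char lvb[LVB_LEN]; };

static void prepare_lvb(char *out, const struct lock *lk)
{
        if (lk->queue == BLOCKED_LIST)
                return;                         /* blocked locks carry no lvb */
        if (lk->type != EXMODE && lk->type != PRMODE)
                return;                         /* only granted EX/PR matter */
        if (out[0] == '\0') {
                memcpy(out, lk->lvb, LVB_LEN);  /* first valid lvb wins */
                return;
        }
        if (memcmp(out, lk->lvb, LVB_LEN))
                fprintf(stderr, "mismatched lvb\n");    /* BUG() in the kernel */
}

int main(void)
{
        char out[LVB_LEN] = "";
        struct lock lk = { .queue = 0, .type = EXMODE, .lvb = "value" };

        prepare_lvb(out, &lk);
        return out[0] == 'v' ? 0 : 1;
}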
*/ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, @@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, ret = -EAGAIN; } spin_unlock(&dlm->spinlock); - mlog(0, "%s: reco lock master is %u\n", dlm->name, + mlog(0, "%s: reco lock master is %u\n", dlm->name, master); break; } @@ -2602,7 +2602,7 @@ fail: mlog(0, "%s:%.*s: timed out during migration\n", dlm->name, res->lockname.len, res->lockname.name); - /* avoid hang during shutdown when migrating lockres + /* avoid hang during shutdown when migrating lockres * to a node which also goes down */ if (dlm_is_node_dead(dlm, target)) { mlog(0, "%s:%.*s: expected migration " @@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); spin_unlock(&res->spinlock); - /* target has died, so make the caller break out of the + /* target has died, so make the caller break out of the * wait_event, but caller must recheck the domain_map */ spin_lock(&dlm->spinlock); if (!test_bit(mig_target, dlm->domain_map)) diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 2f9e4e19a4f..b4f99de2caf 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -310,7 +310,7 @@ static int dlm_recovery_thread(void *data) mlog(0, "dlm thread running for %s...\n", dlm->name); while (!kthread_should_stop()) { - if (dlm_joined(dlm)) { + if (dlm_domain_fully_joined(dlm)) { status = dlm_do_recovery(dlm); if (status == -EAGAIN) { /* do not sleep, recheck immediately. */ @@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, if (lock->ml.node == dead_node) { mlog(0, "AHA! there was " "a $RECOVERY lock for dead " - "node %u (%s)!\n", + "node %u (%s)!\n", dead_node, dlm->name); list_del_init(&lock->list); dlm_lock_put(lock); @@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, mres->master = master; } +static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock, + struct dlm_migratable_lockres *mres, + int queue) +{ + if (!lock->lksb) + return; + + /* Ignore lvb in all locks in the blocked list */ + if (queue == DLM_BLOCKED_LIST) + return; + + /* Only consider lvbs in locks with granted EX or PR lock levels */ + if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE) + return; + + if (dlm_lvb_is_empty(mres->lvb)) { + memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); + return; + } + + /* Ensure the lvb copied for migration matches in other valid locks */ + if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN)) + return; + + mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, " + "node=%u\n", + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + lock->lockres->lockname.len, lock->lockres->lockname.name, + lock->ml.node); + dlm_print_one_lock_resource(lock->lockres); + BUG(); +} /* returns 1 if this lock fills the network structure, * 0 otherwise */ @@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock, ml->list = queue; if (lock->lksb) { ml->flags = lock->lksb->flags; - /* send our current lvb */ - if (ml->type == LKM_EXMODE || - ml->type == LKM_PRMODE) { - /* if it is already set, this had better be a PR - * and it has to match */ - if (!dlm_lvb_is_empty(mres->lvb) && - (ml->type == LKM_EXMODE || - memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { - mlog(ML_ERROR, "mismatched lvbs!\n"); - dlm_print_one_lock_resource(lock->lockres); - BUG(); - } - 
memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); - } + dlm_prepare_lvb_for_migration(lock, mres, queue); } ml->node = lock->ml.node; mres->num_locks++; @@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, struct dlm_lock *lock = NULL; u8 from = O2NM_MAX_NODES; unsigned int added = 0; + __be64 c; mlog(0, "running %d locks for this lockres\n", mres->num_locks); for (i=0; i<mres->num_locks; i++) { @@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, /* lock is always created locally first, and * destroyed locally last. it must be on the list */ if (!lock) { - __be64 c = ml->cookie; - mlog(ML_ERROR, "could not find local lock " - "with cookie %u:%llu!\n", + c = ml->cookie; + mlog(ML_ERROR, "Could not find local lock " + "with cookie %u:%llu, node %u, " + "list %u, flags 0x%x, type %d, " + "conv %d, highest blocked %d\n", dlm_get_lock_cookie_node(be64_to_cpu(c)), - dlm_get_lock_cookie_seq(be64_to_cpu(c))); + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + ml->node, ml->list, ml->flags, ml->type, + ml->convert_type, ml->highest_blocked); + __dlm_print_one_lock_resource(res); + BUG(); + } + + if (lock->ml.node != ml->node) { + c = lock->ml.cookie; + mlog(ML_ERROR, "Mismatched node# in lock " + "cookie %u:%llu, name %.*s, node %u\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + res->lockname.len, res->lockname.name, + lock->ml.node); + c = ml->cookie; + mlog(ML_ERROR, "Migrate lock cookie %u:%llu, " + "node %u, list %u, flags 0x%x, type %d, " + "conv %d, highest blocked %d\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + ml->node, ml->list, ml->flags, ml->type, + ml->convert_type, ml->highest_blocked); __dlm_print_one_lock_resource(res); BUG(); } - BUG_ON(lock->ml.node != ml->node); if (tmpq != queue) { - mlog(0, "lock was on %u instead of %u for %.*s\n", - j, ml->list, res->lockname.len, res->lockname.name); + c = ml->cookie; + mlog(0, "Lock cookie %u:%llu was on list %u " + "instead of list %u for %.*s\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c)), + j, ml->list, res->lockname.len, + res->lockname.name); + __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); continue; } @@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, * the lvb. 
*/ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); } else { - /* otherwise, the node is sending its + /* otherwise, the node is sending its * most recent valid lvb info */ BUG_ON(ml->type != LKM_EXMODE && ml->type != LKM_PRMODE); @@ -1886,7 +1936,7 @@ skip_lvb: spin_lock(&res->spinlock); list_for_each_entry(lock, queue, list) { if (lock->ml.cookie == ml->cookie) { - __be64 c = lock->ml.cookie; + c = lock->ml.cookie; mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " "exists on this lockres!\n", dlm->name, res->lockname.len, res->lockname.name, @@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, assert_spin_locked(&res->spinlock); if (res->owner == dlm->node_num) - /* if this node owned the lockres, and if the dead node + /* if this node owned the lockres, and if the dead node * had an EX when he died, blank out the lvb */ search_node = dead_node; else { @@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, /* this node is the lockres master: * 1) remove any stale locks for the dead node - * 2) if the dead node had an EX when he died, blank out the lvb + * 2) if the dead node had an EX when he died, blank out the lvb */ assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); @@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: freed %u locks for dead node %u, " "dropping ref from lockres\n", dlm->name, res->lockname.len, res->lockname.name, freed, dead_node); - BUG_ON(!test_bit(dead_node, res->refmap)); + if(!test_bit(dead_node, res->refmap)) { + mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, " + "but ref was not set\n", dlm->name, + res->lockname.len, res->lockname.name, freed, dead_node); + __dlm_print_one_lock_resource(res); + } dlm_lockres_clear_refmap_bit(dead_node, res); } else if (test_bit(dead_node, res->refmap)) { mlog(0, "%s:%.*s: dead node %u had a ref, but had " @@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) } spin_unlock(&res->spinlock); continue; - } + } spin_lock(&res->spinlock); /* zero the lvb if necessary */ dlm_revalidate_lvb(dlm, res, dead_node); @@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) * this function on each node racing to become the recovery * master will not stop attempting this until either: * a) this node gets the EX (and becomes the recovery master), - * or b) dlm->reco.new_master gets set to some nodenum + * or b) dlm->reco.new_master gets set to some nodenum * != O2NM_INVALID_NODE_NUM (another node will do the reco). * so each time a recovery master is needed, the entire cluster * will sync at this point. if the new master dies, that will @@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); -again: +again: memset(&lksb, 0, sizeof(lksb)); ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, @@ -2437,8 +2492,8 @@ again: if (ret == DLM_NORMAL) { mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", dlm->name, dlm->node_num); - - /* got the EX lock. check to see if another node + + /* got the EX lock. 
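The expanded error messages decode each lock cookie with dlm_get_lock_cookie_node() and dlm_get_lock_cookie_seq(). Assuming the o2dlm layout suggested by those helpers' names, node number in the top byte and a per-node sequence in the low 56 bits (worth confirming against dlmcommon.h), the split looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed layout: bits 63..56 node number, bits 55..0 sequence */
    static unsigned cookie_node(uint64_t cookie)
    {
        return (unsigned)((cookie >> 56) & 0xff);
    }

    static unsigned long long cookie_seq(uint64_t cookie)
    {
        return cookie & 0x00ffffffffffffffULL;
    }

    int main(void)
    {
        uint64_t cookie = ((uint64_t)3 << 56) | 1234;   /* node 3, seq 1234 */

        printf("cookie %u:%llu\n", cookie_node(cookie), cookie_seq(cookie));
        return 0;
    }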
check to see if another node * just became the reco master */ if (dlm_reco_master_ready(dlm)) { mlog(0, "%s: got reco EX lock, but %u will " @@ -2451,12 +2506,12 @@ again: /* see if recovery was already finished elsewhere */ spin_lock(&dlm->spinlock); if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { - status = -EINVAL; + status = -EINVAL; mlog(0, "%s: got reco EX lock, but " "node got recovered already\n", dlm->name); if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { mlog(ML_ERROR, "%s: new master is %u " - "but no dead node!\n", + "but no dead node!\n", dlm->name, dlm->reco.new_master); BUG(); } @@ -2468,7 +2523,7 @@ again: * set the master and send the messages to begin recovery */ if (!status) { mlog(0, "%s: dead=%u, this=%u, sending " - "begin_reco now\n", dlm->name, + "begin_reco now\n", dlm->name, dlm->reco.dead_node, dlm->node_num); status = dlm_send_begin_reco_message(dlm, dlm->reco.dead_node); @@ -2501,7 +2556,7 @@ again: mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", dlm->name, dlm->node_num); /* another node is master. wait on - * reco.new_master != O2NM_INVALID_NODE_NUM + * reco.new_master != O2NM_INVALID_NODE_NUM * for at most one second */ wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_reco_master_ready(dlm), @@ -2589,7 +2644,13 @@ retry: "begin reco msg (%d)\n", dlm->name, nodenum, ret); ret = 0; } - if (ret == -EAGAIN) { + + /* + * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, + * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN. + * We are handling both for compatibility reasons. + */ + if (ret == -EAGAIN || ret == EAGAIN) { mlog(0, "%s: trying to start recovery of node " "%u, but node %u is waiting for last recovery " "to complete, backoff for a bit\n", dlm->name, @@ -2599,7 +2660,7 @@ retry: } if (ret < 0) { struct dlm_lock_resource *res; - /* this is now a serious problem, possibly ENOMEM + /* this is now a serious problem, possibly ENOMEM * in the network stack. 
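The comment in the hunk above documents why the caller now accepts both spellings: before commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8, the remote dlm_begin_reco_handler() returned the positive constant EAGAIN, so a cluster mixing old and new kernels can see either value come back as the message status. The two are distinct integers on the wire, as a trivial check shows:

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        /* an old node replies with +EAGAIN, a new node with -EAGAIN */
        printf("EAGAIN = %d, -EAGAIN = %d\n", EAGAIN, -EAGAIN);
        printf("equal? %s\n", (EAGAIN == -EAGAIN) ? "yes" : "no");
        return 0;
    }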
must retry */ mlog_errno(ret); mlog(ML_ERROR, "begin reco of dlm %s to node %u " @@ -2612,7 +2673,7 @@ retry: } else { mlog(ML_ERROR, "recovery lock not found\n"); } - /* sleep for a bit in hopes that we can avoid + /* sleep for a bit in hopes that we can avoid * another ENOMEM */ msleep(100); goto retry; @@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, } if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { mlog(ML_NOTICE, "%s: dead_node previously set to %u, " - "node %u changing it to %u\n", dlm->name, + "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, br->dead_node); } dlm_set_reco_master(dlm, br->node_idx); @@ -2730,8 +2791,8 @@ stage2: if (ret < 0) { mlog_errno(ret); if (dlm_is_host_down(ret)) { - /* this has no effect on this recovery - * session, so set the status to zero to + /* this has no effect on this recovery + * session, so set the status to zero to * finish out the last recovery */ mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); @@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, mlog(0, "%s: node %u finalizing recovery stage%d of " "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); - + spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 00f53b2aea7..49e29ecd020 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); - } else if (status == DLM_RECOVERING || - status == DLM_MIGRATING || + } else if (status == DLM_RECOVERING || + status == DLM_MIGRATING || status == DLM_FORWARD) { /* must clear the actions because this unlock * is about to be retried. cannot free or do @@ -661,14 +661,14 @@ retry: if (call_ast) { mlog(0, "calling unlockast(%p, %d)\n", data, status); if (is_master) { - /* it is possible that there is one last bast + /* it is possible that there is one last bast * pending. make sure it is flushed, then * call the unlockast. * not an issue if this is a mastered remotely, * since this lock has been removed from the * lockres queues and cannot be found. 
*/ dlm_kick_thread(dlm, NULL); - wait_event(dlm->ast_wq, + wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock)); } (*unlockast)(data, status); diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile new file mode 100644 index 00000000000..df69b4856d0 --- /dev/null +++ b/fs/ocfs2/dlmfs/Makefile @@ -0,0 +1,5 @@ +EXTRA_CFLAGS += -Ifs/ocfs2 + +obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o + +ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 02bf17808bd..1b0de157a08 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -43,24 +43,17 @@ #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> +#include <linux/poll.h> #include <asm/uaccess.h> - -#include "cluster/nodemanager.h" -#include "cluster/heartbeat.h" -#include "cluster/tcp.h" - -#include "dlmapi.h" - +#include "stackglue.h" #include "userdlm.h" - #include "dlmfsver.h" #define MLOG_MASK_PREFIX ML_DLMFS #include "cluster/masklog.h" -#include "ocfs2_lockingver.h" static const struct super_operations dlmfs_ops; static const struct file_operations dlmfs_file_operations; @@ -71,15 +64,46 @@ static struct kmem_cache *dlmfs_inode_cache; struct workqueue_struct *user_dlm_worker; + + /* - * This is the userdlmfs locking protocol version. + * These are the ABI capabilities of dlmfs. + * + * Over time, dlmfs has added some features that were not part of the + * initial ABI. Unfortunately, some of these features are not detectable + * via standard usage. For example, Linux's default poll always returns + * POLLIN, so there is no way for a caller of poll(2) to know when dlmfs + * added poll support. Instead, we provide this list of new capabilities. + * + * Capabilities is a read-only attribute. We do it as a module parameter + * so we can discover it whether dlmfs is built in, loaded, or even not + * loaded. * - * See fs/ocfs2/dlmglue.c for more details on locking versions. + * The ABI features are local to this machine's dlmfs mount. This is + * distinct from the locking protocol, which is concerned with inter-node + * interaction. + * + * Capabilities: + * - bast : POLLIN against the file descriptor of a held lock + * signifies a bast fired on the lock. */ -static const struct dlm_protocol_version user_locking_protocol = { - .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, - .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, -}; +#define DLMFS_CAPABILITIES "bast stackglue" +extern int param_set_dlmfs_capabilities(const char *val, + struct kernel_param *kp) +{ + printk(KERN_ERR "%s: readonly parameter\n", kp->name); + return -EINVAL; +} +static int param_get_dlmfs_capabilities(char *buffer, + struct kernel_param *kp) +{ + return strlcpy(buffer, DLMFS_CAPABILITIES, + strlen(DLMFS_CAPABILITIES) + 1); +} +module_param_call(capabilities, param_set_dlmfs_capabilities, + param_get_dlmfs_capabilities, NULL, 0444); +MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES); + /* * decodes a set of open flags into a valid lock level and a set of flags. @@ -179,13 +203,46 @@ static int dlmfs_file_release(struct inode *inode, return 0; } +/* + * We do ->setattr() just to override size changes. Our size is the size + * of the LVB and nothing else. 
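Because the capability list is exported as a read-only module parameter, userspace can discover it without mounting dlmfs at all. A minimal reader, assuming the module is named ocfs2_dlmfs (per the new Makefile) and sysfs is mounted at /sys:

    #include <stdio.h>

    int main(void)
    {
        char buf[128];
        FILE *f = fopen("/sys/module/ocfs2_dlmfs/parameters/capabilities",
                        "r");

        if (!f) {
            perror("fopen");   /* no sysfs, or a kernel without dlmfs */
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("dlmfs capabilities: %s", buf);
        fclose(f);
        return 0;
    }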
+ */ +static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr) +{ + int error; + struct inode *inode = dentry->d_inode; + + attr->ia_valid &= ~ATTR_SIZE; + error = inode_change_ok(inode, attr); + if (!error) + error = inode_setattr(inode, attr); + + return error; +} + +static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait) +{ + int event = 0; + struct inode *inode = file->f_path.dentry->d_inode; + struct dlmfs_inode_private *ip = DLMFS_I(inode); + + poll_wait(file, &ip->ip_lockres.l_event, wait); + + spin_lock(&ip->ip_lockres.l_lock); + if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED) + event = POLLIN | POLLRDNORM; + spin_unlock(&ip->ip_lockres.l_lock); + + return event; +} + static ssize_t dlmfs_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { int bytes_left; - ssize_t readlen; + ssize_t readlen, got; char *lvb_buf; struct inode *inode = filp->f_path.dentry->d_inode; @@ -211,9 +268,13 @@ static ssize_t dlmfs_file_read(struct file *filp, if (!lvb_buf) return -ENOMEM; - user_dlm_read_lvb(inode, lvb_buf, readlen); - bytes_left = __copy_to_user(buf, lvb_buf, readlen); - readlen -= bytes_left; + got = user_dlm_read_lvb(inode, lvb_buf, readlen); + if (got) { + BUG_ON(got != readlen); + bytes_left = __copy_to_user(buf, lvb_buf, readlen); + readlen -= bytes_left; + } else + readlen = 0; kfree(lvb_buf); @@ -272,7 +333,7 @@ static void dlmfs_init_once(void *foo) struct dlmfs_inode_private *ip = (struct dlmfs_inode_private *) foo; - ip->ip_dlm = NULL; + ip->ip_conn = NULL; ip->ip_parent = NULL; inode_init_once(&ip->ip_vfs_inode); @@ -314,14 +375,14 @@ static void dlmfs_clear_inode(struct inode *inode) goto clear_fields; } - mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm); + mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn); /* we must be a directory. If required, lets unregister the * dlm context now. 
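The new poll hook is what backs the "bast" capability described earlier: POLLIN on the file descriptor of a held lock means another node is blocked on it and wants the holder to release or downconvert. A hedged user-space sketch, assuming a dlmfs mount with an illustrative lock path and the dlmfs open-flag convention that O_RDWR takes the lock exclusively:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* hypothetical path: <dlmfs mount>/<domain>/<lock name> */
        int fd = open("/dlm/mydomain/mylock", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
            printf("bast fired: another node wants this lock\n");
        close(fd);   /* releases the lock */
        return 0;
    }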
*/ - if (ip->ip_dlm) - user_dlm_unregister_context(ip->ip_dlm); + if (ip->ip_conn) + user_dlm_unregister(ip->ip_conn); clear_fields: ip->ip_parent = NULL; - ip->ip_dlm = NULL; + ip->ip_conn = NULL; } static struct backing_dev_info dlmfs_backing_dev_info = { @@ -371,7 +432,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent, inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ip = DLMFS_I(inode); - ip->ip_dlm = DLMFS_I(parent)->ip_dlm; + ip->ip_conn = DLMFS_I(parent)->ip_conn; switch (mode & S_IFMT) { default: @@ -425,13 +486,12 @@ static int dlmfs_mkdir(struct inode * dir, struct inode *inode = NULL; struct qstr *domain = &dentry->d_name; struct dlmfs_inode_private *ip; - struct dlm_ctxt *dlm; - struct dlm_protocol_version proto = user_locking_protocol; + struct ocfs2_cluster_connection *conn; mlog(0, "mkdir %.*s\n", domain->len, domain->name); /* verify that we have a proper domain */ - if (domain->len >= O2NM_MAX_NAME_LEN) { + if (domain->len >= GROUP_NAME_MAX) { status = -EINVAL; mlog(ML_ERROR, "invalid domain name for directory.\n"); goto bail; @@ -446,14 +506,14 @@ static int dlmfs_mkdir(struct inode * dir, ip = DLMFS_I(inode); - dlm = user_dlm_register_context(domain, &proto); - if (IS_ERR(dlm)) { - status = PTR_ERR(dlm); + conn = user_dlm_register(domain); + if (IS_ERR(conn)) { + status = PTR_ERR(conn); mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", status, domain->len, domain->name); goto bail; } - ip->ip_dlm = dlm; + ip->ip_conn = conn; inc_nlink(dir); d_instantiate(dentry, inode); @@ -549,6 +609,7 @@ static int dlmfs_fill_super(struct super_block * sb, static const struct file_operations dlmfs_file_operations = { .open = dlmfs_file_open, .release = dlmfs_file_release, + .poll = dlmfs_file_poll, .read = dlmfs_file_read, .write = dlmfs_file_write, }; @@ -576,6 +637,7 @@ static const struct super_operations dlmfs_ops = { static const struct inode_operations dlmfs_file_inode_operations = { .getattr = simple_getattr, + .setattr = dlmfs_file_setattr, }; static int dlmfs_get_sb(struct file_system_type *fs_type, @@ -620,6 +682,7 @@ static int __init init_dlmfs_fs(void) } cleanup_worker = 1; + user_dlm_set_locking_protocol(); status = register_filesystem(&dlmfs_fs_type); bail: if (status) { diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlmfs/dlmfsver.c index a733b3321f8..a733b3321f8 100644 --- a/fs/ocfs2/dlm/dlmfsver.c +++ b/fs/ocfs2/dlmfs/dlmfsver.c diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlmfs/dlmfsver.h index f35eadbed25..f35eadbed25 100644 --- a/fs/ocfs2/dlm/dlmfsver.h +++ b/fs/ocfs2/dlmfs/dlmfsver.h diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c index 4cb1d3dae25..0499e3fb7bd 100644 --- a/fs/ocfs2/dlm/userdlm.c +++ b/fs/ocfs2/dlmfs/userdlm.c @@ -34,18 +34,19 @@ #include <linux/types.h> #include <linux/crc32.h> - -#include "cluster/nodemanager.h" -#include "cluster/heartbeat.h" -#include "cluster/tcp.h" - -#include "dlmapi.h" - +#include "ocfs2_lockingver.h" +#include "stackglue.h" #include "userdlm.h" #define MLOG_MASK_PREFIX ML_DLMFS #include "cluster/masklog.h" + +static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb) +{ + return container_of(lksb, struct user_lock_res, l_lksb); +} + static inline int user_check_wait_flag(struct user_lock_res *lockres, int flag) { @@ -73,15 +74,15 @@ static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres) } /* I heart container_of... 
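Switching the AST callbacks from an opaque pointer to the lksb works because the lksb is embedded inside struct user_lock_res, so container_of() can recover the enclosing lockres from the member's address. The same arithmetic in a standalone model:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct lksb { int status; };

    struct lock_res {
        const char *name;
        struct lksb l_lksb;   /* embedded, like l_lksb in user_lock_res */
    };

    static struct lock_res *lksb_to_lock_res(struct lksb *lksb)
    {
        return container_of(lksb, struct lock_res, l_lksb);
    }

    int main(void)
    {
        struct lock_res res = { .name = "mylock" };

        /* a callback sees only &res.l_lksb, yet can find res */
        printf("recovered lockres: %s\n",
               lksb_to_lock_res(&res.l_lksb)->name);
        return 0;
    }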
*/ -static inline struct dlm_ctxt * -dlm_ctxt_from_user_lockres(struct user_lock_res *lockres) +static inline struct ocfs2_cluster_connection * +cluster_connection_from_user_lockres(struct user_lock_res *lockres) { struct dlmfs_inode_private *ip; ip = container_of(lockres, struct dlmfs_inode_private, ip_lockres); - return ip->ip_dlm; + return ip->ip_conn; } static struct inode * @@ -103,9 +104,9 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres) } #define user_log_dlm_error(_func, _stat, _lockres) do { \ - mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \ - "resource %.*s: %s\n", dlm_errname(_stat), _func, \ - _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \ + mlog(ML_ERROR, "Dlm error %d while calling %s on " \ + "resource %.*s\n", _stat, _func, \ + _lockres->l_namelen, _lockres->l_name); \ } while (0) /* WARNING: This function lives in a world where the only three lock @@ -113,34 +114,35 @@ static inline void user_recover_from_dlm_error(struct user_lock_res *lockres) * lock types are added. */ static inline int user_highest_compat_lock_level(int level) { - int new_level = LKM_EXMODE; + int new_level = DLM_LOCK_EX; - if (level == LKM_EXMODE) - new_level = LKM_NLMODE; - else if (level == LKM_PRMODE) - new_level = LKM_PRMODE; + if (level == DLM_LOCK_EX) + new_level = DLM_LOCK_NL; + else if (level == DLM_LOCK_PR) + new_level = DLM_LOCK_PR; return new_level; } -static void user_ast(void *opaque) +static void user_ast(struct ocfs2_dlm_lksb *lksb) { - struct user_lock_res *lockres = opaque; - struct dlm_lockstatus *lksb; + struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); + int status; - mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen, - lockres->l_name); + mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n", + lockres->l_namelen, lockres->l_name, lockres->l_level, + lockres->l_requested); spin_lock(&lockres->l_lock); - lksb = &(lockres->l_lksb); - if (lksb->status != DLM_NORMAL) { + status = ocfs2_dlm_lock_status(&lockres->l_lksb); + if (status) { mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n", - lksb->status, lockres->l_namelen, lockres->l_name); + status, lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); return; } - mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE, + mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV, "Lockres %.*s, requested ivmode. 
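user_highest_compat_lock_level() computes the highest mode a holder may keep while remaining compatible with a blocking request: a blocking EX forces a drop to NL, while a blocking PR lets the holder stay at PR. A runnable model of the same table, using the fs/dlm mode numbering (NL=0, PR=3, EX=5) that the converted code adopts:

    #include <assert.h>
    #include <stdio.h>

    enum { DLM_LOCK_NL = 0, DLM_LOCK_PR = 3, DLM_LOCK_EX = 5 };

    /* highest mode we may hold while a request at 'blocking' is queued */
    static int highest_compat_level(int blocking)
    {
        int new_level = DLM_LOCK_EX;    /* nothing blocking: keep EX */

        if (blocking == DLM_LOCK_EX)
            new_level = DLM_LOCK_NL;    /* EX is compatible with nothing */
        else if (blocking == DLM_LOCK_PR)
            new_level = DLM_LOCK_PR;    /* PR readers can share */

        return new_level;
    }

    int main(void)
    {
        assert(highest_compat_level(DLM_LOCK_EX) == DLM_LOCK_NL);
        assert(highest_compat_level(DLM_LOCK_PR) == DLM_LOCK_PR);
        printf("compat table ok\n");
        return 0;
    }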
flags 0x%x\n", lockres->l_namelen, lockres->l_name, lockres->l_flags); @@ -148,13 +150,13 @@ static void user_ast(void *opaque) if (lockres->l_requested < lockres->l_level) { if (lockres->l_requested <= user_highest_compat_lock_level(lockres->l_blocking)) { - lockres->l_blocking = LKM_NLMODE; + lockres->l_blocking = DLM_LOCK_NL; lockres->l_flags &= ~USER_LOCK_BLOCKED; } } lockres->l_level = lockres->l_requested; - lockres->l_requested = LKM_IVMODE; + lockres->l_requested = DLM_LOCK_IV; lockres->l_flags |= USER_LOCK_ATTACHED; lockres->l_flags &= ~USER_LOCK_BUSY; @@ -193,11 +195,11 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres) return; switch (lockres->l_blocking) { - case LKM_EXMODE: + case DLM_LOCK_EX: if (!lockres->l_ex_holders && !lockres->l_ro_holders) queue = 1; break; - case LKM_PRMODE: + case DLM_LOCK_PR: if (!lockres->l_ex_holders) queue = 1; break; @@ -209,12 +211,12 @@ static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres) __user_dlm_queue_lockres(lockres); } -static void user_bast(void *opaque, int level) +static void user_bast(struct ocfs2_dlm_lksb *lksb, int level) { - struct user_lock_res *lockres = opaque; + struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); - mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n", - lockres->l_namelen, lockres->l_name, level); + mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n", + lockres->l_namelen, lockres->l_name, level, lockres->l_level); spin_lock(&lockres->l_lock); lockres->l_flags |= USER_LOCK_BLOCKED; @@ -227,15 +229,15 @@ static void user_bast(void *opaque, int level) wake_up(&lockres->l_event); } -static void user_unlock_ast(void *opaque, enum dlm_status status) +static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status) { - struct user_lock_res *lockres = opaque; + struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); - mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen, - lockres->l_name); + mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n", + lockres->l_namelen, lockres->l_name, lockres->l_flags); - if (status != DLM_NORMAL && status != DLM_CANCELGRANT) - mlog(ML_ERROR, "Dlm returns status %d\n", status); + if (status) + mlog(ML_ERROR, "dlm returns status %d\n", status); spin_lock(&lockres->l_lock); /* The teardown flag gets set early during the unlock process, @@ -243,7 +245,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status) * for a concurrent cancel. */ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) { - lockres->l_level = LKM_IVMODE; + lockres->l_level = DLM_LOCK_IV; } else if (status == DLM_CANCELGRANT) { /* We tried to cancel a convert request, but it was * already granted. Don't clear the busy flag - the @@ -254,7 +256,7 @@ static void user_unlock_ast(void *opaque, enum dlm_status status) } else { BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); /* Cancel succeeded, we want to re-queue */ - lockres->l_requested = LKM_IVMODE; /* cancel an + lockres->l_requested = DLM_LOCK_IV; /* cancel an * upconvert * request. */ lockres->l_flags &= ~USER_LOCK_IN_CANCEL; @@ -271,6 +273,21 @@ out_noclear: wake_up(&lockres->l_event); } +/* + * This is the userdlmfs locking protocol version. + * + * See fs/ocfs2/dlmglue.c for more details on locking versions. 
+ */ +static struct ocfs2_locking_protocol user_dlm_lproto = { + .lp_max_version = { + .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, + .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, + }, + .lp_lock_ast = user_ast, + .lp_blocking_ast = user_bast, + .lp_unlock_ast = user_unlock_ast, +}; + static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) { struct inode *inode; @@ -283,10 +300,10 @@ static void user_dlm_unblock_lock(struct work_struct *work) int new_level, status; struct user_lock_res *lockres = container_of(work, struct user_lock_res, l_work); - struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); + struct ocfs2_cluster_connection *conn = + cluster_connection_from_user_lockres(lockres); - mlog(0, "processing lockres %.*s\n", lockres->l_namelen, - lockres->l_name); + mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name); spin_lock(&lockres->l_lock); @@ -304,17 +321,23 @@ static void user_dlm_unblock_lock(struct work_struct *work) * flag, and finally we might get another bast which re-queues * us before our ast for the downconvert is called. */ if (!(lockres->l_flags & USER_LOCK_BLOCKED)) { + mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n", + lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); goto drop_ref; } if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { + mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n", + lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); goto drop_ref; } if (lockres->l_flags & USER_LOCK_BUSY) { if (lockres->l_flags & USER_LOCK_IN_CANCEL) { + mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n", + lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); goto drop_ref; } @@ -322,32 +345,31 @@ static void user_dlm_unblock_lock(struct work_struct *work) lockres->l_flags |= USER_LOCK_IN_CANCEL; spin_unlock(&lockres->l_lock); - status = dlmunlock(dlm, - &lockres->l_lksb, - LKM_CANCEL, - user_unlock_ast, - lockres); - if (status != DLM_NORMAL) - user_log_dlm_error("dlmunlock", status, lockres); + status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, + DLM_LKF_CANCEL); + if (status) + user_log_dlm_error("ocfs2_dlm_unlock", status, lockres); goto drop_ref; } /* If there are still incompat holders, we can exit safely * without worrying about re-queueing this lock as that will * happen on the last call to user_cluster_unlock. 
*/ - if ((lockres->l_blocking == LKM_EXMODE) + if ((lockres->l_blocking == DLM_LOCK_EX) && (lockres->l_ex_holders || lockres->l_ro_holders)) { spin_unlock(&lockres->l_lock); - mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n", - lockres->l_ro_holders, lockres->l_ex_holders); + mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n", + lockres->l_namelen, lockres->l_name, + lockres->l_ex_holders, lockres->l_ro_holders); goto drop_ref; } - if ((lockres->l_blocking == LKM_PRMODE) + if ((lockres->l_blocking == DLM_LOCK_PR) && lockres->l_ex_holders) { spin_unlock(&lockres->l_lock); - mlog(0, "can't downconvert for pr: ex = %u\n", - lockres->l_ex_holders); + mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n", + lockres->l_namelen, lockres->l_name, + lockres->l_ex_holders); goto drop_ref; } @@ -355,22 +377,17 @@ static void user_dlm_unblock_lock(struct work_struct *work) new_level = user_highest_compat_lock_level(lockres->l_blocking); lockres->l_requested = new_level; lockres->l_flags |= USER_LOCK_BUSY; - mlog(0, "Downconvert lock from %d to %d\n", - lockres->l_level, new_level); + mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n", + lockres->l_namelen, lockres->l_name, lockres->l_level, new_level); spin_unlock(&lockres->l_lock); /* need lock downconvert request now... */ - status = dlmlock(dlm, - new_level, - &lockres->l_lksb, - LKM_CONVERT|LKM_VALBLK, - lockres->l_name, - lockres->l_namelen, - user_ast, - lockres, - user_bast); - if (status != DLM_NORMAL) { - user_log_dlm_error("dlmlock", status, lockres); + status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb, + DLM_LKF_CONVERT|DLM_LKF_VALBLK, + lockres->l_name, + lockres->l_namelen); + if (status) { + user_log_dlm_error("ocfs2_dlm_lock", status, lockres); user_recover_from_dlm_error(lockres); } @@ -382,10 +399,10 @@ static inline void user_dlm_inc_holders(struct user_lock_res *lockres, int level) { switch(level) { - case LKM_EXMODE: + case DLM_LOCK_EX: lockres->l_ex_holders++; break; - case LKM_PRMODE: + case DLM_LOCK_PR: lockres->l_ro_holders++; break; default: @@ -410,20 +427,19 @@ int user_dlm_cluster_lock(struct user_lock_res *lockres, int lkm_flags) { int status, local_flags; - struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); + struct ocfs2_cluster_connection *conn = + cluster_connection_from_user_lockres(lockres); - if (level != LKM_EXMODE && - level != LKM_PRMODE) { + if (level != DLM_LOCK_EX && + level != DLM_LOCK_PR) { mlog(ML_ERROR, "lockres %.*s: invalid request!\n", lockres->l_namelen, lockres->l_name); status = -EINVAL; goto bail; } - mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n", - lockres->l_namelen, lockres->l_name, - (level == LKM_EXMODE) ? 
"LKM_EXMODE" : "LKM_PRMODE", - lkm_flags); + mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n", + lockres->l_namelen, lockres->l_name, level, lkm_flags); again: if (signal_pending(current)) { @@ -457,35 +473,26 @@ again: } if (level > lockres->l_level) { - local_flags = lkm_flags | LKM_VALBLK; - if (lockres->l_level != LKM_IVMODE) - local_flags |= LKM_CONVERT; + local_flags = lkm_flags | DLM_LKF_VALBLK; + if (lockres->l_level != DLM_LOCK_IV) + local_flags |= DLM_LKF_CONVERT; lockres->l_requested = level; lockres->l_flags |= USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); - BUG_ON(level == LKM_IVMODE); - BUG_ON(level == LKM_NLMODE); + BUG_ON(level == DLM_LOCK_IV); + BUG_ON(level == DLM_LOCK_NL); /* call dlm_lock to upgrade lock now */ - status = dlmlock(dlm, - level, - &lockres->l_lksb, - local_flags, - lockres->l_name, - lockres->l_namelen, - user_ast, - lockres, - user_bast); - if (status != DLM_NORMAL) { - if ((lkm_flags & LKM_NOQUEUE) && - (status == DLM_NOTQUEUED)) - status = -EAGAIN; - else { - user_log_dlm_error("dlmlock", status, lockres); - status = -EINVAL; - } + status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb, + local_flags, lockres->l_name, + lockres->l_namelen); + if (status) { + if ((lkm_flags & DLM_LKF_NOQUEUE) && + (status != -EAGAIN)) + user_log_dlm_error("ocfs2_dlm_lock", + status, lockres); user_recover_from_dlm_error(lockres); goto bail; } @@ -506,11 +513,11 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres, int level) { switch(level) { - case LKM_EXMODE: + case DLM_LOCK_EX: BUG_ON(!lockres->l_ex_holders); lockres->l_ex_holders--; break; - case LKM_PRMODE: + case DLM_LOCK_PR: BUG_ON(!lockres->l_ro_holders); lockres->l_ro_holders--; break; @@ -522,8 +529,8 @@ static inline void user_dlm_dec_holders(struct user_lock_res *lockres, void user_dlm_cluster_unlock(struct user_lock_res *lockres, int level) { - if (level != LKM_EXMODE && - level != LKM_PRMODE) { + if (level != DLM_LOCK_EX && + level != DLM_LOCK_PR) { mlog(ML_ERROR, "lockres %.*s: invalid request!\n", lockres->l_namelen, lockres->l_name); return; @@ -540,33 +547,40 @@ void user_dlm_write_lvb(struct inode *inode, unsigned int len) { struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; - char *lvb = lockres->l_lksb.lvb; + char *lvb; BUG_ON(len > DLM_LVB_LEN); spin_lock(&lockres->l_lock); - BUG_ON(lockres->l_level < LKM_EXMODE); + BUG_ON(lockres->l_level < DLM_LOCK_EX); + lvb = ocfs2_dlm_lvb(&lockres->l_lksb); memcpy(lvb, val, len); spin_unlock(&lockres->l_lock); } -void user_dlm_read_lvb(struct inode *inode, - char *val, - unsigned int len) +ssize_t user_dlm_read_lvb(struct inode *inode, + char *val, + unsigned int len) { struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; - char *lvb = lockres->l_lksb.lvb; + char *lvb; + ssize_t ret = len; BUG_ON(len > DLM_LVB_LEN); spin_lock(&lockres->l_lock); - BUG_ON(lockres->l_level < LKM_PRMODE); - memcpy(val, lvb, len); + BUG_ON(lockres->l_level < DLM_LOCK_PR); + if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) { + lvb = ocfs2_dlm_lvb(&lockres->l_lksb); + memcpy(val, lvb, len); + } else + ret = 0; spin_unlock(&lockres->l_lock); + return ret; } void user_dlm_lock_res_init(struct user_lock_res *lockres, @@ -576,9 +590,9 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres, spin_lock_init(&lockres->l_lock); init_waitqueue_head(&lockres->l_event); - lockres->l_level = LKM_IVMODE; - lockres->l_requested = LKM_IVMODE; - lockres->l_blocking = LKM_IVMODE; + lockres->l_level = DLM_LOCK_IV; + lockres->l_requested = DLM_LOCK_IV; + 
lockres->l_blocking = DLM_LOCK_IV; /* should have been checked before getting here. */ BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN); @@ -592,9 +606,10 @@ void user_dlm_lock_res_init(struct user_lock_res *lockres, int user_dlm_destroy_lock(struct user_lock_res *lockres) { int status = -EBUSY; - struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); + struct ocfs2_cluster_connection *conn = + cluster_connection_from_user_lockres(lockres); - mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name); + mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name); spin_lock(&lockres->l_lock); if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { @@ -627,14 +642,9 @@ int user_dlm_destroy_lock(struct user_lock_res *lockres) lockres->l_flags |= USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); - status = dlmunlock(dlm, - &lockres->l_lksb, - LKM_VALBLK, - user_unlock_ast, - lockres); - if (status != DLM_NORMAL) { - user_log_dlm_error("dlmunlock", status, lockres); - status = -EINVAL; + status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK); + if (status) { + user_log_dlm_error("ocfs2_dlm_unlock", status, lockres); goto bail; } @@ -645,32 +655,34 @@ bail: return status; } -struct dlm_ctxt *user_dlm_register_context(struct qstr *name, - struct dlm_protocol_version *proto) +static void user_dlm_recovery_handler_noop(int node_num, + void *recovery_data) { - struct dlm_ctxt *dlm; - u32 dlm_key; - char *domain; - - domain = kmalloc(name->len + 1, GFP_NOFS); - if (!domain) { - mlog_errno(-ENOMEM); - return ERR_PTR(-ENOMEM); - } + /* We ignore recovery events */ + return; +} - dlm_key = crc32_le(0, name->name, name->len); +void user_dlm_set_locking_protocol(void) +{ + ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version); +} - snprintf(domain, name->len + 1, "%.*s", name->len, name->name); +struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name) +{ + int rc; + struct ocfs2_cluster_connection *conn; - dlm = dlm_register_domain(domain, dlm_key, proto); - if (IS_ERR(dlm)) - mlog_errno(PTR_ERR(dlm)); + rc = ocfs2_cluster_connect_agnostic(name->name, name->len, + &user_dlm_lproto, + user_dlm_recovery_handler_noop, + NULL, &conn); + if (rc) + mlog_errno(rc); - kfree(domain); - return dlm; + return rc ? 
ERR_PTR(rc) : conn; } -void user_dlm_unregister_context(struct dlm_ctxt *dlm) +void user_dlm_unregister(struct ocfs2_cluster_connection *conn) { - dlm_unregister_domain(dlm); + ocfs2_cluster_disconnect(conn, 0); } diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h index 0c3cc03c61f..3b42d79531d 100644 --- a/fs/ocfs2/dlm/userdlm.h +++ b/fs/ocfs2/dlmfs/userdlm.h @@ -57,7 +57,7 @@ struct user_lock_res { int l_level; unsigned int l_ro_holders; unsigned int l_ex_holders; - struct dlm_lockstatus l_lksb; + struct ocfs2_dlm_lksb l_lksb; int l_requested; int l_blocking; @@ -80,15 +80,15 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres, void user_dlm_write_lvb(struct inode *inode, const char *val, unsigned int len); -void user_dlm_read_lvb(struct inode *inode, - char *val, - unsigned int len); -struct dlm_ctxt *user_dlm_register_context(struct qstr *name, - struct dlm_protocol_version *proto); -void user_dlm_unregister_context(struct dlm_ctxt *dlm); +ssize_t user_dlm_read_lvb(struct inode *inode, + char *val, + unsigned int len); +struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name); +void user_dlm_unregister(struct ocfs2_cluster_connection *conn); +void user_dlm_set_locking_protocol(void); struct dlmfs_inode_private { - struct dlm_ctxt *ip_dlm; + struct ocfs2_cluster_connection *ip_conn; struct user_lock_res ip_lockres; /* unused for directories. */ struct inode *ip_parent; diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index c5e4a49e3a1..8298608d416 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -297,6 +297,11 @@ static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) lockres->l_type == OCFS2_LOCK_TYPE_OPEN; } +static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb) +{ + return container_of(lksb, struct ocfs2_lock_res, l_lksb); +} + static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres) { BUG_ON(!ocfs2_is_inode_lock(lockres)); @@ -875,6 +880,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); lockres->l_level = lockres->l_requested; + + /* + * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing + * the OCFS2_LOCK_BUSY flag to prevent the dc thread from + * downconverting the lock before the upconvert has fully completed. 
+ */ + lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); + lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); mlog_exit_void(); @@ -907,8 +920,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, assert_spin_locked(&lockres->l_lock); - lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); - if (level > lockres->l_blocking) { /* only schedule a downconvert if we haven't already scheduled * one that goes low enough to satisfy the level we're @@ -921,6 +932,13 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, lockres->l_blocking = level; } + mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n", + lockres->l_name, level, lockres->l_level, lockres->l_blocking, + needs_downconvert); + + if (needs_downconvert) + lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); + mlog_exit(needs_downconvert); return needs_downconvert; } @@ -1031,18 +1049,17 @@ static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres) return lockres->l_pending_gen; } - -static void ocfs2_blocking_ast(void *opaque, int level) +static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level) { - struct ocfs2_lock_res *lockres = opaque; + struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); int needs_downconvert; unsigned long flags; BUG_ON(level <= DLM_LOCK_NL); - mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n", - lockres->l_name, level, lockres->l_level, + mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, " + "type %s\n", lockres->l_name, level, lockres->l_level, ocfs2_lock_type_string(lockres->l_type)); /* @@ -1063,9 +1080,9 @@ static void ocfs2_blocking_ast(void *opaque, int level) ocfs2_wake_downconvert_thread(osb); } -static void ocfs2_locking_ast(void *opaque) +static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb) { - struct ocfs2_lock_res *lockres = opaque; + struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); unsigned long flags; int status; @@ -1086,6 +1103,10 @@ static void ocfs2_locking_ast(void *opaque) return; } + mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, " + "level %d => %d\n", lockres->l_name, lockres->l_action, + lockres->l_unlock_action, lockres->l_level, lockres->l_requested); + switch(lockres->l_action) { case OCFS2_AST_ATTACH: ocfs2_generic_handle_attach_action(lockres); @@ -1098,8 +1119,8 @@ static void ocfs2_locking_ast(void *opaque) ocfs2_generic_handle_downconvert_action(lockres); break; default: - mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u " - "lockres flags = 0x%lx, unlock action: %u\n", + mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, " + "flags 0x%lx, unlock: %u\n", lockres->l_name, lockres->l_action, lockres->l_flags, lockres->l_unlock_action); BUG(); @@ -1125,6 +1146,88 @@ out: spin_unlock_irqrestore(&lockres->l_lock, flags); } +static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error) +{ + struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb); + unsigned long flags; + + mlog_entry_void(); + + mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n", + lockres->l_name, lockres->l_unlock_action); + + spin_lock_irqsave(&lockres->l_lock, flags); + if (error) { + mlog(ML_ERROR, "Dlm passes error %d for lock %s, " + "unlock_action %d\n", error, lockres->l_name, + lockres->l_unlock_action); + spin_unlock_irqrestore(&lockres->l_lock, flags); + mlog_exit_void(); + return; + } + + 
switch(lockres->l_unlock_action) { + case OCFS2_UNLOCK_CANCEL_CONVERT: + mlog(0, "Cancel convert success for %s\n", lockres->l_name); + lockres->l_action = OCFS2_AST_INVALID; + /* Downconvert thread may have requeued this lock, we + * need to wake it. */ + if (lockres->l_flags & OCFS2_LOCK_BLOCKED) + ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres)); + break; + case OCFS2_UNLOCK_DROP_LOCK: + lockres->l_level = DLM_LOCK_IV; + break; + default: + BUG(); + } + + lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); + lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; + wake_up(&lockres->l_event); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + mlog_exit_void(); +} + +/* + * This is the filesystem locking protocol. It provides the lock handling + * hooks for the underlying DLM. It has a maximum version number. + * The version number allows interoperability with systems running at + * the same major number and an equal or smaller minor number. + * + * Whenever the filesystem does new things with locks (adds or removes a + * lock, orders them differently, does different things underneath a lock), + * the version must be changed. The protocol is negotiated when joining + * the dlm domain. A node may join the domain if its major version is + * identical to all other nodes and its minor version is greater than + * or equal to all other nodes. When its minor version is greater than + * the other nodes, it will run at the minor version specified by the + * other nodes. + * + * If a locking change is made that will not be compatible with older + * versions, the major number must be increased and the minor version set + * to zero. If a change merely adds a behavior that can be disabled when + * speaking to older versions, the minor version must be increased. If a + * change adds a fully backwards compatible change (eg, LVB changes that + * are just ignored by older versions), the version does not need to be + * updated. + */ +static struct ocfs2_locking_protocol lproto = { + .lp_max_version = { + .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, + .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, + }, + .lp_lock_ast = ocfs2_locking_ast, + .lp_blocking_ast = ocfs2_blocking_ast, + .lp_unlock_ast = ocfs2_unlock_ast, +}; + +void ocfs2_set_locking_protocol(void) +{ + ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version); +} + static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, int convert) { @@ -1133,6 +1236,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, mlog_entry_void(); spin_lock_irqsave(&lockres->l_lock, flags); lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); + lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); if (convert) lockres->l_action = OCFS2_AST_INVALID; else @@ -1179,8 +1283,7 @@ static int ocfs2_lock_create(struct ocfs2_super *osb, &lockres->l_lksb, dlm_flags, lockres->l_name, - OCFS2_LOCK_ID_MAX_LEN - 1, - lockres); + OCFS2_LOCK_ID_MAX_LEN - 1); lockres_clear_pending(lockres, gen, osb); if (ret) { ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); @@ -1323,13 +1426,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb, again: wait = 0; + spin_lock_irqsave(&lockres->l_lock, flags); + if (catch_signals && signal_pending(current)) { ret = -ERESTARTSYS; - goto out; + goto unlock; } - spin_lock_irqsave(&lockres->l_lock, flags); - mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, "Cluster lock called on freeing lockres %s! 
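The protocol comment relocated above spells out the join rule: a node needs an identical major version and a minor version at least as new as the domain's, and when its minor is newer it runs at the domain's older minor. A compact model of that check, assuming only the two-field version struct described in the comment:

    #include <stdio.h>

    struct proto_version { unsigned char major, minor; };

    /*
     * Returns 1 and lowers 'mine' to the running minor when the node may
     * join; returns 0 on a major mismatch or an older minor.
     */
    static int may_join(struct proto_version *mine,
                        const struct proto_version *running)
    {
        if (mine->major != running->major)
            return 0;
        if (mine->minor < running->minor)
            return 0;
        mine->minor = running->minor;   /* speak the domain's older minor */
        return 1;
    }

    int main(void)
    {
        struct proto_version mine = { 1, 2 }, running = { 1, 1 };

        if (may_join(&mine, &running))
            printf("joined, running at %u.%u\n", mine.major, mine.minor);
        return 0;
    }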
flags " "0x%lx\n", lockres->l_name, lockres->l_flags); @@ -1346,6 +1449,25 @@ again: goto unlock; } + if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) { + /* + * We've upconverted. If the lock now has a level we can + * work with, we take it. If, however, the lock is not at the + * required level, we go thru the full cycle. One way this could + * happen is if a process requesting an upconvert to PR is + * closely followed by another requesting upconvert to an EX. + * If the process requesting EX lands here, we want it to + * continue attempting to upconvert and let the process + * requesting PR take the lock. + * If multiple processes request upconvert to PR, the first one + * here will take the lock. The others will have to go thru the + * OCFS2_LOCK_BLOCKED check to ensure that there is no pending + * downconvert request. + */ + if (level <= lockres->l_level) + goto update_holders; + } + if (lockres->l_flags & OCFS2_LOCK_BLOCKED && !ocfs2_may_continue_on_blocked_lock(lockres, level)) { /* is the lock is currently blocked on behalf of @@ -1383,7 +1505,7 @@ again: BUG_ON(level == DLM_LOCK_IV); BUG_ON(level == DLM_LOCK_NL); - mlog(0, "lock %s, convert from %d to level = %d\n", + mlog(ML_BASTS, "lockres %s, convert from %d to %d\n", lockres->l_name, lockres->l_level, level); /* call dlm_lock to upgrade lock now */ @@ -1392,8 +1514,7 @@ again: &lockres->l_lksb, lkm_flags, lockres->l_name, - OCFS2_LOCK_ID_MAX_LEN - 1, - lockres); + OCFS2_LOCK_ID_MAX_LEN - 1); lockres_clear_pending(lockres, gen, osb); if (ret) { if (!(lkm_flags & DLM_LKF_NOQUEUE) || @@ -1416,11 +1537,14 @@ again: goto again; } +update_holders: /* Ok, if we get here then we're good to go. */ ocfs2_inc_holders(lockres, level); ret = 0; unlock: + lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING); + spin_unlock_irqrestore(&lockres->l_lock, flags); out: /* @@ -1827,8 +1951,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock) spin_unlock_irqrestore(&lockres->l_lock, flags); ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags, - lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1, - lockres); + lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1); if (ret) { if (!trylock || (ret != -EAGAIN)) { ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); @@ -2957,7 +3080,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb) status = ocfs2_cluster_connect(osb->osb_cluster_stack, osb->uuid_str, strlen(osb->uuid_str), - ocfs2_do_node_down, osb, + &lproto, ocfs2_do_node_down, osb, &conn); if (status) { mlog_errno(status); @@ -3024,50 +3147,6 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb, mlog_exit_void(); } -static void ocfs2_unlock_ast(void *opaque, int error) -{ - struct ocfs2_lock_res *lockres = opaque; - unsigned long flags; - - mlog_entry_void(); - - mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name, - lockres->l_unlock_action); - - spin_lock_irqsave(&lockres->l_lock, flags); - if (error) { - mlog(ML_ERROR, "Dlm passes error %d for lock %s, " - "unlock_action %d\n", error, lockres->l_name, - lockres->l_unlock_action); - spin_unlock_irqrestore(&lockres->l_lock, flags); - mlog_exit_void(); - return; - } - - switch(lockres->l_unlock_action) { - case OCFS2_UNLOCK_CANCEL_CONVERT: - mlog(0, "Cancel convert success for %s\n", lockres->l_name); - lockres->l_action = OCFS2_AST_INVALID; - /* Downconvert thread may have requeued this lock, we - * need to wake it. 
*/ - if (lockres->l_flags & OCFS2_LOCK_BLOCKED) - ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres)); - break; - case OCFS2_UNLOCK_DROP_LOCK: - lockres->l_level = DLM_LOCK_IV; - break; - default: - BUG(); - } - - lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); - lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; - wake_up(&lockres->l_event); - spin_unlock_irqrestore(&lockres->l_lock, flags); - - mlog_exit_void(); -} - static int ocfs2_drop_lock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres) { @@ -3135,8 +3214,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb, mlog(0, "lock %s\n", lockres->l_name); - ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags, - lockres); + ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags); if (ret) { ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres); mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags); @@ -3155,7 +3233,7 @@ out: /* Mark the lockres as being dropped. It will no longer be * queued if blocking, but we still may have to wait on it * being dequeued from the downconvert thread before we can consider - * it safe to drop. + * it safe to drop. * * You can *not* attempt to call cluster_lock on this lockres anymore. */ void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) @@ -3244,13 +3322,20 @@ static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres, BUG_ON(lockres->l_blocking <= DLM_LOCK_NL); if (lockres->l_level <= new_level) { - mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n", - lockres->l_level, new_level); + mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, " + "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, " + "block %d, pgen %d\n", lockres->l_name, lockres->l_level, + new_level, list_empty(&lockres->l_blocked_list), + list_empty(&lockres->l_mask_waiters), lockres->l_type, + lockres->l_flags, lockres->l_ro_holders, + lockres->l_ex_holders, lockres->l_action, + lockres->l_unlock_action, lockres->l_requested, + lockres->l_blocking, lockres->l_pending_gen); BUG(); } - mlog(0, "lock %s, new_level = %d, l_blocking = %d\n", - lockres->l_name, new_level, lockres->l_blocking); + mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n", + lockres->l_name, lockres->l_level, new_level, lockres->l_blocking); lockres->l_action = OCFS2_AST_DOWNCONVERT; lockres->l_requested = new_level; @@ -3269,6 +3354,9 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, mlog_entry_void(); + mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, + lockres->l_level, new_level); + if (lvb) dlm_flags |= DLM_LKF_VALBLK; @@ -3277,8 +3365,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, &lockres->l_lksb, dlm_flags, lockres->l_name, - OCFS2_LOCK_ID_MAX_LEN - 1, - lockres); + OCFS2_LOCK_ID_MAX_LEN - 1); lockres_clear_pending(lockres, generation, osb); if (ret) { ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); @@ -3299,14 +3386,12 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb, assert_spin_locked(&lockres->l_lock); mlog_entry_void(); - mlog(0, "lock %s\n", lockres->l_name); if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { /* If we're already trying to cancel a lock conversion * then just drop the spinlock and allow the caller to * requeue this lock. 
*/ - - mlog(0, "Lockres %s, skip convert\n", lockres->l_name); + mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name); return 0; } @@ -3321,6 +3406,8 @@ static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb, "lock %s, invalid flags: 0x%lx\n", lockres->l_name, lockres->l_flags); + mlog(ML_BASTS, "lockres %s\n", lockres->l_name); + return 1; } @@ -3330,16 +3417,15 @@ static int ocfs2_cancel_convert(struct ocfs2_super *osb, int ret; mlog_entry_void(); - mlog(0, "lock %s\n", lockres->l_name); ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, - DLM_LKF_CANCEL, lockres); + DLM_LKF_CANCEL); if (ret) { ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres); ocfs2_recover_from_dlm_error(lockres, 0); } - mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name); + mlog(ML_BASTS, "lockres %s\n", lockres->l_name); mlog_exit(ret); return ret; @@ -3352,6 +3438,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, unsigned long flags; int blocking; int new_level; + int level; int ret = 0; int set_lvb = 0; unsigned int gen; @@ -3360,9 +3447,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb, spin_lock_irqsave(&lockres->l_lock, flags); - BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); - recheck: + /* + * Is it still blocking? If not, we have no more work to do. + */ + if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) { + BUG_ON(lockres->l_blocking != DLM_LOCK_NL); + spin_unlock_irqrestore(&lockres->l_lock, flags); + ret = 0; + goto leave; + } + if (lockres->l_flags & OCFS2_LOCK_BUSY) { /* XXX * This is a *big* race. The OCFS2_LOCK_PENDING flag @@ -3387,8 +3482,11 @@ recheck: * at the same time they set OCFS2_DLM_BUSY. They must * clear OCFS2_DLM_PENDING after dlm_lock() returns. */ - if (lockres->l_flags & OCFS2_LOCK_PENDING) + if (lockres->l_flags & OCFS2_LOCK_PENDING) { + mlog(ML_BASTS, "lockres %s, ReQ: Pending\n", + lockres->l_name); goto leave_requeue; + } ctl->requeue = 1; ret = ocfs2_prepare_cancel_convert(osb, lockres); @@ -3401,31 +3499,70 @@ recheck: goto leave; } + /* + * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is + * set when the ast is received for an upconvert just before the + * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast + * on the heels of the ast, we want to delay the downconvert just + * enough to allow the up requestor to do its task. Because this + * lock is in the blocked queue, the lock will be downconverted + * as soon as the requestor is done with the lock. + */ + if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) + goto leave_requeue; + + /* + * How can we block and yet be at NL? We were trying to upconvert + * from NL and got canceled. The code comes back here, and now + * we notice and clear BLOCKING. + */ + if (lockres->l_level == DLM_LOCK_NL) { + BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders); + mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name); + lockres->l_blocking = DLM_LOCK_NL; + lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED); + spin_unlock_irqrestore(&lockres->l_lock, flags); + goto leave; + } + /* if we're blocking an exclusive and we have *any* holders, * then requeue. 
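The livelock guard in the hunk above works purely on flags: the AST handler raises UPCONVERT_FINISHING just before dropping BUSY, and the downconvert thread requeues instead of acting while either flag is up, giving the upconvert requestor one chance to use the lock before it is taken back down. A sketch of the deferral decision, assuming single-bit flag values:

    #include <stdio.h>

    #define LOCK_BUSY                 0x01
    #define LOCK_BLOCKED              0x02
    #define LOCK_UPCONVERT_FINISHING  0x04

    /* 1 = requeue for later, 0 = safe to downconvert now */
    static int must_requeue(unsigned long flags)
    {
        if (flags & LOCK_BUSY)
            return 1;   /* a conversion is already in flight */
        if (flags & LOCK_UPCONVERT_FINISHING)
            return 1;   /* let the upconvert requestor run first */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", must_requeue(LOCK_BLOCKED | LOCK_UPCONVERT_FINISHING));
        printf("%d\n", must_requeue(LOCK_BLOCKED));
        return 0;
    }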
*/ if ((lockres->l_blocking == DLM_LOCK_EX) - && (lockres->l_ex_holders || lockres->l_ro_holders)) + && (lockres->l_ex_holders || lockres->l_ro_holders)) { + mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n", + lockres->l_name, lockres->l_ex_holders, + lockres->l_ro_holders); goto leave_requeue; + } /* If it's a PR we're blocking, then only * requeue if we've got any EX holders */ if (lockres->l_blocking == DLM_LOCK_PR && - lockres->l_ex_holders) + lockres->l_ex_holders) { + mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n", + lockres->l_name, lockres->l_ex_holders); goto leave_requeue; + } /* * Can we get a lock in this state if the holder counts are * zero? The meta data unblock code used to check this. */ if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) - && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) + && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) { + mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n", + lockres->l_name); goto leave_requeue; + } new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking); if (lockres->l_ops->check_downconvert - && !lockres->l_ops->check_downconvert(lockres, new_level)) + && !lockres->l_ops->check_downconvert(lockres, new_level)) { + mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n", + lockres->l_name); goto leave_requeue; + } /* If we get here, then we know that there are no more * incompatible holders (and anyone asking for an incompatible @@ -3438,17 +3575,24 @@ recheck: * may sleep, so we save off a copy of what we're blocking as * it may change while we're not holding the spin lock. */ blocking = lockres->l_blocking; + level = lockres->l_level; spin_unlock_irqrestore(&lockres->l_lock, flags); ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); - if (ctl->unblock_action == UNBLOCK_STOP_POST) + if (ctl->unblock_action == UNBLOCK_STOP_POST) { + mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n", + lockres->l_name); goto leave; + } spin_lock_irqsave(&lockres->l_lock, flags); - if (blocking != lockres->l_blocking) { + if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) { /* If this changed underneath us, then we can't drop * it just yet. */ + mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, " + "Recheck\n", lockres->l_name, blocking, + lockres->l_blocking, level, lockres->l_level); goto recheck; } @@ -3843,45 +3987,6 @@ void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex) ocfs2_cluster_unlock(osb, lockres, level); } -/* - * This is the filesystem locking protocol. It provides the lock handling - * hooks for the underlying DLM. It has a maximum version number. - * The version number allows interoperability with systems running at - * the same major number and an equal or smaller minor number. - * - * Whenever the filesystem does new things with locks (adds or removes a - * lock, orders them differently, does different things underneath a lock), - * the version must be changed. The protocol is negotiated when joining - * the dlm domain. A node may join the domain if its major version is - * identical to all other nodes and its minor version is greater than - * or equal to all other nodes. When its minor version is greater than - * the other nodes, it will run at the minor version specified by the - * other nodes. - * - * If a locking change is made that will not be compatible with older - * versions, the major number must be increased and the minor version set - * to zero. 
If a change merely adds a behavior that can be disabled when - * speaking to older versions, the minor version must be increased. If a - * change adds a fully backwards compatible change (eg, LVB changes that - * are just ignored by older versions), the version does not need to be - * updated. - */ -static struct ocfs2_locking_protocol lproto = { - .lp_max_version = { - .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, - .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, - }, - .lp_lock_ast = ocfs2_locking_ast, - .lp_blocking_ast = ocfs2_blocking_ast, - .lp_unlock_ast = ocfs2_unlock_ast, -}; - -void ocfs2_set_locking_protocol(void) -{ - ocfs2_stack_glue_set_locking_protocol(&lproto); -} - - static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, struct ocfs2_lock_res *lockres) { @@ -3898,7 +4003,7 @@ static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, BUG_ON(!lockres); BUG_ON(!lockres->l_ops); - mlog(0, "lockres %s blocked.\n", lockres->l_name); + mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name); /* Detect whether a lock has been marked as going away while * the downconvert thread was processing other things. A lock can @@ -3921,7 +4026,7 @@ unqueue: } else ocfs2_schedule_blocked_lock(osb, lockres); - mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name, + mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name, ctl.requeue ? "yes" : "no"); spin_unlock_irqrestore(&lockres->l_lock, flags); @@ -3943,7 +4048,7 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, /* Do not schedule a lock for downconvert when it's on * the way to destruction - any nodes wanting access * to the resource will get it soon. */ - mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n", + mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n", lockres->l_name, lockres->l_flags); return; } diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 15713cbb865..19ad145d2af 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", (unsigned long long)blkno, generation); } - + *max_len = len; bail: diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index d35a27f4523..5328529e7fd 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, emi->ei_clusters += ins->ei_clusters; return 1; } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && - (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && + (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos && ins->ei_flags == emi->ei_flags) { emi->ei_phys = ins->ei_phys; emi->ei_cpos = ins->ei_cpos; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 06ccf6a86d3..5b52547d629 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode, int ret; offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ - /* ugh. in prepare/commit_write, if from==to==start of block, we + /* ugh. in prepare/commit_write, if from==to==start of block, we ** skip the prepare. 
make sure we never send an offset for the start ** of a block */ @@ -993,10 +993,9 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) } if (size_change && attr->ia_size != i_size_read(inode)) { - if (attr->ia_size > sb->s_maxbytes) { - status = -EFBIG; + status = inode_newsize_ok(inode, attr->ia_size); + if (status) goto bail_unlock; - } if (i_size_read(inode) > attr->ia_size) { if (ocfs2_should_order_data(inode)) { @@ -1779,7 +1778,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, struct inode *inode = dentry->d_inode; loff_t saved_pos, end; - /* + /* * We start with a read level meta lock and only jump to an ex * if we need to make modifications here. */ @@ -1836,6 +1835,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, &meta_level); if (has_refcount) *has_refcount = 1; + if (direct_io) + *direct_io = 0; } if (ret < 0) { @@ -1859,10 +1860,6 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, break; } - if (has_refcount && *has_refcount == 1) { - *direct_io = 0; - break; - } /* * Allowing concurrent direct writes means * i_size changes wouldn't be synchronized, so @@ -2013,8 +2010,8 @@ out_dio: /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); - if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) || - (file->f_flags & O_DIRECT && has_refcount)) { + if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || + ((file->f_flags & O_DIRECT) && has_refcount)) { ret = filemap_fdatawrite_range(file->f_mapping, pos, pos + count - 1); if (ret < 0) @@ -2033,7 +2030,7 @@ out_dio: pos + count - 1); } - /* + /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that * it can unlock our rw lock. (it's the clustered equivalent of @@ -2043,7 +2040,7 @@ out_dio: * async dio is going to do it in the future or an end_io after an * error has already done it. */ - if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { + if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { rw_level = -1; have_alloc_sem = 0; } @@ -2198,7 +2195,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, goto bail; } - /* + /* * buffered reads protect themselves in ->readpage(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. */ @@ -2220,10 +2217,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, * We're fine letting folks race truncates and extending * writes with read across the cluster, just like they can * locally. Hence no rw_lock during read. - * + * * Take and drop the meta data lock to update inode fields * like i_size. This allows the checks down below - * generic_file_aio_read() a chance of actually working. + * generic_file_aio_read() a chance of actually working. 
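The ocfs2_setattr() hunk above trades an open-coded -EFBIG test for the VFS helper. A sketch of the resulting pattern, assuming the stock inode_newsize_ok() semantics of that era: it returns 0 or -EFBIG, checking both sb->s_maxbytes and the caller's RLIMIT_FSIZE (raising SIGXFSZ in the rlimit case), so the filesystem stops duplicating either check:

        if (size_change && attr->ia_size != i_size_read(inode)) {
                status = inode_newsize_ok(inode, attr->ia_size);
                if (status)     /* -EFBIG; SIGXFSZ already sent if the
                                 * process rlimit was the limit hit */
                        goto bail_unlock;
                /* shrink or extend as before */
        }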
*/ ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); if (ret < 0) { @@ -2248,7 +2245,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, bail: if (have_alloc_sem) up_read(&inode->i_alloc_sem); - if (rw_level != -1) + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); mlog_exit(ret); diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 0297fb8982b..88459bdd1ff 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { status = ocfs2_try_open_lock(inode, 0); if (status) { - make_bad_inode(inode); + make_bad_inode(inode); return status; } } @@ -684,7 +684,7 @@ bail: return status; } -/* +/* * Serialize with orphan dir recovery. If the process doing * recovery on this orphan dir does an iget() with the dir * i_mutex held, we'll deadlock here. Instead we detect this diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 31fbb061951..7d9d9c132ce 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -7,6 +7,7 @@ #include <linux/fs.h> #include <linux/mount.h> +#include <linux/compat.h> #define MLOG_MASK_PREFIX ML_INODE #include <cluster/masklog.h> @@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) #ifdef CONFIG_COMPAT long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) { + bool preserve; + struct reflink_arguments args; + struct inode *inode = file->f_path.dentry->d_inode; + switch (cmd) { case OCFS2_IOC32_GETFLAGS: cmd = OCFS2_IOC_GETFLAGS; @@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case OCFS2_IOC_GROUP_EXTEND: case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: - case OCFS2_IOC_REFLINK: break; + case OCFS2_IOC_REFLINK: + if (copy_from_user(&args, (struct reflink_arguments *)arg, + sizeof(args))) + return -EFAULT; + preserve = (args.preserve != 0); + + return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path), + compat_ptr(args.new_path), preserve); default: return -ENOIOCTLCMD; } diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h index cf9a5ee30fe..0cd5323bd3f 100644 --- a/fs/ocfs2/ioctl.h +++ b/fs/ocfs2/ioctl.h @@ -7,10 +7,10 @@ * */ -#ifndef OCFS2_IOCTL_H -#define OCFS2_IOCTL_H +#ifndef OCFS2_IOCTL_PROTO_H +#define OCFS2_IOCTL_PROTO_H long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg); -#endif /* OCFS2_IOCTL_H */ +#endif /* OCFS2_IOCTL_PROTO_H */ diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index bf34c491ae9..9336c60e3a3 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb, status = -ENOENT; mlog_errno(status); return status; - } + } mutex_lock(&orphan_dir_inode->i_mutex); status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index ac10f83edb9..ca992d91f51 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -476,7 +476,7 @@ out_mutex: out: if (!status) - ocfs2_init_inode_steal_slot(osb); + ocfs2_init_steal_slots(osb); mlog_exit(status); return status; } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 9362eea7424..1238b491db9 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -42,6 +42,7 @@ #include "ocfs2_fs.h" #include "ocfs2_lockid.h" +#include "ocfs2_ioctl.h" /* For struct ocfs2_blockcheck_stats */ #include "blockcheck.h" @@ -136,6 +137,10 @@ enum 
ocfs2_unlock_action { #define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a call to dlm_lock. Only exists with BUSY set. */ +#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread + * from downconverting + * before the upconvert + * has completed */ struct ocfs2_lock_res_ops; @@ -155,7 +160,7 @@ struct ocfs2_lock_res { int l_level; unsigned int l_ro_holders; unsigned int l_ex_holders; - union ocfs2_dlm_lksb l_lksb; + struct ocfs2_dlm_lksb l_lksb; /* used from AST/BAST funcs. */ enum ocfs2_ast_action l_action; @@ -301,7 +306,9 @@ struct ocfs2_super u32 s_next_generation; unsigned long osb_flags; s16 s_inode_steal_slot; + s16 s_meta_steal_slot; atomic_t s_num_inodes_stolen; + atomic_t s_num_meta_stolen; unsigned long s_mount_opt; unsigned int s_atime_quantum; @@ -756,33 +763,6 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb, return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits); } -static inline void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb) -{ - spin_lock(&osb->osb_lock); - osb->s_inode_steal_slot = OCFS2_INVALID_SLOT; - spin_unlock(&osb->osb_lock); - atomic_set(&osb->s_num_inodes_stolen, 0); -} - -static inline void ocfs2_set_inode_steal_slot(struct ocfs2_super *osb, - s16 slot) -{ - spin_lock(&osb->osb_lock); - osb->s_inode_steal_slot = slot; - spin_unlock(&osb->osb_lock); -} - -static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb) -{ - s16 slot; - - spin_lock(&osb->osb_lock); - slot = osb->s_inode_steal_slot; - spin_unlock(&osb->osb_lock); - - return slot; -} - #define ocfs2_set_bit ext2_set_bit #define ocfs2_clear_bit ext2_clear_bit #define ocfs2_test_bit ext2_test_bit diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 1a1a679e51b..bb37218a797 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -254,63 +254,6 @@ * refcount tree */ /* - * ioctl commands - */ -#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) -#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) -#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) -#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) - -/* - * Space reservation / allocation / free ioctls and argument structure - * are designed to be compatible with XFS. - * - * ALLOCSP* and FREESP* are not and will never be supported, but are - * included here for completeness. - */ -struct ocfs2_space_resv { - __s16 l_type; - __s16 l_whence; - __s64 l_start; - __s64 l_len; /* len == 0 means until end of file */ - __s32 l_sysid; - __u32 l_pid; - __s32 l_pad[4]; /* reserve area */ -}; - -#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv) -#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv) -#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv) -#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv) -#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv) -#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv) -#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv) -#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv) - -/* Used to pass group descriptor data when online resize is done */ -struct ocfs2_new_group_input { - __u64 group; /* Group descriptor's blkno. 
*/ - __u32 clusters; /* Total number of clusters in this group */ - __u32 frees; /* Total free clusters in this group */ - __u16 chain; /* Chain for this group */ - __u16 reserved1; - __u32 reserved2; -}; - -#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int) -#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input) -#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input) - -/* Used to pass 2 file names to reflink. */ -struct reflink_arguments { - __u64 old_path; - __u64 new_path; - __u64 preserve; -}; -#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments) - - -/* * Journal Flags (ocfs2_dinode.id1.journal1.i_flags) */ #define OCFS2_JOURNAL_DIRTY_FL (0x00000001) /* Journal needs recovery */ @@ -1417,9 +1360,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize) return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); } -static inline int ocfs2_max_inline_data(int blocksize) +static inline int ocfs2_max_inline_data_with_xattr(int blocksize, + struct ocfs2_dinode *di) { - return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); + if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL)) + return blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data) - + di->i_xattr_inline_size; + else + return blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data); } static inline int ocfs2_extent_recs_per_inode(int blocksize) diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h new file mode 100644 index 00000000000..2d3420af1a8 --- /dev/null +++ b/fs/ocfs2/ocfs2_ioctl.h @@ -0,0 +1,79 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs2_ioctl.h + * + * Defines OCFS2 ioctls. + * + * Copyright (C) 2010 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef OCFS2_IOCTL_H +#define OCFS2_IOCTL_H + +/* + * ioctl commands + */ +#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) +#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) +#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) +#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) + +/* + * Space reservation / allocation / free ioctls and argument structure + * are designed to be compatible with XFS. + * + * ALLOCSP* and FREESP* are not and will never be supported, but are + * included here for completeness. 
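struct reflink_arguments (kept verbatim in its new home below) is what the new compat path in fs/ocfs2/ioctl.c decodes for 32-bit callers. A condensed model of that decode; the __user annotation is added here for illustration and the surrounding switch plumbing is elided:

/* 32-bit userspace passes the two path pointers widened to __u64;
 * compat_ptr() narrows each back into a usable user pointer. */
static long reflink_compat_model(struct inode *inode, unsigned long arg)
{
        struct reflink_arguments args;

        if (copy_from_user(&args, (struct reflink_arguments __user *)arg,
                           sizeof(args)))
                return -EFAULT;

        return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
                                   compat_ptr(args.new_path),
                                   args.preserve != 0);
}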
+ */ +struct ocfs2_space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserve area */ +}; + +#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv) +#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv) +#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv) +#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv) +#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv) +#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv) +#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv) +#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv) + +/* Used to pass group descriptor data when online resize is done */ +struct ocfs2_new_group_input { + __u64 group; /* Group descriptor's blkno. */ + __u32 clusters; /* Total number of clusters in this group */ + __u32 frees; /* Total free clusters in this group */ + __u16 chain; /* Chain for this group */ + __u16 reserved1; + __u32 reserved2; +}; + +#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int) +#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input) +#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input) + +/* Used to pass 2 file names to reflink. */ +struct reflink_arguments { + __u64 old_path; + __u64 new_path; + __u64 preserve; +}; +#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments) + +#endif /* OCFS2_IOCTL_H */ diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h index 82d5eeac0ff..2e45c8d2ea7 100644 --- a/fs/ocfs2/ocfs2_lockingver.h +++ b/fs/ocfs2/ocfs2_lockingver.h @@ -23,6 +23,8 @@ /* * The protocol version for ocfs2 cluster locking. See dlmglue.c for * more details. + * + * 1.0 - Initial locking version from ocfs2 1.4. 
*/ #define OCFS2_LOCKING_PROTOCOL_MAJOR 1 #define OCFS2_LOCKING_PROTOCOL_MINOR 0 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 74db2be75dd..fb6aa7acf54 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -626,7 +626,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode, rb = (struct ocfs2_refcount_block *)new_bh->b_data; memset(rb, 0, inode->i_sb->s_blocksize); strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE); - rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num); + rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); rb->rf_fs_generation = cpu_to_le32(osb->fs_generation); rb->rf_blkno = cpu_to_le64(first_blkno); @@ -1330,7 +1330,7 @@ static int ocfs2_expand_inline_ref_root(handle_t *handle, memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize); new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; - new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num); + new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); new_rb->rf_blkno = cpu_to_le64(blkno); new_rb->rf_cpos = cpu_to_le32(0); @@ -1576,7 +1576,7 @@ static int ocfs2_new_leaf_refcount_block(handle_t *handle, new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; memset(new_rb, 0, sb->s_blocksize); strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE); - new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num); + new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation); new_rb->rf_blkno = cpu_to_le64(blkno); @@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; @@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, page = grab_cache_page(mapping, page_index); - /* This page can't be dirtied before we CoW it out. */ - BUG_ON(PageDirty(page)); + /* + * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page + * can't be dirtied before we CoW it out.
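The two map_end casts above fix a 32-bit truncation: page_index is a pgoff_t (an unsigned long), so the shift used to happen in 32-bit arithmetic before the assignment to a 64-bit offset, wrapping for CoW ranges past 4GB. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* stand-in for PAGE_CACHE_SHIFT */

int main(void)
{
        uint32_t page_index = 0x100000; /* a page beyond 4GB on 32-bit */
        int64_t bad  = (page_index + 1) << PAGE_SHIFT;
        int64_t good = ((int64_t)page_index + 1) << PAGE_SHIFT;

        /* bad wraps to 0x1000; good is the intended 0x100001000 */
        printf("bad=%#llx good=%#llx\n", (long long)bad, (long long)good);
        return 0;
}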
+ */ + if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) + BUG_ON(PageDirty(page)); if (!PageUptodate(page)) { ret = block_read_full_page(page, ocfs2_get_block); @@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, while (offset < end) { page_index = offset >> PAGE_CACHE_SHIFT; - map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; if (map_end > end) map_end = end; diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index e49c4105026..7020e1253ff 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -161,24 +161,23 @@ static int dlm_status_to_errno(enum dlm_status status) static void o2dlm_lock_ast_wrapper(void *astarg) { - BUG_ON(o2cb_stack.sp_proto == NULL); + struct ocfs2_dlm_lksb *lksb = astarg; - o2cb_stack.sp_proto->lp_lock_ast(astarg); + lksb->lksb_conn->cc_proto->lp_lock_ast(lksb); } static void o2dlm_blocking_ast_wrapper(void *astarg, int level) { - BUG_ON(o2cb_stack.sp_proto == NULL); + struct ocfs2_dlm_lksb *lksb = astarg; - o2cb_stack.sp_proto->lp_blocking_ast(astarg, level); + lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level); } static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status) { + struct ocfs2_dlm_lksb *lksb = astarg; int error = dlm_status_to_errno(status); - BUG_ON(o2cb_stack.sp_proto == NULL); - /* * In o2dlm, you can get both the lock_ast() for the lock being * granted and the unlock_ast() for the CANCEL failing. A @@ -193,16 +192,15 @@ static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status) if (status == DLM_CANCELGRANT) return; - o2cb_stack.sp_proto->lp_unlock_ast(astarg, error); + lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, error); } static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn, int mode, - union ocfs2_dlm_lksb *lksb, + struct ocfs2_dlm_lksb *lksb, u32 flags, void *name, - unsigned int namelen, - void *astarg) + unsigned int namelen) { enum dlm_status status; int o2dlm_mode = mode_to_o2dlm(mode); @@ -211,28 +209,27 @@ static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn, status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm, o2dlm_flags, name, namelen, - o2dlm_lock_ast_wrapper, astarg, + o2dlm_lock_ast_wrapper, lksb, o2dlm_blocking_ast_wrapper); ret = dlm_status_to_errno(status); return ret; } static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn, - union ocfs2_dlm_lksb *lksb, - u32 flags, - void *astarg) + struct ocfs2_dlm_lksb *lksb, + u32 flags) { enum dlm_status status; int o2dlm_flags = flags_to_o2dlm(flags); int ret; status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm, - o2dlm_flags, o2dlm_unlock_ast_wrapper, astarg); + o2dlm_flags, o2dlm_unlock_ast_wrapper, lksb); ret = dlm_status_to_errno(status); return ret; } -static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb) +static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) { return dlm_status_to_errno(lksb->lksb_o2dlm.status); } @@ -242,17 +239,17 @@ static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb) * contents, it will zero out the LVB. Thus the caller can always trust * the contents. 
*/ -static int o2cb_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb) +static int o2cb_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb) { return 1; } -static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb) +static void *o2cb_dlm_lvb(struct ocfs2_dlm_lksb *lksb) { return (void *)(lksb->lksb_o2dlm.lvb); } -static void o2cb_dump_lksb(union ocfs2_dlm_lksb *lksb) +static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb) { dlm_print_one_lock(lksb->lksb_o2dlm.lockid); } @@ -277,10 +274,10 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) u32 dlm_key; struct dlm_ctxt *dlm; struct o2dlm_private *priv; - struct dlm_protocol_version dlm_version; + struct dlm_protocol_version fs_version; BUG_ON(conn == NULL); - BUG_ON(o2cb_stack.sp_proto == NULL); + BUG_ON(conn->cc_proto == NULL); /* for now we only have one cluster/node, make sure we see it * in the heartbeat universe */ @@ -304,18 +301,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) /* used by the dlm code to make message headers unique, each * node in this domain must agree on this. */ dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); - dlm_version.pv_major = conn->cc_version.pv_major; - dlm_version.pv_minor = conn->cc_version.pv_minor; + fs_version.pv_major = conn->cc_version.pv_major; + fs_version.pv_minor = conn->cc_version.pv_minor; - dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version); + dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version); if (IS_ERR(dlm)) { rc = PTR_ERR(dlm); mlog_errno(rc); goto out_free; } - conn->cc_version.pv_major = dlm_version.pv_major; - conn->cc_version.pv_minor = dlm_version.pv_minor; + conn->cc_version.pv_major = fs_version.pv_major; + conn->cc_version.pv_minor = fs_version.pv_minor; conn->cc_lockspace = dlm; dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index da78a2a334f..5ae8812b286 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -25,7 +25,6 @@ #include <linux/reboot.h> #include <asm/uaccess.h> -#include "ocfs2.h" /* For struct ocfs2_lock_res */ #include "stackglue.h" #include <linux/dlm_plock.h> @@ -63,8 +62,8 @@ * negotiated by the client. The client negotiates based on the maximum * version advertised in /sys/fs/ocfs2/max_locking_protocol. The major * number from the "SETV" message must match - * ocfs2_user_plugin.sp_proto->lp_max_version.pv_major, and the minor number - * must be less than or equal to ...->lp_max_version.pv_minor. + * ocfs2_user_plugin.sp_max_proto.pv_major, and the minor number + * must be less than or equal to ...sp_max_proto.pv_minor. * * Once this information has been set, mounts will be allowed. From this * point on, the "DOWN" message can be sent for node down notification.
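Every wrapper rewrite above follows one pattern: astarg is now always the lksb itself, and the lksb carries a back-pointer to its connection, which in turn holds the filesystem's protocol. A compilable model of the dispatch, with the union members and o2dlm types reduced to stubs:

struct lksb_model;

struct proto_model {
        void (*lp_lock_ast)(struct lksb_model *lksb);
};

struct conn_model {
        struct proto_model *cc_proto;
};

struct lksb_model {
        /* union { lksb_o2dlm; lksb_fsdlm; padding; } elided */
        struct conn_model *lksb_conn;
};

/* The plugin's AST wrapper finds the fs protocol through the lksb
 * instead of a module-global sp_proto. */
static void lock_ast_wrapper_model(void *astarg)
{
        struct lksb_model *lksb = astarg;

        lksb->lksb_conn->cc_proto->lp_lock_ast(lksb);
}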
@@ -401,7 +400,7 @@ static int ocfs2_control_do_setversion_msg(struct file *file, char *ptr = NULL; struct ocfs2_control_private *p = file->private_data; struct ocfs2_protocol_version *max = - &ocfs2_user_plugin.sp_proto->lp_max_version; + &ocfs2_user_plugin.sp_max_proto; if (ocfs2_control_get_handshake_state(file) != OCFS2_CONTROL_HANDSHAKE_PROTOCOL) @@ -664,18 +663,10 @@ static void ocfs2_control_exit(void) -rc); } -static struct dlm_lksb *fsdlm_astarg_to_lksb(void *astarg) -{ - struct ocfs2_lock_res *res = astarg; - return &res->l_lksb.lksb_fsdlm; -} - static void fsdlm_lock_ast_wrapper(void *astarg) { - struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg); - int status = lksb->sb_status; - - BUG_ON(ocfs2_user_plugin.sp_proto == NULL); + struct ocfs2_dlm_lksb *lksb = astarg; + int status = lksb->lksb_fsdlm.sb_status; /* * For now we're punting on the issue of other non-standard errors @@ -688,25 +679,24 @@ static void fsdlm_lock_ast_wrapper(void *astarg) */ if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL) - ocfs2_user_plugin.sp_proto->lp_unlock_ast(astarg, 0); + lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, 0); else - ocfs2_user_plugin.sp_proto->lp_lock_ast(astarg); + lksb->lksb_conn->cc_proto->lp_lock_ast(lksb); } static void fsdlm_blocking_ast_wrapper(void *astarg, int level) { - BUG_ON(ocfs2_user_plugin.sp_proto == NULL); + struct ocfs2_dlm_lksb *lksb = astarg; - ocfs2_user_plugin.sp_proto->lp_blocking_ast(astarg, level); + lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level); } static int user_dlm_lock(struct ocfs2_cluster_connection *conn, int mode, - union ocfs2_dlm_lksb *lksb, + struct ocfs2_dlm_lksb *lksb, u32 flags, void *name, - unsigned int namelen, - void *astarg) + unsigned int namelen) { int ret; @@ -716,36 +706,35 @@ static int user_dlm_lock(struct ocfs2_cluster_connection *conn, ret = dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm, flags|DLM_LKF_NODLCKWT, name, namelen, 0, - fsdlm_lock_ast_wrapper, astarg, + fsdlm_lock_ast_wrapper, lksb, fsdlm_blocking_ast_wrapper); return ret; } static int user_dlm_unlock(struct ocfs2_cluster_connection *conn, - union ocfs2_dlm_lksb *lksb, - u32 flags, - void *astarg) + struct ocfs2_dlm_lksb *lksb, + u32 flags) { int ret; ret = dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid, - flags, &lksb->lksb_fsdlm, astarg); + flags, &lksb->lksb_fsdlm, lksb); return ret; } -static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb) +static int user_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) { return lksb->lksb_fsdlm.sb_status; } -static int user_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb) +static int user_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb) { int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID; return !invalid; } -static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb) +static void *user_dlm_lvb(struct ocfs2_dlm_lksb *lksb) { if (!lksb->lksb_fsdlm.sb_lvbptr) lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb + @@ -753,7 +742,7 @@ static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb) return (void *)(lksb->lksb_fsdlm.sb_lvbptr); } -static void user_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb) +static void user_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb) { } diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index f3df0baa9a4..39abf89697e 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -36,7 +36,7 @@ #define OCFS2_STACK_PLUGIN_USER "user" #define OCFS2_MAX_HB_CTL_PATH 256 -static struct ocfs2_locking_protocol *lproto; +static struct ocfs2_protocol_version locking_max_version; static 
DEFINE_SPINLOCK(ocfs2_stack_lock); static LIST_HEAD(ocfs2_stack_list); static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1]; @@ -176,7 +176,7 @@ int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin) spin_lock(&ocfs2_stack_lock); if (!ocfs2_stack_lookup(plugin->sp_name)) { plugin->sp_count = 0; - plugin->sp_proto = lproto; + plugin->sp_max_proto = locking_max_version; list_add(&plugin->sp_list, &ocfs2_stack_list); printk(KERN_INFO "ocfs2: Registered cluster interface %s\n", plugin->sp_name); @@ -213,77 +213,76 @@ void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin) } EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister); -void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto) +void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto) { struct ocfs2_stack_plugin *p; - BUG_ON(proto == NULL); - spin_lock(&ocfs2_stack_lock); - BUG_ON(active_stack != NULL); + if (memcmp(max_proto, &locking_max_version, + sizeof(struct ocfs2_protocol_version))) { + BUG_ON(locking_max_version.pv_major != 0); - lproto = proto; - list_for_each_entry(p, &ocfs2_stack_list, sp_list) { - p->sp_proto = lproto; + locking_max_version = *max_proto; + list_for_each_entry(p, &ocfs2_stack_list, sp_list) { + p->sp_max_proto = locking_max_version; + } } - spin_unlock(&ocfs2_stack_lock); } -EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol); +EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_max_proto_version); /* - * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take - * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the - * underlying stack plugins need to pilfer the lksb off of the lock_res. - * If some other structure needs to be passed as an astarg, the plugins - * will need to be given a different avenue to the lksb. + * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take no argument + * for the ast and bast functions. They will pass the lksb to the ast + * and bast. The caller can wrap the lksb with their own structure to + * get more information. 
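"The caller can wrap the lksb with their own structure" works because the lksb is embedded in the caller's object, so container_of() recovers the owner; the embedding in struct ocfs2_lock_res happens in dlmglue, outside these hunks. A minimal sketch of the pattern:

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct lksb_stub { int status; };

struct lock_res_stub {
        const char *l_name;
        struct lksb_stub l_lksb;        /* embedded, not pointed-to */
};

static struct lock_res_stub *lksb_to_lockres(struct lksb_stub *lksb)
{
        return container_of(lksb, struct lock_res_stub, l_lksb);
}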
*/ int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn, int mode, - union ocfs2_dlm_lksb *lksb, + struct ocfs2_dlm_lksb *lksb, u32 flags, void *name, - unsigned int namelen, - struct ocfs2_lock_res *astarg) + unsigned int namelen) { - BUG_ON(lproto == NULL); - + if (!lksb->lksb_conn) + lksb->lksb_conn = conn; + else + BUG_ON(lksb->lksb_conn != conn); return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags, - name, namelen, astarg); + name, namelen); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lock); int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn, - union ocfs2_dlm_lksb *lksb, - u32 flags, - struct ocfs2_lock_res *astarg) + struct ocfs2_dlm_lksb *lksb, + u32 flags) { - BUG_ON(lproto == NULL); + BUG_ON(lksb->lksb_conn == NULL); - return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg); + return active_stack->sp_ops->dlm_unlock(conn, lksb, flags); } EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock); -int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb) +int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lock_status(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status); -int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb) +int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lvb_valid(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid); -void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb) +void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lock_lvb(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb); -void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb) +void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb) { active_stack->sp_ops->dump_lksb(lksb); } @@ -312,6 +311,7 @@ EXPORT_SYMBOL_GPL(ocfs2_plock); int ocfs2_cluster_connect(const char *stack_name, const char *group, int grouplen, + struct ocfs2_locking_protocol *lproto, void (*recovery_handler)(int node_num, void *recovery_data), void *recovery_data, @@ -329,6 +329,12 @@ int ocfs2_cluster_connect(const char *stack_name, goto out; } + if (memcmp(&lproto->lp_max_version, &locking_max_version, + sizeof(struct ocfs2_protocol_version))) { + rc = -EINVAL; + goto out; + } + new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection), GFP_KERNEL); if (!new_conn) { @@ -341,6 +347,7 @@ int ocfs2_cluster_connect(const char *stack_name, new_conn->cc_recovery_handler = recovery_handler; new_conn->cc_recovery_data = recovery_data; + new_conn->cc_proto = lproto; /* Start the new connection at our maximum compatibility level */ new_conn->cc_version = lproto->lp_max_version; @@ -366,6 +373,24 @@ out: } EXPORT_SYMBOL_GPL(ocfs2_cluster_connect); +/* The caller will ensure all nodes have the same cluster stack */ +int ocfs2_cluster_connect_agnostic(const char *group, + int grouplen, + struct ocfs2_locking_protocol *lproto, + void (*recovery_handler)(int node_num, + void *recovery_data), + void *recovery_data, + struct ocfs2_cluster_connection **conn) +{ + char *stack_name = NULL; + + if (cluster_stack_name[0]) + stack_name = cluster_stack_name; + return ocfs2_cluster_connect(stack_name, group, grouplen, lproto, + recovery_handler, recovery_data, conn); +} +EXPORT_SYMBOL_GPL(ocfs2_cluster_connect_agnostic); + /* If hangup_pending is 0, the stack driver will be dropped */ int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn, int hangup_pending) @@ -453,10 +478,10 @@ static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj, ssize_t ret = 0; spin_lock(&ocfs2_stack_lock); - if (lproto) + if (locking_max_version.pv_major) ret = snprintf(buf, PAGE_SIZE, "%u.%u\n", - 
lproto->lp_max_version.pv_major, - lproto->lp_max_version.pv_minor); + locking_max_version.pv_major, + locking_max_version.pv_minor); spin_unlock(&ocfs2_stack_lock); return ret; @@ -685,7 +710,10 @@ static int __init ocfs2_stack_glue_init(void) static void __exit ocfs2_stack_glue_exit(void) { - lproto = NULL; + memset(&locking_max_version, 0, + sizeof(struct ocfs2_protocol_version)); + locking_max_version.pv_major = 0; + locking_max_version.pv_minor = 0; ocfs2_sysfs_exit(); if (ocfs2_table_header) unregister_sysctl_table(ocfs2_table_header); diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index 03a44d60eac..8ce7398ae1d 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -56,17 +56,6 @@ struct ocfs2_protocol_version { }; /* - * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf. - */ -struct ocfs2_locking_protocol { - struct ocfs2_protocol_version lp_max_version; - void (*lp_lock_ast)(void *astarg); - void (*lp_blocking_ast)(void *astarg, int level); - void (*lp_unlock_ast)(void *astarg, int error); -}; - - -/* * The dlm_lockstatus struct includes lvb space, but the dlm_lksb struct only * has a pointer to separately allocated lvb space. This struct exists only to * include in the lksb union to make space for a combined dlm_lksb and lvb. @@ -81,12 +70,27 @@ struct fsdlm_lksb_plus_lvb { * size of the union is known. Lock status structures are embedded in * ocfs2 inodes. */ -union ocfs2_dlm_lksb { - struct dlm_lockstatus lksb_o2dlm; - struct dlm_lksb lksb_fsdlm; - struct fsdlm_lksb_plus_lvb padding; +struct ocfs2_cluster_connection; +struct ocfs2_dlm_lksb { + union { + struct dlm_lockstatus lksb_o2dlm; + struct dlm_lksb lksb_fsdlm; + struct fsdlm_lksb_plus_lvb padding; + }; + struct ocfs2_cluster_connection *lksb_conn; +}; + +/* + * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf. + */ +struct ocfs2_locking_protocol { + struct ocfs2_protocol_version lp_max_version; + void (*lp_lock_ast)(struct ocfs2_dlm_lksb *lksb); + void (*lp_blocking_ast)(struct ocfs2_dlm_lksb *lksb, int level); + void (*lp_unlock_ast)(struct ocfs2_dlm_lksb *lksb, int error); }; + /* * A cluster connection. Mostly opaque to ocfs2, the connection holds * state for the underlying stack. ocfs2 does use cc_version to determine @@ -96,6 +100,7 @@ struct ocfs2_cluster_connection { char cc_name[GROUP_NAME_MAX]; int cc_namelen; struct ocfs2_protocol_version cc_version; + struct ocfs2_locking_protocol *cc_proto; void (*cc_recovery_handler)(int node_num, void *recovery_data); void *cc_recovery_data; void *cc_lockspace; @@ -155,27 +160,29 @@ struct ocfs2_stack_operations { * * ast and bast functions are not part of the call because the * stack will likely want to wrap ast and bast calls before passing - * them to stack->sp_proto. + * them to stack->sp_proto. There is no astarg. The lksb will + * be passed back to the ast and bast functions. The caller can + * use this to find their object. */ int (*dlm_lock)(struct ocfs2_cluster_connection *conn, int mode, - union ocfs2_dlm_lksb *lksb, + struct ocfs2_dlm_lksb *lksb, u32 flags, void *name, - unsigned int namelen, - void *astarg); + unsigned int namelen); /* * Call the underlying dlm unlock function. The ->dlm_unlock() * function should convert the flags as appropriate. * * The unlock ast is not passed, as the stack will want to wrap - * it before calling stack->sp_proto->lp_unlock_ast(). + * it before calling stack->sp_proto->lp_unlock_ast(). There is + * no astarg. 
The lksb will be passed back to the unlock ast + * function. The caller can use this to find their object. */ int (*dlm_unlock)(struct ocfs2_cluster_connection *conn, - union ocfs2_dlm_lksb *lksb, - u32 flags, - void *astarg); + struct ocfs2_dlm_lksb *lksb, + u32 flags); /* * Return the status of the current lock status block. The fs @@ -183,17 +190,17 @@ struct ocfs2_stack_operations { * callback pulls out the stack-specific lksb, converts the status * to a proper errno, and returns it. */ - int (*lock_status)(union ocfs2_dlm_lksb *lksb); + int (*lock_status)(struct ocfs2_dlm_lksb *lksb); /* * Return non-zero if the LVB is valid. */ - int (*lvb_valid)(union ocfs2_dlm_lksb *lksb); + int (*lvb_valid)(struct ocfs2_dlm_lksb *lksb); /* * Pull the lvb pointer off of the stack-specific lksb. */ - void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb); + void *(*lock_lvb)(struct ocfs2_dlm_lksb *lksb); /* * Cluster-aware posix locks @@ -210,7 +217,7 @@ struct ocfs2_stack_operations { * This is an optional debugging hook. If provided, the * stack can dump debugging information about this lock. */ - void (*dump_lksb)(union ocfs2_dlm_lksb *lksb); + void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb); }; /* @@ -226,7 +233,7 @@ struct ocfs2_stack_plugin { /* These are managed by the stackglue code. */ struct list_head sp_list; unsigned int sp_count; - struct ocfs2_locking_protocol *sp_proto; + struct ocfs2_protocol_version sp_max_proto; }; @@ -234,10 +241,22 @@ struct ocfs2_stack_plugin { int ocfs2_cluster_connect(const char *stack_name, const char *group, int grouplen, + struct ocfs2_locking_protocol *lproto, void (*recovery_handler)(int node_num, void *recovery_data), void *recovery_data, struct ocfs2_cluster_connection **conn); +/* + * Used by callers that don't store their stack name. They must ensure + * all nodes have the same stack.
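A hypothetical caller of the new stack-agnostic entry point, to show the shape of the API; my_proto and my_recovery are illustrative names, not part of the patch:

#include <string.h>

static void my_recovery(int node_num, void *recovery_data)
{
        /* illustrative no-op recovery handler */
}

static struct ocfs2_locking_protocol my_proto;  /* callbacks elided */

static struct ocfs2_cluster_connection *example_connect(void)
{
        struct ocfs2_cluster_connection *conn;
        int rc;

        /* No stack name given; the system default (cluster_stack_name,
         * if set) is used, and all nodes must agree on it. */
        rc = ocfs2_cluster_connect_agnostic("examplegroup",
                                            strlen("examplegroup"),
                                            &my_proto, my_recovery,
                                            NULL, &conn);
        return rc ? NULL : conn;
}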
+ */ +int ocfs2_cluster_connect_agnostic(const char *group, + int grouplen, + struct ocfs2_locking_protocol *lproto, + void (*recovery_handler)(int node_num, + void *recovery_data), + void *recovery_data, + struct ocfs2_cluster_connection **conn); int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn, int hangup_pending); void ocfs2_cluster_hangup(const char *group, int grouplen); @@ -246,26 +265,24 @@ int ocfs2_cluster_this_node(unsigned int *node); struct ocfs2_lock_res; int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn, int mode, - union ocfs2_dlm_lksb *lksb, + struct ocfs2_dlm_lksb *lksb, u32 flags, void *name, - unsigned int namelen, - struct ocfs2_lock_res *astarg); + unsigned int namelen); int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn, - union ocfs2_dlm_lksb *lksb, - u32 flags, - struct ocfs2_lock_res *astarg); + struct ocfs2_dlm_lksb *lksb, + u32 flags); -int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb); -int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb); -void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb); -void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb); +int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb); +int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb); +void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb); +void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb); int ocfs2_stack_supports_plocks(void); int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino, struct file *file, int cmd, struct file_lock *fl); -void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto); +void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto); /* Used by stack plugins */ diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index c30b644d957..c3c60bc3e07 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -51,7 +51,7 @@ #define ALLOC_NEW_GROUP 0x1 #define ALLOC_GROUPS_FROM_GLOBAL 0x2 -#define OCFS2_MAX_INODES_TO_STEAL 1024 +#define OCFS2_MAX_TO_STEAL 1024 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); @@ -637,12 +637,113 @@ bail: return status; } +static void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb) +{ + spin_lock(&osb->osb_lock); + osb->s_inode_steal_slot = OCFS2_INVALID_SLOT; + spin_unlock(&osb->osb_lock); + atomic_set(&osb->s_num_inodes_stolen, 0); +} + +static void ocfs2_init_meta_steal_slot(struct ocfs2_super *osb) +{ + spin_lock(&osb->osb_lock); + osb->s_meta_steal_slot = OCFS2_INVALID_SLOT; + spin_unlock(&osb->osb_lock); + atomic_set(&osb->s_num_meta_stolen, 0); +} + +void ocfs2_init_steal_slots(struct ocfs2_super *osb) +{ + ocfs2_init_inode_steal_slot(osb); + ocfs2_init_meta_steal_slot(osb); +} + +static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type) +{ + spin_lock(&osb->osb_lock); + if (type == INODE_ALLOC_SYSTEM_INODE) + osb->s_inode_steal_slot = slot; + else if (type == EXTENT_ALLOC_SYSTEM_INODE) + osb->s_meta_steal_slot = slot; + spin_unlock(&osb->osb_lock); +} + +static int __ocfs2_get_steal_slot(struct ocfs2_super *osb, int type) +{ + int slot = OCFS2_INVALID_SLOT; + + spin_lock(&osb->osb_lock); + if (type == INODE_ALLOC_SYSTEM_INODE) + slot = osb->s_inode_steal_slot; + else if (type == EXTENT_ALLOC_SYSTEM_INODE) + slot = osb->s_meta_steal_slot; + spin_unlock(&osb->osb_lock); + + return slot; +} + +static int ocfs2_get_inode_steal_slot(struct ocfs2_super *osb) +{ + return __ocfs2_get_steal_slot(osb, INODE_ALLOC_SYSTEM_INODE); +} + +static int 
ocfs2_get_meta_steal_slot(struct ocfs2_super *osb) +{ + return __ocfs2_get_steal_slot(osb, EXTENT_ALLOC_SYSTEM_INODE); +} + +static int ocfs2_steal_resource(struct ocfs2_super *osb, + struct ocfs2_alloc_context *ac, + int type) +{ + int i, status = -ENOSPC; + int slot = __ocfs2_get_steal_slot(osb, type); + + /* Start to steal resource from the first slot after ours. */ + if (slot == OCFS2_INVALID_SLOT) + slot = osb->slot_num + 1; + + for (i = 0; i < osb->max_slots; i++, slot++) { + if (slot == osb->max_slots) + slot = 0; + + if (slot == osb->slot_num) + continue; + + status = ocfs2_reserve_suballoc_bits(osb, ac, + type, + (u32)slot, NULL, + NOT_ALLOC_NEW_GROUP); + if (status >= 0) { + __ocfs2_set_steal_slot(osb, slot, type); + break; + } + + ocfs2_free_ac_resource(ac); + } + + return status; +} + +static int ocfs2_steal_inode(struct ocfs2_super *osb, + struct ocfs2_alloc_context *ac) +{ + return ocfs2_steal_resource(osb, ac, INODE_ALLOC_SYSTEM_INODE); +} + +static int ocfs2_steal_meta(struct ocfs2_super *osb, + struct ocfs2_alloc_context *ac) +{ + return ocfs2_steal_resource(osb, ac, EXTENT_ALLOC_SYSTEM_INODE); +} + int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb, int blocks, struct ocfs2_alloc_context **ac) { int status; - u32 slot; + int slot = ocfs2_get_meta_steal_slot(osb); *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); if (!(*ac)) { @@ -653,12 +754,34 @@ int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb, (*ac)->ac_bits_wanted = blocks; (*ac)->ac_which = OCFS2_AC_USE_META; - slot = osb->slot_num; (*ac)->ac_group_search = ocfs2_block_group_search; + if (slot != OCFS2_INVALID_SLOT && + atomic_read(&osb->s_num_meta_stolen) < OCFS2_MAX_TO_STEAL) + goto extent_steal; + + atomic_set(&osb->s_num_meta_stolen, 0); status = ocfs2_reserve_suballoc_bits(osb, (*ac), EXTENT_ALLOC_SYSTEM_INODE, - slot, NULL, ALLOC_NEW_GROUP); + (u32)osb->slot_num, NULL, + ALLOC_NEW_GROUP); + + + if (status >= 0) { + status = 0; + if (slot != OCFS2_INVALID_SLOT) + ocfs2_init_meta_steal_slot(osb); + goto bail; + } else if (status < 0 && status != -ENOSPC) { + mlog_errno(status); + goto bail; + } + + ocfs2_free_ac_resource(*ac); + +extent_steal: + status = ocfs2_steal_meta(osb, *ac); + atomic_inc(&osb->s_num_meta_stolen); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); @@ -685,43 +808,11 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb, ac); } -static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb, - struct ocfs2_alloc_context *ac) -{ - int i, status = -ENOSPC; - s16 slot = ocfs2_get_inode_steal_slot(osb); - - /* Start to steal inodes from the first slot after ours. */ - if (slot == OCFS2_INVALID_SLOT) - slot = osb->slot_num + 1; - - for (i = 0; i < osb->max_slots; i++, slot++) { - if (slot == osb->max_slots) - slot = 0; - - if (slot == osb->slot_num) - continue; - - status = ocfs2_reserve_suballoc_bits(osb, ac, - INODE_ALLOC_SYSTEM_INODE, - slot, NULL, - NOT_ALLOC_NEW_GROUP); - if (status >= 0) { - ocfs2_set_inode_steal_slot(osb, slot); - break; - } - - ocfs2_free_ac_resource(ac); - } - - return status; -} - int ocfs2_reserve_new_inode(struct ocfs2_super *osb, struct ocfs2_alloc_context **ac) { int status; - s16 slot = ocfs2_get_inode_steal_slot(osb); + int slot = ocfs2_get_inode_steal_slot(osb); u64 alloc_group; *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); @@ -754,14 +845,14 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb, * need to check our slots to see whether there is some space for us. 
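ocfs2_steal_resource() above generalizes the old inode-only scan so metadata allocation can borrow from other slots too. The rotation itself is simple enough to model standalone; try_slot stands in for ocfs2_reserve_suballoc_bits():

/* Scan the other slots round-robin, starting one past ours (or at the
 * slot that worked last time), skipping our own slot. */
static int steal_scan_model(int my_slot, int max_slots, int cached_slot,
                            int (*try_slot)(int slot))
{
        int i, slot = (cached_slot >= 0) ? cached_slot : my_slot + 1;

        for (i = 0; i < max_slots; i++, slot++) {
                if (slot == max_slots)
                        slot = 0;
                if (slot == my_slot)
                        continue;
                if (try_slot(slot) >= 0)
                        return slot;    /* caller caches this steal slot */
        }
        return -1;                      /* -ENOSPC in the kernel code */
}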
*/ if (slot != OCFS2_INVALID_SLOT && - atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL) + atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_TO_STEAL) goto inode_steal; atomic_set(&osb->s_num_inodes_stolen, 0); alloc_group = osb->osb_inode_alloc_group; status = ocfs2_reserve_suballoc_bits(osb, *ac, INODE_ALLOC_SYSTEM_INODE, - osb->slot_num, + (u32)osb->slot_num, &alloc_group, ALLOC_NEW_GROUP | ALLOC_GROUPS_FROM_GLOBAL); @@ -789,7 +880,7 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb, ocfs2_free_ac_resource(*ac); inode_steal: - status = ocfs2_steal_inode_from_other_nodes(osb, *ac); + status = ocfs2_steal_inode(osb, *ac); atomic_inc(&osb->s_num_inodes_stolen); if (status < 0) { if (status != -ENOSPC) diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index 8c9a78a4316..fa60723c43e 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -56,6 +56,7 @@ struct ocfs2_alloc_context { is the same as ~0 - unlimited */ }; +void ocfs2_init_steal_slots(struct ocfs2_super *osb); void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac); static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac) { diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 26069917a9f..dee03197a49 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -69,6 +69,7 @@ #include "xattr.h" #include "quota.h" #include "refcounttree.h" +#include "suballoc.h" #include "buffer_head_io.h" @@ -301,9 +302,12 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len) spin_lock(&osb->osb_lock); out += snprintf(buf + out, len - out, - "%10s => Slot: %d NumStolen: %d\n", "Steal", + "%10s => InodeSlot: %d StolenInodes: %d, " + "MetaSlot: %d StolenMeta: %d\n", "Steal", osb->s_inode_steal_slot, - atomic_read(&osb->s_num_inodes_stolen)); + atomic_read(&osb->s_num_inodes_stolen), + osb->s_meta_steal_slot, + atomic_read(&osb->s_num_meta_stolen)); spin_unlock(&osb->osb_lock); out += snprintf(buf + out, len - out, "OrphanScan => "); @@ -1062,7 +1066,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) "file system, but write access is " "unavailable.\n"); else - mlog_errno(status); + mlog_errno(status); goto read_super_error; } @@ -1997,7 +2001,7 @@ static int ocfs2_initialize_super(struct super_block *sb, osb->blocked_lock_count = 0; spin_lock_init(&osb->osb_lock); spin_lock_init(&osb->osb_xattr_lock); - ocfs2_init_inode_steal_slot(osb); + ocfs2_init_steal_slots(osb); atomic_set(&osb->alloc_stats.moves, 0); atomic_set(&osb->alloc_stats.local_data, 0); diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 49b133ccbf1..32499d213fc 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, } memcpy(link, target, len); - nd_set_link(nd, link); bail: + nd_set_link(nd, status ? ERR_PTR(status) : link); brelse(bh); mlog_exit(status); - return status ? 
ERR_PTR(status) : link; + return NULL; } static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { - char *link = cookie; - - kfree(link); + char *link = nd_get_link(nd); + if (!IS_ERR(link)) + kfree(link); } const struct inode_operations ocfs2_symlink_inode_operations = { diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index c61369342a2..a0a120e82b9 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c @@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, } /* Warning: even if it returns true, this does *not* guarantee that - * the block is stored in our inode metadata cache. - * + * the block is stored in our inode metadata cache. + * * This can be called under lock_buffer() */ int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 8fc6fb071c6..d1b0d386f6d 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -116,10 +116,11 @@ static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = { }; struct ocfs2_xattr_info { - int name_index; - const char *name; - const void *value; - size_t value_len; + int xi_name_index; + const char *xi_name; + int xi_name_len; + const void *xi_value; + size_t xi_value_len; }; struct ocfs2_xattr_search { @@ -137,6 +138,115 @@ struct ocfs2_xattr_search { int not_found; }; +/* Operations on struct ocfs2_xa_entry */ +struct ocfs2_xa_loc; +struct ocfs2_xa_loc_operations { + /* + * Journal functions + */ + int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc, + int type); + void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc); + + /* + * Return a pointer to the appropriate buffer in loc->xl_storage + * at the given offset from loc->xl_header. + */ + void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset); + + /* Can we reuse the existing entry for the new value? */ + int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi); + + /* How much space is needed for the new value? */ + int (*xlo_check_space)(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi); + + /* + * Return the offset of the first name+value pair. This is + * the start of our downward-filling free space. + */ + int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc); + + /* + * Remove the name+value at this location. Do whatever is + * appropriate with the remaining name+value pairs. + */ + void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc); + + /* Fill xl_entry with a new entry */ + void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash); + + /* Add name+value storage to an entry */ + void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size); + + /* + * Initialize the value buf's access and bh fields for this entry. + * ocfs2_xa_fill_value_buf() will handle the xv pointer. + */ + void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_value_buf *vb); +}; + +/* + * Describes an xattr entry location. This is a memory structure + * tracking the on-disk structure. + */ +struct ocfs2_xa_loc { + /* This xattr belongs to this inode */ + struct inode *xl_inode; + + /* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */ + struct ocfs2_xattr_header *xl_header; + + /* Bytes from xl_header to the end of the storage */ + int xl_size; + + /* + * The ocfs2_xattr_entry this location describes. If this is + * NULL, this location describes the on-disk structure where it + * would have been. 
+ */ + struct ocfs2_xattr_entry *xl_entry; + + /* + * Internal housekeeping + */ + + /* Buffer(s) containing this entry */ + void *xl_storage; + + /* Operations on the storage backing this location */ + const struct ocfs2_xa_loc_operations *xl_ops; +}; + +/* + * Convenience functions to calculate how much space is needed for a + * given name+value pair + */ +static int namevalue_size(int name_len, uint64_t value_len) +{ + if (value_len > OCFS2_XATTR_INLINE_SIZE) + return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + else + return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len); +} + +static int namevalue_size_xi(struct ocfs2_xattr_info *xi) +{ + return namevalue_size(xi->xi_name_len, xi->xi_value_len); +} + +static int namevalue_size_xe(struct ocfs2_xattr_entry *xe) +{ + u64 value_len = le64_to_cpu(xe->xe_value_size); + + BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) && + ocfs2_xattr_is_local(xe)); + return namevalue_size(xe->xe_name_len, value_len); +} + + static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb, struct ocfs2_xattr_header *xh, int index, @@ -212,14 +322,6 @@ static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb) return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits); } -static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) -{ - u16 len = sb->s_blocksize - - offsetof(struct ocfs2_xattr_header, xh_entries); - - return len / sizeof(struct ocfs2_xattr_entry); -} - #define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr) #define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) #define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0)) @@ -463,35 +565,22 @@ static u32 ocfs2_xattr_name_hash(struct inode *inode, return hash; } -/* - * ocfs2_xattr_hash_entry() - * - * Compute the hash of an extended attribute. 
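namevalue_size() above becomes the single source of truth for pair sizing: values up to OCFS2_XATTR_INLINE_SIZE are stored inline, anything larger stores a value-tree root instead. A standalone model; the constants are placeholders, and XATTR_SIZE models the 4-byte rounding of OCFS2_XATTR_SIZE():

#include <stdint.h>

enum {
        INLINE_MAX = 80,        /* placeholder for OCFS2_XATTR_INLINE_SIZE */
        ROOT_SIZE  = 40,        /* placeholder for OCFS2_XATTR_ROOT_SIZE */
};

#define XATTR_SIZE(len) (((len) + 3) & ~3)

static int namevalue_size_model(int name_len, uint64_t value_len)
{
        if (value_len > INLINE_MAX)
                return XATTR_SIZE(name_len) + ROOT_SIZE;  /* tree root */
        return XATTR_SIZE(name_len) + XATTR_SIZE((int)value_len);
}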
- */ -static void ocfs2_xattr_hash_entry(struct inode *inode, - struct ocfs2_xattr_header *header, - struct ocfs2_xattr_entry *entry) +static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len) { - u32 hash = 0; - char *name = (char *)header + le16_to_cpu(entry->xe_name_offset); - - hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len); - entry->xe_name_hash = cpu_to_le32(hash); - - return; + return namevalue_size(name_len, value_len) + + sizeof(struct ocfs2_xattr_entry); } -static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len) +static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi) { - int size = 0; - - if (value_len <= OCFS2_XATTR_INLINE_SIZE) - size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len); - else - size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - size += sizeof(struct ocfs2_xattr_entry); + return namevalue_size_xi(xi) + + sizeof(struct ocfs2_xattr_entry); +} - return size; +static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe) +{ + return namevalue_size_xe(xe) + + sizeof(struct ocfs2_xattr_entry); } int ocfs2_calc_security_init(struct inode *dir, @@ -1308,452 +1397,897 @@ out: return ret; } -static int ocfs2_xattr_cleanup(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_value_buf *vb, - size_t offs) +static int ocfs2_xa_check_space_helper(int needed_space, int free_start, + int num_entries) { - int ret = 0; - size_t name_len = strlen(xi->name); - void *val = xs->base + offs; - size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + int free_space; - ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - /* Decrease xattr count */ - le16_add_cpu(&xs->header->xh_count, -1); - /* Remove the xattr entry and tree root which has already be set*/ - memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry)); - memset(val, 0, size); + if (!needed_space) + return 0; - ret = ocfs2_journal_dirty(handle, vb->vb_bh); - if (ret < 0) - mlog_errno(ret); -out: - return ret; + free_space = free_start - + sizeof(struct ocfs2_xattr_header) - + (num_entries * sizeof(struct ocfs2_xattr_entry)) - + OCFS2_XATTR_HEADER_GAP; + if (free_space < 0) + return -EIO; + if (free_space < needed_space) + return -ENOSPC; + + return 0; } -static int ocfs2_xattr_update_entry(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_value_buf *vb, - size_t offs) +static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc, + int type) { - int ret; + return loc->xl_ops->xlo_journal_access(handle, loc, type); +} - ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } +static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc) +{ + loc->xl_ops->xlo_journal_dirty(handle, loc); +} - xs->here->xe_name_offset = cpu_to_le16(offs); - xs->here->xe_value_size = cpu_to_le64(xi->value_len); - if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE) - ocfs2_xattr_set_local(xs->here, 1); - else - ocfs2_xattr_set_local(xs->here, 0); - ocfs2_xattr_hash_entry(inode, xs->header, xs->here); +/* Give a pointer into the storage for the given offset */ +static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset) +{ + BUG_ON(offset >= loc->xl_size); + return loc->xl_ops->xlo_offset_pointer(loc, offset); 
+} - ret = ocfs2_journal_dirty(handle, vb->vb_bh); - if (ret < 0) - mlog_errno(ret); -out: - return ret; +/* + * Wipe the name+value pair and allow the storage to reclaim it. This + * must be followed by either removal of the entry or a call to + * ocfs2_xa_add_namevalue(). + */ +static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc) +{ + loc->xl_ops->xlo_wipe_namevalue(loc); } /* - * ocfs2_xattr_set_value_outside() - * - * Set large size value in B tree. + * Find lowest offset to a name+value pair. This is the start of our + * downward-growing free space. */ -static int ocfs2_xattr_set_value_outside(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt, - struct ocfs2_xattr_value_buf *vb, - size_t offs) +static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc) { - size_t name_len = strlen(xi->name); - void *val = xs->base + offs; - struct ocfs2_xattr_value_root *xv = NULL; - size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - int ret = 0; + return loc->xl_ops->xlo_get_free_start(loc); +} - memset(val, 0, size); - memcpy(val, xi->name, name_len); - xv = (struct ocfs2_xattr_value_root *) - (val + OCFS2_XATTR_SIZE(name_len)); - xv->xr_clusters = 0; - xv->xr_last_eb_blk = 0; - xv->xr_list.l_tree_depth = 0; - xv->xr_list.l_count = cpu_to_le16(1); - xv->xr_list.l_next_free_rec = 0; - vb->vb_xv = xv; - - ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt); - if (ret < 0) { - mlog_errno(ret); - return ret; +/* Can we reuse loc->xl_entry for xi? */ +static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + return loc->xl_ops->xlo_can_reuse(loc, xi); +} + +/* How much free space is needed to set the new value */ +static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + return loc->xl_ops->xlo_check_space(loc, xi); +} + +static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + loc->xl_ops->xlo_add_entry(loc, name_hash); + loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); + /* + * We can't leave the new entry's xe_name_offset at zero or + * add_namevalue() will go nuts. We set it to the size of our + * storage so that it can never be less than any other entry. 
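The xe_name_offset sentinel described above, and the space check earlier in this series of hunks, both rely on the same layout: entries grow upward from the ocfs2_xattr_header while name+value pairs grow downward from the end of storage, with the free space in between. A standalone model of ocfs2_xa_check_space_helper(); the sizes are placeholders for the real struct sizes and OCFS2_XATTR_HEADER_GAP:

#include <errno.h>

enum { HDR = 64, ENT = 16, GAP = 4 };

static int check_space_model(int needed, int free_start, int num_entries)
{
        int free_space = free_start - HDR - num_entries * ENT - GAP;

        if (free_space < 0)
                return -EIO;    /* on-disk accounting is corrupt */
        if (free_space < needed)
                return -ENOSPC; /* legitimately out of room */
        return 0;
}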
+ */ + loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); +} + +static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + int size = namevalue_size_xi(xi); + int nameval_offset; + char *nameval_buf; + + loc->xl_ops->xlo_add_namevalue(loc, size); + loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len); + loc->xl_entry->xe_name_len = xi->xi_name_len; + ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index); + ocfs2_xattr_set_local(loc->xl_entry, + xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE); + + nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset); + memset(nameval_buf, 0, size); + memcpy(nameval_buf, xi->xi_name, xi->xi_name_len); +} + +static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_value_buf *vb) +{ + int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len); + + /* Value bufs are for value trees */ + BUG_ON(ocfs2_xattr_is_local(loc->xl_entry)); + BUG_ON(namevalue_size_xe(loc->xl_entry) != + (name_size + OCFS2_XATTR_ROOT_SIZE)); + + loc->xl_ops->xlo_fill_value_buf(loc, vb); + vb->vb_xv = + (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc, + nameval_offset + + name_size); +} + +static int ocfs2_xa_block_journal_access(handle_t *handle, + struct ocfs2_xa_loc *loc, int type) +{ + struct buffer_head *bh = loc->xl_storage; + ocfs2_journal_access_func access; + + if (loc->xl_size == (bh->b_size - + offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_header))) + access = ocfs2_journal_access_xb; + else + access = ocfs2_journal_access_di; + return access(handle, INODE_CACHE(loc->xl_inode), bh, type); +} + +static void ocfs2_xa_block_journal_dirty(handle_t *handle, + struct ocfs2_xa_loc *loc) +{ + struct buffer_head *bh = loc->xl_storage; + + ocfs2_journal_dirty(handle, bh); +} + +static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc, + int offset) +{ + return (char *)loc->xl_header + offset; +} + +static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + /* + * Block storage is strict. If the sizes aren't exact, we will + * remove the old one and reinsert the new. + */ + return namevalue_size_xe(loc->xl_entry) == + namevalue_size_xi(xi); +} + +static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_header *xh = loc->xl_header; + int i, count = le16_to_cpu(xh->xh_count); + int offset, free_start = loc->xl_size; + + for (i = 0; i < count; i++) { + offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); + if (offset < free_start) + free_start = offset; } - ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs); - if (ret < 0) { - mlog_errno(ret); - return ret; + + return free_start; +} + +static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + int count = le16_to_cpu(loc->xl_header->xh_count); + int free_start = ocfs2_xa_get_free_start(loc); + int needed_space = ocfs2_xi_entry_usage(xi); + + /* + * Block storage will reclaim the original entry before inserting + * the new value, so we only need the difference. If the new + * entry is smaller than the old one, we don't need anything. + */ + if (loc->xl_entry) { + /* Don't need space if we're reusing! 
*/ + if (ocfs2_xa_can_reuse_entry(loc, xi)) + needed_space = 0; + else + needed_space -= ocfs2_xe_entry_usage(loc->xl_entry); } - ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb, - xi->value, xi->value_len); - if (ret < 0) - mlog_errno(ret); + if (needed_space < 0) + needed_space = 0; + return ocfs2_xa_check_space_helper(needed_space, free_start, count); +} - return ret; +/* + * Block storage for xattrs keeps the name+value pairs compacted. When + * we remove one, we have to shift any that preceded it towards the end. + */ +static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc) +{ + int i, offset; + int namevalue_offset, first_namevalue_offset, namevalue_size; + struct ocfs2_xattr_entry *entry = loc->xl_entry; + struct ocfs2_xattr_header *xh = loc->xl_header; + int count = le16_to_cpu(xh->xh_count); + + namevalue_offset = le16_to_cpu(entry->xe_name_offset); + namevalue_size = namevalue_size_xe(entry); + first_namevalue_offset = ocfs2_xa_get_free_start(loc); + + /* Shift the name+value pairs */ + memmove((char *)xh + first_namevalue_offset + namevalue_size, + (char *)xh + first_namevalue_offset, + namevalue_offset - first_namevalue_offset); + memset((char *)xh + first_namevalue_offset, 0, namevalue_size); + + /* Now tell xh->xh_entries about it */ + for (i = 0; i < count; i++) { + offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); + if (offset < namevalue_offset) + le16_add_cpu(&xh->xh_entries[i].xe_name_offset, + namevalue_size); + } + + /* + * Note that we don't update xh_free_start or xh_name_value_len + * because they're not used in block-stored xattrs. + */ +} + +static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + int count = le16_to_cpu(loc->xl_header->xh_count); + loc->xl_entry = &(loc->xl_header->xh_entries[count]); + le16_add_cpu(&loc->xl_header->xh_count, 1); + memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry)); +} + +static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size) +{ + int free_start = ocfs2_xa_get_free_start(loc); + + loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size); +} + +static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_value_buf *vb) +{ + struct buffer_head *bh = loc->xl_storage; + + if (loc->xl_size == (bh->b_size - + offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_header))) + vb->vb_access = ocfs2_journal_access_xb; + else + vb->vb_access = ocfs2_journal_access_di; + vb->vb_bh = bh; } /* - * ocfs2_xattr_set_entry_local() - * - * Set, replace or remove extended attribute in local. + * Operations for xattrs stored in blocks. This includes inline inode + * storage and unindexed ocfs2_xattr_blocks. 
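
The ocfs2_xa_block_loc_ops vector defined just below (and its bucket twin further down) is the standard kernel ops-table idiom: ocfs2_xa_set() and the other ocfs2_xa_* callers go through function pointers and never know which storage layout they are touching. A toy, self-contained illustration of the pattern, with invented names standing in for the kernel structures:

#include <stdio.h>

struct xa_loc;

struct xa_ops {
        int (*check_space)(struct xa_loc *loc, int needed);
        void (*wipe)(struct xa_loc *loc);
};

struct xa_loc {
        const struct xa_ops *ops;
        int free_bytes;
};

static int block_check_space(struct xa_loc *loc, int needed)
{
        /* Block storage compacts on delete, so a plain comparison works. */
        return needed <= loc->free_bytes ? 0 : -1;
}

static void block_wipe(struct xa_loc *loc)
{
        (void)loc;
        printf("block: memmove() the name+value region to stay compact\n");
}

static const struct xa_ops block_ops = {
        .check_space = block_check_space,
        .wipe = block_wipe,
};

/* The caller never knows which layout it is touching. */
static int xa_set(struct xa_loc *loc, int needed)
{
        if (loc->ops->check_space(loc, needed))
                return -1;
        loc->ops->wipe(loc);
        return 0;
}

int main(void)
{
        struct xa_loc loc = { .ops = &block_ops, .free_bytes = 64 };
        return xa_set(&loc, 32);
}
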
*/ -static void ocfs2_xattr_set_entry_local(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_entry *last, - size_t min_offs) +static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = { + .xlo_journal_access = ocfs2_xa_block_journal_access, + .xlo_journal_dirty = ocfs2_xa_block_journal_dirty, + .xlo_offset_pointer = ocfs2_xa_block_offset_pointer, + .xlo_check_space = ocfs2_xa_block_check_space, + .xlo_can_reuse = ocfs2_xa_block_can_reuse, + .xlo_get_free_start = ocfs2_xa_block_get_free_start, + .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue, + .xlo_add_entry = ocfs2_xa_block_add_entry, + .xlo_add_namevalue = ocfs2_xa_block_add_namevalue, + .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf, +}; + +static int ocfs2_xa_bucket_journal_access(handle_t *handle, + struct ocfs2_xa_loc *loc, int type) { - size_t name_len = strlen(xi->name); - int i; + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; - if (xi->value && xs->not_found) { - /* Insert the new xattr entry. */ - le16_add_cpu(&xs->header->xh_count, 1); - ocfs2_xattr_set_type(last, xi->name_index); - ocfs2_xattr_set_local(last, 1); - last->xe_name_len = name_len; - } else { - void *first_val; - void *val; - size_t offs, size; - - first_val = xs->base + min_offs; - offs = le16_to_cpu(xs->here->xe_name_offset); - val = xs->base + offs; - - if (le64_to_cpu(xs->here->xe_value_size) > - OCFS2_XATTR_INLINE_SIZE) - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_ROOT_SIZE; + return ocfs2_xattr_bucket_journal_access(handle, bucket, type); +} + +static void ocfs2_xa_bucket_journal_dirty(handle_t *handle, + struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + + ocfs2_xattr_bucket_journal_dirty(handle, bucket); +} + +static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc, + int offset) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + int block, block_offset; + + /* The header is at the front of the bucket */ + block = offset >> loc->xl_inode->i_sb->s_blocksize_bits; + block_offset = offset % loc->xl_inode->i_sb->s_blocksize; + + return bucket_block(bucket, block) + block_offset; +} + +static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + return namevalue_size_xe(loc->xl_entry) >= + namevalue_size_xi(xi); +} + +static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + return le16_to_cpu(bucket_xh(bucket)->xh_free_start); +} + +static int ocfs2_bucket_align_free_start(struct super_block *sb, + int free_start, int size) +{ + /* + * We need to make sure that the name+value pair fits within + * one block. + */ + if (((free_start - size) >> sb->s_blocksize_bits) != + ((free_start - 1) >> sb->s_blocksize_bits)) + free_start -= free_start % sb->s_blocksize; + + return free_start; +} + +static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + int rc; + int count = le16_to_cpu(loc->xl_header->xh_count); + int free_start = ocfs2_xa_get_free_start(loc); + int needed_space = ocfs2_xi_entry_usage(xi); + int size = namevalue_size_xi(xi); + struct super_block *sb = loc->xl_inode->i_sb; + + /* + * Bucket storage does not reclaim name+value pairs it cannot + * reuse. They live as holes until the bucket fills, and then + * the bucket is defragmented. However, the bucket can reclaim + * the ocfs2_xattr_entry. + */ + if (loc->xl_entry) { + /* Don't need space if we're reusing! 
*/ + if (ocfs2_xa_can_reuse_entry(loc, xi)) + needed_space = 0; else - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); - - if (xi->value && size == OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len)) { - /* The old and the new value have the - same size. Just replace the value. */ - ocfs2_xattr_set_local(xs->here, 1); - xs->here->xe_value_size = cpu_to_le64(xi->value_len); - /* Clear value bytes. */ - memset(val + OCFS2_XATTR_SIZE(name_len), - 0, - OCFS2_XATTR_SIZE(xi->value_len)); - memcpy(val + OCFS2_XATTR_SIZE(name_len), - xi->value, - xi->value_len); - return; - } - /* Remove the old name+value. */ - memmove(first_val + size, first_val, val - first_val); - memset(first_val, 0, size); - xs->here->xe_name_hash = 0; - xs->here->xe_name_offset = 0; - ocfs2_xattr_set_local(xs->here, 1); - xs->here->xe_value_size = 0; - - min_offs += size; - - /* Adjust all value offsets. */ - last = xs->header->xh_entries; - for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) { - size_t o = le16_to_cpu(last->xe_name_offset); - - if (o < offs) - last->xe_name_offset = cpu_to_le16(o + size); - last += 1; - } + needed_space -= sizeof(struct ocfs2_xattr_entry); + } + BUG_ON(needed_space < 0); - if (!xi->value) { - /* Remove the old entry. */ - last -= 1; - memmove(xs->here, xs->here + 1, - (void *)last - (void *)xs->here); - memset(last, 0, sizeof(struct ocfs2_xattr_entry)); - le16_add_cpu(&xs->header->xh_count, -1); - } + if (free_start < size) { + if (needed_space) + return -ENOSPC; + } else { + /* + * First we check if it would fit in the first place. + * Below, we align the free start to a block. This may + * slide us below the minimum gap. By checking unaligned + * first, we avoid that error. + */ + rc = ocfs2_xa_check_space_helper(needed_space, free_start, + count); + if (rc) + return rc; + free_start = ocfs2_bucket_align_free_start(sb, free_start, + size); } - if (xi->value) { - /* Insert the new name+value. */ - size_t size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len); - void *val = xs->base + min_offs - size; + return ocfs2_xa_check_space_helper(needed_space, free_start, count); +} + +static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc) +{ + le16_add_cpu(&loc->xl_header->xh_name_value_len, + -namevalue_size_xe(loc->xl_entry)); +} - xs->here->xe_name_offset = cpu_to_le16(min_offs - size); - memset(val, 0, size); - memcpy(val, xi->name, name_len); - memcpy(val + OCFS2_XATTR_SIZE(name_len), - xi->value, - xi->value_len); - xs->here->xe_value_size = cpu_to_le64(xi->value_len); - ocfs2_xattr_set_local(xs->here, 1); - ocfs2_xattr_hash_entry(inode, xs->header, xs->here); +static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + struct ocfs2_xattr_header *xh = loc->xl_header; + int count = le16_to_cpu(xh->xh_count); + int low = 0, high = count - 1, tmp; + struct ocfs2_xattr_entry *tmp_xe; + + /* + * We keep buckets sorted by name_hash, so we need to find + * our insert place. 
+ */ + while (low <= high && count) { + tmp = (low + high) / 2; + tmp_xe = &xh->xh_entries[tmp]; + + if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash)) + low = tmp + 1; + else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash)) + high = tmp - 1; + else { + low = tmp; + break; + } } - return; + if (low != count) + memmove(&xh->xh_entries[low + 1], + &xh->xh_entries[low], + ((count - low) * sizeof(struct ocfs2_xattr_entry))); + + le16_add_cpu(&xh->xh_count, 1); + loc->xl_entry = &xh->xh_entries[low]; + memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry)); +} + +static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size) +{ + int free_start = ocfs2_xa_get_free_start(loc); + struct ocfs2_xattr_header *xh = loc->xl_header; + struct super_block *sb = loc->xl_inode->i_sb; + int nameval_offset; + + free_start = ocfs2_bucket_align_free_start(sb, free_start, size); + nameval_offset = free_start - size; + loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset); + xh->xh_free_start = cpu_to_le16(nameval_offset); + le16_add_cpu(&xh->xh_name_value_len, size); + +} + +static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_value_buf *vb) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + struct super_block *sb = loc->xl_inode->i_sb; + int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + int size = namevalue_size_xe(loc->xl_entry); + int block_offset = nameval_offset >> sb->s_blocksize_bits; + + /* Values are not allowed to straddle block boundaries */ + BUG_ON(block_offset != + ((nameval_offset + size - 1) >> sb->s_blocksize_bits)); + /* We expect the bucket to be filled in */ + BUG_ON(!bucket->bu_bhs[block_offset]); + + vb->vb_access = ocfs2_journal_access; + vb->vb_bh = bucket->bu_bhs[block_offset]; +} + +/* Operations for xattrs stored in buckets. */ +static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = { + .xlo_journal_access = ocfs2_xa_bucket_journal_access, + .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty, + .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer, + .xlo_check_space = ocfs2_xa_bucket_check_space, + .xlo_can_reuse = ocfs2_xa_bucket_can_reuse, + .xlo_get_free_start = ocfs2_xa_bucket_get_free_start, + .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue, + .xlo_add_entry = ocfs2_xa_bucket_add_entry, + .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue, + .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf, +}; + +static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_value_buf vb; + + if (ocfs2_xattr_is_local(loc->xl_entry)) + return 0; + + ocfs2_xa_fill_value_buf(loc, &vb); + return le32_to_cpu(vb.vb_xv->xr_clusters); +} + +static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int trunc_rc, access_rc; + struct ocfs2_xattr_value_buf vb; + + ocfs2_xa_fill_value_buf(loc, &vb); + trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes, + ctxt); + + /* + * The caller of ocfs2_xa_value_truncate() has already called + * ocfs2_xa_journal_access on the loc. However, The truncate code + * calls ocfs2_extend_trans(). This may commit the previous + * transaction and open a new one. If this is a bucket, truncate + * could leave only vb->vb_bh set up for journaling. Meanwhile, + * the caller is expecting to dirty the entire bucket. So we must + * reset the journal work. We do this even if truncate has failed, + * as it could have failed after committing the extend. 
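
The straddle test in ocfs2_bucket_align_free_start() above is easy to misread: it compares the block index of the pair's first byte (free_start - size) with that of its last byte (free_start - 1), and rounds free_start down to a block boundary only when they differ. A userspace sketch, assuming 512-byte blocks purely for the demonstration:

#include <assert.h>

#define BLOCKSIZE_BITS 9                /* 512-byte blocks, assumed */
#define BLOCKSIZE      (1 << BLOCKSIZE_BITS)

static int align_free_start(int free_start, int size)
{
        /* Would the pair's first and last bytes land in different blocks? */
        if (((free_start - size) >> BLOCKSIZE_BITS) !=
            ((free_start - 1) >> BLOCKSIZE_BITS))
                free_start -= free_start % BLOCKSIZE;   /* round down */
        return free_start;
}

int main(void)
{
        /* An 80-byte pair ending at 600 sits wholly in block 1: unchanged. */
        assert(align_free_start(600, 80) == 600);
        /* A 120-byte pair would span blocks 0 and 1: slide back to 512. */
        assert(align_free_start(600, 120) == 512);
        return 0;
}
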
+ */ + access_rc = ocfs2_xa_journal_access(ctxt->handle, loc, + OCFS2_JOURNAL_ACCESS_WRITE); + + /* Errors in truncate take precedence */ + return trunc_rc ? trunc_rc : access_rc; +} + +static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc) +{ + int index, count; + struct ocfs2_xattr_header *xh = loc->xl_header; + struct ocfs2_xattr_entry *entry = loc->xl_entry; + + ocfs2_xa_wipe_namevalue(loc); + loc->xl_entry = NULL; + + le16_add_cpu(&xh->xh_count, -1); + count = le16_to_cpu(xh->xh_count); + + /* + * Only zero out the entry if there are more remaining. This is + * important for an empty bucket, as it keeps track of the + * bucket's hash value. It doesn't hurt empty block storage. + */ + if (count) { + index = ((char *)entry - (char *)&xh->xh_entries) / + sizeof(struct ocfs2_xattr_entry); + memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1], + (count - index) * sizeof(struct ocfs2_xattr_entry)); + memset(&xh->xh_entries[count], 0, + sizeof(struct ocfs2_xattr_entry)); + } } /* - * ocfs2_xattr_set_entry() + * If we have a problem adjusting the size of an external value during + * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr + * in an intermediate state. For example, the value may be partially + * truncated. + * + * If the value tree hasn't changed, the extend/truncate went nowhere. + * We have nothing to do. The caller can treat it as a straight error. * - * Set extended attribute entry into inode or block. + * If the value tree got partially truncated, we now have a corrupted + * extended attribute. We're going to wipe its entry and leak the + * clusters. Better to leak some storage than leave a corrupt entry. * - * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE, - * We first insert tree root(ocfs2_xattr_value_root) with set_entry_local(), - * then set value in B tree with set_value_outside(). + * If the value tree grew, it obviously didn't grow enough for the + * new entry. We're not going to try and reclaim those clusters either. + * If there was already an external value there (orig_clusters != 0), + * the new clusters are attached safely and we can just leave the old + * value in place. If there was no external value there, we remove + * the entry. + * + * This way, the xattr block we store in the journal will be consistent. + * If the size change broke because of the journal, no changes will hit + * disk anyway. */ -static int ocfs2_xattr_set_entry(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt, - int flag) -{ - struct ocfs2_xattr_entry *last; - struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; - size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name); - size_t size_l = 0; - handle_t *handle = ctxt->handle; - int free, i, ret; - struct ocfs2_xattr_info xi_l = { - .name_index = xi->name_index, - .name = xi->name, - .value = xi->value, - .value_len = xi->value_len, - }; - struct ocfs2_xattr_value_buf vb = { - .vb_bh = xs->xattr_bh, - .vb_access = ocfs2_journal_access_di, - }; +static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc, + const char *what, + unsigned int orig_clusters) +{ + unsigned int new_clusters = ocfs2_xa_value_clusters(loc); + char *nameval_buf = ocfs2_xa_offset_pointer(loc, + le16_to_cpu(loc->xl_entry->xe_name_offset)); + + if (new_clusters < orig_clusters) { + mlog(ML_ERROR, + "Partial truncate while %s xattr %.*s. 
Leaking " + "%u clusters and removing the entry\n", + what, loc->xl_entry->xe_name_len, nameval_buf, + orig_clusters - new_clusters); + ocfs2_xa_remove_entry(loc); + } else if (!orig_clusters) { + mlog(ML_ERROR, + "Unable to allocate an external value for xattr " + "%.*s safely. Leaking %u clusters and removing the " + "entry\n", + loc->xl_entry->xe_name_len, nameval_buf, + new_clusters - orig_clusters); + ocfs2_xa_remove_entry(loc); + } else if (new_clusters > orig_clusters) + mlog(ML_ERROR, + "Unable to grow xattr %.*s safely. %u new clusters " + "have been added, but the value will not be " + "modified\n", + loc->xl_entry->xe_name_len, nameval_buf, + new_clusters - orig_clusters); +} + +static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + unsigned int orig_clusters; + + if (!ocfs2_xattr_is_local(loc->xl_entry)) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc) { + mlog_errno(rc); + /* + * Since this is remove, we can return 0 if + * ocfs2_xa_cleanup_value_truncate() is going to + * wipe the entry anyway. So we check the + * cluster count as well. + */ + if (orig_clusters != ocfs2_xa_value_clusters(loc)) + rc = 0; + ocfs2_xa_cleanup_value_truncate(loc, "removing", + orig_clusters); + if (rc) + goto out; + } + } - if (!(flag & OCFS2_INLINE_XATTR_FL)) { - BUG_ON(xs->xattr_bh == xs->inode_bh); - vb.vb_access = ocfs2_journal_access_xb; - } else - BUG_ON(xs->xattr_bh != xs->inode_bh); + ocfs2_xa_remove_entry(loc); - /* Compute min_offs, last and free space. */ - last = xs->header->xh_entries; +out: + return rc; +} - for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) { - size_t offs = le16_to_cpu(last->xe_name_offset); - if (offs < min_offs) - min_offs = offs; - last += 1; - } +static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc) +{ + int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len); + char *nameval_buf; - free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP; - if (free < 0) - return -EIO; + nameval_buf = ocfs2_xa_offset_pointer(loc, + le16_to_cpu(loc->xl_entry->xe_name_offset)); + memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE); +} - if (!xs->not_found) { - size_t size = 0; - if (ocfs2_xattr_is_local(xs->here)) - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); - else - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_ROOT_SIZE; - free += (size + sizeof(struct ocfs2_xattr_entry)); - } - /* Check free space in inode or block */ - if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { - if (free < sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_ROOT_SIZE) { - ret = -ENOSPC; - goto out; +/* + * Take an existing entry and make it ready for the new value. This + * won't allocate space, but it may free space. It should be ready for + * ocfs2_xa_prepare_entry() to finish the work. 
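
The recovery policy of ocfs2_xa_cleanup_value_truncate() above condenses to three cases: a partial truncate or a half-built new value removes the entry and leaks the clusters, while a value that merely grew too far is left standing. A hypothetical restatement (only the branch logic mirrors the function; the names are invented):

#include <assert.h>

enum cleanup_action { KEEP_ENTRY, REMOVE_ENTRY };

static enum cleanup_action after_failed_resize(unsigned int orig_clusters,
                                               unsigned int new_clusters)
{
        if (new_clusters < orig_clusters)       /* partial truncate */
                return REMOVE_ENTRY;            /* leak clusters, keep FS sane */
        if (!orig_clusters)                     /* half-built new value */
                return REMOVE_ENTRY;
        return KEEP_ENTRY;      /* value grew: the old bytes are still valid */
}

int main(void)
{
        assert(after_failed_resize(8, 3) == REMOVE_ENTRY);      /* shrunk */
        assert(after_failed_resize(0, 2) == REMOVE_ENTRY);      /* brand new */
        assert(after_failed_resize(4, 6) == KEEP_ENTRY);        /* grew */
        return 0;
}
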
+ */ +static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len); + unsigned int orig_clusters; + char *nameval_buf; + int xe_local = ocfs2_xattr_is_local(loc->xl_entry); + int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE; + + BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) != + name_size); + + nameval_buf = ocfs2_xa_offset_pointer(loc, + le16_to_cpu(loc->xl_entry->xe_name_offset)); + if (xe_local) { + memset(nameval_buf + name_size, 0, + namevalue_size_xe(loc->xl_entry) - name_size); + if (!xi_local) + ocfs2_xa_install_value_root(loc); + } else { + orig_clusters = ocfs2_xa_value_clusters(loc); + if (xi_local) { + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc < 0) + mlog_errno(rc); + else + memset(nameval_buf + name_size, 0, + namevalue_size_xe(loc->xl_entry) - + name_size); + } else if (le64_to_cpu(loc->xl_entry->xe_value_size) > + xi->xi_value_len) { + rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, + ctxt); + if (rc < 0) + mlog_errno(rc); } - size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - xi_l.value = (void *)&def_xv; - xi_l.value_len = OCFS2_XATTR_ROOT_SIZE; - } else if (xi->value) { - if (free < sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len)) { - ret = -ENOSPC; + + if (rc) { + ocfs2_xa_cleanup_value_truncate(loc, "reusing", + orig_clusters); goto out; } } - if (!xs->not_found) { - /* For existing extended attribute */ - size_t size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); - size_t offs = le16_to_cpu(xs->here->xe_name_offset); - void *val = xs->base + offs; + loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len); + ocfs2_xattr_set_local(loc->xl_entry, xi_local); - if (ocfs2_xattr_is_local(xs->here) && size == size_l) { - /* Replace existing local xattr with tree root */ - ret = ocfs2_xattr_set_value_outside(inode, xi, xs, - ctxt, &vb, offs); - if (ret < 0) - mlog_errno(ret); - goto out; - } else if (!ocfs2_xattr_is_local(xs->here)) { - /* For existing xattr which has value outside */ - vb.vb_xv = (struct ocfs2_xattr_value_root *) - (val + OCFS2_XATTR_SIZE(name_len)); +out: + return rc; +} - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { - /* - * If new value need set outside also, - * first truncate old value to new value, - * then set new value with set_value_outside(). - */ - ret = ocfs2_xattr_value_truncate(inode, - &vb, - xi->value_len, - ctxt); - if (ret < 0) { - mlog_errno(ret); - goto out; - } +/* + * Prepares loc->xl_entry to receive the new xattr. This includes + * properly setting up the name+value pair region. If loc->xl_entry + * already exists, it will take care of modifying it appropriately. + * + * Note that this modifies the data. You did journal_access already, + * right? 
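
For reference, ocfs2_xa_reuse_entry() above distinguishes four old/new value combinations, keyed on whether each side is local (inline in the name+value region) or external (in a value tree). A descriptive sketch of the plan it picks in each case; the strings are paraphrases of the code paths, not kernel messages:

#include <stdio.h>

enum { VAL_LOCAL, VAL_EXTERNAL };

static const char *reuse_plan(int old_kind, int new_kind)
{
        if (old_kind == VAL_LOCAL && new_kind == VAL_LOCAL)
                return "zero the old value bytes and write in place";
        if (old_kind == VAL_LOCAL && new_kind == VAL_EXTERNAL)
                return "zero, then install an empty value root";
        if (old_kind == VAL_EXTERNAL && new_kind == VAL_LOCAL)
                return "truncate the value tree to 0, then zero the area";
        return "truncate the tree only if shrinking; growth comes later";
}

int main(void)
{
        printf("external -> local: %s\n",
               reuse_plan(VAL_EXTERNAL, VAL_LOCAL));
        return 0;
}
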
+ */ +static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + u32 name_hash, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + unsigned int orig_clusters; + __le64 orig_value_size = 0; - ret = ocfs2_xattr_update_entry(inode, - handle, - xi, - xs, - &vb, - offs); - if (ret < 0) { - mlog_errno(ret); - goto out; - } + rc = ocfs2_xa_check_space(loc, xi); + if (rc) + goto out; - ret = __ocfs2_xattr_set_value_outside(inode, - handle, - &vb, - xi->value, - xi->value_len); - if (ret < 0) - mlog_errno(ret); + if (loc->xl_entry) { + if (ocfs2_xa_can_reuse_entry(loc, xi)) { + orig_value_size = loc->xl_entry->xe_value_size; + rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); + if (rc) + goto out; + goto alloc_value; + } + + if (!ocfs2_xattr_is_local(loc->xl_entry)) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc) { + mlog_errno(rc); + ocfs2_xa_cleanup_value_truncate(loc, + "overwriting", + orig_clusters); goto out; - } else { - /* - * If new value need set in local, - * just trucate old value to zero. - */ - ret = ocfs2_xattr_value_truncate(inode, - &vb, - 0, - ctxt); - if (ret < 0) - mlog_errno(ret); } } + ocfs2_xa_wipe_namevalue(loc); + } else + ocfs2_xa_add_entry(loc, name_hash); + + /* + * If we get here, we have a blank entry. Fill it. We grow our + * name+value pair back from the end. + */ + ocfs2_xa_add_namevalue(loc, xi); + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) + ocfs2_xa_install_value_root(loc); + +alloc_value: + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt); + if (rc < 0) { + /* + * If we tried to grow an existing external value, + * ocfs2_xa_cleanuP-value_truncate() is going to + * let it stand. We have to restore its original + * value size. + */ + loc->xl_entry->xe_value_size = orig_value_size; + ocfs2_xa_cleanup_value_truncate(loc, "growing", + orig_clusters); + mlog_errno(rc); + } } - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh, +out: + return rc; +} + +/* + * Store the value portion of the name+value pair. This will skip + * values that are stored externally. Their tree roots were set up + * by ocfs2_xa_prepare_entry(). 
+ */ +static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len); + char *nameval_buf; + struct ocfs2_xattr_value_buf vb; + + nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset); + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { + ocfs2_xa_fill_value_buf(loc, &vb); + rc = __ocfs2_xattr_set_value_outside(loc->xl_inode, + ctxt->handle, &vb, + xi->xi_value, + xi->xi_value_len); + } else + memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len); + + return rc; +} + +static int ocfs2_xa_set(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int ret; + u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name, + xi->xi_name_len); + + ret = ocfs2_xa_journal_access(ctxt->handle, loc, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - if (!(flag & OCFS2_INLINE_XATTR_FL)) { - ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - } - /* - * Set value in local, include set tree root in local. - * This is the first step for value size >INLINE_SIZE. + * From here on out, everything is going to modify the buffer a + * little. Errors are going to leave the xattr header in a + * sane state. Thus, even with errors we dirty the sucker. */ - ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs); - if (!(flag & OCFS2_INLINE_XATTR_FL)) { - ret = ocfs2_journal_dirty(handle, xs->xattr_bh); - if (ret < 0) { - mlog_errno(ret); - goto out; - } + /* Don't worry, we are never called with !xi_value and !xl_entry */ + if (!xi->xi_value) { + ret = ocfs2_xa_remove(loc, ctxt); + goto out_dirty; } - if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) && - (flag & OCFS2_INLINE_XATTR_FL)) { - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - unsigned int xattrsize = osb->s_xattr_inline_size; - - /* - * Adjust extent record count or inline data size - * to reserve space for extended attribute. - */ - if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - struct ocfs2_inline_data *idata = &di->id2.i_data; - le16_add_cpu(&idata->id_count, -xattrsize); - } else if (!(ocfs2_inode_is_fast_symlink(inode))) { - struct ocfs2_extent_list *el = &di->id2.i_list; - le16_add_cpu(&el->l_count, -(xattrsize / - sizeof(struct ocfs2_extent_rec))); - } - di->i_xattr_inline_size = cpu_to_le16(xattrsize); + ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out_dirty; } - /* Update xattr flag */ - spin_lock(&oi->ip_lock); - oi->ip_dyn_features |= flag; - di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); - spin_unlock(&oi->ip_lock); - ret = ocfs2_journal_dirty(handle, xs->inode_bh); - if (ret < 0) + ret = ocfs2_xa_store_value(loc, xi, ctxt); + if (ret) mlog_errno(ret); - if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { - /* - * Set value outside in B tree. - * This is the second step for value size > INLINE_SIZE. - */ - size_t offs = le16_to_cpu(xs->here->xe_name_offset); - ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt, - &vb, offs); - if (ret < 0) { - int ret2; +out_dirty: + ocfs2_xa_journal_dirty(ctxt->handle, loc); - mlog_errno(ret); - /* - * If set value outside failed, we have to clean - * the junk tree root we have already set in local. 
- */ - ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle, - xi, xs, &vb, offs); - if (ret2 < 0) - mlog_errno(ret2); - } - } out: return ret; } +static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc, + struct inode *inode, + struct buffer_head *bh, + struct ocfs2_xattr_entry *entry) +{ + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; + + BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL)); + + loc->xl_inode = inode; + loc->xl_ops = &ocfs2_xa_block_loc_ops; + loc->xl_storage = bh; + loc->xl_entry = entry; + loc->xl_size = le16_to_cpu(di->i_xattr_inline_size); + loc->xl_header = + (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size - + loc->xl_size); +} + +static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc, + struct inode *inode, + struct buffer_head *bh, + struct ocfs2_xattr_entry *entry) +{ + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)bh->b_data; + + BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED); + + loc->xl_inode = inode; + loc->xl_ops = &ocfs2_xa_block_loc_ops; + loc->xl_storage = bh; + loc->xl_header = &(xb->xb_attrs.xb_header); + loc->xl_entry = entry; + loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_header); +} + +static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_bucket *bucket, + struct ocfs2_xattr_entry *entry) +{ + loc->xl_inode = bucket->bu_inode; + loc->xl_ops = &ocfs2_xa_bucket_loc_ops; + loc->xl_storage = bucket; + loc->xl_header = bucket_xh(bucket); + loc->xl_entry = entry; + loc->xl_size = OCFS2_XATTR_BUCKET_SIZE; +} + /* * In xattr remove, if it is stored outside and refcounted, we may have * the chance to split the refcount tree. So need the allocators. @@ -2149,6 +2683,55 @@ static int ocfs2_xattr_ibody_find(struct inode *inode, return 0; } +static int ocfs2_xattr_ibody_init(struct inode *inode, + struct buffer_head *di_bh, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int ret; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + unsigned int xattrsize = osb->s_xattr_inline_size; + + if (!ocfs2_xattr_has_space_inline(inode, di)) { + ret = -ENOSPC; + goto out; + } + + ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Adjust extent record count or inline data size + * to reserve space for extended attribute. 
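
The reservation below trades inode body space for the inline xattr area: an inline-data inode gives up bytes from id_count, while an extent-list inode gives up whole records from l_count. Back-of-the-envelope numbers, assuming the common 256-byte inline area and 16-byte extent records (both illustrative; the real values come from the superblock and the on-disk format):

#include <stdio.h>

int main(void)
{
        unsigned int xattrsize = 256;   /* osb->s_xattr_inline_size, assumed */
        unsigned int rec_size = 16;     /* sizeof(struct ocfs2_extent_rec), assumed */

        /* Inline-data inode: the data region simply shrinks by the area. */
        printf("id_count shrinks by %u bytes\n", xattrsize);

        /* Extent-list inode: whole records are surrendered instead. */
        printf("l_count shrinks by %u records\n", xattrsize / rec_size);
        return 0;
}
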
+ */ + if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + struct ocfs2_inline_data *idata = &di->id2.i_data; + le16_add_cpu(&idata->id_count, -xattrsize); + } else if (!(ocfs2_inode_is_fast_symlink(inode))) { + struct ocfs2_extent_list *el = &di->id2.i_list; + le16_add_cpu(&el->l_count, -(xattrsize / + sizeof(struct ocfs2_extent_rec))); + } + di->i_xattr_inline_size = cpu_to_le16(xattrsize); + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + spin_unlock(&oi->ip_lock); + + ret = ocfs2_journal_dirty(ctxt->handle, di_bh); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + /* * ocfs2_xattr_ibody_set() * @@ -2160,9 +2743,10 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { + int ret; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; - int ret; + struct ocfs2_xa_loc loc; if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) return -ENOSPC; @@ -2175,8 +2759,25 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, } } - ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, - (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL)); + if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { + ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + } + + ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh, + xs->not_found ? NULL : xs->here); + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + xs->here = loc.xl_entry; + out: up_write(&oi->ip_alloc_sem); @@ -2236,12 +2837,11 @@ cleanup: return ret; } -static int ocfs2_create_xattr_block(handle_t *handle, - struct inode *inode, +static int ocfs2_create_xattr_block(struct inode *inode, struct buffer_head *inode_bh, - struct ocfs2_alloc_context *meta_ac, - struct buffer_head **ret_bh, - int indexed) + struct ocfs2_xattr_set_ctxt *ctxt, + int indexed, + struct buffer_head **ret_bh) { int ret; u16 suballoc_bit_start; @@ -2252,14 +2852,14 @@ static int ocfs2_create_xattr_block(handle_t *handle, struct buffer_head *new_bh = NULL; struct ocfs2_xattr_block *xblk; - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), + inode_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto end; } - ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, + ret = ocfs2_claim_metadata(osb, ctxt->handle, ctxt->meta_ac, 1, &suballoc_bit_start, &num_got, &first_blkno); if (ret < 0) { @@ -2270,7 +2870,7 @@ static int ocfs2_create_xattr_block(handle_t *handle, new_bh = sb_getblk(inode->i_sb, first_blkno); ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); - ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), + ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { @@ -2282,11 +2882,10 @@ static int ocfs2_create_xattr_block(handle_t *handle, xblk = (struct ocfs2_xattr_block *)new_bh->b_data; memset(xblk, 0, inode->i_sb->s_blocksize); strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE); - xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num); + xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot); xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start); xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation); 
xblk->xb_blkno = cpu_to_le64(first_blkno); - if (indexed) { struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root; xr->xt_clusters = cpu_to_le32(1); @@ -2297,14 +2896,17 @@ static int ocfs2_create_xattr_block(handle_t *handle, xr->xt_list.l_next_free_rec = cpu_to_le16(1); xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED); } + ocfs2_journal_dirty(ctxt->handle, new_bh); - ret = ocfs2_journal_dirty(handle, new_bh); - if (ret < 0) { - mlog_errno(ret); - goto end; - } + /* Add it to the inode */ di->i_xattr_loc = cpu_to_le64(first_blkno); - ocfs2_journal_dirty(handle, inode_bh); + + spin_lock(&OCFS2_I(inode)->ip_lock); + OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL; + di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features); + spin_unlock(&OCFS2_I(inode)->ip_lock); + + ocfs2_journal_dirty(ctxt->handle, inode_bh); *ret_bh = new_bh; new_bh = NULL; @@ -2326,13 +2928,13 @@ static int ocfs2_xattr_block_set(struct inode *inode, struct ocfs2_xattr_set_ctxt *ctxt) { struct buffer_head *new_bh = NULL; - handle_t *handle = ctxt->handle; struct ocfs2_xattr_block *xblk = NULL; int ret; + struct ocfs2_xa_loc loc; if (!xs->xattr_bh) { - ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh, - ctxt->meta_ac, &new_bh, 0); + ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt, + 0, &new_bh); if (ret) { mlog_errno(ret); goto end; @@ -2348,21 +2950,25 @@ static int ocfs2_xattr_block_set(struct inode *inode, xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) { - /* Set extended attribute into external block */ - ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, - OCFS2_HAS_XATTR_FL); - if (!ret || ret != -ENOSPC) - goto end; + ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh, + xs->not_found ? NULL : xs->here); - ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); - if (ret) + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (!ret) + xs->here = loc.xl_entry; + else if (ret != -ENOSPC) goto end; + else { + ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); + if (ret) + goto end; + } } - ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); + if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED) + ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); end: - return ret; } @@ -2371,7 +2977,6 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs) { - u64 value_size; struct ocfs2_xattr_entry *last; int free, i; size_t min_offs = xs->end - xs->base; @@ -2394,13 +2999,7 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode, BUG_ON(!xs->not_found); - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) - value_size = OCFS2_XATTR_ROOT_SIZE; - else - value_size = OCFS2_XATTR_SIZE(xi->value_len); - - if (free >= sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size) + if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi))) return 1; return 0; @@ -2424,7 +3023,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, char *base = NULL; int name_offset, name_len = 0; u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, - xi->value_len); + xi->xi_value_len); u64 value_size; /* @@ -2432,14 +3031,14 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, * No matter whether we replace an old one or add a new one, * we need this for writing. 
*/ - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) credits += new_clusters * ocfs2_clusters_to_blocks(inode->i_sb, 1); if (xis->not_found && xbs->not_found) { credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { clusters_add += new_clusters; credits += ocfs2_calc_extend_credits(inode->i_sb, &def_xv.xv.xr_list, @@ -2484,7 +3083,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, * The credits for removing the value tree will be extended * by ocfs2_remove_extent itself. */ - if (!xi->value) { + if (!xi->xi_value) { if (!ocfs2_xattr_is_local(xe)) credits += ocfs2_remove_extent_credits(inode->i_sb); @@ -2514,7 +3113,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, } } - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { /* the new values will be stored outside. */ u32 old_clusters = 0; @@ -2547,9 +3146,10 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, * value, we don't need any allocation, otherwise we have * to guess metadata allocation. */ - if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) || + if ((ocfs2_xattr_is_local(xe) && + (value_size >= xi->xi_value_len)) || (!ocfs2_xattr_is_local(xe) && - OCFS2_XATTR_ROOT_SIZE >= xi->value_len)) + OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len)) goto out; } @@ -2639,7 +3239,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode, meta_add += extra_meta; mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " - "credits = %d\n", xi->name, meta_add, clusters_add, *credits); + "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits); if (meta_add) { ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, @@ -2679,7 +3279,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, { int ret = 0, credits, old_found; - if (!xi->value) { + if (!xi->xi_value) { /* Remove existing extended attribute */ if (!xis->not_found) ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); @@ -2693,8 +3293,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, * If succeed and that extended attribute existing in * external block, then we will remove it. */ - xi->value = NULL; - xi->value_len = 0; + xi->xi_value = NULL; + xi->xi_value_len = 0; old_found = xis->not_found; xis->not_found = -ENODATA; @@ -2722,8 +3322,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, } else if (ret == -ENOSPC) { if (di->i_xattr_loc && !xbs->xattr_bh) { ret = ocfs2_xattr_block_find(inode, - xi->name_index, - xi->name, xbs); + xi->xi_name_index, + xi->xi_name, xbs); if (ret) goto out; @@ -2762,8 +3362,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, * If succeed and that extended attribute * existing in inode, we will remove it. 
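
The control flow in __ocfs2_xattr_set_handle() around this point is a two-level fallback: try the inode body first, spill to the external xattr block on ENOSPC, and after a successful move delete the stale copy from the losing location. Reduced to a toy with hypothetical stand-ins for ocfs2_xattr_ibody_set() and friends:

#include <errno.h>
#include <stdio.h>

static int ibody_set(int have_room)  { return have_room ? 0 : -ENOSPC; }
static int block_set(void)           { return 0; }
static int ibody_remove(void)        { return 0; }

static int xattr_set(int inline_room, int had_inline_copy)
{
        int ret = ibody_set(inline_room);

        if (ret != -ENOSPC)
                return ret;             /* stored inline, or a hard error */

        ret = block_set();              /* spill to the external xattr block */
        if (!ret && had_inline_copy)
                ret = ibody_remove();   /* value moved; drop the stale copy */
        return ret;
}

int main(void)
{
        printf("no inline room: %d\n", xattr_set(0, 1));        /* 0: spilled */
        printf("inline room:    %d\n", xattr_set(1, 0));        /* 0: inline */
        return 0;
}
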
*/ - xi->value = NULL; - xi->value_len = 0; + xi->xi_value = NULL; + xi->xi_value_len = 0; xbs->not_found = -ENODATA; ret = ocfs2_calc_xattr_set_need(inode, di, @@ -2829,10 +3429,11 @@ int ocfs2_xattr_set_handle(handle_t *handle, int ret; struct ocfs2_xattr_info xi = { - .name_index = name_index, - .name = name, - .value = value, - .value_len = value_len, + .xi_name_index = name_index, + .xi_name = name, + .xi_name_len = strlen(name), + .xi_value = value, + .xi_value_len = value_len, }; struct ocfs2_xattr_search xis = { @@ -2912,10 +3513,11 @@ int ocfs2_xattr_set(struct inode *inode, struct ocfs2_refcount_tree *ref_tree = NULL; struct ocfs2_xattr_info xi = { - .name_index = name_index, - .name = name, - .value = value, - .value_len = value_len, + .xi_name_index = name_index, + .xi_name = name, + .xi_name_len = strlen(name), + .xi_value = value, + .xi_value_len = value_len, }; struct ocfs2_xattr_search xis = { @@ -3759,7 +4361,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket) { int ret, i; - size_t end, offset, len, value_len; + size_t end, offset, len; struct ocfs2_xattr_header *xh; char *entries, *buf, *bucket_buf = NULL; u64 blkno = bucket_blkno(bucket); @@ -3813,12 +4415,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, end = OCFS2_XATTR_BUCKET_SIZE; for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) { offset = le16_to_cpu(xe->xe_name_offset); - if (ocfs2_xattr_is_local(xe)) - value_len = OCFS2_XATTR_SIZE( - le64_to_cpu(xe->xe_value_size)); - else - value_len = OCFS2_XATTR_ROOT_SIZE; - len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len; + len = namevalue_size_xe(xe); /* * We must make sure that the name/value pair @@ -4007,7 +4604,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, int new_bucket_head) { int ret, i; - int count, start, len, name_value_len = 0, xe_len, name_offset = 0; + int count, start, len, name_value_len = 0, name_offset = 0; struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL; struct ocfs2_xattr_header *xh; struct ocfs2_xattr_entry *xe; @@ -4098,13 +4695,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, name_value_len = 0; for (i = 0; i < start; i++) { xe = &xh->xh_entries[i]; - xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len); - if (ocfs2_xattr_is_local(xe)) - xe_len += - OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - xe_len += OCFS2_XATTR_ROOT_SIZE; - name_value_len += xe_len; + name_value_len += namevalue_size_xe(xe); if (le16_to_cpu(xe->xe_name_offset) < name_offset) name_offset = le16_to_cpu(xe->xe_name_offset); } @@ -4134,12 +4725,6 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE); for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; - xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len); - if (ocfs2_xattr_is_local(xe)) - xe_len += - OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - xe_len += OCFS2_XATTR_ROOT_SIZE; if (le16_to_cpu(xe->xe_name_offset) < le16_to_cpu(xh->xh_free_start)) xh->xh_free_start = xe->xe_name_offset; @@ -4751,195 +5336,6 @@ static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode, } /* - * Handle the normal xattr set, including replace, delete and new. - * - * Note: "local" indicates the real data's locality. So we can't - * just its bucket locality by its length. 
- */ -static void ocfs2_xattr_set_entry_normal(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - u32 name_hash, - int local) -{ - struct ocfs2_xattr_entry *last, *xe; - int name_len = strlen(xi->name); - struct ocfs2_xattr_header *xh = xs->header; - u16 count = le16_to_cpu(xh->xh_count), start; - size_t blocksize = inode->i_sb->s_blocksize; - char *val; - size_t offs, size, new_size; - - last = &xh->xh_entries[count]; - if (!xs->not_found) { - xe = xs->here; - offs = le16_to_cpu(xe->xe_name_offset); - if (ocfs2_xattr_is_local(xe)) - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE); - - /* - * If the new value will be stored outside, xi->value has been - * initalized as an empty ocfs2_xattr_value_root, and the same - * goes with xi->value_len, so we can set new_size safely here. - * See ocfs2_xattr_set_in_bucket. - */ - new_size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len); - - le16_add_cpu(&xh->xh_name_value_len, -size); - if (xi->value) { - if (new_size > size) - goto set_new_name_value; - - /* Now replace the old value with new one. */ - if (local) - xe->xe_value_size = cpu_to_le64(xi->value_len); - else - xe->xe_value_size = 0; - - val = ocfs2_xattr_bucket_get_val(inode, - xs->bucket, offs); - memset(val + OCFS2_XATTR_SIZE(name_len), 0, - size - OCFS2_XATTR_SIZE(name_len)); - if (OCFS2_XATTR_SIZE(xi->value_len) > 0) - memcpy(val + OCFS2_XATTR_SIZE(name_len), - xi->value, xi->value_len); - - le16_add_cpu(&xh->xh_name_value_len, new_size); - ocfs2_xattr_set_local(xe, local); - return; - } else { - /* - * Remove the old entry if there is more than one. - * We don't remove the last entry so that we can - * use it to indicate the hash value of the empty - * bucket. - */ - last -= 1; - le16_add_cpu(&xh->xh_count, -1); - if (xh->xh_count) { - memmove(xe, xe + 1, - (void *)last - (void *)xe); - memset(last, 0, - sizeof(struct ocfs2_xattr_entry)); - } else - xh->xh_free_start = - cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE); - - return; - } - } else { - /* find a new entry for insert. */ - int low = 0, high = count - 1, tmp; - struct ocfs2_xattr_entry *tmp_xe; - - while (low <= high && count) { - tmp = (low + high) / 2; - tmp_xe = &xh->xh_entries[tmp]; - - if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash)) - low = tmp + 1; - else if (name_hash < - le32_to_cpu(tmp_xe->xe_name_hash)) - high = tmp - 1; - else { - low = tmp; - break; - } - } - - xe = &xh->xh_entries[low]; - if (low != count) - memmove(xe + 1, xe, (void *)last - (void *)xe); - - le16_add_cpu(&xh->xh_count, 1); - memset(xe, 0, sizeof(struct ocfs2_xattr_entry)); - xe->xe_name_hash = cpu_to_le32(name_hash); - xe->xe_name_len = name_len; - ocfs2_xattr_set_type(xe, xi->name_index); - } - -set_new_name_value: - /* Insert the new name+value. */ - size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len); - - /* - * We must make sure that the name/value pair - * exists in the same block. 
- */ - offs = le16_to_cpu(xh->xh_free_start); - start = offs - size; - - if (start >> inode->i_sb->s_blocksize_bits != - (offs - 1) >> inode->i_sb->s_blocksize_bits) { - offs = offs - offs % blocksize; - xh->xh_free_start = cpu_to_le16(offs); - } - - val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size); - xe->xe_name_offset = cpu_to_le16(offs - size); - - memset(val, 0, size); - memcpy(val, xi->name, name_len); - memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len); - - xe->xe_value_size = cpu_to_le64(xi->value_len); - ocfs2_xattr_set_local(xe, local); - xs->here = xe; - le16_add_cpu(&xh->xh_free_start, -size); - le16_add_cpu(&xh->xh_name_value_len, size); - - return; -} - -/* - * Set the xattr entry in the specified bucket. - * The bucket is indicated by xs->bucket and it should have the enough - * space for the xattr insertion. - */ -static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - u32 name_hash, - int local) -{ - int ret; - u64 blkno; - - mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", - (unsigned long)xi->value_len, xi->name_index, - (unsigned long long)bucket_blkno(xs->bucket)); - - if (!xs->bucket->bu_bhs[1]) { - blkno = bucket_blkno(xs->bucket); - ocfs2_xattr_bucket_relse(xs->bucket); - ret = ocfs2_read_xattr_bucket(xs->bucket, blkno); - if (ret) { - mlog_errno(ret); - goto out; - } - } - - ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - - ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local); - ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); - -out: - return ret; -} - -/* * Truncate the specified xe_off entry in xattr bucket. * bucket is indicated by header_bh and len is the new length. * Both the ocfs2_xattr_value_root and the entry will be updated here. 
@@ -5009,66 +5405,6 @@ out: return ret; } -static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, - struct ocfs2_xattr_search *xs, - int len, - struct ocfs2_xattr_set_ctxt *ctxt) -{ - int ret, offset; - struct ocfs2_xattr_entry *xe = xs->here; - struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base; - - BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe)); - - offset = xe - xh->xh_entries; - ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket, - offset, len, ctxt); - if (ret) - mlog_errno(ret); - - return ret; -} - -static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_search *xs, - char *val, - int value_len) -{ - int ret, offset, block_off; - struct ocfs2_xattr_value_root *xv; - struct ocfs2_xattr_entry *xe = xs->here; - struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket); - void *base; - struct ocfs2_xattr_value_buf vb = { - .vb_access = ocfs2_journal_access, - }; - - BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe)); - - ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh, - xe - xh->xh_entries, - &block_off, - &offset); - if (ret) { - mlog_errno(ret); - goto out; - } - - base = bucket_block(xs->bucket, block_off); - xv = (struct ocfs2_xattr_value_root *)(base + offset + - OCFS2_XATTR_SIZE(xe->xe_name_len)); - - vb.vb_xv = xv; - vb.vb_bh = xs->bucket->bu_bhs[block_off]; - ret = __ocfs2_xattr_set_value_outside(inode, handle, - &vb, val, value_len); - if (ret) - mlog_errno(ret); -out: - return ret; -} - static int ocfs2_rm_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, u64 blkno, @@ -5167,128 +5503,6 @@ out: return ret; } -static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_search *xs) -{ - struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket); - struct ocfs2_xattr_entry *last = &xh->xh_entries[ - le16_to_cpu(xh->xh_count) - 1]; - int ret = 0; - - ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - return; - } - - /* Remove the old entry. */ - memmove(xs->here, xs->here + 1, - (void *)last - (void *)xs->here); - memset(last, 0, sizeof(struct ocfs2_xattr_entry)); - le16_add_cpu(&xh->xh_count, -1); - - ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); -} - -/* - * Set the xattr name/value in the bucket specified in xs. - * - * As the new value in xi may be stored in the bucket or in an outside cluster, - * we divide the whole process into 3 steps: - * 1. insert name/value in the bucket(ocfs2_xattr_set_entry_in_bucket) - * 2. truncate of the outside cluster(ocfs2_xattr_bucket_value_truncate_xs) - * 3. Set the value to the outside cluster(ocfs2_xattr_bucket_set_value_outside) - * 4. If the clusters for the new outside value can't be allocated, we need - * to free the xattr we allocated in set. - */ -static int ocfs2_xattr_set_in_bucket(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt) -{ - int ret, local = 1; - size_t value_len; - char *val = (char *)xi->value; - struct ocfs2_xattr_entry *xe = xs->here; - u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name, - strlen(xi->name)); - - if (!xs->not_found && !ocfs2_xattr_is_local(xe)) { - /* - * We need to truncate the xattr storage first. - * - * If both the old and new value are stored to - * outside block, we only need to truncate - * the storage and then set the value outside. 
- * - * If the new value should be stored within block, - * we should free all the outside block first and - * the modification to the xattr block will be done - * by following steps. - */ - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) - value_len = xi->value_len; - else - value_len = 0; - - ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs, - value_len, - ctxt); - if (ret) - goto out; - - if (value_len) - goto set_value_outside; - } - - value_len = xi->value_len; - /* So we have to handle the inside block change now. */ - if (value_len > OCFS2_XATTR_INLINE_SIZE) { - /* - * If the new value will be stored outside of block, - * initalize a new empty value root and insert it first. - */ - local = 0; - xi->value = &def_xv; - xi->value_len = OCFS2_XATTR_ROOT_SIZE; - } - - ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs, - name_hash, local); - if (ret) { - mlog_errno(ret); - goto out; - } - - if (value_len <= OCFS2_XATTR_INLINE_SIZE) - goto out; - - /* allocate the space now for the outside block storage. */ - ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs, - value_len, ctxt); - if (ret) { - mlog_errno(ret); - - if (xs->not_found) { - /* - * We can't allocate enough clusters for outside - * storage and we have allocated xattr already, - * so need to remove it. - */ - ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs); - } - goto out; - } - -set_value_outside: - ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle, - xs, val, value_len); -out: - return ret; -} - /* * check whether the xattr bucket is filled up with the same hash value. * If we want to insert the xattr with the same hash, return -ENOSPC. @@ -5317,156 +5531,116 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, return 0; } -static int ocfs2_xattr_set_entry_index_block(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt) +/* + * Try to set the entry in the current bucket. If we fail, the caller + * will handle getting us another bucket. 
+ */ +static int ocfs2_xattr_set_entry_bucket(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) { - struct ocfs2_xattr_header *xh; - struct ocfs2_xattr_entry *xe; - u16 count, header_size, xh_free_start; - int free, max_free, need, old; - size_t value_size = 0, name_len = strlen(xi->name); - size_t blocksize = inode->i_sb->s_blocksize; - int ret, allocation = 0; - - mlog_entry("Set xattr %s in xattr index block\n", xi->name); - -try_again: - xh = xs->header; - count = le16_to_cpu(xh->xh_count); - xh_free_start = le16_to_cpu(xh->xh_free_start); - header_size = sizeof(struct ocfs2_xattr_header) + - count * sizeof(struct ocfs2_xattr_entry); - max_free = OCFS2_XATTR_BUCKET_SIZE - header_size - - le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP; - - mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size " - "of %u which exceed block size\n", - (unsigned long long)bucket_blkno(xs->bucket), - header_size); + int ret; + struct ocfs2_xa_loc loc; - if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) - value_size = OCFS2_XATTR_ROOT_SIZE; - else if (xi->value) - value_size = OCFS2_XATTR_SIZE(xi->value_len); + mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name); - if (xs->not_found) - need = sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(name_len) + value_size; - else { - need = value_size + OCFS2_XATTR_SIZE(name_len); + ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, + xs->not_found ? NULL : xs->here); + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (!ret) { + xs->here = loc.xl_entry; + goto out; + } + if (ret != -ENOSPC) { + mlog_errno(ret); + goto out; + } - /* - * We only replace the old value if the new length is smaller - * than the old one. Otherwise we will allocate new space in the - * bucket to store it. - */ - xe = xs->here; - if (ocfs2_xattr_is_local(xe)) - old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE); + /* Ok, we need space. Let's try defragmenting the bucket. */ + ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle, + xs->bucket); + if (ret) { + mlog_errno(ret); + goto out; + } - if (old >= value_size) - need = 0; + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (!ret) { + xs->here = loc.xl_entry; + goto out; } + if (ret != -ENOSPC) + mlog_errno(ret); - free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP; - /* - * We need to make sure the new name/value pair - * can exist in the same block. - */ - if (xh_free_start % blocksize < need) - free -= xh_free_start % blocksize; - - mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, " - "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len =" - " %u\n", xs->not_found, - (unsigned long long)bucket_blkno(xs->bucket), - free, need, max_free, le16_to_cpu(xh->xh_free_start), - le16_to_cpu(xh->xh_name_value_len)); - - if (free < need || - (xs->not_found && - count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) { - if (need <= max_free && - count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) { - /* - * We can create the space by defragment. Since only the - * name/value will be moved, the xe shouldn't be changed - * in xs. 
- */ - ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle, - xs->bucket); - if (ret) { - mlog_errno(ret); - goto out; - } - xh_free_start = le16_to_cpu(xh->xh_free_start); - free = xh_free_start - header_size - - OCFS2_XATTR_HEADER_GAP; - if (xh_free_start % blocksize < need) - free -= xh_free_start % blocksize; +out: + mlog_exit(ret); + return ret; +} - if (free >= need) - goto xattr_set; +static int ocfs2_xattr_set_entry_index_block(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int ret; - mlog(0, "Can't get enough space for xattr insert by " - "defragment. Need %u bytes, but we have %d, so " - "allocate new bucket for it.\n", need, free); - } + mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name); - /* - * We have to add new buckets or clusters and one - * allocation should leave us enough space for insert. - */ - BUG_ON(allocation); + ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); + if (!ret) + goto out; + if (ret != -ENOSPC) { + mlog_errno(ret); + goto out; + } - /* - * We do not allow for overlapping ranges between buckets. And - * the maximum number of collisions we will allow for then is - * one bucket's worth, so check it here whether we need to - * add a new bucket for the insert. - */ - ret = ocfs2_check_xattr_bucket_collision(inode, - xs->bucket, - xi->name); - if (ret) { - mlog_errno(ret); - goto out; - } + /* Ack, need more space. Let's try to get another bucket! */ - ret = ocfs2_add_new_xattr_bucket(inode, - xs->xattr_bh, + /* + * We do not allow for overlapping ranges between buckets. And + * the maximum number of collisions we will allow for then is + * one bucket's worth, so check it here whether we need to + * add a new bucket for the insert. + */ + ret = ocfs2_check_xattr_bucket_collision(inode, xs->bucket, - ctxt); - if (ret) { - mlog_errno(ret); - goto out; - } + xi->xi_name); + if (ret) { + mlog_errno(ret); + goto out; + } - /* - * ocfs2_add_new_xattr_bucket() will have updated - * xs->bucket if it moved, but it will not have updated - * any of the other search fields. Thus, we drop it and - * re-search. Everything should be cached, so it'll be - * quick. - */ - ocfs2_xattr_bucket_relse(xs->bucket); - ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, - xi->name_index, - xi->name, xs); - if (ret && ret != -ENODATA) - goto out; - xs->not_found = ret; - allocation = 1; - goto try_again; + ret = ocfs2_add_new_xattr_bucket(inode, + xs->xattr_bh, + xs->bucket, + ctxt); + if (ret) { + mlog_errno(ret); + goto out; } -xattr_set: - ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt); + /* + * ocfs2_add_new_xattr_bucket() will have updated + * xs->bucket if it moved, but it will not have updated + * any of the other search fields. Thus, we drop it and + * re-search. Everything should be cached, so it'll be + * quick. + */ + ocfs2_xattr_bucket_relse(xs->bucket); + ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, + xi->xi_name_index, + xi->xi_name, xs); + if (ret && ret != -ENODATA) + goto out; + xs->not_found = ret; + + /* Ok, we have a new bucket, let's try again */ + ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); + if (ret && (ret != -ENOSPC)) + mlog_errno(ret); + out: mlog_exit(ret); return ret; @@ -5678,7 +5852,7 @@ static int ocfs2_prepare_refcount_xattr(struct inode *inode, * refcount tree, and make the original extent become 3. So we will need * 2 * cluster more extent recs at most. 
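 *
 * (That is, each split turns one extent record into at most three,
 * i.e. two extra records per cluster involved, which is where the
 * "2 * cluster" bound above comes from.)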
*/ - if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) { + if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) { ret = ocfs2_refcounted_xattr_delete_need(inode, &(*ref_tree)->rf_ci, @@ -6354,9 +6528,11 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode, int indexed) { int ret; - handle_t *handle; struct ocfs2_alloc_context *meta_ac; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_xattr_set_ctxt ctxt = { + .meta_ac = meta_ac, + }; ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); if (ret < 0) { @@ -6364,21 +6540,21 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode, return ret; } - handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); + ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS); + if (IS_ERR(ctxt.handle)) { + ret = PTR_ERR(ctxt.handle); mlog_errno(ret); goto out; } mlog(0, "create new xattr block for inode %llu, index = %d\n", (unsigned long long)fe_bh->b_blocknr, indexed); - ret = ocfs2_create_xattr_block(handle, inode, fe_bh, - meta_ac, ret_bh, indexed); + ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed, + ret_bh); if (ret) mlog_errno(ret); - ocfs2_commit_trans(osb, handle); + ocfs2_commit_trans(osb, ctxt.handle); out: ocfs2_free_alloc_context(meta_ac); return ret; diff --git a/fs/open.c b/fs/open.c index 040cef72bc0..e0b2d88b038 100644 --- a/fs/open.c +++ b/fs/open.c @@ -271,7 +271,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) * Make sure that there are no leases. get_write_access() protects * against the truncate racing with a lease-granting setlease(). */ - error = break_lease(inode, FMODE_WRITE); + error = break_lease(inode, O_WRONLY); if (error) goto put_write_and_out; diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 64bc8998ac9..e8865c11777 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -412,9 +412,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, pdev = part_to_dev(p); p->start_sect = start; - p->alignment_offset = queue_sector_alignment_offset(disk->queue, start); - p->discard_alignment = queue_sector_discard_alignment(disk->queue, - start); + p->alignment_offset = + queue_limit_alignment_offset(&disk->queue->limits, start); + p->discard_alignment = + queue_limit_discard_alignment(&disk->queue->limits, start); p->nr_sects = len; p->partno = partno; p->policy = get_disk_ro(disk); diff --git a/fs/pnode.c b/fs/pnode.c index 8d5f392ec3d..5cc564a8314 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -86,7 +86,7 @@ static int do_make_slave(struct vfsmount *mnt) /* * slave 'mnt' to a peer mount that has the - * same root dentry. If none is available than + * same root dentry. If none is available then * slave it to anything that is available. */ while ((peer_mnt = next_peer(peer_mnt)) != mnt && @@ -147,6 +147,11 @@ void change_mnt_propagation(struct vfsmount *mnt, int type) * get the next mount in the propagation tree. * @m: the mount seen last * @origin: the original mount from where the tree walk initiated + * + * Note that peer groups form contiguous segments of slave lists. + * We rely on that in get_source() to be able to find out if + * vfsmount found while iterating with propagation_next() is + * a peer of one we'd found earlier. 
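+ *
+ * For example, a slave list holding [ A1 A2 B1 B2 B3 ], where the As
+ * form one peer group and the Bs another, keeps each group in one
+ * contiguous run; the two groups never interleave (an illustrative
+ * example).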
*/ static struct vfsmount *propagation_next(struct vfsmount *m, struct vfsmount *origin) @@ -186,10 +191,6 @@ static struct vfsmount *get_source(struct vfsmount *dest, { struct vfsmount *p_last_src = NULL; struct vfsmount *p_last_dest = NULL; - *type = CL_PROPAGATION; - - if (IS_MNT_SHARED(dest)) - *type |= CL_MAKE_SHARED; while (last_dest != dest->mnt_master) { p_last_dest = last_dest; @@ -202,13 +203,18 @@ static struct vfsmount *get_source(struct vfsmount *dest, do { p_last_dest = next_peer(p_last_dest); } while (IS_MNT_NEW(p_last_dest)); + /* is that a peer of the earlier? */ + if (dest == p_last_dest) { + *type = CL_MAKE_SHARED; + return p_last_src; + } } - - if (dest != p_last_dest) { - *type |= CL_SLAVE; - return last_src; - } else - return p_last_src; + /* slave of the earlier, then */ + *type = CL_SLAVE; + /* beginning of peer group among the slaves? */ + if (IS_MNT_SHARED(dest)) + *type |= CL_MAKE_SHARED; + return last_src; } /* diff --git a/fs/pnode.h b/fs/pnode.h index 958665d662a..1ea4ae1efcd 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -21,12 +21,11 @@ #define CL_SLAVE 0x02 #define CL_COPY_ALL 0x04 #define CL_MAKE_SHARED 0x08 -#define CL_PROPAGATION 0x10 -#define CL_PRIVATE 0x20 +#define CL_PRIVATE 0x10 static inline void set_mnt_shared(struct vfsmount *mnt) { - mnt->mnt_flags &= ~MNT_PNODE_MASK; + mnt->mnt_flags &= ~MNT_SHARED_MASK; mnt->mnt_flags |= MNT_SHARED; } diff --git a/fs/proc/array.c b/fs/proc/array.c index 13b5d070817..18e20feee25 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -270,7 +270,9 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p) blocked = p->blocked; collect_sigign_sigcatch(p, &ignored, &caught); num_threads = atomic_read(&p->signal->count); + rcu_read_lock(); /* FIXME: is this correct? 
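+	 * __task_cred() must be called with rcu_read_lock() held, so
+	 * the lock is needed here in any case; the open question is
+	 * only whether it is sufficient.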
*/ qsize = atomic_read(&__task_cred(p)->user->sigpending); + rcu_read_unlock(); qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur; unlock_task_sighand(p, &flags); } diff --git a/fs/proc/base.c b/fs/proc/base.c index 18d5cc62d8e..a7310841c83 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -647,17 +647,11 @@ static int mounts_release(struct inode *inode, struct file *file) static unsigned mounts_poll(struct file *file, poll_table *wait) { struct proc_mounts *p = file->private_data; - struct mnt_namespace *ns = p->ns; unsigned res = POLLIN | POLLRDNORM; - poll_wait(file, &ns->poll, wait); - - spin_lock(&vfsmount_lock); - if (p->event != ns->event) { - p->event = ns->event; + poll_wait(file, &p->ns->poll, wait); + if (mnt_had_events(p)) res |= POLLERR | POLLPRI; - } - spin_unlock(&vfsmount_lock); return res; } @@ -1095,8 +1089,12 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, if (!capable(CAP_AUDIT_CONTROL)) return -EPERM; - if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) + rcu_read_lock(); + if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) { + rcu_read_unlock(); return -EPERM; + } + rcu_read_unlock(); if (count >= PAGE_SIZE) count = PAGE_SIZE - 1; @@ -1419,7 +1417,6 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) goto out; error = PROC_I(inode)->op.proc_get_link(inode, &nd->path); - nd->last_type = LAST_BIND; out: return ERR_PTR(error); } @@ -2370,16 +2367,30 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) { struct pid_namespace *ns = dentry->d_sb->s_fs_info; pid_t tgid = task_tgid_nr_ns(current, ns); - char tmp[PROC_NUMBUF]; - if (!tgid) - return ERR_PTR(-ENOENT); - sprintf(tmp, "%d", task_tgid_nr_ns(current, ns)); - return ERR_PTR(vfs_follow_link(nd,tmp)); + char *name = ERR_PTR(-ENOENT); + if (tgid) { + name = __getname(); + if (!name) + name = ERR_PTR(-ENOMEM); + else + sprintf(name, "%d", tgid); + } + nd_set_link(nd, name); + return NULL; +} + +static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) +{ + char *s = nd_get_link(nd); + if (!IS_ERR(s)) + __putname(s); } static const struct inode_operations proc_self_inode_operations = { .readlink = proc_self_readlink, .follow_link = proc_self_follow_link, + .put_link = proc_self_put_link, }; /* diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 480cb1065ee..9580abeadeb 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -662,6 +662,7 @@ struct proc_dir_entry *proc_symlink(const char *name, } return ent; } +EXPORT_SYMBOL(proc_symlink); struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, struct proc_dir_entry *parent) @@ -700,6 +701,7 @@ struct proc_dir_entry *proc_mkdir(const char *name, { return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent); } +EXPORT_SYMBOL(proc_mkdir); struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent) @@ -728,6 +730,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, } return ent; } +EXPORT_SYMBOL(create_proc_entry); struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, struct proc_dir_entry *parent, @@ -762,6 +765,7 @@ out_free: out: return NULL; } +EXPORT_SYMBOL(proc_create_data); static void free_proc_entry(struct proc_dir_entry *de) { @@ -853,3 +857,4 @@ continue_removing: de->parent->name, de->name, de->subdir->name); pde_put(de); } +EXPORT_SYMBOL(remove_proc_entry); diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c index 
7ca78346d3f..cfe90a48a6e 100644 --- a/fs/proc/kmsg.c +++ b/fs/proc/kmsg.c @@ -12,37 +12,37 @@ #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/fs.h> +#include <linux/syslog.h> #include <asm/uaccess.h> #include <asm/io.h> extern wait_queue_head_t log_wait; -extern int do_syslog(int type, char __user *bug, int count); - static int kmsg_open(struct inode * inode, struct file * file) { - return do_syslog(1,NULL,0); + return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE); } static int kmsg_release(struct inode * inode, struct file * file) { - (void) do_syslog(0,NULL,0); + (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE); return 0; } static ssize_t kmsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0)) + if ((file->f_flags & O_NONBLOCK) && + !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) return -EAGAIN; - return do_syslog(2, buf, count); + return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE); } static unsigned int kmsg_poll(struct file *file, poll_table *wait) { poll_wait(file, &log_wait, wait); - if (do_syslog(9, NULL, 0)) + if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE)) return POLLIN | POLLRDNORM; return 0; } diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index 123257bb356..f8650dce74f 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c @@ -10,16 +10,19 @@ #include <linux/seq_file.h> #include <linux/stat.h> #include <linux/string.h> +#include <linux/of.h> +#include <linux/module.h> #include <asm/prom.h> #include <asm/uaccess.h> #include "internal.h" -#ifndef HAVE_ARCH_DEVTREE_FIXUPS static inline void set_node_proc_entry(struct device_node *np, struct proc_dir_entry *de) { -} +#ifdef HAVE_ARCH_DEVTREE_FIXUPS + np->pde = de; #endif +} static struct proc_dir_entry *proc_device_tree; diff --git a/fs/proc/root.c b/fs/proc/root.c index b080b791d9e..757c069f2a6 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -220,9 +220,3 @@ void pid_ns_release_proc(struct pid_namespace *ns) { mntput(ns->proc_mnt); } - -EXPORT_SYMBOL(proc_symlink); -EXPORT_SYMBOL(proc_mkdir); -EXPORT_SYMBOL(create_proc_entry); -EXPORT_SYMBOL(proc_create_data); -EXPORT_SYMBOL(remove_proc_entry); diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 2efc57173fd..1739a4aba25 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -123,30 +123,6 @@ add_error: /*****************************************************************************/ /* - * check that file shrinkage doesn't leave any VMAs dangling in midair - */ -static int ramfs_nommu_check_mappings(struct inode *inode, - size_t newsize, size_t size) -{ - struct vm_area_struct *vma; - struct prio_tree_iter iter; - - /* search for VMAs that fall within the dead zone */ - vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap, - newsize >> PAGE_SHIFT, - (size + PAGE_SIZE - 1) >> PAGE_SHIFT - ) { - /* found one - only interested if it's shared out of the page - * cache */ - if (vma->vm_flags & VM_SHARED) - return -ETXTBSY; /* not quite true, but near enough */ - } - - return 0; -} - -/*****************************************************************************/ -/* * */ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) @@ -164,7 +140,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) /* check that a decrease in size doesn't cut off any shared mappings */ if (newsize < size) { - ret = 
ramfs_nommu_check_mappings(inode, newsize, size); + ret = nommu_shrink_inode_mappings(inode, size, newsize); if (ret < 0) return ret; } diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 9087b10209e..2df0f5c7c60 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1497,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key) args.objectid = key->on_disk_key.k_objectid; args.dirid = key->on_disk_key.k_dir_id; + reiserfs_write_unlock(s); inode = iget5_locked(s, key->on_disk_key.k_objectid, reiserfs_find_actor, reiserfs_init_locked_inode, (void *)(&args)); + reiserfs_write_lock(s); if (!inode) return ERR_PTR(-ENOMEM); diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 83ac4d3b3cb..ba98546fabb 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2913,7 +2913,9 @@ int journal_init(struct super_block *sb, const char *j_dev_name, journal->j_mount_id = 10; journal->j_state = 0; atomic_set(&(journal->j_jlock), 0); + reiserfs_write_unlock(sb); journal->j_cnode_free_list = allocate_cnodes(num_cnodes); + reiserfs_write_lock(sb); journal->j_cnode_free_orig = journal->j_cnode_free_list; journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0; journal->j_cnode_used = 0; diff --git a/fs/romfs/super.c b/fs/romfs/super.c index c117fa80d1e..42d21354689 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -544,6 +544,7 @@ error: error_rsb_inval: ret = -EINVAL; error_rsb: + kfree(rsb); return ret; } diff --git a/fs/seq_file.c b/fs/seq_file.c index eae7d9dbf3f..5afd554efad 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -674,7 +674,6 @@ struct list_head *seq_list_start(struct list_head *head, loff_t pos) return NULL; } - EXPORT_SYMBOL(seq_list_start); struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) @@ -684,7 +683,6 @@ struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) return seq_list_start(head, pos - 1); } - EXPORT_SYMBOL(seq_list_start_head); struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) @@ -695,5 +693,131 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) ++*ppos; return lh == head ? NULL : lh; } - EXPORT_SYMBOL(seq_list_next); + +/** + * seq_hlist_start - start an iteration of a hlist + * @head: the head of the hlist + * @pos: the start position of the sequence + * + * Called at seq_file->op->start(). + */ +struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos) +{ + struct hlist_node *node; + + hlist_for_each(node, head) + if (pos-- == 0) + return node; + return NULL; +} +EXPORT_SYMBOL(seq_hlist_start); + +/** + * seq_hlist_start_head - start an iteration of a hlist + * @head: the head of the hlist + * @pos: the start position of the sequence + * + * Called at seq_file->op->start(). Call this function if you want to + * print a header at the top of the output. + */ +struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos) +{ + if (!pos) + return SEQ_START_TOKEN; + + return seq_hlist_start(head, pos - 1); +} +EXPORT_SYMBOL(seq_hlist_start_head); + +/** + * seq_hlist_next - move to the next position of the hlist + * @v: the current iterator + * @head: the head of the hlist + * @ppos: the current position + * + * Called at seq_file->op->next().
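+ *
+ * A typical seq_operations wiring for these helpers looks like the
+ * following (an illustrative sketch; "foo_list" is a made-up
+ * hlist_head, not something this patch adds):
+ *
+ *	static void *foo_start(struct seq_file *m, loff_t *pos)
+ *	{
+ *		return seq_hlist_start(&foo_list, *pos);
+ *	}
+ *
+ *	static void *foo_next(struct seq_file *m, void *v, loff_t *pos)
+ *	{
+ *		return seq_hlist_next(v, &foo_list, pos);
+ *	}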
+ */ +struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, + loff_t *ppos) +{ + struct hlist_node *node = v; + + ++*ppos; + if (v == SEQ_START_TOKEN) + return head->first; + else + return node->next; +} +EXPORT_SYMBOL(seq_hlist_next); + +/** + * seq_hlist_start_rcu - start an iteration of a hlist protected by RCU + * @head: the head of the hlist + * @pos: the start position of the sequence + * + * Called at seq_file->op->start(). + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, + loff_t pos) +{ + struct hlist_node *node; + + __hlist_for_each_rcu(node, head) + if (pos-- == 0) + return node; + return NULL; +} +EXPORT_SYMBOL(seq_hlist_start_rcu); + +/** + * seq_hlist_start_head_rcu - start an iteration of a hlist protected by RCU + * @head: the head of the hlist + * @pos: the start position of the sequence + * + * Called at seq_file->op->start(). Call this function if you want to + * print a header at the top of the output. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, + loff_t pos) +{ + if (!pos) + return SEQ_START_TOKEN; + + return seq_hlist_start_rcu(head, pos - 1); +} +EXPORT_SYMBOL(seq_hlist_start_head_rcu); + +/** + * seq_hlist_next_rcu - move to the next position of the hlist protected by RCU + * @v: the current iterator + * @head: the head of the hlist + * @ppos: the current position + * + * Called at seq_file->op->next(). + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock().
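+ *
+ * In practice the read-side critical section is opened and closed in
+ * the seq_file callbacks themselves (sketch only; "foo_list" is again
+ * a made-up hlist_head):
+ *
+ *	static void *foo_start(struct seq_file *m, loff_t *pos)
+ *	{
+ *		rcu_read_lock();
+ *		return seq_hlist_start_rcu(&foo_list, *pos);
+ *	}
+ *
+ *	static void foo_stop(struct seq_file *m, void *v)
+ *	{
+ *		rcu_read_unlock();
+ *	}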
+ */ +struct hlist_node *seq_hlist_next_rcu(void *v, + struct hlist_head *head, + loff_t *ppos) +{ + struct hlist_node *node = v; + + ++*ppos; + if (v == SEQ_START_TOKEN) + return rcu_dereference(head->first); + else + return rcu_dereference(node->next); +} +EXPORT_SYMBOL(seq_hlist_next_rcu); diff --git a/fs/super.c b/fs/super.c index aff046b0fe7..f35ac602210 100644 --- a/fs/super.c +++ b/fs/super.c @@ -568,7 +568,7 @@ out: int do_remount_sb(struct super_block *sb, int flags, void *data, int force) { int retval; - int remount_rw; + int remount_rw, remount_ro; if (sb->s_frozen != SB_UNFROZEN) return -EBUSY; @@ -583,9 +583,12 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) shrink_dcache_sb(sb); sync_filesystem(sb); + remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); + remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY); + /* If we are remounting RDONLY and current sb is read/write, make sure there are no rw files opened */ - if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) { + if (remount_ro) { if (force) mark_files_ro(sb); else if (!fs_may_remount_ro(sb)) @@ -594,7 +597,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) if (retval < 0 && retval != -ENOSYS) return -EBUSY; } - remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY); if (sb->s_op->remount_fs) { retval = sb->s_op->remount_fs(sb, &flags, data); @@ -604,6 +606,16 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); if (remount_rw) vfs_dq_quota_on_remount(sb); + /* + * Some filesystems modify their metadata via some other path than the + * bdev buffer cache (e.g. use a private mapping, or directories in + * pagecache, etc). Also file data modifications go via their own + * mappings. So if we remount read-only and then copy the filesystem + * from the bdev, we could get stale data, so invalidate it to give a best + * effort at coherency.
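+	 * (e.g. a backup tool that remounts the filesystem read-only
+	 * and then reads the block device directly could otherwise see
+	 * stale pages from the bdev cache).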
+ */ + if (remount_ro && sb->s_bdev) + invalidate_bdev(sb->s_bdev); return 0; } @@ -925,6 +937,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void if (!mnt) goto out; + if (flags & MS_KERNMOUNT) + mnt->mnt_flags = MNT_INTERNAL; + if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) { secdata = alloc_secdata(); if (!secdata) diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 220b758523a..6a06a1d1ea7 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr) if (!sd_attrs) return -ENOMEM; sd->s_iattr = sd_attrs; - } else { - /* attributes were changed at least once in past */ - iattrs = &sd_attrs->ia_iattr; - - if (ia_valid & ATTR_UID) - iattrs->ia_uid = iattr->ia_uid; - if (ia_valid & ATTR_GID) - iattrs->ia_gid = iattr->ia_gid; - if (ia_valid & ATTR_ATIME) - iattrs->ia_atime = iattr->ia_atime; - if (ia_valid & ATTR_MTIME) - iattrs->ia_mtime = iattr->ia_mtime; - if (ia_valid & ATTR_CTIME) - iattrs->ia_ctime = iattr->ia_ctime; - if (ia_valid & ATTR_MODE) { - umode_t mode = iattr->ia_mode; - iattrs->ia_mode = sd->s_mode = mode; - } + } + /* attributes were changed at least once in past */ + iattrs = &sd_attrs->ia_iattr; + + if (ia_valid & ATTR_UID) + iattrs->ia_uid = iattr->ia_uid; + if (ia_valid & ATTR_GID) + iattrs->ia_gid = iattr->ia_gid; + if (ia_valid & ATTR_ATIME) + iattrs->ia_atime = iattr->ia_atime; + if (ia_valid & ATTR_MTIME) + iattrs->ia_mtime = iattr->ia_mtime; + if (ia_valid & ATTR_CTIME) + iattrs->ia_ctime = iattr->ia_ctime; + if (ia_valid & ATTR_MODE) { + umode_t mode = iattr->ia_mode; + iattrs->ia_mode = sd->s_mode = mode; } return 0; } diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 82372e332f0..b2d96f45c12 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -547,7 +547,7 @@ static void udf_table_free_blocks(struct super_block *sb, } if (epos.offset + (2 * adsize) > sb->s_blocksize) { - char *sptr, *dptr; + unsigned char *sptr, *dptr; int loffset; brelse(oepos.bh); diff --git a/fs/udf/dir.c b/fs/udf/dir.c index 61d9a76a3a6..f0f2a436251 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -45,8 +45,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp, int block, iblock; loff_t nf_pos = (filp->f_pos - 1) << 2; int flen; - char *fname = NULL; - char *nameptr; + unsigned char *fname = NULL; + unsigned char *nameptr; uint16_t liu; uint8_t lfi; loff_t size = udf_ext0_offset(dir) + dir->i_size; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index f90231eb291..378a7592257 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1672,7 +1672,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos, return -1; if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) { - char *sptr, *dptr; + unsigned char *sptr, *dptr; struct buffer_head *nbh; int err, loffset; struct kernel_lb_addr obloc = epos->block; diff --git a/fs/udf/namei.c b/fs/udf/namei.c index cd2115060fd..7c56ff00cd5 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -34,8 +34,8 @@ #include <linux/crc-itu-t.h> #include <linux/exportfs.h> -static inline int udf_match(int len1, const char *name1, int len2, - const char *name2) +static inline int udf_match(int len1, const unsigned char *name1, int len2, + const unsigned char *name2) { if (len1 != len2) return 0; @@ -142,15 +142,15 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, } static struct fileIdentDesc *udf_find_entry(struct inode *dir, - struct qstr *child, + const struct qstr *child, struct 
udf_fileident_bh *fibh, struct fileIdentDesc *cfi) { struct fileIdentDesc *fi = NULL; loff_t f_pos; int block, flen; - char *fname = NULL; - char *nameptr; + unsigned char *fname = NULL; + unsigned char *nameptr; uint8_t lfi; uint16_t liu; loff_t size; @@ -308,7 +308,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, { struct super_block *sb = dir->i_sb; struct fileIdentDesc *fi = NULL; - char *name = NULL; + unsigned char *name = NULL; int namelen; loff_t f_pos; loff_t size = udf_ext0_offset(dir) + dir->i_size; @@ -885,16 +885,16 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, { struct inode *inode; struct pathComponent *pc; - char *compstart; + const char *compstart; struct udf_fileident_bh fibh; struct extent_position epos = {}; int eoffset, elen = 0; struct fileIdentDesc *fi; struct fileIdentDesc cfi; - char *ea; + uint8_t *ea; int err; int block; - char *name = NULL; + unsigned char *name = NULL; int namelen; struct buffer_head *bh; struct udf_inode_info *iinfo; @@ -970,7 +970,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, pc = (struct pathComponent *)(ea + elen); - compstart = (char *)symname; + compstart = symname; do { symname++; diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index c3265e1385d..852e9184568 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -32,12 +32,12 @@ #include <linux/buffer_head.h> #include "udf_i.h" -static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, - char *to) +static void udf_pc_to_char(struct super_block *sb, unsigned char *from, + int fromlen, unsigned char *to) { struct pathComponent *pc; int elen = 0; - char *p = to; + unsigned char *p = to; while (elen < fromlen) { pc = (struct pathComponent *)(from + elen); @@ -75,9 +75,9 @@ static int udf_symlink_filler(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct buffer_head *bh = NULL; - char *symlink; + unsigned char *symlink; int err = -EIO; - char *p = kmap(page); + unsigned char *p = kmap(page); struct udf_inode_info *iinfo; lock_kernel(); diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 22af68f8b68..317a0d444f6 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -31,7 +31,7 @@ * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller. */ static inline int ufs_match(struct super_block *sb, int len, - const char * const name, struct ufs_dir_entry * de) + const unsigned char *name, struct ufs_dir_entry *de) { if (len != ufs_get_de_namlen(sb, de)) return 0; @@ -70,7 +70,7 @@ static inline unsigned long ufs_dir_pages(struct inode *inode) return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; } -ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr) +ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) { ino_t res = 0; struct ufs_dir_entry *de; @@ -249,11 +249,11 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p) * (as a parameter - res_dir). Page is returned mapped and unlocked. * Entry is guaranteed to be valid. 
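 *
 * A typical caller (cf. ufs_inode_by_name() above) does roughly:
 *
 *	de = ufs_find_entry(dir, qstr, &page);
 *	if (de) {
 *		res = fs32_to_cpu(sb, de->d_ino);
 *		ufs_put_page(page);
 *	}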
*/ -struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr, +struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr, struct page **res_page) { struct super_block *sb = dir->i_sb; - const char *name = qstr->name; + const unsigned char *name = qstr->name; int namelen = qstr->len; unsigned reclen = UFS_DIR_REC_LEN(namelen); unsigned long start, n; @@ -313,7 +313,7 @@ found: int ufs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; - const char *name = dentry->d_name.name; + const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct super_block *sb = dir->i_sb; unsigned reclen = UFS_DIR_REC_LEN(namelen); diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h index 0b4c39bc0d9..01d0e2a3b23 100644 --- a/fs/ufs/ufs.h +++ b/fs/ufs/ufs.h @@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned); /* dir.c */ extern const struct inode_operations ufs_dir_inode_operations; extern int ufs_add_link (struct dentry *, struct inode *); -extern ino_t ufs_inode_by_name(struct inode *, struct qstr *); +extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *); extern int ufs_make_empty(struct inode *, struct inode *); -extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **); +extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **); extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *); extern int ufs_empty_dir (struct inode *); extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 56641fe52a2..5c5a366aa33 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -16,7 +16,7 @@ # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # -EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6 -funsigned-char +EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6 XFS_LINUX := linux-2.6 diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c index 2d3f90afe5f..bc7405585de 100644 --- a/fs/xfs/linux-2.6/kmem.c +++ b/fs/xfs/linux-2.6/kmem.c @@ -16,7 +16,6 @@ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/mm.h> -#include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/blkdev.h> @@ -24,8 +23,25 @@ #include "time.h" #include "kmem.h" -#define MAX_VMALLOCS 6 -#define MAX_SLAB_SIZE 0x20000 +/* + * Greedy allocation. May fail and may return vmalloced memory. + * + * Must be freed using kmem_free_large. 
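+ *
+ * Usage sketch (modeled on the bulkstat caller; the sizes shown are
+ * illustrative):
+ *
+ *	size_t irbsize;
+ *	void *irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE,
+ *					 PAGE_SIZE * 4);
+ *	...
+ *	kmem_free_large(irbuf);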
+ */ +void * +kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize) +{ + void *ptr; + size_t kmsize = maxsize; + + while (!(ptr = kmem_zalloc_large(kmsize))) { + if ((kmsize >>= 1) <= minsize) + kmsize = minsize; + } + if (ptr) + *size = kmsize; + return ptr; +} void * kmem_alloc(size_t size, unsigned int __nocast flags) @@ -34,19 +50,8 @@ kmem_alloc(size_t size, unsigned int __nocast flags) gfp_t lflags = kmem_flags_convert(flags); void *ptr; -#ifdef DEBUG - if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) { - printk(KERN_WARNING "Large %s attempt, size=%ld\n", - __func__, (long)size); - dump_stack(); - } -#endif - do { - if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS) - ptr = kmalloc(size, lflags); - else - ptr = __vmalloc(size, lflags, PAGE_KERNEL); + ptr = kmalloc(size, lflags); if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) return ptr; if (!(++retries % 100)) @@ -68,27 +73,6 @@ kmem_zalloc(size_t size, unsigned int __nocast flags) return ptr; } -void * -kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize, - unsigned int __nocast flags) -{ - void *ptr; - size_t kmsize = maxsize; - unsigned int kmflags = (flags & ~KM_SLEEP) | KM_NOSLEEP; - - while (!(ptr = kmem_zalloc(kmsize, kmflags))) { - if ((kmsize <= minsize) && (flags & KM_NOSLEEP)) - break; - if ((kmsize >>= 1) <= minsize) { - kmsize = minsize; - kmflags = flags; - } - } - if (ptr) - *size = kmsize; - return ptr; -} - void kmem_free(const void *ptr) { diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index 179cbd630f6..f7c8f7a9ea6 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h @@ -21,6 +21,7 @@ #include <linux/slab.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/vmalloc.h> /* * General memory allocation interfaces @@ -30,7 +31,6 @@ #define KM_NOSLEEP 0x0002u #define KM_NOFS 0x0004u #define KM_MAYFAIL 0x0008u -#define KM_LARGE 0x0010u /* * We use a special process flag to avoid recursive callbacks into @@ -42,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags) { gfp_t lflags; - BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE)); + BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); if (flags & KM_NOSLEEP) { lflags = GFP_ATOMIC | __GFP_NOWARN; @@ -56,10 +56,25 @@ kmem_flags_convert(unsigned int __nocast flags) extern void *kmem_alloc(size_t, unsigned int __nocast); extern void *kmem_zalloc(size_t, unsigned int __nocast); -extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast); extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast); extern void kmem_free(const void *); +static inline void *kmem_zalloc_large(size_t size) +{ + void *ptr; + + ptr = vmalloc(size); + if (ptr) + memset(ptr, 0, size); + return ptr; +} +static inline void kmem_free_large(void *ptr) +{ + vfree(ptr); +} + +extern void *kmem_zalloc_greedy(size_t *, size_t, size_t); + /* * Zone interfaces */ diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c index 883ca5ab8af..bf85bbe4a9a 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/linux-2.6/xfs_acl.c @@ -106,7 +106,7 @@ xfs_get_acl(struct inode *inode, int type) struct posix_acl *acl; struct xfs_acl *xfs_acl; int len = sizeof(struct xfs_acl); - char *ea_name; + unsigned char *ea_name; int error; acl = get_cached_acl(inode, type); @@ -133,7 +133,8 @@ xfs_get_acl(struct inode *inode, int type) if (!xfs_acl) return ERR_PTR(-ENOMEM); - error = -xfs_attr_get(ip, ea_name, (char *)xfs_acl, &len, ATTR_ROOT); + error = -xfs_attr_get(ip, ea_name, 
(unsigned char *)xfs_acl, + &len, ATTR_ROOT); if (error) { /* * If the attribute doesn't exist make sure we have a negative @@ -162,7 +163,7 @@ STATIC int xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) { struct xfs_inode *ip = XFS_I(inode); - char *ea_name; + unsigned char *ea_name; int error; if (S_ISLNK(inode->i_mode)) @@ -194,7 +195,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) (sizeof(struct xfs_acl_entry) * (XFS_ACL_MAX_ENTRIES - acl->a_count)); - error = -xfs_attr_set(ip, ea_name, (char *)xfs_acl, + error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, len, ATTR_ROOT); kfree(xfs_acl); @@ -262,7 +263,7 @@ xfs_set_mode(struct inode *inode, mode_t mode) } static int -xfs_acl_exists(struct inode *inode, char *name) +xfs_acl_exists(struct inode *inode, unsigned char *name) { int len = sizeof(struct xfs_acl); diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 77b8be81c76..6f76ba85f19 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -33,6 +33,7 @@ #include <linux/migrate.h> #include <linux/backing-dev.h> #include <linux/freezer.h> +#include <linux/list_sort.h> #include "xfs_sb.h" #include "xfs_inum.h" @@ -76,6 +77,27 @@ struct workqueue_struct *xfsconvertd_workqueue; #define xfs_buf_deallocate(bp) \ kmem_zone_free(xfs_buf_zone, (bp)); +static inline int +xfs_buf_is_vmapped( + struct xfs_buf *bp) +{ + /* + * Return true if the buffer is vmapped. + * + * The XBF_MAPPED flag is set if the buffer should be mapped, but the + * code is clever enough to know it doesn't have to map a single page, + * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1. + */ + return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1; +} + +static inline int +xfs_buf_vmap_len( + struct xfs_buf *bp) +{ + return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; +} + /* * Page Region interfaces. * @@ -314,7 +336,7 @@ xfs_buf_free( if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { uint i; - if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1)) + if (xfs_buf_is_vmapped(bp)) free_address(bp->b_addr - bp->b_offset); for (i = 0; i < bp->b_page_count; i++) { @@ -1051,22 +1073,30 @@ xfs_buf_ioerror( } int -xfs_bawrite( - void *mp, +xfs_bwrite( + struct xfs_mount *mp, struct xfs_buf *bp) { - trace_xfs_buf_bawrite(bp, _RET_IP_); + int iowait = (bp->b_flags & XBF_ASYNC) == 0; + int error = 0; - ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); + bp->b_strat = xfs_bdstrat_cb; + bp->b_mount = mp; + bp->b_flags |= XBF_WRITE; + if (!iowait) + bp->b_flags |= _XBF_RUN_QUEUES; xfs_buf_delwri_dequeue(bp); + xfs_buf_iostrategy(bp); - bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD); - bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES); + if (iowait) { + error = xfs_buf_iowait(bp); + if (error) + xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); + xfs_buf_relse(bp); + } - bp->b_mount = mp; - bp->b_strat = xfs_bdstrat_cb; - return xfs_bdstrat_cb(bp); + return error; } void @@ -1085,6 +1115,126 @@ xfs_bdwrite( xfs_buf_delwri_queue(bp, 1); } +/* + * Called when we want to stop a buffer from getting written or read. + * We attach the EIO error, muck with its flags, and call biodone + * so that the proper iodone callbacks get called. + */ +STATIC int +xfs_bioerror( + xfs_buf_t *bp) +{ +#ifdef XFSERRORDEBUG + ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); +#endif + + /* + * No need to wait until the buffer is unpinned, we aren't flushing it. 
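+	 * Contrast with xfs_bioerror_relse() below, which releases the
+	 * buffer itself instead of running the iodone callbacks.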
+ */ + XFS_BUF_ERROR(bp, EIO); + + /* + * We're calling biodone, so delete the XBF_DONE flag. + */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_UNDONE(bp); + XFS_BUF_STALE(bp); + + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + xfs_biodone(bp); + + return EIO; +} + +/* + * Same as xfs_bioerror, except that we are releasing the buffer + * here ourselves, and avoiding the biodone call. + * This is meant for userdata errors; metadata bufs come with + * iodone functions attached, so that we can track down errors. + */ +STATIC int +xfs_bioerror_relse( + struct xfs_buf *bp) +{ + int64_t fl = XFS_BUF_BFLAGS(bp); + /* + * No need to wait until the buffer is unpinned. + * We aren't flushing it. + * + * chunkhold expects B_DONE to be set, whether + * we actually finish the I/O or not. We don't want to + * change that interface. + */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_STALE(bp); + XFS_BUF_CLR_IODONE_FUNC(bp); + XFS_BUF_CLR_BDSTRAT_FUNC(bp); + if (!(fl & XBF_ASYNC)) { + /* + * Mark b_error and B_ERROR _both_. + * Lots of chunkcache code assumes that. + * There's no reason to mark error for + * ASYNC buffers. + */ + XFS_BUF_ERROR(bp, EIO); + XFS_BUF_FINISH_IOWAIT(bp); + } else { + xfs_buf_relse(bp); + } + + return EIO; +} + + +/* + * All xfs metadata buffers except log state machine buffers + * get this attached as their b_bdstrat callback function. + * This is so that we can catch a buffer + * after prematurely unpinning it to forcibly shutdown the filesystem. + */ +int +xfs_bdstrat_cb( + struct xfs_buf *bp) +{ + if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { + trace_xfs_bdstrat_shut(bp, _RET_IP_); + /* + * Metadata write that didn't get logged but + * written delayed anyway. These aren't associated + * with a transaction, and can be ignored. + */ + if (!bp->b_iodone && !XFS_BUF_ISREAD(bp)) + return xfs_bioerror_relse(bp); + else + return xfs_bioerror(bp); + } + + xfs_buf_iorequest(bp); + return 0; +} + +/* + * Wrapper around bdstrat so that we can stop data from going to disk in case + * we are shutting down the filesystem. Typically user data goes through this + * path; one of the exceptions is the superblock. + */ +void +xfsbdstrat( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + if (XFS_FORCED_SHUTDOWN(mp)) { + trace_xfs_bdstrat_shut(bp, _RET_IP_); + xfs_bioerror_relse(bp); + return; + } + + xfs_buf_iorequest(bp); +} + STATIC void _xfs_buf_ioend( xfs_buf_t *bp, @@ -1107,6 +1257,9 @@ xfs_buf_bio_end_io( xfs_buf_ioerror(bp, -error); + if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) + invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); + do { struct page *page = bvec->bv_page; @@ -1216,6 +1369,10 @@ next_chunk: submit_io: if (likely(bio->bi_size)) { + if (xfs_buf_is_vmapped(bp)) { + flush_kernel_vmap_range(bp->b_addr, + xfs_buf_vmap_len(bp)); + } submit_bio(rw, bio); if (size) goto next_chunk; @@ -1296,7 +1453,7 @@ xfs_buf_iomove( xfs_buf_t *bp, /* buffer to process */ size_t boff, /* starting buffer offset */ size_t bsize, /* length to copy */ - caddr_t data, /* data address */ + void *data, /* data address */ xfs_buf_rw_t mode) /* read/write/zero flag */ { size_t bend, cpoff, csize; @@ -1378,8 +1535,8 @@ xfs_alloc_bufhash( btp->bt_hashshift = external ?
3 : 8; /* 8 or 256 buckets */ btp->bt_hashmask = (1 << btp->bt_hashshift) - 1; - btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) * - sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE); + btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) * + sizeof(xfs_bufhash_t)); for (i = 0; i < (1 << btp->bt_hashshift); i++) { spin_lock_init(&btp->bt_hash[i].bh_lock); INIT_LIST_HEAD(&btp->bt_hash[i].bh_list); @@ -1390,7 +1547,7 @@ STATIC void xfs_free_bufhash( xfs_buftarg_t *btp) { - kmem_free(btp->bt_hash); + kmem_free_large(btp->bt_hash); btp->bt_hash = NULL; } @@ -1595,6 +1752,11 @@ xfs_buf_delwri_queue( list_del(&bp->b_list); } + if (list_empty(dwq)) { + /* start xfsbufd as it is about to have something to do */ + wake_up_process(bp->b_target->bt_task); + } + bp->b_flags |= _XBF_DELWRI_Q; list_add_tail(&bp->b_list, dwq); bp->b_queuetime = jiffies; @@ -1626,6 +1788,35 @@ xfs_buf_delwri_dequeue( trace_xfs_buf_delwri_dequeue(bp, _RET_IP_); } +/* + * If a delwri buffer needs to be pushed before it has aged out, then promote + * it to the head of the delwri queue so that it will be flushed on the next + * xfsbufd run. We do this by resetting the queuetime of the buffer to be older + * than the age currently needed to flush the buffer. Hence the next time the + * xfsbufd sees it is guaranteed to be considered old enough to flush. + */ +void +xfs_buf_delwri_promote( + struct xfs_buf *bp) +{ + struct xfs_buftarg *btp = bp->b_target; + long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1; + + ASSERT(bp->b_flags & XBF_DELWRI); + ASSERT(bp->b_flags & _XBF_DELWRI_Q); + + /* + * Check the buffer age before locking the delayed write queue as we + * don't need to promote buffers that are already past the flush age. + */ + if (bp->b_queuetime < jiffies - age) + return; + bp->b_queuetime = jiffies - age; + spin_lock(&btp->bt_delwrite_lock); + list_move(&bp->b_list, &btp->bt_delwrite_queue); + spin_unlock(&btp->bt_delwrite_lock); +} + STATIC void xfs_buf_runall_queues( struct workqueue_struct *queue) @@ -1644,6 +1835,8 @@ xfsbufd_wakeup( list_for_each_entry(btp, &xfs_buftarg_list, bt_list) { if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags)) continue; + if (list_empty(&btp->bt_delwrite_queue)) + continue; set_bit(XBT_FORCE_FLUSH, &btp->bt_flags); wake_up_process(btp->bt_task); } @@ -1694,20 +1887,53 @@ xfs_buf_delwri_split( } +/* + * Compare function is more complex than it needs to be because + * the return value is only 32 bits and we are doing comparisons + * on 64 bit values + */ +static int +xfs_buf_cmp( + void *priv, + struct list_head *a, + struct list_head *b) +{ + struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); + struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); + xfs_daddr_t diff; + + diff = ap->b_bn - bp->b_bn; + if (diff < 0) + return -1; + if (diff > 0) + return 1; + return 0; +} + +void +xfs_buf_delwri_sort( + xfs_buftarg_t *target, + struct list_head *list) +{ + list_sort(NULL, list, xfs_buf_cmp); +} + STATIC int xfsbufd( void *data) { - struct list_head tmp; - xfs_buftarg_t *target = (xfs_buftarg_t *)data; - int count; - xfs_buf_t *bp; + xfs_buftarg_t *target = (xfs_buftarg_t *)data; current->flags |= PF_MEMALLOC; set_freezable(); do { + long age = xfs_buf_age_centisecs * msecs_to_jiffies(10); + long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10); + int count = 0; + struct list_head tmp; + if (unlikely(freezing(current))) { set_bit(XBT_FORCE_SLEEP, &target->bt_flags); refrigerator(); @@ -1715,17 +1941,16 @@ xfsbufd( clear_bit(XBT_FORCE_SLEEP, &target->bt_flags); 
} - schedule_timeout_interruptible( - xfs_buf_timer_centisecs * msecs_to_jiffies(10)); + /* sleep for a long time if there is nothing to do. */ + if (list_empty(&target->bt_delwrite_queue)) + tout = MAX_SCHEDULE_TIMEOUT; + schedule_timeout_interruptible(tout); - xfs_buf_delwri_split(target, &tmp, - xfs_buf_age_centisecs * msecs_to_jiffies(10)); - - count = 0; + xfs_buf_delwri_split(target, &tmp, age); + list_sort(NULL, &tmp, xfs_buf_cmp); while (!list_empty(&tmp)) { - bp = list_entry(tmp.next, xfs_buf_t, b_list); - ASSERT(target == bp->b_target); - + struct xfs_buf *bp; + bp = list_first_entry(&tmp, struct xfs_buf, b_list); list_del_init(&bp->b_list); xfs_buf_iostrategy(bp); count++; @@ -1751,42 +1976,45 @@ xfs_flush_buftarg( xfs_buftarg_t *target, int wait) { - struct list_head tmp; - xfs_buf_t *bp, *n; + xfs_buf_t *bp; int pincount = 0; + LIST_HEAD(tmp_list); + LIST_HEAD(wait_list); xfs_buf_runall_queues(xfsconvertd_workqueue); xfs_buf_runall_queues(xfsdatad_workqueue); xfs_buf_runall_queues(xfslogd_workqueue); set_bit(XBT_FORCE_FLUSH, &target->bt_flags); - pincount = xfs_buf_delwri_split(target, &tmp, 0); + pincount = xfs_buf_delwri_split(target, &tmp_list, 0); /* - * Dropped the delayed write list lock, now walk the temporary list + * Dropped the delayed write list lock, now walk the temporary list. + * All I/O is issued async and then if we need to wait for completion + * we do that after issuing all the IO. */ - list_for_each_entry_safe(bp, n, &tmp, b_list) { + list_sort(NULL, &tmp_list, xfs_buf_cmp); + while (!list_empty(&tmp_list)) { + bp = list_first_entry(&tmp_list, struct xfs_buf, b_list); ASSERT(target == bp->b_target); - if (wait) + list_del_init(&bp->b_list); + if (wait) { bp->b_flags &= ~XBF_ASYNC; - else - list_del_init(&bp->b_list); - + list_add(&bp->b_list, &wait_list); + } xfs_buf_iostrategy(bp); } - if (wait) + if (wait) { + /* Expedite and wait for IO to complete. 
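+		 * blk_run_address_space() kicks the request queue so
+		 * that queued writes are dispatched before we block in
+		 * xfs_iowait() on each buffer in turn.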
*/ blk_run_address_space(target->bt_mapping); + while (!list_empty(&wait_list)) { + bp = list_first_entry(&wait_list, struct xfs_buf, b_list); - /* - * Remaining list items must be flushed before returning - */ - while (!list_empty(&tmp)) { - bp = list_entry(tmp.next, xfs_buf_t, b_list); - - list_del_init(&bp->b_list); - xfs_iowait(bp); - xfs_buf_relse(bp); + list_del_init(&bp->b_list); + xfs_iowait(bp); + xfs_buf_relse(bp); + } } return pincount; diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index a34c7b54822..386e7361e50 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -232,13 +232,17 @@ extern void xfs_buf_lock(xfs_buf_t *); extern void xfs_buf_unlock(xfs_buf_t *); /* Buffer Read and Write Routines */ -extern int xfs_bawrite(void *mp, xfs_buf_t *bp); +extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp); extern void xfs_bdwrite(void *mp, xfs_buf_t *bp); + +extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); +extern int xfs_bdstrat_cb(struct xfs_buf *); + extern void xfs_buf_ioend(xfs_buf_t *, int); extern void xfs_buf_ioerror(xfs_buf_t *, int); extern int xfs_buf_iorequest(xfs_buf_t *); extern int xfs_buf_iowait(xfs_buf_t *); -extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t, +extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, xfs_buf_rw_t); static inline int xfs_buf_iostrategy(xfs_buf_t *bp) @@ -261,6 +265,7 @@ extern int xfs_buf_ispin(xfs_buf_t *); /* Delayed Write Buffer Routines */ extern void xfs_buf_delwri_dequeue(xfs_buf_t *); +extern void xfs_buf_delwri_promote(xfs_buf_t *); /* Buffer Daemon Setup Routines */ extern int xfs_buf_init(void); @@ -270,33 +275,19 @@ extern void xfs_buf_terminate(void); ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) -#define XFS_B_ASYNC XBF_ASYNC -#define XFS_B_DELWRI XBF_DELWRI -#define XFS_B_READ XBF_READ -#define XFS_B_WRITE XBF_WRITE -#define XFS_B_STALE XBF_STALE - -#define XFS_BUF_TRYLOCK XBF_TRYLOCK -#define XFS_INCORE_TRYLOCK XBF_TRYLOCK -#define XFS_BUF_LOCK XBF_LOCK -#define XFS_BUF_MAPPED XBF_MAPPED - -#define BUF_BUSY XBF_DONT_BLOCK - #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) #define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) -#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE) -#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE) -#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE) +#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XBF_STALE) +#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) +#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE) #define XFS_BUF_SUPER_STALE(bp) do { \ XFS_BUF_STALE(bp); \ xfs_buf_delwri_dequeue(bp); \ XFS_BUF_DONE(bp); \ } while (0) -#define XFS_BUF_MANAGE XBF_FS_MANAGED #define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED) #define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) @@ -385,31 +376,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp) #define xfs_biomove(bp, off, len, data, rw) \ xfs_buf_iomove((bp), (off), (len), (data), \ - ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ) + ((rw) == XBF_WRITE) ? 
XBRW_WRITE : XBRW_READ) #define xfs_biozero(bp, off, len) \ xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) - -static inline int XFS_bwrite(xfs_buf_t *bp) -{ - int iowait = (bp->b_flags & XBF_ASYNC) == 0; - int error = 0; - - if (!iowait) - bp->b_flags |= _XBF_RUN_QUEUES; - - xfs_buf_delwri_dequeue(bp); - xfs_buf_iostrategy(bp); - if (iowait) { - error = xfs_buf_iowait(bp); - xfs_buf_relse(bp); - } - return error; -} - -#define XFS_bdstrat(bp) xfs_buf_iorequest(bp) - #define xfs_iowait(bp) xfs_buf_iowait(bp) #define xfs_baread(target, rablkno, ralen) \ @@ -424,6 +395,7 @@ extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); extern void xfs_wait_buftarg(xfs_buftarg_t *); extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); extern int xfs_flush_buftarg(xfs_buftarg_t *, int); + #ifdef CONFIG_KDB_MODULES extern struct list_head *xfs_get_buftarg_list(void); #endif diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c index 7501b85fd86..b6918d76bc7 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -79,7 +79,7 @@ xfs_flush_pages( xfs_iflags_clear(ip, XFS_ITRUNCATED); ret = -filemap_fdatawrite(mapping); } - if (flags & XFS_B_ASYNC) + if (flags & XBF_ASYNC) return ret; ret2 = xfs_wait_on_pages(ip, first, last); if (!ret) diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index a034cf62443..4ea1ee18ade 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -447,12 +447,12 @@ xfs_attrlist_by_handle( int xfs_attrmulti_attr_get( struct inode *inode, - char *name, - char __user *ubuf, + unsigned char *name, + unsigned char __user *ubuf, __uint32_t *len, __uint32_t flags) { - char *kbuf; + unsigned char *kbuf; int error = EFAULT; if (*len > XATTR_SIZE_MAX) @@ -476,12 +476,12 @@ xfs_attrmulti_attr_get( int xfs_attrmulti_attr_set( struct inode *inode, - char *name, - const char __user *ubuf, + unsigned char *name, + const unsigned char __user *ubuf, __uint32_t len, __uint32_t flags) { - char *kbuf; + unsigned char *kbuf; int error = EFAULT; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) @@ -501,7 +501,7 @@ xfs_attrmulti_attr_set( int xfs_attrmulti_attr_remove( struct inode *inode, - char *name, + unsigned char *name, __uint32_t flags) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) @@ -519,7 +519,7 @@ xfs_attrmulti_by_handle( xfs_fsop_attrmulti_handlereq_t am_hreq; struct dentry *dentry; unsigned int i, size; - char *attr_name; + unsigned char *attr_name; if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); @@ -547,7 +547,7 @@ xfs_attrmulti_by_handle( error = 0; for (i = 0; i < am_hreq.opcount; i++) { - ops[i].am_error = strncpy_from_user(attr_name, + ops[i].am_error = strncpy_from_user((char *)attr_name, ops[i].am_attrname, MAXNAMELEN); if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) error = -ERANGE; @@ -1431,6 +1431,9 @@ xfs_file_ioctl( if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (mp->m_flags & XFS_MOUNT_RDONLY) + return -XFS_ERROR(EROFS); + if (copy_from_user(&inout, arg, sizeof(inout))) return -XFS_ERROR(EFAULT); diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h index 7bd7c6afc1e..d56173b34a2 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/fs/xfs/linux-2.6/xfs_ioctl.h @@ -45,23 +45,23 @@ xfs_readlink_by_handle( extern int xfs_attrmulti_attr_get( struct inode *inode, - char *name, - char __user *ubuf, + unsigned char *name, + unsigned char __user *ubuf, __uint32_t *len, __uint32_t flags); extern int - 
xfs_attrmulti_attr_set( +xfs_attrmulti_attr_set( struct inode *inode, - char *name, - const char __user *ubuf, + unsigned char *name, + const unsigned char __user *ubuf, __uint32_t len, __uint32_t flags); extern int xfs_attrmulti_attr_remove( struct inode *inode, - char *name, + unsigned char *name, __uint32_t flags); extern struct dentry * diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index be1527b1670..0bf6d61f052 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -411,7 +411,7 @@ xfs_compat_attrmulti_by_handle( compat_xfs_fsop_attrmulti_handlereq_t am_hreq; struct dentry *dentry; unsigned int i, size; - char *attr_name; + unsigned char *attr_name; if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); @@ -440,7 +440,7 @@ xfs_compat_attrmulti_by_handle( error = 0; for (i = 0; i < am_hreq.opcount; i++) { - ops[i].am_error = strncpy_from_user(attr_name, + ops[i].am_error = strncpy_from_user((char *)attr_name, compat_ptr(ops[i].am_attrname), MAXNAMELEN); if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 225946012d0..e8566bbf0f0 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -140,10 +140,10 @@ xfs_init_security( struct xfs_inode *ip = XFS_I(inode); size_t length; void *value; - char *name; + unsigned char *name; int error; - error = security_inode_init_security(inode, dir, &name, + error = security_inode_init_security(inode, dir, (char **)&name, &value, &length); if (error) { if (error == -EOPNOTSUPP) diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 0d32457abef..eac6f80d786 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -630,18 +630,9 @@ start: * by root. This keeps people from modifying setuid and * setgid binaries. */ - - if (((xip->i_d.di_mode & S_ISUID) || - ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) == - (S_ISGID | S_IXGRP))) && - !capable(CAP_FSETID)) { - error = xfs_write_clear_setuid(xip); - if (likely(!error)) - error = -file_remove_suid(file); - if (unlikely(error)) { - goto out_unlock_internal; - } - } + error = -file_remove_suid(file); + if (unlikely(error)) + goto out_unlock_internal; /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; @@ -784,53 +775,6 @@ write_retry: } /* - * All xfs metadata buffers except log state machine buffers - * get this attached as their b_bdstrat callback function. - * This is so that we can catch a buffer - * after prematurely unpinning it to forcibly shutdown the filesystem. - */ -int -xfs_bdstrat_cb(struct xfs_buf *bp) -{ - if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { - trace_xfs_bdstrat_shut(bp, _RET_IP_); - /* - * Metadata write that didn't get logged but - * written delayed anyway. These aren't associated - * with a transaction, and can be ignored. - */ - if (XFS_BUF_IODONE_FUNC(bp) == NULL && - (XFS_BUF_ISREAD(bp)) == 0) - return (xfs_bioerror_relse(bp)); - else - return (xfs_bioerror(bp)); - } - - xfs_buf_iorequest(bp); - return 0; -} - -/* - * Wrapper around bdstrat so that we can stop data from going to disk in case - * we are shutting down the filesystem. Typically user data goes thru this - * path; one of the exceptions is the superblock. 
- */ -void -xfsbdstrat( - struct xfs_mount *mp, - struct xfs_buf *bp) -{ - ASSERT(mp); - if (!XFS_FORCED_SHUTDOWN(mp)) { - xfs_buf_iorequest(bp); - return; - } - - trace_xfs_bdstrat_shut(bp, _RET_IP_); - xfs_bioerror_relse(bp); -} - -/* * If the underlying (data/log/rt) device is readonly, there are some * operations that cannot proceed. */ diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h index d1f7789c7ff..342ae8c0d01 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.h +++ b/fs/xfs/linux-2.6/xfs_lrw.h @@ -22,9 +22,6 @@ struct xfs_mount; struct xfs_inode; struct xfs_buf; -/* errors from xfsbdstrat() must be extracted from the buffer */ -extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); -extern int xfs_bdstrat_cb(struct xfs_buf *); extern int xfs_dev_is_read_only(struct xfs_mount *, char *); extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 09783cc444a..25ea2408118 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -877,12 +877,11 @@ xfsaild( { struct xfs_ail *ailp = data; xfs_lsn_t last_pushed_lsn = 0; - long tout = 0; + long tout = 0; /* milliseconds */ while (!kthread_should_stop()) { - if (tout) - schedule_timeout_interruptible(msecs_to_jiffies(tout)); - tout = 1000; + schedule_timeout_interruptible(tout ? + msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); /* swsusp */ try_to_freeze(); @@ -954,16 +953,14 @@ xfs_fs_destroy_inode( ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM)); /* - * If we have nothing to flush with this inode then complete the - * teardown now, otherwise delay the flush operation. + * We always use background reclaim here because even if the + * inode is clean, it still may be under IO and hence we have + * to take the flush lock. The background reclaim path handles + * this more efficiently than we can here, so simply let background + * reclaim tear down all inodes. */ - if (!xfs_inode_clean(ip)) { - xfs_inode_set_reclaim_tag(ip); - return; - } - out_reclaim: - xfs_ireclaim(ip); + xfs_inode_set_reclaim_tag(ip); } /* @@ -1024,12 +1021,45 @@ xfs_fs_dirty_inode( XFS_I(inode)->i_update_core = 1; } -/* - * Attempt to flush the inode, this will actually fail - * if the inode is pinned, but we dirty the inode again - * at the point when it is unpinned after a log write, - * since this is when the inode itself becomes flushable. - */ +STATIC int +xfs_log_inode( + struct xfs_inode *ip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + int error; + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + + if (error) { + xfs_trans_cancel(tp, 0); + /* we need to return with the lock held shared */ + xfs_ilock(ip, XFS_ILOCK_SHARED); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + /* + * Note - it's possible that we might have pushed ourselves out of the + * way during trans_reserve which would flush the inode. But there's + * no guarantee that the inode buffer has actually gone out yet (it's + * delwri). Plus the buffer could be pinned anyway if it's part of + * an inode in another recent transaction. So we play it safe and + * fire off the transaction anyway. 
+ */ + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0); + xfs_ilock_demote(ip, XFS_ILOCK_EXCL); + + return error; +} + STATIC int xfs_fs_write_inode( struct inode *inode, @@ -1037,7 +1067,7 @@ xfs_fs_write_inode( { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - int error = 0; + int error = EAGAIN; xfs_itrace_entry(ip); @@ -1048,35 +1078,55 @@ xfs_fs_write_inode( error = xfs_wait_on_pages(ip, 0, -1); if (error) goto out; - } - /* - * Bypass inodes which have already been cleaned by - * the inode flush clustering code inside xfs_iflush - */ - if (xfs_inode_clean(ip)) - goto out; - - /* - * We make this non-blocking if the inode is contended, return - * EAGAIN to indicate to the caller that they did not succeed. - * This prevents the flush path from blocking on inodes inside - * another operation right now, they get caught later by xfs_sync. - */ - if (sync) { + /* + * Make sure the inode has hit stable storage. By using the + * log and the fsync transactions we reduce the IOs we have + * to do here from two (log and inode) to just the log. + * + * Note: We still need to do a delwri write of the inode after + * this to flush it to the backing buffer so that bulkstat + * works properly if this is the first time the inode has been + * written. Because we hold the ilock atomically over the + * transaction commit and the inode flush we are guaranteed + * that the inode is not pinned when it returns. If the flush + * lock is already held, then the inode has already been + * flushed once and we don't need to flush it again. Hence + * the code will only flush the inode if it isn't already + * being flushed. + */ xfs_ilock(ip, XFS_ILOCK_SHARED); - xfs_iflock(ip); - - error = xfs_iflush(ip, XFS_IFLUSH_SYNC); + if (ip->i_update_core) { + error = xfs_log_inode(ip); + if (error) + goto out_unlock; + } } else { - error = EAGAIN; + /* + * We make this non-blocking if the inode is contended, return + * EAGAIN to indicate to the caller that they did not succeed. + * This prevents the flush path from blocking on inodes inside + * another operation right now, they get caught later by xfs_sync. + */ if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) goto out; - if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) - goto out_unlock; + } + + if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) + goto out_unlock; - error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK); + /* + * Now we have the flush lock and the inode is not pinned, we can check + * if the inode is really clean as we know that there are no pending + * transaction completions, it is not waiting on the delayed write + * queue and there is no IO in progress. 
+ */ + if (xfs_inode_clean(ip)) { + xfs_ifunlock(ip); + error = 0; + goto out_unlock; } + error = xfs_iflush(ip, 0); out_unlock: xfs_iunlock(ip, XFS_ILOCK_SHARED); @@ -1259,6 +1309,29 @@ xfs_fs_statfs( return 0; } +STATIC void +xfs_save_resvblks(struct xfs_mount *mp) +{ + __uint64_t resblks = 0; + + mp->m_resblks_save = mp->m_resblks; + xfs_reserve_blocks(mp, &resblks, NULL); +} + +STATIC void +xfs_restore_resvblks(struct xfs_mount *mp) +{ + __uint64_t resblks; + + if (mp->m_resblks_save) { + resblks = mp->m_resblks_save; + mp->m_resblks_save = 0; + } else + resblks = xfs_default_resblks(mp); + + xfs_reserve_blocks(mp, &resblks, NULL); +} + STATIC int xfs_fs_remount( struct super_block *sb, @@ -1338,11 +1411,27 @@ xfs_fs_remount( } mp->m_update_flags = 0; } + + /* + * Fill out the reserve pool if it is empty. Use the stashed + * value if it is non-zero, otherwise go with the default. + */ + xfs_restore_resvblks(mp); } /* rw -> ro */ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { + /* + * After we have synced the data but before we sync the + * metadata, we need to free up the reserve block pool so that + * the used block count in the superblock on disk is correct at + * the end of the remount. Stash the current reserve pool size + * so that if we get remounted rw, we can return it to the same + * size. + */ + xfs_quiesce_data(mp); + xfs_save_resvblks(mp); xfs_quiesce_attr(mp); mp->m_flags |= XFS_MOUNT_RDONLY; } @@ -1361,11 +1450,22 @@ xfs_fs_freeze( { struct xfs_mount *mp = XFS_M(sb); + xfs_save_resvblks(mp); xfs_quiesce_attr(mp); return -xfs_fs_log_dummy(mp); } STATIC int +xfs_fs_unfreeze( + struct super_block *sb) +{ + struct xfs_mount *mp = XFS_M(sb); + + xfs_restore_resvblks(mp); + return 0; +} + +STATIC int xfs_fs_show_options( struct seq_file *m, struct vfsmount *mnt) @@ -1587,6 +1687,7 @@ static const struct super_operations xfs_super_operations = { .put_super = xfs_fs_put_super, .sync_fs = xfs_fs_sync_fs, .freeze_fs = xfs_fs_freeze, + .unfreeze_fs = xfs_fs_unfreeze, .statfs = xfs_fs_statfs, .remount_fs = xfs_fs_remount, .show_options = xfs_fs_show_options, diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 6fed97a8cd3..a9f6d20aff4 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -65,7 +65,6 @@ xfs_inode_ag_lookup( * as the tree is sparse and a gang lookup walks to find * the number of objects requested. */ - read_lock(&pag->pag_ici_lock); if (tag == XFS_ICI_NO_TAG) { nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void **)&ip, *first_index, 1); @@ -74,7 +73,7 @@ xfs_inode_ag_lookup( (void **)&ip, *first_index, 1, tag); } if (!nr_found) - goto unlock; + return NULL; /* * Update the index for the next lookup. 
Catch overflows @@ -84,25 +83,20 @@ */ *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) - goto unlock; - + return NULL; return ip; - -unlock: - read_unlock(&pag->pag_ici_lock); - return NULL; } STATIC int xfs_inode_ag_walk( struct xfs_mount *mp, - xfs_agnumber_t ag, + struct xfs_perag *pag, int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), int flags, - int tag) + int tag, + int exclusive) { - struct xfs_perag *pag = &mp->m_perag[ag]; uint32_t first_index; int last_error = 0; int skipped; @@ -114,10 +108,20 @@ restart: int error = 0; xfs_inode_t *ip; + if (exclusive) + write_lock(&pag->pag_ici_lock); + else + read_lock(&pag->pag_ici_lock); ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag); - if (!ip) + if (!ip) { + if (exclusive) + write_unlock(&pag->pag_ici_lock); + else + read_unlock(&pag->pag_ici_lock); break; + } + /* execute releases pag->pag_ici_lock */ error = execute(ip, pag, flags); if (error == EAGAIN) { skipped++; @@ -125,9 +129,8 @@ restart: } if (error) last_error = error; - /* - * bail out if the filesystem is corrupted. - */ + + /* bail out if the filesystem is corrupted. */ if (error == EFSCORRUPTED) break; @@ -137,8 +140,6 @@ restart: delay(1); goto restart; } - - xfs_put_perag(mp, pag); return last_error; } @@ -148,16 +149,24 @@ xfs_inode_ag_iterator( int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), int flags, - int tag) + int tag, + int exclusive) { int error = 0; int last_error = 0; xfs_agnumber_t ag; for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { - if (!mp->m_perag[ag].pag_ici_init) + struct xfs_perag *pag; + + pag = xfs_perag_get(mp, ag); + if (!pag->pag_ici_init) { + xfs_perag_put(pag); continue; - error = xfs_inode_ag_walk(mp, ag, execute, flags, tag); + } + error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, + exclusive); + xfs_perag_put(pag); if (error) { last_error = error; if (error == EFSCORRUPTED) @@ -174,30 +183,31 @@ xfs_sync_inode_valid( struct xfs_perag *pag) { struct inode *inode = VFS_I(ip); + int error = EFSCORRUPTED; /* nothing to sync during shutdown */ - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { - read_unlock(&pag->pag_ici_lock); - return EFSCORRUPTED; - } + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + goto out_unlock; - /* - * If we can't get a reference on the inode, it must be in reclaim. - * Leave it for the reclaim code to flush. Also avoid inodes that - * haven't been fully initialised. - */ - if (!igrab(inode)) { - read_unlock(&pag->pag_ici_lock); - return ENOENT; - } - read_unlock(&pag->pag_ici_lock); + /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ + error = ENOENT; + if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) + goto out_unlock; + + /* If we can't grab the inode, it must be on its way to reclaim. */ + if (!igrab(inode)) + goto out_unlock; - if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) { + if (is_bad_inode(inode)) { IRELE(ip); - return ENOENT; + goto out_unlock; } - return 0; + /* inode is valid */ + error = 0; +out_unlock: + read_unlock(&pag->pag_ici_lock); + return error; } STATIC int @@ -224,7 +234,7 @@ xfs_sync_inode_data( } error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ? - 0 : XFS_B_ASYNC, FI_NONE); + 0 : XBF_ASYNC, FI_NONE); xfs_iunlock(ip, XFS_IOLOCK_SHARED); out_wait: @@ -260,8 +270,7 @@ xfs_sync_inode_attr( goto out_unlock; } - error = xfs_iflush(ip, (flags & SYNC_WAIT) ? 
- XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI); + error = xfs_iflush(ip, flags); out_unlock: xfs_iunlock(ip, XFS_ILOCK_SHARED); @@ -282,14 +291,11 @@ xfs_sync_data( ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, - XFS_ICI_NO_TAG); + XFS_ICI_NO_TAG, 0); if (error) return XFS_ERROR(error); - xfs_log_force(mp, 0, - (flags & SYNC_WAIT) ? - XFS_LOG_FORCE | XFS_LOG_SYNC : - XFS_LOG_FORCE); + xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0); return 0; } @@ -304,7 +310,7 @@ xfs_sync_attr( ASSERT((flags & ~SYNC_WAIT) == 0); return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, - XFS_ICI_NO_TAG); + XFS_ICI_NO_TAG, 0); } STATIC int @@ -315,10 +321,6 @@ xfs_commit_dummy_trans( struct xfs_inode *ip = mp->m_rootip; struct xfs_trans *tp; int error; - int log_flags = XFS_LOG_FORCE; - - if (flags & SYNC_WAIT) - log_flags |= XFS_LOG_SYNC; /* * Put a dummy transaction in the log to tell recovery @@ -340,11 +342,11 @@ xfs_commit_dummy_trans( xfs_iunlock(ip, XFS_ILOCK_EXCL); /* the log force ensures this transaction is pushed to disk */ - xfs_log_force(mp, 0, log_flags); + xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0); return error; } -int +STATIC int xfs_sync_fsdata( struct xfs_mount *mp, int flags) @@ -360,7 +362,7 @@ xfs_sync_fsdata( if (flags & SYNC_TRYLOCK) { ASSERT(!(flags & SYNC_WAIT)); - bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); + bp = xfs_getsb(mp, XBF_TRYLOCK); if (!bp) goto out; @@ -380,7 +382,7 @@ xfs_sync_fsdata( * become pinned in between there and here. */ if (XFS_BUF_ISPINNED(bp)) - xfs_log_force(mp, 0, XFS_LOG_FORCE); + xfs_log_force(mp, 0); } @@ -441,9 +443,6 @@ xfs_quiesce_data( xfs_sync_data(mp, SYNC_WAIT); xfs_qm_sync(mp, SYNC_WAIT); - /* drop inode references pinned by filestreams */ - xfs_filestream_flush(mp); - /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp, SYNC_WAIT); @@ -460,16 +459,18 @@ xfs_quiesce_fs( { int count = 0, pincount; + xfs_reclaim_inodes(mp, 0); xfs_flush_buftarg(mp->m_ddev_targp, 0); - xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC); /* * This loop must run at least twice. The first instance of the loop * will flush most meta data but that will generate more meta data * (typically directory updates). Which then must be flushed and - * logged before we can write the unmount record. + * logged before we can write the unmount record. We also do sync + * reclaim of inodes to catch any that the above delwri flush skipped. 
*/ do { + xfs_reclaim_inodes(mp, SYNC_WAIT); xfs_sync_attr(mp, SYNC_WAIT); pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); if (!pincount) { @@ -568,7 +569,7 @@ xfs_flush_inodes( igrab(inode); xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion); wait_for_completion(&completion); - xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); + xfs_log_force(ip->i_mount, XFS_LOG_SYNC); } /* @@ -584,8 +585,8 @@ xfs_sync_worker( int error; if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); - xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC); + xfs_log_force(mp, 0); + xfs_reclaim_inodes(mp, 0); /* dgc: errors ignored here */ error = xfs_qm_sync(mp, SYNC_TRYLOCK); error = xfs_sync_fsdata(mp, SYNC_TRYLOCK); @@ -664,60 +665,6 @@ xfs_syncd_stop( kthread_stop(mp->m_sync_task); } -STATIC int -xfs_reclaim_inode( - xfs_inode_t *ip, - int sync_mode) -{ - xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); - - /* The hash lock here protects a thread in xfs_iget_core from - * racing with us on linking the inode back with a vnode. - * Once we have the XFS_IRECLAIM flag set it will not touch - * us. - */ - write_lock(&pag->pag_ici_lock); - spin_lock(&ip->i_flags_lock); - if (__xfs_iflags_test(ip, XFS_IRECLAIM) || - !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) { - spin_unlock(&ip->i_flags_lock); - write_unlock(&pag->pag_ici_lock); - return -EAGAIN; - } - __xfs_iflags_set(ip, XFS_IRECLAIM); - spin_unlock(&ip->i_flags_lock); - write_unlock(&pag->pag_ici_lock); - xfs_put_perag(ip->i_mount, pag); - - /* - * If the inode is still dirty, then flush it out. If the inode - * is not in the AIL, then it will be OK to flush it delwri as - * long as xfs_iflush() does not keep any references to the inode. - * We leave that decision up to xfs_iflush() since it has the - * knowledge of whether it's OK to simply do a delwri flush of - * the inode or whether we need to wait until the inode is - * pulled from the AIL. - * We get the flush lock regardless, though, just to make sure - * we don't free it while it is being flushed. - */ - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_iflock(ip); - - /* - * In the case of a forced shutdown we rely on xfs_iflush() to - * wait for the inode to be unpinned before returning an error. - */ - if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) { - /* synchronize with xfs_iflush_done */ - xfs_iflock(ip); - xfs_ifunlock(ip); - } - - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_ireclaim(ip); - return 0; -} - void __xfs_inode_set_reclaim_tag( struct xfs_perag *pag, @@ -737,16 +684,17 @@ void xfs_inode_set_reclaim_tag( xfs_inode_t *ip) { - xfs_mount_t *mp = ip->i_mount; - xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); + struct xfs_mount *mp = ip->i_mount; + struct xfs_perag *pag; + pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); read_lock(&pag->pag_ici_lock); spin_lock(&ip->i_flags_lock); __xfs_inode_set_reclaim_tag(pag, ip); __xfs_iflags_set(ip, XFS_IRECLAIMABLE); spin_unlock(&ip->i_flags_lock); read_unlock(&pag->pag_ici_lock); - xfs_put_perag(mp, pag); + xfs_perag_put(pag); } void @@ -759,20 +707,145 @@ __xfs_inode_clear_reclaim_tag( XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); } +/* + * Inodes in different states need to be treated differently, and the return + * value of xfs_iflush is not sufficient to get this right. 
The following table + * lists the inode states and the reclaim actions necessary for non-blocking + * reclaim: + * + * + * inode state iflush ret required action + * --------------- ---------- --------------- + * bad - reclaim + * shutdown EIO unpin and reclaim + * clean, unpinned 0 reclaim + * stale, unpinned 0 reclaim + * clean, pinned(*) 0 requeue + * stale, pinned EAGAIN requeue + * dirty, delwri ok 0 requeue + * dirty, delwri blocked EAGAIN requeue + * dirty, sync flush 0 reclaim + * + * (*) dgc: I don't think the clean, pinned state is possible but it gets + * handled anyway given the order of checks implemented. + * + * As can be seen from the table, the return value of xfs_iflush() is not + * sufficient to correctly decide the reclaim action here. The checks in + * xfs_iflush() might look like duplicates, but they are not. + * + * Also, because we get the flush lock first, we know that any inode that has + * been flushed delwri has had the flush completed by the time we check that + * the inode is clean. The clean inode check needs to be done before flushing + * the inode delwri otherwise we would loop forever requeuing clean inodes as + * we cannot tell apart a successful delwri flush and a clean inode from the + * return value of xfs_iflush(). + * + * Note that because the inode is flushed delayed write by background + * writeback, the flush lock may already be held here and waiting on it can + * result in very long latencies. Hence for sync reclaims, where we wait on the + * flush lock, the caller should push out delayed write inodes first before + * trying to reclaim them to minimise the amount of time spent waiting. For + * background relaim, we just requeue the inode for the next pass. + * + * Hence the order of actions after gaining the locks should be: + * bad => reclaim + * shutdown => unpin and reclaim + * pinned, delwri => requeue + * pinned, sync => unpin + * stale => reclaim + * clean => reclaim + * dirty, delwri => flush and requeue + * dirty, sync => flush, wait and reclaim + */ STATIC int -xfs_reclaim_inode_now( +xfs_reclaim_inode( struct xfs_inode *ip, struct xfs_perag *pag, - int flags) + int sync_mode) { - /* ignore if already under reclaim */ - if (xfs_iflags_test(ip, XFS_IRECLAIM)) { - read_unlock(&pag->pag_ici_lock); + int error = 0; + + /* + * The radix tree lock here protects a thread in xfs_iget from racing + * with us starting reclaim on the inode. Once we have the + * XFS_IRECLAIM flag set it will not touch us. 
+ */ + spin_lock(&ip->i_flags_lock); + ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE)); + if (__xfs_iflags_test(ip, XFS_IRECLAIM)) { + /* ignore as it is already under reclaim */ + spin_unlock(&ip->i_flags_lock); + write_unlock(&pag->pag_ici_lock); return 0; } - read_unlock(&pag->pag_ici_lock); + __xfs_iflags_set(ip, XFS_IRECLAIM); + spin_unlock(&ip->i_flags_lock); + write_unlock(&pag->pag_ici_lock); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + if (!xfs_iflock_nowait(ip)) { + if (!(sync_mode & SYNC_WAIT)) + goto out; + xfs_iflock(ip); + } + + if (is_bad_inode(VFS_I(ip))) + goto reclaim; + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { + xfs_iunpin_wait(ip); + goto reclaim; + } + if (xfs_ipincount(ip)) { + if (!(sync_mode & SYNC_WAIT)) { + xfs_ifunlock(ip); + goto out; + } + xfs_iunpin_wait(ip); + } + if (xfs_iflags_test(ip, XFS_ISTALE)) + goto reclaim; + if (xfs_inode_clean(ip)) + goto reclaim; + + /* Now we have an inode that needs flushing */ + error = xfs_iflush(ip, sync_mode); + if (sync_mode & SYNC_WAIT) { + xfs_iflock(ip); + goto reclaim; + } + + /* + * When we have to flush an inode but don't have SYNC_WAIT set, we + * flush the inode out using a delwri buffer and wait for the next + * call into reclaim to find it in a clean state instead of waiting for + * it now. We also don't return errors here - if the error is transient + * then the next reclaim pass will flush the inode, and if the error + * is permanent then the next sync reclaim will reclaim the inode and + * pass on the error. + */ + if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { + xfs_fs_cmn_err(CE_WARN, ip->i_mount, + "inode 0x%llx background reclaim flush failed with %d", + (long long)ip->i_ino, error); + } +out: + xfs_iflags_clear(ip, XFS_IRECLAIM); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + /* + * We could return EAGAIN here to make reclaim rescan the inode tree in + * a short while. However, this just burns CPU time scanning the tree + * waiting for IO to complete and xfssyncd never goes back to the idle + * state. Instead, return 0 to let the next scheduled background reclaim + * attempt to reclaim the inode again. 
+ */ + return 0; + +reclaim: + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_ireclaim(ip); + return error; - return xfs_reclaim_inode(ip, flags); } int @@ -780,6 +853,6 @@ xfs_reclaim_inodes( xfs_mount_t *mp, int mode) { - return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode, - XFS_ICI_RECLAIM_TAG); + return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode, + XFS_ICI_RECLAIM_TAG, 1); } diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index a500b4d9183..d480c346cab 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -37,7 +37,6 @@ void xfs_syncd_stop(struct xfs_mount *mp); int xfs_sync_attr(struct xfs_mount *mp, int flags); int xfs_sync_data(struct xfs_mount *mp, int flags); -int xfs_sync_fsdata(struct xfs_mount *mp, int flags); int xfs_quiesce_data(struct xfs_mount *mp); void xfs_quiesce_attr(struct xfs_mount *mp); @@ -54,6 +53,6 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag); int xfs_inode_ag_iterator(struct xfs_mount *mp, int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), - int flags, int tag); + int flags, int tag, int write_lock); #endif diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index c22a608321a..a4574dcf506 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -78,6 +78,33 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class, ) ) +#define DEFINE_PERAG_REF_EVENT(name) \ +TRACE_EVENT(name, \ + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \ + unsigned long caller_ip), \ + TP_ARGS(mp, agno, refcount, caller_ip), \ + TP_STRUCT__entry( \ + __field(dev_t, dev) \ + __field(xfs_agnumber_t, agno) \ + __field(int, refcount) \ + __field(unsigned long, caller_ip) \ + ), \ + TP_fast_assign( \ + __entry->dev = mp->m_super->s_dev; \ + __entry->agno = agno; \ + __entry->refcount = refcount; \ + __entry->caller_ip = caller_ip; \ + ), \ + TP_printk("dev %d:%d agno %u refcount %d caller %pf", \ + MAJOR(__entry->dev), MINOR(__entry->dev), \ + __entry->agno, \ + __entry->refcount, \ + (char *)__entry->caller_ip) \ +); + +DEFINE_PERAG_REF_EVENT(xfs_perag_get) +DEFINE_PERAG_REF_EVENT(xfs_perag_put) + #define DEFINE_ATTR_LIST_EVENT(name) \ DEFINE_EVENT(xfs_attr_list_class, name, \ TP_PROTO(struct xfs_attr_list_context *ctx), \ @@ -456,6 +483,7 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf); DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf); DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur); DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb); @@ -1414,6 +1442,59 @@ TRACE_EVENT(xfs_dir2_leafn_moveents, __entry->count) ); +#define XFS_SWAPEXT_INODES \ + { 0, "target" }, \ + { 1, "temp" } + +#define XFS_INODE_FORMAT_STR \ + { 0, "invalid" }, \ + { 1, "local" }, \ + { 2, "extent" }, \ + { 3, "btree" } + +DECLARE_EVENT_CLASS(xfs_swap_extent_class, + TP_PROTO(struct xfs_inode *ip, int which), + TP_ARGS(ip, which), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, which) + __field(xfs_ino_t, ino) + __field(int, format) + __field(int, nex) + __field(int, max_nex) + __field(int, broot_size) + __field(int, fork_off) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->which = which; + __entry->ino = ip->i_ino; + __entry->format = ip->i_d.di_format; + __entry->nex = 
ip->i_d.di_nextents; + __entry->max_nex = ip->i_df.if_ext_max; + __entry->broot_size = ip->i_df.if_broot_bytes; + __entry->fork_off = XFS_IFORK_BOFF(ip); + ), + TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, " + "Max in-fork extents %d, broot size %d, fork offset %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_symbolic(__entry->which, XFS_SWAPEXT_INODES), + __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR), + __entry->nex, + __entry->max_nex, + __entry->broot_size, + __entry->fork_off) +) + +#define DEFINE_SWAPEXT_EVENT(name) \ +DEFINE_EVENT(xfs_swap_extent_class, name, \ + TP_PROTO(struct xfs_inode *ip, int which), \ + TP_ARGS(ip, which)) + +DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before); +DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); + #endif /* _TRACE_XFS_H */ #undef TRACE_INCLUDE_PATH diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/linux-2.6/xfs_xattr.c index 0b1878857fc..fa01b9daba6 100644 --- a/fs/xfs/linux-2.6/xfs_xattr.c +++ b/fs/xfs/linux-2.6/xfs_xattr.c @@ -45,7 +45,7 @@ xfs_xattr_get(struct dentry *dentry, const char *name, value = NULL; } - error = -xfs_attr_get(ip, name, value, &asize, xflags); + error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags); if (error) return error; return asize; @@ -67,8 +67,9 @@ xfs_xattr_set(struct dentry *dentry, const char *name, const void *value, xflags |= ATTR_REPLACE; if (!value) - return -xfs_attr_remove(ip, name, xflags); - return -xfs_attr_set(ip, name, (void *)value, size, xflags); + return -xfs_attr_remove(ip, (unsigned char *)name, xflags); + return -xfs_attr_set(ip, (unsigned char *)name, + (void *)value, size, xflags); } static struct xattr_handler xfs_xattr_user_handler = { @@ -124,8 +125,13 @@ static const char *xfs_xattr_prefix(int flags) } static int -xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags, - char *name, int namelen, int valuelen, char *value) +xfs_xattr_put_listent( + struct xfs_attr_list_context *context, + int flags, + unsigned char *name, + int namelen, + int valuelen, + unsigned char *value) { unsigned int prefix_len = xfs_xattr_prefix_len(flags); char *offset; @@ -148,7 +154,7 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags, offset = (char *)context->alist + context->count; strncpy(offset, xfs_xattr_prefix(flags), prefix_len); offset += prefix_len; - strncpy(offset, name, namelen); /* real name */ + strncpy(offset, (char *)name, namelen); /* real name */ offset += namelen; *offset = '\0'; context->count += prefix_len + namelen + 1; @@ -156,8 +162,13 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags, } static int -xfs_xattr_put_listent_sizes(struct xfs_attr_list_context *context, int flags, - char *name, int namelen, int valuelen, char *value) +xfs_xattr_put_listent_sizes( + struct xfs_attr_list_context *context, + int flags, + unsigned char *name, + int namelen, + int valuelen, + unsigned char *value) { context->count += xfs_xattr_prefix_len(flags) + namelen + 1; return 0; diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index d7c7eea09fc..5f79dd78626 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -1187,7 +1187,7 @@ xfs_qm_dqflush( * block, nada. 
*/ if (!XFS_DQ_IS_DIRTY(dqp) || - (!(flags & XFS_QMOPT_SYNC) && atomic_read(&dqp->q_pincount) > 0)) { + (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { xfs_dqfunlock(dqp); return 0; } @@ -1248,23 +1248,20 @@ xfs_qm_dqflush( */ if (XFS_BUF_ISPINNED(bp)) { trace_xfs_dqflush_force(dqp); - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + xfs_log_force(mp, 0); } - if (flags & XFS_QMOPT_DELWRI) { - xfs_bdwrite(mp, bp); - } else if (flags & XFS_QMOPT_ASYNC) { - error = xfs_bawrite(mp, bp); - } else { + if (flags & SYNC_WAIT) error = xfs_bwrite(mp, bp); - } + else + xfs_bdwrite(mp, bp); trace_xfs_dqflush_done(dqp); /* * dqp is still locked, but caller is free to unlock it now. */ - return (error); + return error; } @@ -1445,7 +1442,7 @@ xfs_qm_dqpurge( * We don't care about getting disk errors here. We need * to purge this dquot anyway, so we go ahead regardless. */ - error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC); + error = xfs_qm_dqflush(dqp, SYNC_WAIT); if (error) xfs_fs_cmn_err(CE_WARN, mp, "xfs_qm_dqpurge: dquot %p flush failed", dqp); @@ -1529,25 +1526,17 @@ xfs_qm_dqflock_pushbuf_wait( * the flush lock when the I/O completes. */ bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno, - XFS_QI_DQCHUNKLEN(dqp->q_mount), - XFS_INCORE_TRYLOCK); - if (bp != NULL) { - if (XFS_BUF_ISDELAYWRITE(bp)) { - int error; - if (XFS_BUF_ISPINNED(bp)) { - xfs_log_force(dqp->q_mount, - (xfs_lsn_t)0, - XFS_LOG_FORCE); - } - error = xfs_bawrite(dqp->q_mount, bp); - if (error) - xfs_fs_cmn_err(CE_WARN, dqp->q_mount, - "xfs_qm_dqflock_pushbuf_wait: " - "pushbuf error %d on dqp %p, bp %p", - error, dqp, bp); - } else { - xfs_buf_relse(bp); - } + XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK); + if (!bp) + goto out_lock; + + if (XFS_BUF_ISDELAYWRITE(bp)) { + if (XFS_BUF_ISPINNED(bp)) + xfs_log_force(dqp->q_mount, 0); + xfs_buf_delwri_promote(bp); + wake_up_process(bp->b_target->bt_task); } + xfs_buf_relse(bp); +out_lock: xfs_dqflock(dqp); } diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index d0d4a9a0bbd..4e4ee9a5719 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -74,11 +74,11 @@ xfs_qm_dquot_logitem_format( logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; logvec->i_len = sizeof(xfs_dq_logformat_t); - XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT); + logvec->i_type = XLOG_REG_TYPE_QFORMAT; logvec++; logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; logvec->i_len = sizeof(xfs_disk_dquot_t); - XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT); + logvec->i_type = XLOG_REG_TYPE_DQUOT; ASSERT(2 == logitem->qli_item.li_desc->lid_size); logitem->qli_format.qlf_size = 2; @@ -153,7 +153,7 @@ xfs_qm_dquot_logitem_push( * lock without sleeping, then there must not have been * anyone in the process of flushing the dquot. */ - error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); + error = xfs_qm_dqflush(dqp, 0); if (error) xfs_fs_cmn_err(CE_WARN, dqp->q_mount, "xfs_qm_dquot_logitem_push: push error %d on dqp %p", @@ -190,7 +190,7 @@ xfs_qm_dqunpin_wait( /* * Give the log a push so we don't wait here too long. */ - xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE); + xfs_log_force(dqp->q_mount, 0); wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); } @@ -212,68 +212,31 @@ xfs_qm_dquot_logitem_pushbuf( xfs_dquot_t *dqp; xfs_mount_t *mp; xfs_buf_t *bp; - uint dopush; dqp = qip->qli_dquot; ASSERT(XFS_DQ_IS_LOCKED(dqp)); /* - * The qli_pushbuf_flag keeps others from - * trying to duplicate our effort. 
- */ - ASSERT(qip->qli_pushbuf_flag != 0); - ASSERT(qip->qli_push_owner == current_pid()); - - /* * If flushlock isn't locked anymore, chances are that the * inode flush completed and the inode was taken off the AIL. * So, just get out. */ if (completion_done(&dqp->q_flush) || ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { - qip->qli_pushbuf_flag = 0; xfs_dqunlock(dqp); return; } mp = dqp->q_mount; bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno, - XFS_QI_DQCHUNKLEN(mp), - XFS_INCORE_TRYLOCK); - if (bp != NULL) { - if (XFS_BUF_ISDELAYWRITE(bp)) { - dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && - !completion_done(&dqp->q_flush)); - qip->qli_pushbuf_flag = 0; - xfs_dqunlock(dqp); - - if (XFS_BUF_ISPINNED(bp)) { - xfs_log_force(mp, (xfs_lsn_t)0, - XFS_LOG_FORCE); - } - if (dopush) { - int error; -#ifdef XFSRACEDEBUG - delay_for_intr(); - delay(300); -#endif - error = xfs_bawrite(mp, bp); - if (error) - xfs_fs_cmn_err(CE_WARN, mp, - "xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p", - error, qip, bp); - } else { - xfs_buf_relse(bp); - } - } else { - qip->qli_pushbuf_flag = 0; - xfs_dqunlock(dqp); - xfs_buf_relse(bp); - } + XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK); + xfs_dqunlock(dqp); + if (!bp) return; - } + if (XFS_BUF_ISDELAYWRITE(bp)) + xfs_buf_delwri_promote(bp); + xfs_buf_relse(bp); + return; - qip->qli_pushbuf_flag = 0; - xfs_dqunlock(dqp); } /* @@ -291,50 +254,24 @@ xfs_qm_dquot_logitem_trylock( xfs_dq_logitem_t *qip) { xfs_dquot_t *dqp; - uint retval; dqp = qip->qli_dquot; if (atomic_read(&dqp->q_pincount) > 0) - return (XFS_ITEM_PINNED); + return XFS_ITEM_PINNED; if (! xfs_qm_dqlock_nowait(dqp)) - return (XFS_ITEM_LOCKED); + return XFS_ITEM_LOCKED; - retval = XFS_ITEM_SUCCESS; if (!xfs_dqflock_nowait(dqp)) { /* - * The dquot is already being flushed. It may have been - * flushed delayed write, however, and we don't want to - * get stuck waiting for that to complete. So, we want to check - * to see if we can lock the dquot's buffer without sleeping. - * If we can and it is marked for delayed write, then we - * hold it and send it out from the push routine. We don't - * want to do that now since we might sleep in the device - * strategy routine. We also don't want to grab the buffer lock - * here because we'd like not to call into the buffer cache - * while holding the AIL lock. - * Make sure to only return PUSHBUF if we set pushbuf_flag - * ourselves. If someone else is doing it then we don't - * want to go to the push routine and duplicate their efforts. + * dquot has already been flushed to the backing buffer, + * leave it locked, pushbuf routine will unlock it. */ - if (qip->qli_pushbuf_flag == 0) { - qip->qli_pushbuf_flag = 1; - ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno); -#ifdef DEBUG - qip->qli_push_owner = current_pid(); -#endif - /* - * The dquot is left locked. 
- */ - retval = XFS_ITEM_PUSHBUF; - } else { - retval = XFS_ITEM_FLUSHING; - xfs_dqunlock_nonotify(dqp); - } + return XFS_ITEM_PUSHBUF; } ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL); - return (retval); + return XFS_ITEM_SUCCESS; } @@ -467,7 +404,7 @@ xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf, log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format); log_vector->i_len = sizeof(xfs_qoff_logitem_t); - XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF); + log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF; qf->qql_format.qf_size = 1; } diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/quota/xfs_dquot_item.h index 5a632531f84..5acae2ada70 100644 --- a/fs/xfs/quota/xfs_dquot_item.h +++ b/fs/xfs/quota/xfs_dquot_item.h @@ -27,10 +27,6 @@ typedef struct xfs_dq_logitem { xfs_log_item_t qli_item; /* common portion */ struct xfs_dquot *qli_dquot; /* dquot ptr */ xfs_lsn_t qli_flush_lsn; /* lsn at last flush */ - unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */ -#ifdef DEBUG - uint64_t qli_push_owner; -#endif xfs_dq_logformat_t qli_format; /* logged structure */ } xfs_dq_logitem_t; diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 9e627a8b5b0..417e61e3d9d 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -118,9 +118,14 @@ xfs_Gqm_init(void) */ udqhash = kmem_zalloc_greedy(&hsize, XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t), - XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t), - KM_SLEEP | KM_MAYFAIL | KM_LARGE); - gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE); + XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t)); + if (!udqhash) + goto out; + + gdqhash = kmem_zalloc_large(hsize); + if (!gdqhash) + goto out_free_udqhash; + hsize /= sizeof(xfs_dqhash_t); ndquot = hsize << 8; @@ -170,6 +175,11 @@ xfs_Gqm_init(void) mutex_init(&qcheck_lock); #endif return xqm; + + out_free_udqhash: + kmem_free_large(udqhash); + out: + return NULL; } /* @@ -189,8 +199,8 @@ xfs_qm_destroy( xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); } - kmem_free(xqm->qm_usr_dqhtable); - kmem_free(xqm->qm_grp_dqhtable); + kmem_free_large(xqm->qm_usr_dqhtable); + kmem_free_large(xqm->qm_grp_dqhtable); xqm->qm_usr_dqhtable = NULL; xqm->qm_grp_dqhtable = NULL; xqm->qm_dqhashmask = 0; @@ -219,8 +229,12 @@ xfs_qm_hold_quotafs_ref( */ mutex_lock(&xfs_Gqm_lock); - if (xfs_Gqm == NULL) + if (!xfs_Gqm) { xfs_Gqm = xfs_Gqm_init(); + if (!xfs_Gqm) + return ENOMEM; + } + /* * We can keep a list of all filesystems with quotas mounted for * debugging and statistical purposes, but ... @@ -436,7 +450,7 @@ xfs_qm_unmount_quotas( STATIC int xfs_qm_dqflush_all( xfs_mount_t *mp, - int flags) + int sync_mode) { int recl; xfs_dquot_t *dqp; @@ -472,7 +486,7 @@ again: * across a disk write. */ xfs_qm_mplist_unlock(mp); - error = xfs_qm_dqflush(dqp, flags); + error = xfs_qm_dqflush(dqp, sync_mode); xfs_dqunlock(dqp); if (error) return error; @@ -912,13 +926,11 @@ xfs_qm_sync( { int recl, restarts; xfs_dquot_t *dqp; - uint flush_flags; int error; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return 0; - flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI; restarts = 0; again: @@ -978,7 +990,7 @@ xfs_qm_sync( * across a disk write */ xfs_qm_mplist_unlock(mp); - error = xfs_qm_dqflush(dqp, flush_flags); + error = xfs_qm_dqflush(dqp, flags); xfs_dqunlock(dqp); if (error && XFS_FORCED_SHUTDOWN(mp)) return 0; /* Need to prevent umount failure */ @@ -1782,7 +1794,7 @@ xfs_qm_quotacheck( * successfully. 
*/ if (!error) - error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI); + error = xfs_qm_dqflush_all(mp, 0); /* * We can get this error if we couldn't do a dquot allocation inside @@ -2004,7 +2016,7 @@ xfs_qm_shake_freelist( * We flush it delayed write, so don't bother * releasing the mplock. */ - error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); + error = xfs_qm_dqflush(dqp, 0); if (error) { xfs_fs_cmn_err(CE_WARN, dqp->q_mount, "xfs_qm_dqflush_all: dquot %p flush failed", dqp); @@ -2187,7 +2199,7 @@ xfs_qm_dqreclaim_one(void) * We flush it delayed write, so don't bother * releasing the freelist lock. */ - error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI); + error = xfs_qm_dqflush(dqp, 0); if (error) { xfs_fs_cmn_err(CE_WARN, dqp->q_mount, "xfs_qm_dqreclaim: dquot %p flush failed", dqp); diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index a5346630dfa..97b410c1279 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c @@ -59,7 +59,7 @@ xfs_fill_statvfs_from_dquot( be64_to_cpu(dp->d_blk_hardlimit); if (limit && statp->f_blocks > limit) { statp->f_blocks = limit; - statp->f_bfree = + statp->f_bfree = statp->f_bavail = (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; } diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 71af76fe8a2..5d0ee8d492d 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -891,7 +891,7 @@ xfs_qm_dqrele_all_inodes( uint flags) { ASSERT(mp->m_quotainfo); - xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG); + xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0); } /*------------------------------------------------------------------------*/ @@ -1192,9 +1192,9 @@ xfs_qm_internalqcheck( if (! XFS_IS_QUOTA_ON(mp)) return XFS_ERROR(ESRCH); - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + xfs_log_force(mp, XFS_LOG_SYNC); XFS_bflush(mp->m_ddev_targp); - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + xfs_log_force(mp, XFS_LOG_SYNC); XFS_bflush(mp->m_ddev_targp); mutex_lock(&qcheck_lock); diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 97ac9640be9..c3ab75cb1d9 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c @@ -589,12 +589,18 @@ xfs_trans_unreserve_and_mod_dquots( } } -STATIC int -xfs_quota_error(uint flags) +STATIC void +xfs_quota_warn( + struct xfs_mount *mp, + struct xfs_dquot *dqp, + int type) { - if (flags & XFS_QMOPT_ENOSPC) - return ENOSPC; - return EDQUOT; + /* no warnings for project quotas - we just return ENOSPC later */ + if (dqp->dq_flags & XFS_DQ_PROJ) + return; + quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA, + be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev, + type); } /* @@ -612,7 +618,6 @@ xfs_trans_dqresv( long ninos, uint flags) { - int error; xfs_qcnt_t hardlimit; xfs_qcnt_t softlimit; time_t timer; @@ -649,7 +654,6 @@ xfs_trans_dqresv( warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount); resbcountp = &dqp->q_res_rtbcount; } - error = 0; if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_core.d_id && @@ -667,18 +671,20 @@ xfs_trans_dqresv( * nblks. 
*/ if (hardlimit > 0ULL && - (hardlimit <= nblks + *resbcountp)) { - error = xfs_quota_error(flags); + hardlimit <= nblks + *resbcountp) { + xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); goto error_return; } - if (softlimit > 0ULL && - (softlimit <= nblks + *resbcountp)) { + softlimit <= nblks + *resbcountp) { if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { - error = xfs_quota_error(flags); + xfs_quota_warn(mp, dqp, + QUOTA_NL_BSOFTLONGWARN); goto error_return; } + + xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN); } } if (ninos > 0) { @@ -692,15 +698,19 @@ xfs_trans_dqresv( softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); if (!softlimit) softlimit = q->qi_isoftlimit; + if (hardlimit > 0ULL && count >= hardlimit) { - error = xfs_quota_error(flags); + xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); goto error_return; - } else if (softlimit > 0ULL && count >= softlimit) { - if ((timer != 0 && get_seconds() > timer) || + } + if (softlimit > 0ULL && count >= softlimit) { + if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { - error = xfs_quota_error(flags); + xfs_quota_warn(mp, dqp, + QUOTA_NL_ISOFTLONGWARN); goto error_return; } + xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN); } } } @@ -736,9 +746,14 @@ xfs_trans_dqresv( ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); + xfs_dqunlock(dqp); + return 0; + error_return: xfs_dqunlock(dqp); - return error; + if (flags & XFS_QMOPT_ENOSPC) + return ENOSPC; + return EDQUOT; } diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index 00fd357c3e4..d13eeba2c8f 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h @@ -36,8 +36,8 @@ struct xfs_acl { }; /* On-disk XFS extended attribute names */ -#define SGI_ACL_FILE "SGI_ACL_FILE" -#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT" +#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE" +#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT" #define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) #define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 6702bd86581..b1a5a1ff88e 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -187,17 +187,13 @@ typedef struct xfs_perag_busy { /* * Per-ag incore structure, copies of information in agf and agi, * to improve the performance of allocation group selection. - * - * pick sizes which fit in allocation buckets well */ -#if (BITS_PER_LONG == 32) -#define XFS_PAGB_NUM_SLOTS 84 -#elif (BITS_PER_LONG == 64) #define XFS_PAGB_NUM_SLOTS 128 -#endif -typedef struct xfs_perag -{ +typedef struct xfs_perag { + struct xfs_mount *pag_mount; /* owner filesystem */ + xfs_agnumber_t pag_agno; /* AG this structure belongs to */ + atomic_t pag_ref; /* perag reference count */ char pagf_init; /* this agf's entry is initialized */ char pagi_init; /* this agi's entry is initialized */ char pagf_metadata; /* the agf is preferred to be metadata */ @@ -210,8 +206,6 @@ typedef struct xfs_perag __uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */ xfs_agino_t pagi_freecount; /* number of free inodes */ xfs_agino_t pagi_count; /* number of allocated inodes */ - int pagb_count; /* pagb slots in use */ - xfs_perag_busy_t *pagb_list; /* unstable blocks */ /* * Inode allocation search lookup optimisation. 
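The xfs_ag.h hunk above adds a pag_ref counter to struct xfs_perag, and the hunks throughout this patch replace direct mp->m_perag[agno] indexing with paired xfs_perag_get()/xfs_perag_put() calls. The following stand-alone C sketch only illustrates that get/put discipline; the struct, the backing array and the helper bodies here are simplified stand-ins for illustration, not the kernel implementation (the real lookup and tracing live elsewhere in the series).

/* Simplified stand-in for struct xfs_perag; only the fields needed here. */
#include <assert.h>
#include <stdatomic.h>

struct perag {
	atomic_int pag_ref;	/* held while a caller uses this AG */
	long pagf_freeblks;	/* example of cached per-AG state */
};

static struct perag ag_table[4];	/* stand-in for the per-mount AG array */

/* Pin an AG before touching its cached state (models xfs_perag_get). */
static struct perag *perag_get(unsigned int agno)
{
	struct perag *pag = &ag_table[agno];

	atomic_fetch_add(&pag->pag_ref, 1);
	return pag;
}

/* Drop the reference once done (models xfs_perag_put). */
static void perag_put(struct perag *pag)
{
	int old = atomic_fetch_sub(&pag->pag_ref, 1);

	assert(old > 0);	/* an unbalanced put indicates a refcount bug */
}

int main(void)
{
	struct perag *pag = perag_get(0);

	pag->pagf_freeblks += 8;	/* safe: the structure is pinned */
	perag_put(pag);
	return 0;
}

The invariant the pattern enforces is simple: per-AG state is only touched between a get and its matching put, so pag_ref always records whether a perag structure is still in use, which is what makes the lifetime of these structures safe to manage independently of the callers.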
@@ -230,6 +224,8 @@ typedef struct xfs_perag rwlock_t pag_ici_lock; /* incore inode lock */ struct radix_tree_root pag_ici_root; /* incore inode cache root */ #endif + int pagb_count; /* pagb slots in use */ + xfs_perag_busy_t pagb_list[XFS_PAGB_NUM_SLOTS]; /* unstable blocks */ } xfs_perag_t; /* diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 275b1f4f943..94cddbfb256 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -1662,11 +1662,13 @@ xfs_free_ag_extent( xfs_agf_t *agf; xfs_perag_t *pag; /* per allocation group data */ + pag = xfs_perag_get(mp, agno); + pag->pagf_freeblks += len; + xfs_perag_put(pag); + agf = XFS_BUF_TO_AGF(agbp); - pag = &mp->m_perag[agno]; be32_add_cpu(&agf->agf_freeblks, len); xfs_trans_agblocks_delta(tp, len); - pag->pagf_freeblks += len; XFS_WANT_CORRUPTED_GOTO( be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length), @@ -1969,10 +1971,12 @@ xfs_alloc_get_freelist( xfs_trans_brelse(tp, agflbp); if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) agf->agf_flfirst = 0; - pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; + + pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); be32_add_cpu(&agf->agf_flcount, -1); xfs_trans_agflist_delta(tp, -1); pag->pagf_flcount--; + xfs_perag_put(pag); logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; if (btreeblk) { @@ -2078,7 +2082,8 @@ xfs_alloc_put_freelist( be32_add_cpu(&agf->agf_fllast, 1); if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) agf->agf_fllast = 0; - pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; + + pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); be32_add_cpu(&agf->agf_flcount, 1); xfs_trans_agflist_delta(tp, 1); pag->pagf_flcount++; @@ -2089,6 +2094,7 @@ xfs_alloc_put_freelist( pag->pagf_btreeblks--; logflags |= XFS_AGF_BTREEBLKS; } + xfs_perag_put(pag); xfs_alloc_log_agf(tp, agbp, logflags); @@ -2152,7 +2158,6 @@ xfs_read_agf( xfs_trans_brelse(tp, *bpp); return XFS_ERROR(EFSCORRUPTED); } - XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF); return 0; } @@ -2175,7 +2180,7 @@ xfs_alloc_read_agf( ASSERT(agno != NULLAGNUMBER); error = xfs_read_agf(mp, tp, agno, - (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0, + (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0, bpp); if (error) return error; @@ -2184,7 +2189,7 @@ xfs_alloc_read_agf( ASSERT(!XFS_BUF_GETERROR(*bpp)); agf = XFS_BUF_TO_AGF(*bpp); - pag = &mp->m_perag[agno]; + pag = xfs_perag_get(mp, agno); if (!pag->pagf_init) { pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks); @@ -2195,8 +2200,8 @@ xfs_alloc_read_agf( pag->pagf_levels[XFS_BTNUM_CNTi] = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); spin_lock_init(&pag->pagb_lock); - pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * - sizeof(xfs_perag_busy_t), KM_SLEEP); + pag->pagb_count = 0; + memset(pag->pagb_list, 0, sizeof(pag->pagb_list)); pag->pagf_init = 1; } #ifdef DEBUG @@ -2211,6 +2216,7 @@ xfs_alloc_read_agf( be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi])); } #endif + xfs_perag_put(pag); return 0; } @@ -2270,8 +2276,7 @@ xfs_alloc_vextent( * These three force us into a single a.g. 
*/ args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); - down_read(&mp->m_peraglock); - args->pag = &mp->m_perag[args->agno]; + args->pag = xfs_perag_get(mp, args->agno); args->minleft = 0; error = xfs_alloc_fix_freelist(args, 0); args->minleft = minleft; @@ -2280,14 +2285,12 @@ xfs_alloc_vextent( goto error0; } if (!args->agbp) { - up_read(&mp->m_peraglock); trace_xfs_alloc_vextent_noagbp(args); break; } args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno); if ((error = xfs_alloc_ag_vextent(args))) goto error0; - up_read(&mp->m_peraglock); break; case XFS_ALLOCTYPE_START_BNO: /* @@ -2339,9 +2342,8 @@ xfs_alloc_vextent( * Loop over allocation groups twice; first time with * trylock set, second time without. */ - down_read(&mp->m_peraglock); for (;;) { - args->pag = &mp->m_perag[args->agno]; + args->pag = xfs_perag_get(mp, args->agno); if (no_min) args->minleft = 0; error = xfs_alloc_fix_freelist(args, flags); args->minleft = minleft; @@ -2400,8 +2402,8 @@ xfs_alloc_vextent( } } } + xfs_perag_put(args->pag); } - up_read(&mp->m_peraglock); if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) { if (args->agno == sagno) mp->m_agfrotor = (mp->m_agfrotor + 1) % @@ -2427,9 +2429,10 @@ xfs_alloc_vextent( args->len); #endif } + xfs_perag_put(args->pag); return 0; error0: - up_read(&mp->m_peraglock); + xfs_perag_put(args->pag); return error; } @@ -2454,8 +2457,7 @@ xfs_free_extent( args.agno = XFS_FSB_TO_AGNO(args.mp, bno); ASSERT(args.agno < args.mp->m_sb.sb_agcount); args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); - down_read(&args.mp->m_peraglock); - args.pag = &args.mp->m_perag[args.agno]; + args.pag = xfs_perag_get(args.mp, args.agno); if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) goto error0; #ifdef DEBUG @@ -2465,7 +2467,7 @@ xfs_free_extent( #endif error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); error0: - up_read(&args.mp->m_peraglock); + xfs_perag_put(args.pag); return error; } @@ -2486,15 +2488,15 @@ xfs_alloc_mark_busy(xfs_trans_t *tp, xfs_agblock_t bno, xfs_extlen_t len) { - xfs_mount_t *mp; xfs_perag_busy_t *bsy; + struct xfs_perag *pag; int n; - mp = tp->t_mountp; - spin_lock(&mp->m_perag[agno].pagb_lock); + pag = xfs_perag_get(tp->t_mountp, agno); + spin_lock(&pag->pagb_lock); /* search pagb_list for an open slot */ - for (bsy = mp->m_perag[agno].pagb_list, n = 0; + for (bsy = pag->pagb_list, n = 0; n < XFS_PAGB_NUM_SLOTS; bsy++, n++) { if (bsy->busy_tp == NULL) { @@ -2502,11 +2504,11 @@ xfs_alloc_mark_busy(xfs_trans_t *tp, } } - trace_xfs_alloc_busy(mp, agno, bno, len, n); + trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len, n); if (n < XFS_PAGB_NUM_SLOTS) { - bsy = &mp->m_perag[agno].pagb_list[n]; - mp->m_perag[agno].pagb_count++; + bsy = &pag->pagb_list[n]; + pag->pagb_count++; bsy->busy_start = bno; bsy->busy_length = len; bsy->busy_tp = tp; @@ -2521,7 +2523,8 @@ xfs_alloc_mark_busy(xfs_trans_t *tp, xfs_trans_set_sync(tp); } - spin_unlock(&mp->m_perag[agno].pagb_lock); + spin_unlock(&pag->pagb_lock); + xfs_perag_put(pag); } void @@ -2529,24 +2532,23 @@ xfs_alloc_clear_busy(xfs_trans_t *tp, xfs_agnumber_t agno, int idx) { - xfs_mount_t *mp; + struct xfs_perag *pag; xfs_perag_busy_t *list; - mp = tp->t_mountp; - - spin_lock(&mp->m_perag[agno].pagb_lock); - list = mp->m_perag[agno].pagb_list; - ASSERT(idx < XFS_PAGB_NUM_SLOTS); + pag = xfs_perag_get(tp->t_mountp, agno); + spin_lock(&pag->pagb_lock); + list = pag->pagb_list; - trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp); + trace_xfs_alloc_unbusy(tp->t_mountp, agno, idx, 
list[idx].busy_tp == tp); if (list[idx].busy_tp == tp) { list[idx].busy_tp = NULL; - mp->m_perag[agno].pagb_count--; + pag->pagb_count--; } - spin_unlock(&mp->m_perag[agno].pagb_lock); + spin_unlock(&pag->pagb_lock); + xfs_perag_put(pag); } @@ -2560,17 +2562,15 @@ xfs_alloc_search_busy(xfs_trans_t *tp, xfs_agblock_t bno, xfs_extlen_t len) { - xfs_mount_t *mp; + struct xfs_perag *pag; xfs_perag_busy_t *bsy; xfs_agblock_t uend, bend; xfs_lsn_t lsn = 0; int cnt; - mp = tp->t_mountp; - - spin_lock(&mp->m_perag[agno].pagb_lock); - - uend = bno + len - 1; + pag = xfs_perag_get(tp->t_mountp, agno); + spin_lock(&pag->pagb_lock); + cnt = pag->pagb_count; /* * search pagb_list for this slot, skipping open slots. We have to @@ -2578,8 +2578,9 @@ xfs_alloc_search_busy(xfs_trans_t *tp, * we have to get the most recent LSN for the log force to push out * all the transactions that span the range. */ - for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) { - bsy = &mp->m_perag[agno].pagb_list[cnt]; + uend = bno + len - 1; + for (cnt = 0; cnt < pag->pagb_count; cnt++) { + bsy = &pag->pagb_list[cnt]; if (!bsy->busy_tp) continue; @@ -2591,7 +2592,8 @@ xfs_alloc_search_busy(xfs_trans_t *tp, if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0) lsn = bsy->busy_tp->t_commit_lsn; } - spin_unlock(&mp->m_perag[agno].pagb_lock); + spin_unlock(&pag->pagb_lock); + xfs_perag_put(pag); trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn); /* @@ -2599,5 +2601,5 @@ xfs_alloc_search_busy(xfs_trans_t *tp, * transaction that freed the block */ if (lsn) - xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); + xfs_log_force_lsn(tp->t_mountp, lsn, XFS_LOG_SYNC); } diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index adbd9141aea..b726e10d2c1 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -61,12 +61,14 @@ xfs_allocbt_set_root( struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); int btnum = cur->bc_btnum; + struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno); ASSERT(ptr->s != 0); agf->agf_roots[btnum] = ptr->s; be32_add_cpu(&agf->agf_levels[btnum], inc); - cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc; + pag->pagf_levels[btnum] += inc; + xfs_perag_put(pag); xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); } @@ -150,6 +152,7 @@ xfs_allocbt_update_lastrec( { struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); + struct xfs_perag *pag; __be32 len; int numrecs; @@ -193,7 +196,9 @@ xfs_allocbt_update_lastrec( } agf->agf_longest = len; - cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len); + pag = xfs_perag_get(cur->bc_mp, seqno); + pag->pagf_longest = be32_to_cpu(len); + xfs_perag_put(pag); xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); } diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index e953b6cfb2a..b9c196a53c4 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -93,12 +93,12 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args); STATIC int xfs_attr_name_to_xname( struct xfs_name *xname, - const char *aname) + const unsigned char *aname) { if (!aname) return EINVAL; xname->name = aname; - xname->len = strlen(aname); + xname->len = strlen((char *)aname); if (xname->len >= MAXNAMELEN) return EFAULT; /* match IRIX behaviour */ @@ -124,7 +124,7 @@ STATIC int xfs_attr_get_int( struct xfs_inode *ip, struct xfs_name *name, - char *value, + unsigned char *value, int *valuelenp, int flags) { @@ -171,8 
+171,8 @@ xfs_attr_get_int( int xfs_attr_get( xfs_inode_t *ip, - const char *name, - char *value, + const unsigned char *name, + unsigned char *value, int *valuelenp, int flags) { @@ -197,7 +197,7 @@ xfs_attr_get( /* * Calculate how many blocks we need for the new attribute, */ -int +STATIC int xfs_attr_calc_size( struct xfs_inode *ip, int namelen, @@ -235,8 +235,12 @@ xfs_attr_calc_size( } STATIC int -xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, - char *value, int valuelen, int flags) +xfs_attr_set_int( + struct xfs_inode *dp, + struct xfs_name *name, + unsigned char *value, + int valuelen, + int flags) { xfs_da_args_t args; xfs_fsblock_t firstblock; @@ -452,8 +456,8 @@ out: int xfs_attr_set( xfs_inode_t *dp, - const char *name, - char *value, + const unsigned char *name, + unsigned char *value, int valuelen, int flags) { @@ -600,7 +604,7 @@ out: int xfs_attr_remove( xfs_inode_t *dp, - const char *name, + const unsigned char *name, int flags) { int error; @@ -669,9 +673,13 @@ xfs_attr_list_int(xfs_attr_list_context_t *context) */ /*ARGSUSED*/ STATIC int -xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags, - char *name, int namelen, - int valuelen, char *value) +xfs_attr_put_listent( + xfs_attr_list_context_t *context, + int flags, + unsigned char *name, + int namelen, + int valuelen, + unsigned char *value) { struct attrlist *alist = (struct attrlist *)context->alist; attrlist_ent_t *aep; @@ -1980,7 +1988,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args) xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE]; xfs_mount_t *mp; xfs_daddr_t dblkno; - xfs_caddr_t dst; + void *dst; xfs_buf_t *bp; int nmap, error, tmp, valuelen, blkcnt, i; xfs_dablk_t lblkno; @@ -2007,15 +2015,14 @@ xfs_attr_rmtval_get(xfs_da_args_t *args) dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno, - blkcnt, - XFS_BUF_LOCK | XBF_DONT_BLOCK, + blkcnt, XBF_LOCK | XBF_DONT_BLOCK, &bp); if (error) return(error); tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : XFS_BUF_SIZE(bp); - xfs_biomove(bp, 0, tmp, dst, XFS_B_READ); + xfs_biomove(bp, 0, tmp, dst, XBF_READ); xfs_buf_relse(bp); dst += tmp; valuelen -= tmp; @@ -2039,7 +2046,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) xfs_inode_t *dp; xfs_bmbt_irec_t map; xfs_daddr_t dblkno; - xfs_caddr_t src; + void *src; xfs_buf_t *bp; xfs_dablk_t lblkno; int blkcnt, valuelen, nmap, error, tmp, committed; @@ -2141,13 +2148,13 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, - XFS_BUF_LOCK | XBF_DONT_BLOCK); + XBF_LOCK | XBF_DONT_BLOCK); ASSERT(bp); ASSERT(!XFS_BUF_GETERROR(bp)); tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : XFS_BUF_SIZE(bp); - xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE); + xfs_biomove(bp, 0, tmp, src, XBF_WRITE); if (tmp < XFS_BUF_SIZE(bp)) xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp); if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */ @@ -2208,8 +2215,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) /* * If the "remote" value is in the cache, remove it. 
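The const char * to const unsigned char * conversions running through these xfs_attr hunks matter because attribute names are hashed and compared byte-wise; on ABIs where plain char is signed, any byte above 0x7f would sign-extend before the arithmetic and perturb the hash. A minimal standalone illustration of the hazard, with rolhash as a stand-in for xfs_da_hashname rather than its real implementation:

#include <stdint.h>
#include <stdio.h>

static uint32_t rolhash(const unsigned char *name, int len)
{
	uint32_t hash = 0;
	int i;

	for (i = 0; i < len; i++)	/* rol32(hash, 7) ^ next byte */
		hash = name[i] ^ ((hash << 7) | (hash >> 25));
	return hash;
}

int main(void)
{
	const unsigned char name[] = { 0xc3, 0xa9 };	/* "é" in UTF-8 */

	/*
	 * had name[i] been a signed char, 0xc3 would sign-extend to
	 * 0xffffffc3 before the xor and change the result
	 */
	printf("%08x\n", rolhash(name, 2));
	return 0;
}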
*/ - bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, - XFS_INCORE_TRYLOCK); + bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK); if (bp) { XFS_BUF_STALE(bp); XFS_BUF_UNDELAYWRITE(bp); diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h index 59b410ce69a..e920d68ef50 100644 --- a/fs/xfs/xfs_attr.h +++ b/fs/xfs/xfs_attr.h @@ -113,7 +113,7 @@ typedef struct attrlist_cursor_kern { typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int, - char *, int, int, char *); + unsigned char *, int, int, unsigned char *); typedef struct xfs_attr_list_context { struct xfs_inode *dp; /* inode */ @@ -139,7 +139,6 @@ typedef struct xfs_attr_list_context { /* * Overall external interface routines. */ -int xfs_attr_calc_size(struct xfs_inode *, int, int, int *); int xfs_attr_inactive(struct xfs_inode *dp); int xfs_attr_rmtval_get(struct xfs_da_args *args); int xfs_attr_list_int(struct xfs_attr_list_context *); diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index baf41b5af75..a90ce74fc25 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -521,11 +521,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) sfe = &sf->list[0]; for (i = 0; i < sf->hdr.count; i++) { - nargs.name = (char *)sfe->nameval; + nargs.name = sfe->nameval; nargs.namelen = sfe->namelen; - nargs.value = (char *)&sfe->nameval[nargs.namelen]; + nargs.value = &sfe->nameval[nargs.namelen]; nargs.valuelen = sfe->valuelen; - nargs.hashval = xfs_da_hashname((char *)sfe->nameval, + nargs.hashval = xfs_da_hashname(sfe->nameval, sfe->namelen); nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags); error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */ @@ -612,10 +612,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { error = context->put_listent(context, sfe->flags, - (char *)sfe->nameval, + sfe->nameval, (int)sfe->namelen, (int)sfe->valuelen, - (char*)&sfe->nameval[sfe->namelen]); + &sfe->nameval[sfe->namelen]); /* * Either search callback finished early or @@ -659,8 +659,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) } sbp->entno = i; - sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); - sbp->name = (char *)sfe->nameval; + sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen); + sbp->name = sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ sbp->valuelen = sfe->valuelen; @@ -818,9 +818,9 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) continue; ASSERT(entry->flags & XFS_ATTR_LOCAL); name_loc = xfs_attr_leaf_name_local(leaf, i); - nargs.name = (char *)name_loc->nameval; + nargs.name = name_loc->nameval; nargs.namelen = name_loc->namelen; - nargs.value = (char *)&name_loc->nameval[nargs.namelen]; + nargs.value = &name_loc->nameval[nargs.namelen]; nargs.valuelen = be16_to_cpu(name_loc->valuelen); nargs.hashval = be32_to_cpu(entry->hashval); nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags); @@ -2370,10 +2370,10 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) retval = context->put_listent(context, entry->flags, - (char *)name_loc->nameval, + name_loc->nameval, (int)name_loc->namelen, be16_to_cpu(name_loc->valuelen), - (char *)&name_loc->nameval[name_loc->namelen]); + &name_loc->nameval[name_loc->namelen]); if (retval) return retval; } else { @@ -2397,15 +2397,15 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) return retval; retval = 
context->put_listent(context, entry->flags, - (char *)name_rmt->name, + name_rmt->name, (int)name_rmt->namelen, valuelen, - (char*)args.value); + args.value); kmem_free(args.value); } else { retval = context->put_listent(context, entry->flags, - (char *)name_rmt->name, + name_rmt->name, (int)name_rmt->namelen, valuelen, NULL); @@ -2950,7 +2950,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, map.br_blockcount); bp = xfs_trans_get_buf(*trans, dp->i_mount->m_ddev_targp, - dblkno, dblkcnt, XFS_BUF_LOCK); + dblkno, dblkcnt, XBF_LOCK); xfs_trans_binval(*trans, bp); /* * Roll to next transaction. diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h index 76ab7b0cbb3..919756e3ba5 100644 --- a/fs/xfs/xfs_attr_sf.h +++ b/fs/xfs/xfs_attr_sf.h @@ -52,7 +52,7 @@ typedef struct xfs_attr_sf_sort { __uint8_t valuelen; /* length of value */ __uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */ xfs_dahash_t hash; /* this entry's hash value */ - char *name; /* name value, pointer into buffer */ + unsigned char *name; /* name value, pointer into buffer */ } xfs_attr_sf_sort_t; #define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 98251cdc52a..1869fb97381 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -2629,13 +2629,12 @@ xfs_bmap_btalloc( if (startag == NULLAGNUMBER) startag = ag = 0; notinit = 0; - down_read(&mp->m_peraglock); + pag = xfs_perag_get(mp, ag); while (blen < ap->alen) { - pag = &mp->m_perag[ag]; if (!pag->pagf_init && (error = xfs_alloc_pagf_init(mp, args.tp, ag, XFS_ALLOC_FLAG_TRYLOCK))) { - up_read(&mp->m_peraglock); + xfs_perag_put(pag); return error; } /* @@ -2667,13 +2666,13 @@ xfs_bmap_btalloc( break; error = xfs_filestream_new_ag(ap, &ag); - if (error) { - up_read(&mp->m_peraglock); + xfs_perag_put(pag); + if (error) return error; - } /* loop again to set 'blen'*/ startag = NULLAGNUMBER; + pag = xfs_perag_get(mp, ag); continue; } } @@ -2681,8 +2680,10 @@ xfs_bmap_btalloc( ag = 0; if (ag == startag) break; + xfs_perag_put(pag); + pag = xfs_perag_get(mp, ag); } - up_read(&mp->m_peraglock); + xfs_perag_put(pag); /* * Since the above loop did a BUF_TRYLOCK, it is * possible that there is space for this request. @@ -4470,7 +4471,7 @@ xfs_bmapi( xfs_fsblock_t abno; /* allocated block number */ xfs_extlen_t alen; /* allocated extent length */ xfs_fileoff_t aoff; /* allocated file offset */ - xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */ + xfs_bmalloca_t bma = { 0 }; /* args for xfs_bmap_alloc */ xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_fileoff_t end; /* end of mapped file region */ int eof; /* we've hit the end of extents */ diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 38751d5fac6..416e47e54b8 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -334,7 +334,7 @@ xfs_bmbt_disk_set_allf( /* * Set all the fields in a bmap extent record from the uncompressed form. 
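The XFS_BUF_LOCK, XFS_INCORE_TRYLOCK and XFS_B_READ/XFS_B_WRITE replacements scattered through these hunks all land in a single XBF_* namespace, so one flag set now serves xfs_buf_get(), xfs_read_buf(), xfs_incore() and the xfs_biomove() direction argument alike. The values below are hypothetical; only the consolidation is the point:

enum xfs_buf_flags {
	XBF_READ	= (1 << 0),	/* I/O direction: read */
	XBF_WRITE	= (1 << 1),	/* I/O direction: write */
	XBF_LOCK	= (1 << 2),	/* lock the buffer when found */
	XBF_TRYLOCK	= (1 << 3),	/* fail instead of blocking on the lock */
	XBF_DONT_BLOCK	= (1 << 4),	/* allocation may not sleep */
};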
*/ -void +STATIC void xfs_bmbt_disk_set_all( xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s) diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index cf07ca7c22e..0e66c4ea0f8 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -223,7 +223,6 @@ extern void xfs_bmbt_set_startblock(xfs_bmbt_rec_host_t *r, xfs_fsblock_t v); extern void xfs_bmbt_set_startoff(xfs_bmbt_rec_host_t *r, xfs_fileoff_t v); extern void xfs_bmbt_set_state(xfs_bmbt_rec_host_t *r, xfs_exntst_t v); -extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s); extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o, xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 36a0992dd66..96be4b0f249 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -977,7 +977,7 @@ xfs_btree_get_buf_block( xfs_daddr_t d; /* need to sort out how callers deal with failures first */ - ASSERT(!(flags & XFS_BUF_TRYLOCK)); + ASSERT(!(flags & XBF_TRYLOCK)); d = xfs_btree_ptr_to_daddr(cur, ptr); *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, @@ -1008,7 +1008,7 @@ xfs_btree_read_buf_block( int error; /* need to sort out how callers deal with failures first */ - ASSERT(!(flags & XFS_BUF_TRYLOCK)); + ASSERT(!(flags & XBF_TRYLOCK)); d = xfs_btree_ptr_to_daddr(cur, ptr); error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d, diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a30f7e9eb2b..f3c49e69eab 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -250,7 +250,7 @@ xfs_buf_item_format( ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); vecp->i_addr = (xfs_caddr_t)&bip->bli_format; vecp->i_len = base_size; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BFORMAT); + vecp->i_type = XLOG_REG_TYPE_BFORMAT; vecp++; nvecs = 1; @@ -297,14 +297,14 @@ xfs_buf_item_format( buffer_offset = first_bit * XFS_BLI_CHUNK; vecp->i_addr = xfs_buf_offset(bp, buffer_offset); vecp->i_len = nbits * XFS_BLI_CHUNK; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK); + vecp->i_type = XLOG_REG_TYPE_BCHUNK; nvecs++; break; } else if (next_bit != last_bit + 1) { buffer_offset = first_bit * XFS_BLI_CHUNK; vecp->i_addr = xfs_buf_offset(bp, buffer_offset); vecp->i_len = nbits * XFS_BLI_CHUNK; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK); + vecp->i_type = XLOG_REG_TYPE_BCHUNK; nvecs++; vecp++; first_bit = next_bit; @@ -316,7 +316,7 @@ xfs_buf_item_format( buffer_offset = first_bit * XFS_BLI_CHUNK; vecp->i_addr = xfs_buf_offset(bp, buffer_offset); vecp->i_len = nbits * XFS_BLI_CHUNK; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK); + vecp->i_type = XLOG_REG_TYPE_BCHUNK; /* You would think we need to bump the nvecs here too, but we do not * this number is used by recovery, and it gets confused by the boundary * split here @@ -467,8 +467,10 @@ xfs_buf_item_unpin_remove( /* * This is called to attempt to lock the buffer associated with this * buf log item. Don't sleep on the buffer lock. If we can't get - * the lock right away, return 0. If we can get the lock, pull the - * buffer from the free list, mark it busy, and return 1. + * the lock right away, return 0. If we can get the lock, take a + * reference to the buffer. If this is a delayed write buffer that + * needs AIL help to be written back, invoke the pushbuf routine + * rather than the normal success path. 
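Condensed model of the trylock contract the rewritten comment describes: pinned wins over locked, and a buffer that is locked but marked for delayed write is reported as needing a push rather than as a plain success. Names here are illustrative, not the kernel's:

struct buf;
extern int  buf_is_pinned(struct buf *bp);
extern int  buf_trylock(struct buf *bp);
extern void buf_hold(struct buf *bp);
extern int  buf_is_delwri(struct buf *bp);

enum item_state { ITEM_SUCCESS, ITEM_PINNED, ITEM_LOCKED, ITEM_PUSHBUF };

static enum item_state buf_item_trylock_model(struct buf *bp)
{
	if (buf_is_pinned(bp))
		return ITEM_PINNED;	/* log I/O must complete first */
	if (!buf_trylock(bp))
		return ITEM_LOCKED;	/* someone else owns the lock */
	buf_hold(bp);			/* keep a reference while we own it */
	if (buf_is_delwri(bp))
		return ITEM_PUSHBUF;	/* xfsbufd must write it back */
	return ITEM_SUCCESS;
}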
*/ STATIC uint xfs_buf_item_trylock( @@ -477,24 +479,18 @@ xfs_buf_item_trylock( xfs_buf_t *bp; bp = bip->bli_buf; - - if (XFS_BUF_ISPINNED(bp)) { + if (XFS_BUF_ISPINNED(bp)) return XFS_ITEM_PINNED; - } - - if (!XFS_BUF_CPSEMA(bp)) { + if (!XFS_BUF_CPSEMA(bp)) return XFS_ITEM_LOCKED; - } - /* - * Remove the buffer from the free list. Only do this - * if it's on the free list. Private buffers like the - * superblock buffer are not. - */ + /* take a reference to the buffer. */ XFS_BUF_HOLD(bp); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); trace_xfs_buf_item_trylock(bip); + if (XFS_BUF_ISDELAYWRITE(bp)) + return XFS_ITEM_PUSHBUF; return XFS_ITEM_SUCCESS; } @@ -626,11 +622,9 @@ xfs_buf_item_committed( } /* - * This is called to asynchronously write the buffer associated with this - * buf log item out to disk. The buffer will already have been locked by - * a successful call to xfs_buf_item_trylock(). If the buffer still has - * B_DELWRI set, then get it going out to disk with a call to bawrite(). - * If not, then just release the buffer. + * The buffer is locked, but is not a delayed write buffer. This happens + * if we race with IO completion and hence we don't want to try to write it + * again. Just release the buffer. */ STATIC void xfs_buf_item_push( @@ -642,17 +636,29 @@ xfs_buf_item_push( trace_xfs_buf_item_push(bip); bp = bip->bli_buf; + ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); + xfs_buf_relse(bp); +} - if (XFS_BUF_ISDELAYWRITE(bp)) { - int error; - error = xfs_bawrite(bip->bli_item.li_mountp, bp); - if (error) - xfs_fs_cmn_err(CE_WARN, bip->bli_item.li_mountp, - "xfs_buf_item_push: pushbuf error %d on bip %p, bp %p", - error, bip, bp); - } else { - xfs_buf_relse(bp); - } +/* + * The buffer is locked and is a delayed write buffer. Promote the buffer + * in the delayed write queue as the caller knows that they must invoke + * the xfsbufd to get this buffer written. We have to unlock the buffer + * to allow the xfsbufd to write it, too. + */ +STATIC void +xfs_buf_item_pushbuf( + xfs_buf_log_item_t *bip) +{ + xfs_buf_t *bp; + + ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); + trace_xfs_buf_item_pushbuf(bip); + + bp = bip->bli_buf; + ASSERT(XFS_BUF_ISDELAYWRITE(bp)); + xfs_buf_delwri_promote(bp); + xfs_buf_relse(bp); } /* ARGSUSED */ @@ -677,7 +683,7 @@ static struct xfs_item_ops xfs_buf_item_ops = { .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) xfs_buf_item_committed, .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push, - .iop_pushbuf = NULL, + .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf, .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) xfs_buf_item_committing }; diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index c0c8869115b..0ca556b4bf3 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -1534,8 +1534,8 @@ xfs_da_hashname(const __uint8_t *name, int namelen) enum xfs_dacmp xfs_da_compname( struct xfs_da_args *args, - const char *name, - int len) + const unsigned char *name, + int len) { return (args->namelen == len && memcmp(args->name, name, len) == 0) ? 
XFS_CMP_EXACT : XFS_CMP_DIFFERENT; diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 30cd08f56a3..fe9f5a8c1d2 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -209,7 +209,8 @@ typedef struct xfs_da_state { */ struct xfs_nameops { xfs_dahash_t (*hashname)(struct xfs_name *); - enum xfs_dacmp (*compname)(struct xfs_da_args *, const char *, int); + enum xfs_dacmp (*compname)(struct xfs_da_args *, + const unsigned char *, int); }; @@ -260,7 +261,7 @@ int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, uint xfs_da_hashname(const __uint8_t *name_string, int name_length); enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args, - const char *name, int len); + const unsigned char *name, int len); xfs_da_state_t *xfs_da_state_alloc(void); diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index d1483a4f71b..cd27c9d6c71 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -45,15 +45,21 @@ #include "xfs_vnodeops.h" #include "xfs_trace.h" + +static int xfs_swap_extents( + xfs_inode_t *ip, /* target inode */ + xfs_inode_t *tip, /* tmp inode */ + xfs_swapext_t *sxp); + /* - * Syssgi interface for swapext + * ioctl interface for swapext */ int xfs_swapext( xfs_swapext_t *sxp) { xfs_inode_t *ip, *tip; - struct file *file, *target_file; + struct file *file, *tmp_file; int error = 0; /* Pull information for the target fd */ @@ -68,56 +74,128 @@ xfs_swapext( goto out_put_file; } - target_file = fget((int)sxp->sx_fdtmp); - if (!target_file) { + tmp_file = fget((int)sxp->sx_fdtmp); + if (!tmp_file) { error = XFS_ERROR(EINVAL); goto out_put_file; } - if (!(target_file->f_mode & FMODE_WRITE) || - (target_file->f_flags & O_APPEND)) { + if (!(tmp_file->f_mode & FMODE_WRITE) || + (tmp_file->f_flags & O_APPEND)) { error = XFS_ERROR(EBADF); - goto out_put_target_file; + goto out_put_tmp_file; } if (IS_SWAPFILE(file->f_path.dentry->d_inode) || - IS_SWAPFILE(target_file->f_path.dentry->d_inode)) { + IS_SWAPFILE(tmp_file->f_path.dentry->d_inode)) { error = XFS_ERROR(EINVAL); - goto out_put_target_file; + goto out_put_tmp_file; } ip = XFS_I(file->f_path.dentry->d_inode); - tip = XFS_I(target_file->f_path.dentry->d_inode); + tip = XFS_I(tmp_file->f_path.dentry->d_inode); if (ip->i_mount != tip->i_mount) { error = XFS_ERROR(EINVAL); - goto out_put_target_file; + goto out_put_tmp_file; } if (ip->i_ino == tip->i_ino) { error = XFS_ERROR(EINVAL); - goto out_put_target_file; + goto out_put_tmp_file; } if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { error = XFS_ERROR(EIO); - goto out_put_target_file; + goto out_put_tmp_file; } error = xfs_swap_extents(ip, tip, sxp); - out_put_target_file: - fput(target_file); + out_put_tmp_file: + fput(tmp_file); out_put_file: fput(file); out: return error; } -int +/* + * We need to check that the format of the data fork in the temporary inode is + * valid for the target inode before doing the swap. This is not a problem with + * attr1 because of the fixed fork offset, but attr2 has a dynamically sized + * data fork depending on the space the attribute fork is taking so we can get + * invalid formats on the target inode. + * + * E.g. target has space for 7 extents in extent format, temp inode only has + * space for 6. If we defragment down to 7 extents, then the tmp format is a + * btree, but when swapped it needs to be in extent format. Hence we can't just + * blindly swap data forks on attr2 filesystems. + * + * Note that we check the swap in both directions so that we don't end up with + * a corrupt temporary inode, either. 
+ * + * Note that fixing the way xfs_fsr sets up the attribute fork in the source + * inode will prevent this situation from occurring, so all we do here is + * reject and log the attempt. basically we are putting the responsibility on + * userspace to get this right. + */ +static int +xfs_swap_extents_check_format( + xfs_inode_t *ip, /* target inode */ + xfs_inode_t *tip) /* tmp inode */ +{ + + /* Should never get a local format */ + if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || + tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) + return EINVAL; + + /* + * if the target inode has less extents that then temporary inode then + * why did userspace call us? + */ + if (ip->i_d.di_nextents < tip->i_d.di_nextents) + return EINVAL; + + /* + * if the target inode is in extent form and the temp inode is in btree + * form then we will end up with the target inode in the wrong format + * as we already know there are less extents in the temp inode. + */ + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + tip->i_d.di_format == XFS_DINODE_FMT_BTREE) + return EINVAL; + + /* Check temp in extent form to max in target */ + if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > ip->i_df.if_ext_max) + return EINVAL; + + /* Check target in extent form to max in temp */ + if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && + XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max) + return EINVAL; + + /* Check root block of temp in btree form to max in target */ + if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE && + XFS_IFORK_BOFF(ip) && + tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) + return EINVAL; + + /* Check root block of target in btree form to max in temp */ + if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && + XFS_IFORK_BOFF(tip) && + ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) + return EINVAL; + + return 0; +} + +static int xfs_swap_extents( - xfs_inode_t *ip, - xfs_inode_t *tip, + xfs_inode_t *ip, /* target inode */ + xfs_inode_t *tip, /* tmp inode */ xfs_swapext_t *sxp) { xfs_mount_t *mp; @@ -161,13 +239,6 @@ xfs_swap_extents( goto out_unlock; } - /* Should never get a local format */ - if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || - tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) { - error = XFS_ERROR(EINVAL); - goto out_unlock; - } - if (VN_CACHED(VFS_I(tip)) != 0) { error = xfs_flushinval_pages(tip, 0, -1, FI_REMAPF_LOCKED); @@ -189,13 +260,15 @@ xfs_swap_extents( goto out_unlock; } - /* - * If the target has extended attributes, the tmp file - * must also in order to ensure the correct data fork - * format. - */ - if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) { - error = XFS_ERROR(EINVAL); + trace_xfs_swap_extent_before(ip, 0); + trace_xfs_swap_extent_before(tip, 1); + + /* check inode formats now that data is flushed */ + error = xfs_swap_extents_check_format(ip, tip); + if (error) { + xfs_fs_cmn_err(CE_NOTE, mp, + "%s: inode 0x%llx format is incompatible for exchanging.", + __FILE__, ip->i_ino); goto out_unlock; } @@ -276,6 +349,16 @@ xfs_swap_extents( *tifp = *tempifp; /* struct copy */ /* + * Fix the in-memory data fork values that are dependent on the fork + * offset in the inode. We can't assume they remain the same as attr2 + * has dynamic fork offsets. 
+ */ + ifp->if_ext_max = XFS_IFORK_SIZE(ip, XFS_DATA_FORK) / + (uint)sizeof(xfs_bmbt_rec_t); + tifp->if_ext_max = XFS_IFORK_SIZE(tip, XFS_DATA_FORK) / + (uint)sizeof(xfs_bmbt_rec_t); + + /* * Fix the on-disk inode values */ tmp = (__uint64_t)ip->i_d.di_nblocks; @@ -347,6 +430,8 @@ xfs_swap_extents( error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT); + trace_xfs_swap_extent_after(ip, 0); + trace_xfs_swap_extent_after(tip, 1); out: kmem_free(tempifp); return error; diff --git a/fs/xfs/xfs_dfrag.h b/fs/xfs/xfs_dfrag.h index 4f55a630655..20bdd935c12 100644 --- a/fs/xfs/xfs_dfrag.h +++ b/fs/xfs/xfs_dfrag.h @@ -48,9 +48,6 @@ typedef struct xfs_swapext */ int xfs_swapext(struct xfs_swapext *sx); -int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip, - struct xfs_swapext *sxp); - #endif /* __KERNEL__ */ #endif /* __XFS_DFRAG_H__ */ diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index 93634a7e90e..42520f04126 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -44,7 +44,7 @@ #include "xfs_vnodeops.h" #include "xfs_trace.h" -struct xfs_name xfs_name_dotdot = {"..", 2}; +struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2}; /* * ASCII case-insensitive (ie. A-Z) support for directories that was @@ -66,8 +66,8 @@ xfs_ascii_ci_hashname( STATIC enum xfs_dacmp xfs_ascii_ci_compname( struct xfs_da_args *args, - const char *name, - int len) + const unsigned char *name, + int len) { enum xfs_dacmp result; int i; @@ -247,7 +247,7 @@ xfs_dir_createname( int xfs_dir_cilookup_result( struct xfs_da_args *args, - const char *name, + const unsigned char *name, int len) { if (args->cmpresult == XFS_CMP_DIFFERENT) diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h index 1d9ef96f33a..74a3b105768 100644 --- a/fs/xfs/xfs_dir2.h +++ b/fs/xfs/xfs_dir2.h @@ -100,7 +100,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, struct xfs_dabuf *bp); -extern int xfs_dir_cilookup_result(struct xfs_da_args *args, const char *name, - int len); +extern int xfs_dir_cilookup_result(struct xfs_da_args *args, + const unsigned char *name, int len); #endif /* __XFS_DIR2_H__ */ diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index ddc4ecc7807..779a267b0a8 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -57,8 +57,8 @@ static xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot; void xfs_dir_startup(void) { - xfs_dir_hash_dot = xfs_da_hashname(".", 1); - xfs_dir_hash_dotdot = xfs_da_hashname("..", 2); + xfs_dir_hash_dot = xfs_da_hashname((unsigned char *)".", 1); + xfs_dir_hash_dotdot = xfs_da_hashname((unsigned char *)"..", 2); } /* @@ -513,8 +513,9 @@ xfs_dir2_block_getdents( /* * If it didn't fit, set the final offset to here & return. 
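The if_ext_max fixup just above exists because the data fork's in-memory extent capacity is a function of the fork offset, which attr2 filesystems size dynamically; once the forks are swapped, each inode has to recompute it from its own new offset. A toy version of the arithmetic, with made-up sizes standing in for XFS_IFORK_SIZE() and sizeof(xfs_bmbt_rec_t):

#include <stdio.h>

#define LITINO		(512 - 96)	/* hypothetical literal area size */
#define BMBT_REC_SIZE	16		/* on-disk extent record size */

static int fork_ext_max(int forkoff_bytes)
{
	/* the data fork gets the literal area up to the attr fork offset */
	int data_fork_size = forkoff_bytes ? forkoff_bytes : LITINO;

	return data_fork_size / BMBT_REC_SIZE;
}

int main(void)
{
	printf("no attr fork: %d extents\n", fork_ext_max(0));		/* 26 */
	printf("attr at 224:  %d extents\n", fork_ext_max(224));	/* 14 */
	return 0;
}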
*/ - if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff, - be64_to_cpu(dep->inumber), DT_UNKNOWN)) { + if (filldir(dirent, (char *)dep->name, dep->namelen, + cook & 0x7fffffff, be64_to_cpu(dep->inumber), + DT_UNKNOWN)) { *offset = cook & 0x7fffffff; xfs_da_brelse(NULL, bp); return 0; diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 29f484c11b3..e2d89854ec9 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -1081,7 +1081,7 @@ xfs_dir2_leaf_getdents( dep = (xfs_dir2_data_entry_t *)ptr; length = xfs_dir2_data_entsize(dep->namelen); - if (filldir(dirent, dep->name, dep->namelen, + if (filldir(dirent, (char *)dep->name, dep->namelen, xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff, be64_to_cpu(dep->inumber), DT_UNKNOWN)) break; diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index ce6e355199b..78fc4d9ae75 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -65,7 +65,7 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args, /* * Log entries from a freespace block. */ -void +STATIC void xfs_dir2_free_log_bests( xfs_trans_t *tp, /* transaction pointer */ xfs_dabuf_t *bp, /* freespace buffer */ diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h index dde72db3d69..82dfe714719 100644 --- a/fs/xfs/xfs_dir2_node.h +++ b/fs/xfs/xfs_dir2_node.h @@ -75,8 +75,6 @@ xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db) return ((db) % XFS_DIR2_MAX_FREE_BESTS(mp)); } -extern void xfs_dir2_free_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp, - int first, int last); extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args, struct xfs_dabuf *lbp); extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count); diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index 9d4f17a6967..c1a5945d463 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -782,7 +782,7 @@ xfs_dir2_sf_getdents( } ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep)); - if (filldir(dirent, sfep->name, sfep->namelen, + if (filldir(dirent, (char *)sfep->name, sfep->namelen, off & 0x7fffffff, ino, DT_UNKNOWN)) { *offset = off & 0x7fffffff; return 0; diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 05a4bdd4be3..6f35ed1b39b 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -82,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip, log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format); log_vector->i_len = size; - XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFI_FORMAT); + log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT; ASSERT(size >= sizeof(xfs_efi_log_format_t)); } @@ -406,7 +406,7 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp, log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format); log_vector->i_len = size; - XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFD_FORMAT); + log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT; ASSERT(size >= sizeof(xfs_efd_log_format_t)); } diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index a631e1451ab..390850ee660 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c @@ -140,6 +140,7 @@ _xfs_filestream_pick_ag( int flags, xfs_extlen_t minlen) { + int streams, max_streams; int err, trylock, nscan; xfs_extlen_t longest, free, minfree, maxfree = 0; xfs_agnumber_t ag, max_ag = NULLAGNUMBER; @@ -155,15 +156,15 @@ _xfs_filestream_pick_ag( trylock = XFS_ALLOC_FLAG_TRYLOCK; for (nscan = 0; 1; nscan++) { - - TRACE_AG_SCAN(mp, ag, xfs_filestream_peek_ag(mp, ag)); - - pag = mp->m_perag + ag; + pag = 
xfs_perag_get(mp, ag); + TRACE_AG_SCAN(mp, ag, atomic_read(&pag->pagf_fstrms)); if (!pag->pagf_init) { err = xfs_alloc_pagf_init(mp, NULL, ag, trylock); - if (err && !trylock) + if (err && !trylock) { + xfs_perag_put(pag); return err; + } } /* Might fail sometimes during the 1st pass with trylock set. */ @@ -173,6 +174,7 @@ _xfs_filestream_pick_ag( /* Keep track of the AG with the most free blocks. */ if (pag->pagf_freeblks > maxfree) { maxfree = pag->pagf_freeblks; + max_streams = atomic_read(&pag->pagf_fstrms); max_ag = ag; } @@ -195,6 +197,8 @@ _xfs_filestream_pick_ag( /* Break out, retaining the reference on the AG. */ free = pag->pagf_freeblks; + streams = atomic_read(&pag->pagf_fstrms); + xfs_perag_put(pag); *agp = ag; break; } @@ -202,6 +206,7 @@ _xfs_filestream_pick_ag( /* Drop the reference on this AG, it's not usable. */ xfs_filestream_put_ag(mp, ag); next_ag: + xfs_perag_put(pag); /* Move to the next AG, wrapping to AG 0 if necessary. */ if (++ag >= mp->m_sb.sb_agcount) ag = 0; @@ -229,6 +234,7 @@ next_ag: if (max_ag != NULLAGNUMBER) { xfs_filestream_get_ag(mp, max_ag); TRACE_AG_PICK1(mp, max_ag, maxfree); + streams = max_streams; free = maxfree; *agp = max_ag; break; @@ -240,16 +246,14 @@ next_ag: return 0; } - TRACE_AG_PICK2(mp, startag, *agp, xfs_filestream_peek_ag(mp, *agp), - free, nscan, flags); + TRACE_AG_PICK2(mp, startag, *agp, streams, free, nscan, flags); return 0; } /* * Set the allocation group number for a file or a directory, updating inode - * references and per-AG references as appropriate. Must be called with the - * m_peraglock held in read mode. + * references and per-AG references as appropriate. */ static int _xfs_filestream_update_ag( @@ -451,20 +455,6 @@ xfs_filestream_unmount( } /* - * If the mount point's m_perag array is going to be reallocated, all - * outstanding cache entries must be flushed to avoid accessing reference count - * addresses that have been freed. The call to xfs_filestream_flush() must be - * made inside the block that holds the m_peraglock in write mode to do the - * reallocation. - */ -void -xfs_filestream_flush( - xfs_mount_t *mp) -{ - xfs_mru_cache_flush(mp->m_filestream); -} - -/* * Return the AG of the filestream the file or directory belongs to, or * NULLAGNUMBER otherwise. */ @@ -526,7 +516,6 @@ xfs_filestream_associate( mp = pip->i_mount; cache = mp->m_filestream; - down_read(&mp->m_peraglock); /* * We have a problem, Houston. @@ -543,10 +532,8 @@ xfs_filestream_associate( * * So, if we can't get the iolock without sleeping then just give up */ - if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) { - up_read(&mp->m_peraglock); + if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) return 1; - } /* If the parent directory is already in the cache, use its AG. */ item = xfs_mru_cache_lookup(cache, pip->i_ino); @@ -601,7 +588,6 @@ exit_did_pick: exit: xfs_iunlock(pip, XFS_IOLOCK_EXCL); - up_read(&mp->m_peraglock); return -err; } diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h index 4aba67c5f64..260f757bbc5 100644 --- a/fs/xfs/xfs_filestream.h +++ b/fs/xfs/xfs_filestream.h @@ -79,12 +79,21 @@ extern ktrace_t *xfs_filestreams_trace_buf; * the cache that reference per-ag array elements that have since been * reallocated. 
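The reworked _xfs_filestream_pick_ag() loop above follows the standard shape for scanning allocation groups under the new reference counting: take a reference, inspect the AG, drop the reference before advancing, wrap modulo agcount, and stop when the scan returns to its starting point. A minimal sketch, assuming hypothetical perag_get/perag_put/ag_is_usable helpers:

struct mount { int m_agcount; };
struct perag;
extern struct perag *perag_get(struct mount *mp, int agno);
extern void perag_put(struct perag *pag);
extern int ag_is_usable(struct perag *pag);	/* placeholder predicate */

static int pick_ag_model(struct mount *mp, int startag)
{
	int ag = startag;

	for (;;) {
		struct perag *pag = perag_get(mp, ag);
		int usable = ag_is_usable(pag);

		perag_put(pag);			/* drop ref before moving on */
		if (usable)
			return ag;
		if (++ag >= mp->m_agcount)
			ag = 0;			/* wrap to AG 0 */
		if (ag == startag)
			return -1;		/* scanned every AG, no luck */
	}
}

The real function additionally retains a reference on the AG it picks; the sketch only shows the scan-and-wrap discipline.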
*/ +/* + * xfs_filestream_peek_ag is only used in tracing code + */ static inline int xfs_filestream_peek_ag( xfs_mount_t *mp, xfs_agnumber_t agno) { - return atomic_read(&mp->m_perag[agno].pagf_fstrms); + struct xfs_perag *pag; + int ret; + + pag = xfs_perag_get(mp, agno); + ret = atomic_read(&pag->pagf_fstrms); + xfs_perag_put(pag); + return ret; } static inline int @@ -92,7 +101,13 @@ xfs_filestream_get_ag( xfs_mount_t *mp, xfs_agnumber_t agno) { - return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms); + struct xfs_perag *pag; + int ret; + + pag = xfs_perag_get(mp, agno); + ret = atomic_inc_return(&pag->pagf_fstrms); + xfs_perag_put(pag); + return ret; } static inline int @@ -100,7 +115,13 @@ xfs_filestream_put_ag( xfs_mount_t *mp, xfs_agnumber_t agno) { - return atomic_dec_return(&mp->m_perag[agno].pagf_fstrms); + struct xfs_perag *pag; + int ret; + + pag = xfs_perag_get(mp, agno); + ret = atomic_dec_return(&pag->pagf_fstrms); + xfs_perag_put(pag); + return ret; } /* allocation selection flags */ @@ -114,7 +135,6 @@ int xfs_filestream_init(void); void xfs_filestream_uninit(void); int xfs_filestream_mount(struct xfs_mount *mp); void xfs_filestream_unmount(struct xfs_mount *mp); -void xfs_filestream_flush(struct xfs_mount *mp); xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip); int xfs_filestream_associate(struct xfs_inode *dip, struct xfs_inode *ip); void xfs_filestream_deassociate(struct xfs_inode *ip); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index a13919a6a36..37a6f62c57b 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -167,27 +167,14 @@ xfs_growfs_data_private( } new = nb - mp->m_sb.sb_dblocks; oagcount = mp->m_sb.sb_agcount; - if (nagcount > oagcount) { - void *new_perag, *old_perag; - - xfs_filestream_flush(mp); - - new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount, - KM_MAYFAIL); - if (!new_perag) - return XFS_ERROR(ENOMEM); - - down_write(&mp->m_peraglock); - memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount); - old_perag = mp->m_perag; - mp->m_perag = new_perag; - - mp->m_flags |= XFS_MOUNT_32BITINODES; - nagimax = xfs_initialize_perag(mp, nagcount); - up_write(&mp->m_peraglock); - kmem_free(old_perag); + /* allocate the new per-ag structures */ + if (nagcount > oagcount) { + error = xfs_initialize_perag(mp, nagcount, &nagimax); + if (error) + return error; } + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); tp->t_flags |= XFS_TRANS_RESERVE; if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp), @@ -196,6 +183,11 @@ xfs_growfs_data_private( return error; } + /* + * Write new AG headers to disk. Non-transactional, but written + * synchronously so they are completed prior to the growfs transaction + * being logged. + */ nfree = 0; for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { /* @@ -359,6 +351,12 @@ xfs_growfs_data_private( goto error0; } } + + /* + * Update changed superblock fields transactionally. These are not + * seen by the rest of the world until the transaction commit applies + * them atomically to the superblock. 
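All three inline wrappers above reduce to the same pattern: look the AG up, take a counted reference, touch one field, drop the reference. A userspace model of that get/put discipline, assuming a flat table where the kernel's xfs_perag_get() actually involves a per-mount lookup structure:

#include <assert.h>
#include <stdatomic.h>

struct xperag {
	atomic_int	pag_ref;	/* active references to this AG */
	atomic_int	pagf_fstrms;	/* filestreams counter it guards */
};

struct xmount {
	struct xperag	*m_perag;	/* one entry per allocation group */
	int		m_agcount;
};

static struct xperag *xperag_get(struct xmount *mp, int agno)
{
	assert(agno >= 0 && agno < mp->m_agcount);
	atomic_fetch_add(&mp->m_perag[agno].pag_ref, 1);
	return &mp->m_perag[agno];
}

static void xperag_put(struct xperag *pag)
{
	int ref = atomic_fetch_sub(&pag->pag_ref, 1);

	assert(ref > 0);		/* every put must pair with a get */
}

Because each lookup is individually counted, growing the AG array no longer requires a filesystem-wide rwsem held across every counter access, which is what lets the m_peraglock deletions elsewhere in this diff happen.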
+ */ if (nagcount > oagcount) xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount); if (nb > mp->m_sb.sb_dblocks) @@ -369,9 +367,9 @@ xfs_growfs_data_private( if (dpct) xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct); error = xfs_trans_commit(tp, 0); - if (error) { + if (error) return error; - } + /* New allocation groups fully initialized, so update mount struct */ if (nagimax) mp->m_maxagi = nagimax; @@ -381,6 +379,8 @@ xfs_growfs_data_private( mp->m_maxicount = icount << mp->m_sb.sb_inopblog; } else mp->m_maxicount = 0; + + /* update secondary superblocks. */ for (agno = 1; agno < nagcount; agno++) { error = xfs_read_buf(mp, mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index cb907ba69c4..9d884c127bb 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -205,7 +205,7 @@ xfs_ialloc_inode_init( d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster)); fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize * blks_per_cluster, - XFS_BUF_LOCK); + XBF_LOCK); ASSERT(fbuf); ASSERT(!XFS_BUF_GETERROR(fbuf)); @@ -253,6 +253,7 @@ xfs_ialloc_ag_alloc( xfs_agino_t thisino; /* current inode number, for loop */ int isaligned = 0; /* inode allocation at stripe unit */ /* boundary */ + struct xfs_perag *pag; args.tp = tp; args.mp = tp->t_mountp; @@ -382,9 +383,9 @@ xfs_ialloc_ag_alloc( newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0); be32_add_cpu(&agi->agi_count, newlen); be32_add_cpu(&agi->agi_freecount, newlen); - down_read(&args.mp->m_peraglock); - args.mp->m_perag[agno].pagi_freecount += newlen; - up_read(&args.mp->m_peraglock); + pag = xfs_perag_get(args.mp, agno); + pag->pagi_freecount += newlen; + xfs_perag_put(pag); agi->agi_newino = cpu_to_be32(newino); /* @@ -486,9 +487,8 @@ xfs_ialloc_ag_select( */ agno = pagno; flags = XFS_ALLOC_FLAG_TRYLOCK; - down_read(&mp->m_peraglock); for (;;) { - pag = &mp->m_perag[agno]; + pag = xfs_perag_get(mp, agno); if (!pag->pagi_init) { if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) { agbp = NULL; @@ -527,7 +527,7 @@ xfs_ialloc_ag_select( agbp = NULL; goto nextag; } - up_read(&mp->m_peraglock); + xfs_perag_put(pag); return agbp; } } @@ -535,22 +535,19 @@ unlock_nextag: if (agbp) xfs_trans_brelse(tp, agbp); nextag: + xfs_perag_put(pag); /* * No point in iterating over the rest, if we're shutting * down. */ - if (XFS_FORCED_SHUTDOWN(mp)) { - up_read(&mp->m_peraglock); + if (XFS_FORCED_SHUTDOWN(mp)) return NULL; - } agno++; if (agno >= agcount) agno = 0; if (agno == pagno) { - if (flags == 0) { - up_read(&mp->m_peraglock); + if (flags == 0) return NULL; - } flags = 0; } } @@ -672,6 +669,7 @@ xfs_dialloc( xfs_agnumber_t tagno; /* testing allocation group number */ xfs_btree_cur_t *tcur; /* temp cursor */ xfs_inobt_rec_incore_t trec; /* temp inode allocation record */ + struct xfs_perag *pag; if (*IO_agbp == NULL) { @@ -771,13 +769,13 @@ nextag: *inop = NULLFSINO; return noroom ? 
ENOSPC : 0; } - down_read(&mp->m_peraglock); - if (mp->m_perag[tagno].pagi_inodeok == 0) { - up_read(&mp->m_peraglock); + pag = xfs_perag_get(mp, tagno); + if (pag->pagi_inodeok == 0) { + xfs_perag_put(pag); goto nextag; } error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp); - up_read(&mp->m_peraglock); + xfs_perag_put(pag); if (error) goto nextag; agi = XFS_BUF_TO_AGI(agbp); @@ -790,6 +788,7 @@ nextag: */ agno = tagno; *IO_agbp = NULL; + pag = xfs_perag_get(mp, agno); restart_pagno: cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno)); @@ -808,7 +807,6 @@ nextag: * If in the same AG as the parent, try to get near the parent. */ if (pagno == agno) { - xfs_perag_t *pag = &mp->m_perag[agno]; int doneleft; /* done, to the left */ int doneright; /* done, to the right */ int searchdistance = 10; @@ -1006,9 +1004,7 @@ alloc_inode: goto error0; be32_add_cpu(&agi->agi_freecount, -1); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); - down_read(&mp->m_peraglock); - mp->m_perag[tagno].pagi_freecount--; - up_read(&mp->m_peraglock); + pag->pagi_freecount--; error = xfs_check_agi_freecount(cur, agi); if (error) @@ -1016,12 +1012,14 @@ alloc_inode: xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); + xfs_perag_put(pag); *inop = ino; return 0; error1: xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); error0: xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); + xfs_perag_put(pag); return error; } @@ -1052,6 +1050,7 @@ xfs_difree( xfs_mount_t *mp; /* mount structure for filesystem */ int off; /* offset of inode in inode chunk */ xfs_inobt_rec_incore_t rec; /* btree record */ + struct xfs_perag *pag; mp = tp->t_mountp; @@ -1088,9 +1087,7 @@ xfs_difree( /* * Get the allocation group header. */ - down_read(&mp->m_peraglock); error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); - up_read(&mp->m_peraglock); if (error) { cmn_err(CE_WARN, "xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s. 
Returning error.", @@ -1157,9 +1154,9 @@ xfs_difree( be32_add_cpu(&agi->agi_count, -ilen); be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); - down_read(&mp->m_peraglock); - mp->m_perag[agno].pagi_freecount -= ilen - 1; - up_read(&mp->m_peraglock); + pag = xfs_perag_get(mp, agno); + pag->pagi_freecount -= ilen - 1; + xfs_perag_put(pag); xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); @@ -1188,9 +1185,9 @@ xfs_difree( */ be32_add_cpu(&agi->agi_freecount, 1); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); - down_read(&mp->m_peraglock); - mp->m_perag[agno].pagi_freecount++; - up_read(&mp->m_peraglock); + pag = xfs_perag_get(mp, agno); + pag->pagi_freecount++; + xfs_perag_put(pag); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); } @@ -1312,9 +1309,7 @@ xfs_imap( xfs_buf_t *agbp; /* agi buffer */ int i; /* temp state */ - down_read(&mp->m_peraglock); error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); - up_read(&mp->m_peraglock); if (error) { xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " "xfs_ialloc_read_agi() returned " @@ -1379,7 +1374,6 @@ xfs_imap( XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); return XFS_ERROR(EINVAL); } - return 0; } @@ -1523,8 +1517,7 @@ xfs_ialloc_read_agi( return error; agi = XFS_BUF_TO_AGI(*bpp); - pag = &mp->m_perag[agno]; - + pag = xfs_perag_get(mp, agno); if (!pag->pagi_init) { pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); pag->pagi_count = be32_to_cpu(agi->agi_count); @@ -1537,6 +1530,7 @@ xfs_ialloc_read_agi( */ ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || XFS_FORCED_SHUTDOWN(mp)); + xfs_perag_put(pag); return 0; } diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index fa402a6bbbc..e281eb4a1c4 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -73,7 +73,6 @@ xfs_inode_alloc( ASSERT(atomic_read(&ip->i_pincount) == 0); ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); - ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); @@ -375,7 +374,7 @@ xfs_iget( return EINVAL; /* get the perag structure and ensure that it's inode capable */ - pag = xfs_get_perag(mp, ino); + pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); if (!pag->pagi_inodeok) return EINVAL; ASSERT(pag->pag_ici_init); @@ -399,7 +398,7 @@ again: if (error) goto out_error_or_again; } - xfs_put_perag(mp, pag); + xfs_perag_put(pag); *ipp = ip; @@ -418,7 +417,7 @@ out_error_or_again: delay(1); goto again; } - xfs_put_perag(mp, pag); + xfs_perag_put(pag); return error; } @@ -489,12 +488,12 @@ xfs_ireclaim( * added to the tree assert that it's been there before to catch * problems with the inode life time early on. */ - pag = xfs_get_perag(mp, ip->i_ino); + pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); write_lock(&pag->pag_ici_lock); if (!radix_tree_delete(&pag->pag_ici_root, agino)) ASSERT(0); write_unlock(&pag->pag_ici_lock); - xfs_put_perag(mp, pag); + xfs_perag_put(pag); /* * Here we do an (almost) spurious inode lock in order to coordinate diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 391d36b0e68..fa31360046d 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -151,7 +151,7 @@ xfs_imap_to_bp( "an error %d on %s. 
Returning error.", error, mp->m_fsname); } else { - ASSERT(buf_flags & XFS_BUF_TRYLOCK); + ASSERT(buf_flags & XBF_TRYLOCK); } return error; } @@ -239,7 +239,7 @@ xfs_inotobp( if (error) return error; - error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags); + error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags); if (error) return error; @@ -285,7 +285,7 @@ xfs_itobp( return error; if (!bp) { - ASSERT(buf_flags & XFS_BUF_TRYLOCK); + ASSERT(buf_flags & XBF_TRYLOCK); ASSERT(tp == NULL); *bpp = NULL; return EAGAIN; @@ -807,7 +807,7 @@ xfs_iread( * Get pointers to the on-disk inode and the buffer containing it. */ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, - XFS_BUF_LOCK, iget_flags); + XBF_LOCK, iget_flags); if (error) return error; dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); @@ -1751,7 +1751,7 @@ xfs_iunlink( * Here we put the head pointer into our next pointer, * and then we fall through to point the head at us. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); if (error) return error; @@ -1833,7 +1833,7 @@ xfs_iunlink_remove( * of dealing with the buffer when there is no need to * change it. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -1895,7 +1895,7 @@ xfs_iunlink_remove( * Now last_ibp points to the buffer previous to us on * the unlinked list. Pull us from the list. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -1946,8 +1946,9 @@ xfs_ifree_cluster( xfs_inode_t *ip, **ip_found; xfs_inode_log_item_t *iip; xfs_log_item_t *lip; - xfs_perag_t *pag = xfs_get_perag(mp, inum); + struct xfs_perag *pag; + pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { blks_per_cluster = 1; ninodes = mp->m_sb.sb_inopblock; @@ -2039,7 +2040,7 @@ xfs_ifree_cluster( bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, mp->m_bsize * blks_per_cluster, - XFS_BUF_LOCK); + XBF_LOCK); pre_flushed = 0; lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); @@ -2088,7 +2089,7 @@ xfs_ifree_cluster( } kmem_free(ip_found); - xfs_put_perag(mp, pag); + xfs_perag_put(pag); } /* @@ -2150,7 +2151,7 @@ xfs_ifree( xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK); + error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK); if (error) return error; @@ -2483,13 +2484,16 @@ __xfs_iunpin_wait( return; /* Give the log a push to start the unpinning I/O */ - xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ? 
- iip->ili_last_lsn : 0, XFS_LOG_FORCE); + if (iip && iip->ili_last_lsn) + xfs_log_force_lsn(ip->i_mount, iip->ili_last_lsn, 0); + else + xfs_log_force(ip->i_mount, 0); + if (wait) wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0)); } -static inline void +void xfs_iunpin_wait( xfs_inode_t *ip) { @@ -2675,7 +2679,7 @@ xfs_iflush_cluster( xfs_buf_t *bp) { xfs_mount_t *mp = ip->i_mount; - xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); + struct xfs_perag *pag; unsigned long first_index, mask; unsigned long inodes_per_cluster; int ilist_size; @@ -2686,6 +2690,7 @@ xfs_iflush_cluster( int bufwasdelwri; int i; + pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); ASSERT(pag->pagi_inodeok); ASSERT(pag->pag_ici_init); @@ -2693,7 +2698,7 @@ xfs_iflush_cluster( ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *); ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS); if (!ilist) - return 0; + goto out_put; mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; @@ -2762,6 +2767,8 @@ xfs_iflush_cluster( out_free: read_unlock(&pag->pag_ici_lock); kmem_free(ilist); +out_put: + xfs_perag_put(pag); return 0; @@ -2805,6 +2812,7 @@ cluster_corrupt_out: */ xfs_iflush_abort(iq); kmem_free(ilist); + xfs_perag_put(pag); return XFS_ERROR(EFSCORRUPTED); } @@ -2827,8 +2835,6 @@ xfs_iflush( xfs_dinode_t *dip; xfs_mount_t *mp; int error; - int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK); - enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) }; XFS_STATS_INC(xs_iflush_count); @@ -2841,19 +2847,6 @@ xfs_iflush( mp = ip->i_mount; /* - * If the inode isn't dirty, then just release the inode flush lock and - * do nothing. Treat stale inodes the same; we cannot rely on the - * backing buffer remaining stale in cache for the remaining life of - * the stale inode and so xfs_itobp() below may give us a buffer that - * no longer contains inodes below. Doing this stale check here also - * avoids forcing the log on pinned, stale inodes. - */ - if (xfs_inode_clean(ip) || xfs_iflags_test(ip, XFS_ISTALE)) { - xfs_ifunlock(ip); - return 0; - } - - /* * We can't flush the inode until it is unpinned, so wait for it if we * are allowed to block. We know noone new can pin it, because we are * holding the inode lock shared and you need to hold it exclusively to @@ -2864,7 +2857,7 @@ xfs_iflush( * in the same cluster are dirty, they will probably write the inode * out for us if they occur after the log force completes. */ - if (noblock && xfs_ipincount(ip)) { + if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) { xfs_iunpin_nowait(ip); xfs_ifunlock(ip); return EAGAIN; @@ -2872,6 +2865,19 @@ xfs_iflush( xfs_iunpin_wait(ip); /* + * For stale inodes we cannot rely on the backing buffer remaining + * stale in cache for the remaining life of the stale inode and so + * xfs_itobp() below may give us a buffer that no longer contains + * inodes below. We have to check this after ensuring the inode is + * unpinned so that it is safe to reclaim the stale inode after the + * flush call. + */ + if (xfs_iflags_test(ip, XFS_ISTALE)) { + xfs_ifunlock(ip); + return 0; + } + + /* * This may have been unpinned because the filesystem is shutting * down forcibly. If that's the case we must not write this inode * to disk, because the log record didn't make it to disk! @@ -2885,60 +2891,10 @@ xfs_iflush( } /* - * Decide how buffer will be flushed out. This is done before - * the call to xfs_iflush_int because this field is zeroed by it. 
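The __xfs_iunpin_wait() hunk above reflects the split of the old _xfs_log_force(mp, lsn, flags, ...) interface into two entry points: xfs_log_force() pushes the whole in-core log, while xfs_log_force_lsn() pushes only up to a known LSN. A sketch of the resulting caller pattern, with abbreviated stand-in signatures:

typedef unsigned long long lsn_t;
struct xmnt;
extern void log_force(struct xmnt *mp, unsigned int flags);
extern void log_force_lsn(struct xmnt *mp, lsn_t lsn, unsigned int flags);

static void unpin_push_model(struct xmnt *mp, lsn_t last_lsn)
{
	if (last_lsn)
		log_force_lsn(mp, last_lsn, 0);	/* push only as far as needed */
	else
		log_force(mp, 0);		/* no LSN recorded: push it all */
}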
- */ - if (iip != NULL && iip->ili_format.ilf_fields != 0) { - /* - * Flush out the inode buffer according to the directions - * of the caller. In the cases where the caller has given - * us a choice choose the non-delwri case. This is because - * the inode is in the AIL and we need to get it out soon. - */ - switch (flags) { - case XFS_IFLUSH_SYNC: - case XFS_IFLUSH_DELWRI_ELSE_SYNC: - flags = 0; - break; - case XFS_IFLUSH_ASYNC_NOBLOCK: - case XFS_IFLUSH_ASYNC: - case XFS_IFLUSH_DELWRI_ELSE_ASYNC: - flags = INT_ASYNC; - break; - case XFS_IFLUSH_DELWRI: - flags = INT_DELWRI; - break; - default: - ASSERT(0); - flags = 0; - break; - } - } else { - switch (flags) { - case XFS_IFLUSH_DELWRI_ELSE_SYNC: - case XFS_IFLUSH_DELWRI_ELSE_ASYNC: - case XFS_IFLUSH_DELWRI: - flags = INT_DELWRI; - break; - case XFS_IFLUSH_ASYNC_NOBLOCK: - case XFS_IFLUSH_ASYNC: - flags = INT_ASYNC; - break; - case XFS_IFLUSH_SYNC: - flags = 0; - break; - default: - ASSERT(0); - flags = 0; - break; - } - } - - /* * Get the buffer containing the on-disk inode. */ error = xfs_itobp(mp, NULL, ip, &dip, &bp, - noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK); + (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK); if (error || !bp) { xfs_ifunlock(ip); return error; @@ -2956,7 +2912,7 @@ xfs_iflush( * get stuck waiting in the write for too long. */ if (XFS_BUF_ISPINNED(bp)) - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + xfs_log_force(mp, 0); /* * inode clustering: @@ -2966,13 +2922,10 @@ xfs_iflush( if (error) goto cluster_corrupt_out; - if (flags & INT_DELWRI) { - xfs_bdwrite(mp, bp); - } else if (flags & INT_ASYNC) { - error = xfs_bawrite(mp, bp); - } else { + if (flags & SYNC_WAIT) error = xfs_bwrite(mp, bp); - } + else + xfs_bdwrite(mp, bp); return error; corrupt_out: @@ -3007,16 +2960,6 @@ xfs_iflush_int( iip = ip->i_itemp; mp = ip->i_mount; - - /* - * If the inode isn't dirty, then just release the inode - * flush lock and do nothing. - */ - if (xfs_inode_clean(ip)) { - xfs_ifunlock(ip); - return 0; - } - /* set *dip = inode's place in the buffer */ dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index ec1f28c4fc4..6c912b02759 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -420,16 +420,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) #define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT) /* - * Flags for xfs_iflush() - */ -#define XFS_IFLUSH_DELWRI_ELSE_SYNC 1 -#define XFS_IFLUSH_DELWRI_ELSE_ASYNC 2 -#define XFS_IFLUSH_SYNC 3 -#define XFS_IFLUSH_ASYNC 4 -#define XFS_IFLUSH_DELWRI 5 -#define XFS_IFLUSH_ASYNC_NOBLOCK 6 - -/* * Flags for xfs_itruncate_start(). 
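The block deleted above was a six-way XFS_IFLUSH_* switch whose branches only ever differed in one respect, so the replacement asks a single question: does the caller wait? A reduced model of the new write decision at the bottom of xfs_iflush():

struct buf;
extern int  buf_write_sync(struct buf *bp);	/* xfs_bwrite() analogue */
extern void buf_write_delayed(struct buf *bp);	/* xfs_bdwrite() analogue */

#define SYNC_WAIT	(1 << 0)

static int iflush_write_model(struct buf *bp, int flags)
{
	if (flags & SYNC_WAIT)
		return buf_write_sync(bp);	/* synchronous, error returned */
	buf_write_delayed(bp);			/* queue for the xfsbufd */
	return 0;
}

The same SYNC_WAIT bit selects XBF_LOCK versus XBF_TRYLOCK when the inode buffer is fetched earlier in the function, so one flag now drives both the locking and the write mode.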
*/ #define XFS_ITRUNC_DEFINITE 0x1 @@ -483,6 +473,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); void xfs_iext_realloc(xfs_inode_t *, int, int); void xfs_ipin(xfs_inode_t *); void xfs_iunpin(xfs_inode_t *); +void xfs_iunpin_wait(xfs_inode_t *); int xfs_iflush(xfs_inode_t *, uint); void xfs_ichgtime(xfs_inode_t *, int); void xfs_lock_inodes(xfs_inode_t **, int, uint); diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index f38855d21ea..d4dc063111f 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -228,7 +228,7 @@ xfs_inode_item_format( vecp->i_addr = (xfs_caddr_t)&iip->ili_format; vecp->i_len = sizeof(xfs_inode_log_format_t); - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IFORMAT); + vecp->i_type = XLOG_REG_TYPE_IFORMAT; vecp++; nvecs = 1; @@ -279,7 +279,7 @@ xfs_inode_item_format( vecp->i_addr = (xfs_caddr_t)&ip->i_d; vecp->i_len = sizeof(struct xfs_icdinode); - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE); + vecp->i_type = XLOG_REG_TYPE_ICORE; vecp++; nvecs++; iip->ili_format.ilf_fields |= XFS_ILOG_CORE; @@ -336,7 +336,7 @@ xfs_inode_item_format( vecp->i_addr = (char *)(ip->i_df.if_u1.if_extents); vecp->i_len = ip->i_df.if_bytes; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT); + vecp->i_type = XLOG_REG_TYPE_IEXT; } else #endif { @@ -355,7 +355,7 @@ xfs_inode_item_format( vecp->i_addr = (xfs_caddr_t)ext_buffer; vecp->i_len = xfs_iextents_copy(ip, ext_buffer, XFS_DATA_FORK); - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT); + vecp->i_type = XLOG_REG_TYPE_IEXT; } ASSERT(vecp->i_len <= ip->i_df.if_bytes); iip->ili_format.ilf_dsize = vecp->i_len; @@ -373,7 +373,7 @@ xfs_inode_item_format( ASSERT(ip->i_df.if_broot != NULL); vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot; vecp->i_len = ip->i_df.if_broot_bytes; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IBROOT); + vecp->i_type = XLOG_REG_TYPE_IBROOT; vecp++; nvecs++; iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; @@ -399,7 +399,7 @@ xfs_inode_item_format( ASSERT((ip->i_df.if_real_bytes == 0) || (ip->i_df.if_real_bytes == data_bytes)); vecp->i_len = (int)data_bytes; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ILOCAL); + vecp->i_type = XLOG_REG_TYPE_ILOCAL; vecp++; nvecs++; iip->ili_format.ilf_dsize = (unsigned)data_bytes; @@ -477,7 +477,7 @@ xfs_inode_item_format( vecp->i_len = xfs_iextents_copy(ip, ext_buffer, XFS_ATTR_FORK); #endif - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_EXT); + vecp->i_type = XLOG_REG_TYPE_IATTR_EXT; iip->ili_format.ilf_asize = vecp->i_len; vecp++; nvecs++; @@ -492,7 +492,7 @@ xfs_inode_item_format( ASSERT(ip->i_afp->if_broot != NULL); vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot; vecp->i_len = ip->i_afp->if_broot_bytes; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_BROOT); + vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT; vecp++; nvecs++; iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; @@ -516,7 +516,7 @@ xfs_inode_item_format( ASSERT((ip->i_afp->if_real_bytes == 0) || (ip->i_afp->if_real_bytes == data_bytes)); vecp->i_len = (int)data_bytes; - XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_LOCAL); + vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL; vecp++; nvecs++; iip->ili_format.ilf_asize = (unsigned)data_bytes; @@ -602,33 +602,20 @@ xfs_inode_item_trylock( if (!xfs_iflock_nowait(ip)) { /* - * If someone else isn't already trying to push the inode - * buffer, we get to do it. + * inode has already been flushed to the backing buffer, + * leave it locked in shared mode, pushbuf routine will + * unlock it. 
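Every XLOG_VEC_SET_TYPE(vecp, T) use in these files becomes a plain vecp->i_type = T assignment, which only works if i_type is an ordinary struct field rather than something the macro had to encode. Presumed shape of the log vector, reconstructed from how the hunks use it:

typedef struct log_iovec {
	void		*i_addr;	/* region to copy into the log */
	int		i_len;		/* region length in bytes */
	unsigned int	i_type;		/* XLOG_REG_TYPE_* tag for recovery */
} log_iovec_t;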
*/ - if (iip->ili_pushbuf_flag == 0) { - iip->ili_pushbuf_flag = 1; -#ifdef DEBUG - iip->ili_push_owner = current_pid(); -#endif - /* - * Inode is left locked in shared mode. - * Pushbuf routine gets to unlock it. - */ - return XFS_ITEM_PUSHBUF; - } else { - /* - * We hold the AIL lock, so we must specify the - * NONOTIFY flag so that we won't double trip. - */ - xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); - return XFS_ITEM_FLUSHING; - } - /* NOTREACHED */ + return XFS_ITEM_PUSHBUF; } /* Stale items should force out the iclog */ if (ip->i_flags & XFS_ISTALE) { xfs_ifunlock(ip); + /* + * we hold the AIL lock - notify the unlock routine of this + * so it doesn't try to get the lock again. + */ xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); return XFS_ITEM_PINNED; } @@ -746,11 +733,8 @@ xfs_inode_item_committed( * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK * failed to get the inode flush lock but did get the inode locked SHARED. * Here we're trying to see if the inode buffer is incore, and if so whether it's - * marked delayed write. If that's the case, we'll initiate a bawrite on that - * buffer to expedite the process. - * - * We aren't holding the AIL lock (or the flush lock) when this gets called, - * so it is inherently race-y. + * marked delayed write. If that's the case, we'll promote it and that will + * allow the caller to write the buffer by triggering the xfsbufd to run. */ STATIC void xfs_inode_item_pushbuf( @@ -759,82 +743,30 @@ xfs_inode_item_pushbuf( xfs_inode_t *ip; xfs_mount_t *mp; xfs_buf_t *bp; - uint dopush; ip = iip->ili_inode; - ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); /* - * The ili_pushbuf_flag keeps others from - * trying to duplicate our effort. - */ - ASSERT(iip->ili_pushbuf_flag != 0); - ASSERT(iip->ili_push_owner == current_pid()); - - /* * If a flush is not in progress anymore, chances are that the * inode was taken off the AIL. So, just get out. */ if (completion_done(&ip->i_flush) || ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { - iip->ili_pushbuf_flag = 0; xfs_iunlock(ip, XFS_ILOCK_SHARED); return; } mp = ip->i_mount; bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno, - iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK); + iip->ili_format.ilf_len, XBF_TRYLOCK); - if (bp != NULL) { - if (XFS_BUF_ISDELAYWRITE(bp)) { - /* - * We were racing with iflush because we don't hold - * the AIL lock or the flush lock. However, at this point, - * we have the buffer, and we know that it's dirty. - * So, it's possible that iflush raced with us, and - * this item is already taken off the AIL. - * If not, we can flush it async. - */ - dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && - !completion_done(&ip->i_flush)); - iip->ili_pushbuf_flag = 0; - xfs_iunlock(ip, XFS_ILOCK_SHARED); - - trace_xfs_inode_item_push(bp, _RET_IP_); - - if (XFS_BUF_ISPINNED(bp)) { - xfs_log_force(mp, (xfs_lsn_t)0, - XFS_LOG_FORCE); - } - if (dopush) { - int error; - error = xfs_bawrite(mp, bp); - if (error) - xfs_fs_cmn_err(CE_WARN, mp, - "xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p", - error, iip, bp); - } else { - xfs_buf_relse(bp); - } - } else { - iip->ili_pushbuf_flag = 0; - xfs_iunlock(ip, XFS_ILOCK_SHARED); - xfs_buf_relse(bp); - } - return; - } - /* - * We have to be careful about resetting pushbuf flag too early (above). - * Even though in theory we can do it as soon as we have the buflock, - * we don't want others to be doing work needlessly. 
They'll come to - this function thinking that pushing the buffer is their - responsibility only to find that the buffer is still locked by - another doing the same thing - */ - iip->ili_pushbuf_flag = 0; xfs_iunlock(ip, XFS_ILOCK_SHARED); + if (!bp) + return; + if (XFS_BUF_ISDELAYWRITE(bp)) + xfs_buf_delwri_promote(bp); + xfs_buf_relse(bp); return; } @@ -867,10 +799,14 @@ xfs_inode_item_push( iip->ili_format.ilf_fields != 0); /* - * Write out the inode. The completion routine ('iflush_done') will - * pull it from the AIL, mark it clean, unlock the flush lock. + * Push the inode to its backing buffer. This will not remove the + * inode from the AIL - a further push will be required to trigger a + * buffer push. However, this allows all the dirty inodes to be pushed + * to the buffer before it is pushed to disk. The buffer IO completion + * will pull the inode from the AIL, mark it clean and unlock the flush + * lock. */ - (void) xfs_iflush(ip, XFS_IFLUSH_ASYNC); + (void) xfs_iflush(ip, 0); xfs_iunlock(ip, XFS_ILOCK_SHARED); return; @@ -934,7 +870,6 @@ xfs_inode_item_init( /* We have zeroed memory. No need ... iip->ili_extents_buf = NULL; - iip->ili_pushbuf_flag = 0; */ iip->ili_format.ilf_type = XFS_LI_INODE; diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index cc8df1ac778..9a467958ecd 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -144,12 +144,6 @@ typedef struct xfs_inode_log_item { data exts */ struct xfs_bmbt_rec *ili_aextents_buf; /* array of logged attr exts */ - unsigned int ili_pushbuf_flag; /* one bit used in push_ail */ - -#ifdef DEBUG - uint64_t ili_push_owner; /* one who sets pushbuf_flag - above gets to push the buf */ -#endif #ifdef XFS_TRANS_DEBUG int ili_root_size; char *ili_orig_root; diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 62efab2f383..3af02314c60 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -408,8 +408,10 @@ xfs_bulkstat( (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); nimask = ~(nicluster - 1); nbcluster = nicluster >> mp->m_sb.sb_inopblog; - irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4, - KM_SLEEP | KM_MAYFAIL | KM_LARGE); + irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); + if (!irbuf) + return ENOMEM; + nirbuf = irbsize / sizeof(*irbuf); /* @@ -420,9 +422,7 @@ xfs_bulkstat( while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) { cond_resched(); bp = NULL; - down_read(&mp->m_peraglock); error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); - up_read(&mp->m_peraglock); if (error) { /* * Skip this allocation group and go to the next one. @@ -729,7 +729,7 @@ xfs_bulkstat( /* * Done, we're either out of filesystem or space to put the data. */ - kmem_free(irbuf); + kmem_free_large(irbuf); *ubcountp = ubelem; /* * Found some inodes, return them now and return the error next time.
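The xfs_bulkstat() hunk above drops the KM_SLEEP | KM_MAYFAIL | KM_LARGE allocation in favour of a kmem_zalloc_greedy() variant that can fail, so the caller now has to check for NULL, return ENOMEM itself, and free the buffer with kmem_free_large(). A minimal userspace sketch of the greedy-allocation contract being relied on here; the function name, the halving step, and the use of calloc() are illustrative assumptions, not the kernel implementation:

#include <stdlib.h>

/*
 * Illustrative userspace model only -- not the kernel code. Try
 * progressively smaller allocations between maxsize and minsize
 * (assumed >= 1), report the size actually obtained through *size,
 * and return NULL instead of sleeping forever when even minsize
 * cannot be satisfied.
 */
static void *zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
	size_t try;

	for (try = maxsize; try >= minsize && try; try >>= 1) {
		void *ptr = calloc(1, try);	/* zeroed, like kmem_zalloc */
		if (ptr) {
			*size = try;	/* caller sizes its loops from this */
			return ptr;
		}
	}
	return NULL;	/* caller must check, as xfs_bulkstat now does */
}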
@@ -849,9 +849,7 @@ xfs_inumbers( agbp = NULL; while (left > 0 && agno < mp->m_sb.sb_agcount) { if (agbp == NULL) { - down_read(&mp->m_peraglock); error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); - up_read(&mp->m_peraglock); if (error) { /* * If we can't read the AGI of this ag, diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 600b5b06aae..4f16be4b6ee 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -50,7 +50,6 @@ kmem_zone_t *xfs_log_ticket_zone; (off) += (bytes);} /* Local miscellaneous function prototypes */ -STATIC int xlog_bdstrat_cb(struct xfs_buf *); STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket, xlog_in_core_t **, xfs_lsn_t *); STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, @@ -80,11 +79,6 @@ STATIC int xlog_state_release_iclog(xlog_t *log, STATIC void xlog_state_switch_iclogs(xlog_t *log, xlog_in_core_t *iclog, int eventual_size); -STATIC int xlog_state_sync(xlog_t *log, - xfs_lsn_t lsn, - uint flags, - int *log_flushed); -STATIC int xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed); STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); /* local functions to manipulate grant head */ @@ -297,65 +291,6 @@ xfs_log_done(xfs_mount_t *mp, return lsn; } /* xfs_log_done */ - -/* - * Force the in-core log to disk. If flags == XFS_LOG_SYNC, - * the force is done synchronously. - * - * Asynchronous forces are implemented by setting the WANT_SYNC - * bit in the appropriate in-core log and then returning. - * - * Synchronous forces are implemented with a signal variable. All callers - * to force a given lsn to disk will wait on a the sv attached to the - * specific in-core log. When given in-core log finally completes its - * write to disk, that thread will wake up all threads waiting on the - * sv. - */ -int -_xfs_log_force( - xfs_mount_t *mp, - xfs_lsn_t lsn, - uint flags, - int *log_flushed) -{ - xlog_t *log = mp->m_log; - int dummy; - - if (!log_flushed) - log_flushed = &dummy; - - ASSERT(flags & XFS_LOG_FORCE); - - XFS_STATS_INC(xs_log_force); - - if (log->l_flags & XLOG_IO_ERROR) - return XFS_ERROR(EIO); - if (lsn == 0) - return xlog_state_sync_all(log, flags, log_flushed); - else - return xlog_state_sync(log, lsn, flags, log_flushed); -} /* _xfs_log_force */ - -/* - * Wrapper for _xfs_log_force(), to be used when caller doesn't care - * about errors or whether the log was flushed or not. This is the normal - * interface to use when trying to unpin items or move the log forward. - */ -void -xfs_log_force( - xfs_mount_t *mp, - xfs_lsn_t lsn, - uint flags) -{ - int error; - error = _xfs_log_force(mp, lsn, flags, NULL); - if (error) { - xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: " - "error %d returned.", error); - } -} - - /* * Attaches a new iclog I/O completion callback routine during * transaction commit. If the log is in error state, a non-zero @@ -602,7 +537,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) if (mp->m_flags & XFS_MOUNT_RDONLY) return 0; - error = _xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC, NULL); + error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL); ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); #ifdef DEBUG @@ -618,7 +553,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) if (! 
(XLOG_FORCED_SHUTDOWN(log))) { reg[0].i_addr = (void*)&magic; reg[0].i_len = sizeof(magic); - XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT); + reg[0].i_type = XLOG_REG_TYPE_UNMOUNT; error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE); @@ -988,35 +923,6 @@ xlog_iodone(xfs_buf_t *bp) } /* xlog_iodone */ /* - * The bdstrat callback function for log bufs. This gives us a central - * place to trap bufs in case we get hit by a log I/O error and need to - * shutdown. Actually, in practice, even when we didn't get a log error, - * we transition the iclogs to IOERROR state *after* flushing all existing - * iclogs to disk. This is because we don't want anymore new transactions to be - * started or completed afterwards. - */ -STATIC int -xlog_bdstrat_cb(struct xfs_buf *bp) -{ - xlog_in_core_t *iclog; - - iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); - - if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) { - /* note for irix bstrat will need struct bdevsw passed - * Fix the following macro if the code ever is merged - */ - XFS_bdstrat(bp); - return 0; - } - - XFS_BUF_ERROR(bp, EIO); - XFS_BUF_STALE(bp); - xfs_biodone(bp); - return XFS_ERROR(EIO); -} - -/* - * Return size of each in-core log record buffer. - * - * All machines get 8 x 32kB buffers by default, unless tuned otherwise. @@ -1158,7 +1064,6 @@ xlog_alloc_log(xfs_mount_t *mp, if (!bp) goto out_free_log; XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); - XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); @@ -1196,7 +1101,6 @@ xlog_alloc_log(xfs_mount_t *mp, if (!XFS_BUF_CPSEMA(bp)) ASSERT(0); XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); - XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); iclog->ic_bp = bp; iclog->ic_data = bp->b_addr; @@ -1268,7 +1172,7 @@ xlog_commit_record(xfs_mount_t *mp, reg[0].i_addr = NULL; reg[0].i_len = 0; - XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_COMMIT); + reg[0].i_type = XLOG_REG_TYPE_COMMIT; ASSERT_ALWAYS(iclog); if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp, @@ -1343,6 +1247,37 @@ xlog_grant_push_ail(xfs_mount_t *mp, xfs_trans_ail_push(log->l_ailp, threshold_lsn); } /* xlog_grant_push_ail */ +/* + * The bdstrat callback function for log bufs. This gives us a central + * place to trap bufs in case we get hit by a log I/O error and need to + * shut down. Actually, in practice, even when we didn't get a log error, + * we transition the iclogs to IOERROR state *after* flushing all existing + * iclogs to disk. This is because we don't want any new transactions to be + * started or completed afterwards. + */ +STATIC int +xlog_bdstrat( + struct xfs_buf *bp) +{ + struct xlog_in_core *iclog; + + iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); + if (iclog->ic_state & XLOG_STATE_IOERROR) { + XFS_BUF_ERROR(bp, EIO); + XFS_BUF_STALE(bp); + xfs_biodone(bp); + /* + * It would seem logical to return EIO here, but we rely on + * the log state machine to propagate I/O errors instead of + * doing it here.
+ */ + return 0; + } + + bp->b_flags |= _XBF_RUN_QUEUES; + xfs_buf_iorequest(bp); + return 0; +} /* * Flush out the in-core log (iclog) to the on-disk log in an asynchronous @@ -1462,7 +1397,7 @@ xlog_sync(xlog_t *log, */ XFS_BUF_WRITE(bp); - if ((error = XFS_bwrite(bp))) { + if ((error = xlog_bdstrat(bp))) { xfs_ioerror_alert("xlog_sync", log->l_mp, bp, XFS_BUF_ADDR(bp)); return error; @@ -1502,7 +1437,7 @@ xlog_sync(xlog_t *log, /* account for internal log which doesn't start at block #0 */ XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); XFS_BUF_WRITE(bp); - if ((error = xlog_bdstrat(bp))) { + if ((error = xlog_bdstrat(bp))) { xfs_ioerror_alert("xlog_sync (split)", log->l_mp, bp, XFS_BUF_ADDR(bp)); return error; @@ -2854,7 +2789,6 @@ xlog_state_switch_iclogs(xlog_t *log, log->l_iclog = iclog->ic_next; } /* xlog_state_switch_iclogs */ - /* * Write out all data in the in-core log as of this exact moment in time. * @@ -2882,11 +2816,17 @@ xlog_state_switch_iclogs(xlog_t *log, * b) when we return from flushing out this iclog, it is still * not in the active nor dirty state. */ -STATIC int -xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) +int +_xfs_log_force( + struct xfs_mount *mp, + uint flags, + int *log_flushed) { - xlog_in_core_t *iclog; - xfs_lsn_t lsn; + struct log *log = mp->m_log; + struct xlog_in_core *iclog; + xfs_lsn_t lsn; + + XFS_STATS_INC(xs_log_force); spin_lock(&log->l_icloglock); @@ -2932,7 +2872,9 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) if (xlog_state_release_iclog(log, iclog)) return XFS_ERROR(EIO); - *log_flushed = 1; + + if (log_flushed) + *log_flushed = 1; spin_lock(&log->l_icloglock); if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && iclog->ic_state != XLOG_STATE_DIRTY) @@ -2976,19 +2918,37 @@ maybe_sleep: */ if (iclog->ic_state & XLOG_STATE_IOERROR) return XFS_ERROR(EIO); - *log_flushed = 1; - + if (log_flushed) + *log_flushed = 1; } else { no_sleep: spin_unlock(&log->l_icloglock); } return 0; -} /* xlog_state_sync_all */ +} +/* + * Wrapper for _xfs_log_force(), to be used when caller doesn't care + * about errors or whether the log was flushed or not. This is the normal + * interface to use when trying to unpin items or move the log forward. + */ +void +xfs_log_force( + xfs_mount_t *mp, + uint flags) +{ + int error; + + error = _xfs_log_force(mp, flags, NULL); + if (error) { + xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: " + "error %d returned.", error); + } +} /* - * Used by code which implements synchronous log forces. + * Force the in-core log to disk for a specific LSN. * * Find in-core log with lsn. * If it is in the DIRTY state, just return. @@ -2996,109 +2956,142 @@ no_sleep: * state and go to sleep or return. * If it is in any other state, go to sleep or return. * - * If filesystem activity goes to zero, the iclog will get flushed only by - * bdflush(). + * Synchronous forces are implemented with a signal variable. All callers + * to force a given lsn to disk will wait on the sv attached to the + * specific in-core log. When the given in-core log finally completes its + * write to disk, that thread will wake up all threads waiting on the + * sv.
*/ -STATIC int -xlog_state_sync(xlog_t *log, - xfs_lsn_t lsn, - uint flags, - int *log_flushed) +int +_xfs_log_force_lsn( + struct xfs_mount *mp, + xfs_lsn_t lsn, + uint flags, + int *log_flushed) { - xlog_in_core_t *iclog; - int already_slept = 0; + struct log *log = mp->m_log; + struct xlog_in_core *iclog; + int already_slept = 0; -try_again: - spin_lock(&log->l_icloglock); - iclog = log->l_iclog; + ASSERT(lsn != 0); - if (iclog->ic_state & XLOG_STATE_IOERROR) { - spin_unlock(&log->l_icloglock); - return XFS_ERROR(EIO); - } - - do { - if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { - iclog = iclog->ic_next; - continue; - } + XFS_STATS_INC(xs_log_force); - if (iclog->ic_state == XLOG_STATE_DIRTY) { +try_again: + spin_lock(&log->l_icloglock); + iclog = log->l_iclog; + if (iclog->ic_state & XLOG_STATE_IOERROR) { spin_unlock(&log->l_icloglock); - return 0; + return XFS_ERROR(EIO); } - if (iclog->ic_state == XLOG_STATE_ACTIVE) { - /* - * We sleep here if we haven't already slept (e.g. - * this is the first time we've looked at the correct - * iclog buf) and the buffer before us is going to - * be sync'ed. The reason for this is that if we - * are doing sync transactions here, by waiting for - * the previous I/O to complete, we can allow a few - * more transactions into this iclog before we close - * it down. - * - * Otherwise, we mark the buffer WANT_SYNC, and bump - * up the refcnt so we can release the log (which drops - * the ref count). The state switch keeps new transaction - * commits from using this buffer. When the current commits - * finish writing into the buffer, the refcount will drop to - * zero and the buffer will go out then. - */ - if (!already_slept && - (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC | - XLOG_STATE_SYNCING))) { - ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); - XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_prev->ic_write_wait, PSWP, - &log->l_icloglock, s); - *log_flushed = 1; - already_slept = 1; - goto try_again; - } else { + do { + if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { + iclog = iclog->ic_next; + continue; + } + + if (iclog->ic_state == XLOG_STATE_DIRTY) { + spin_unlock(&log->l_icloglock); + return 0; + } + + if (iclog->ic_state == XLOG_STATE_ACTIVE) { + /* + * We sleep here if we haven't already slept (e.g. + * this is the first time we've looked at the correct + * iclog buf) and the buffer before us is going to + * be sync'ed. The reason for this is that if we + * are doing sync transactions here, by waiting for + * the previous I/O to complete, we can allow a few + * more transactions into this iclog before we close + * it down. + * + * Otherwise, we mark the buffer WANT_SYNC, and bump + * up the refcnt so we can release the log (which + * drops the ref count). The state switch keeps new + * transaction commits from using this buffer. When + * the current commits finish writing into the buffer, + * the refcount will drop to zero and the buffer will + * go out then. 
+ */ + if (!already_slept && + (iclog->ic_prev->ic_state & + (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) { + ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); + + XFS_STATS_INC(xs_log_force_sleep); + + sv_wait(&iclog->ic_prev->ic_write_wait, + PSWP, &log->l_icloglock, s); + if (log_flushed) + *log_flushed = 1; + already_slept = 1; + goto try_again; + } atomic_inc(&iclog->ic_refcnt); xlog_state_switch_iclogs(log, iclog, 0); spin_unlock(&log->l_icloglock); if (xlog_state_release_iclog(log, iclog)) return XFS_ERROR(EIO); - *log_flushed = 1; + if (log_flushed) + *log_flushed = 1; spin_lock(&log->l_icloglock); } - } - if ((flags & XFS_LOG_SYNC) && /* sleep */ - !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { + if ((flags & XFS_LOG_SYNC) && /* sleep */ + !(iclog->ic_state & + (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { + /* + * Don't wait on completion if we know that we've + * gotten a log write error. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) { + spin_unlock(&log->l_icloglock); + return XFS_ERROR(EIO); + } + XFS_STATS_INC(xs_log_force_sleep); + sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s); + /* + * No need to grab the log lock here since we're + * only deciding whether or not to return EIO + * and the memory read should be atomic. + */ + if (iclog->ic_state & XLOG_STATE_IOERROR) + return XFS_ERROR(EIO); - /* - * Don't wait on completion if we know that we've - * gotten a log write error. - */ - if (iclog->ic_state & XLOG_STATE_IOERROR) { + if (log_flushed) + *log_flushed = 1; + } else { /* just return */ spin_unlock(&log->l_icloglock); - return XFS_ERROR(EIO); } - XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s); - /* - * No need to grab the log lock here since we're - * only deciding whether or not to return EIO - * and the memory read should be atomic. - */ - if (iclog->ic_state & XLOG_STATE_IOERROR) - return XFS_ERROR(EIO); - *log_flushed = 1; - } else { /* just return */ - spin_unlock(&log->l_icloglock); - } - return 0; - } while (iclog != log->l_iclog); + return 0; + } while (iclog != log->l_iclog); - spin_unlock(&log->l_icloglock); - return 0; -} /* xlog_state_sync */ + spin_unlock(&log->l_icloglock); + return 0; +} +/* + * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care + * about errors or whether the log was flushed or not. This is the normal + * interface to use when trying to unpin items or move the log forward. + */ +void +xfs_log_force_lsn( + xfs_mount_t *mp, + xfs_lsn_t lsn, + uint flags) +{ + int error; + + error = _xfs_log_force_lsn(mp, lsn, flags, NULL); + if (error) { + xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: " + "error %d returned.", error); + } +} /* * Called when we want to mark the current iclog as being ready to sync to @@ -3463,7 +3456,6 @@ xfs_log_force_umount( xlog_ticket_t *tic; xlog_t *log; int retval; - int dummy; log = mp->m_log; @@ -3537,13 +3529,14 @@ xfs_log_force_umount( } spin_unlock(&log->l_grant_lock); - if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { + if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { ASSERT(!logerror); /* * Force the incore logs to disk before shutting the * log down completely. 
*/ - xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy); + _xfs_log_force(mp, XFS_LOG_SYNC, NULL); + spin_lock(&log->l_icloglock); retval = xlog_state_ioerror(log); spin_unlock(&log->l_icloglock); diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index d0c9baa50b1..7074be9d13e 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -70,14 +70,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) * Flags to xfs_log_force() * * XFS_LOG_SYNC: Synchronous force in-core log to disk - * XFS_LOG_FORCE: Start in-core log write now. - * XFS_LOG_URGE: Start write within some window of time. - * - * Note: Either XFS_LOG_FORCE or XFS_LOG_URGE must be set. */ #define XFS_LOG_SYNC 0x1 -#define XFS_LOG_FORCE 0x2 -#define XFS_LOG_URGE 0x4 #endif /* __KERNEL__ */ @@ -110,10 +104,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) #define XLOG_REG_TYPE_TRANSHDR 19 #define XLOG_REG_TYPE_MAX 19 -#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t)) - typedef struct xfs_log_iovec { - xfs_caddr_t i_addr; /* beginning address of region */ + xfs_caddr_t i_addr; /* beginning address of region */ int i_len; /* length in bytes of region */ uint i_type; /* type of region */ } xfs_log_iovec_t; @@ -140,12 +132,17 @@ xfs_lsn_t xfs_log_done(struct xfs_mount *mp, void **iclog, uint flags); int _xfs_log_force(struct xfs_mount *mp, - xfs_lsn_t lsn, uint flags, int *log_forced); void xfs_log_force(struct xfs_mount *mp, - xfs_lsn_t lsn, uint flags); +int _xfs_log_force_lsn(struct xfs_mount *mp, + xfs_lsn_t lsn, + uint flags, + int *log_forced); +void xfs_log_force_lsn(struct xfs_mount *mp, + xfs_lsn_t lsn, + uint flags); int xfs_log_mount(struct xfs_mount *mp, struct xfs_buftarg *log_target, xfs_daddr_t start_block, diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index d55662db707..fd02a18facd 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -443,14 +443,9 @@ typedef struct log { /* common routines */ extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); -extern int xlog_find_tail(xlog_t *log, - xfs_daddr_t *head_blk, - xfs_daddr_t *tail_blk); extern int xlog_recover(xlog_t *log); extern int xlog_recover_finish(xlog_t *log); extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); -extern struct xfs_buf *xlog_get_bp(xlog_t *, int); -extern void xlog_put_bp(struct xfs_buf *); extern kmem_zone_t *xfs_log_ticket_zone; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 69ac2e5ef20..22e6efdc17e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -50,8 +50,6 @@ STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *); STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t); -STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q, - xlog_recover_item_t *item); #if defined(DEBUG) STATIC void xlog_recover_check_summary(xlog_t *); #else @@ -68,7 +66,7 @@ STATIC void xlog_recover_check_summary(xlog_t *); ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) ) #define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask) -xfs_buf_t * +STATIC xfs_buf_t * xlog_get_bp( xlog_t *log, int nbblks) @@ -88,7 +86,7 @@ xlog_get_bp( return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp); } -void +STATIC void xlog_put_bp( xfs_buf_t *bp) { @@ -805,7 +803,7 @@ xlog_find_head( * We could speed up search by using current head_blk buffer, but it is not * available. 
*/ -int +STATIC int xlog_find_tail( xlog_t *log, xfs_daddr_t *head_blk, @@ -1367,36 +1365,45 @@ xlog_clear_stale_blocks( STATIC xlog_recover_t * xlog_recover_find_tid( - xlog_recover_t *q, + struct hlist_head *head, xlog_tid_t tid) { - xlog_recover_t *p = q; + xlog_recover_t *trans; + struct hlist_node *n; - while (p != NULL) { - if (p->r_log_tid == tid) - break; - p = p->r_next; + hlist_for_each_entry(trans, n, head, r_list) { + if (trans->r_log_tid == tid) + return trans; } - return p; + return NULL; } STATIC void -xlog_recover_put_hashq( - xlog_recover_t **q, - xlog_recover_t *trans) +xlog_recover_new_tid( + struct hlist_head *head, + xlog_tid_t tid, + xfs_lsn_t lsn) { - trans->r_next = *q; - *q = trans; + xlog_recover_t *trans; + + trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP); + trans->r_log_tid = tid; + trans->r_lsn = lsn; + INIT_LIST_HEAD(&trans->r_itemq); + + INIT_HLIST_NODE(&trans->r_list); + hlist_add_head(&trans->r_list, head); } STATIC void xlog_recover_add_item( - xlog_recover_item_t **itemq) + struct list_head *head) { xlog_recover_item_t *item; item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP); - xlog_recover_insert_item_backq(itemq, item); + INIT_LIST_HEAD(&item->ri_list); + list_add_tail(&item->ri_list, head); } STATIC int @@ -1409,8 +1416,7 @@ xlog_recover_add_to_cont_trans( xfs_caddr_t ptr, old_ptr; int old_len; - item = trans->r_itemq; - if (item == NULL) { + if (list_empty(&trans->r_itemq)) { /* finish copying rest of trans header */ xlog_recover_add_item(&trans->r_itemq); ptr = (xfs_caddr_t) &trans->r_theader + @@ -1418,7 +1424,8 @@ xlog_recover_add_to_cont_trans( memcpy(ptr, dp, len); /* d, s, l */ return 0; } - item = item->ri_prev; + /* take the tail entry */ + item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; old_len = item->ri_buf[item->ri_cnt-1].i_len; @@ -1455,8 +1462,7 @@ xlog_recover_add_to_trans( if (!len) return 0; - item = trans->r_itemq; - if (item == NULL) { + if (list_empty(&trans->r_itemq)) { /* we need to catch log corruptions here */ if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { xlog_warn("XFS: xlog_recover_add_to_trans: " @@ -1474,12 +1480,15 @@ xlog_recover_add_to_trans( memcpy(ptr, dp, len); in_f = (xfs_inode_log_format_t *)ptr; - if (item->ri_prev->ri_total != 0 && - item->ri_prev->ri_total == item->ri_prev->ri_cnt) { + /* take the tail entry */ + item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list); + if (item->ri_total != 0 && + item->ri_total == item->ri_cnt) { + /* tail item is in use, get a new one */ xlog_recover_add_item(&trans->r_itemq); + item = list_entry(trans->r_itemq.prev, + xlog_recover_item_t, ri_list); } - item = trans->r_itemq; - item = item->ri_prev; if (item->ri_total == 0) { /* first region to be added */ if (in_f->ilf_size == 0 || @@ -1504,96 +1513,29 @@ xlog_recover_add_to_trans( return 0; } -STATIC void -xlog_recover_new_tid( - xlog_recover_t **q, - xlog_tid_t tid, - xfs_lsn_t lsn) -{ - xlog_recover_t *trans; - - trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP); - trans->r_log_tid = tid; - trans->r_lsn = lsn; - xlog_recover_put_hashq(q, trans); -} - -STATIC int -xlog_recover_unlink_tid( - xlog_recover_t **q, - xlog_recover_t *trans) -{ - xlog_recover_t *tp; - int found = 0; - - ASSERT(trans != NULL); - if (trans == *q) { - *q = (*q)->r_next; - } else { - tp = *q; - while (tp) { - if (tp->r_next == trans) { - found = 1; - break; - } - tp = tp->r_next; - } - if (!found) { - xlog_warn( - "XFS: xlog_recover_unlink_tid: trans 
not found"); - ASSERT(0); - return XFS_ERROR(EIO); - } - tp->r_next = tp->r_next->r_next; - } - return 0; -} - -STATIC void -xlog_recover_insert_item_backq( - xlog_recover_item_t **q, - xlog_recover_item_t *item) -{ - if (*q == NULL) { - item->ri_prev = item->ri_next = item; - *q = item; - } else { - item->ri_next = *q; - item->ri_prev = (*q)->ri_prev; - (*q)->ri_prev = item; - item->ri_prev->ri_next = item; - } -} - -STATIC void -xlog_recover_insert_item_frontq( - xlog_recover_item_t **q, - xlog_recover_item_t *item) -{ - xlog_recover_insert_item_backq(q, item); - *q = item; -} - +/* + * Sort the log items in the transaction. Cancelled buffers need + * to be put first so they are processed before any items that might + * modify the buffers. If they are cancelled, then the modifications + * don't need to be replayed. + */ STATIC int xlog_recover_reorder_trans( xlog_recover_t *trans) { - xlog_recover_item_t *first_item, *itemq, *itemq_next; - xfs_buf_log_format_t *buf_f; - ushort flags = 0; + xlog_recover_item_t *item, *n; + LIST_HEAD(sort_list); - first_item = itemq = trans->r_itemq; - trans->r_itemq = NULL; - do { - itemq_next = itemq->ri_next; - buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr; + list_splice_init(&trans->r_itemq, &sort_list); + list_for_each_entry_safe(item, n, &sort_list, ri_list) { + xfs_buf_log_format_t *buf_f; - switch (ITEM_TYPE(itemq)) { + buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr; + + switch (ITEM_TYPE(item)) { case XFS_LI_BUF: - flags = buf_f->blf_flags; - if (!(flags & XFS_BLI_CANCEL)) { - xlog_recover_insert_item_frontq(&trans->r_itemq, - itemq); + if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) { + list_move(&item->ri_list, &trans->r_itemq); break; } case XFS_LI_INODE: @@ -1601,7 +1543,7 @@ xlog_recover_reorder_trans( case XFS_LI_QUOTAOFF: case XFS_LI_EFD: case XFS_LI_EFI: - xlog_recover_insert_item_backq(&trans->r_itemq, itemq); + list_move_tail(&item->ri_list, &trans->r_itemq); break; default: xlog_warn( @@ -1609,8 +1551,8 @@ xlog_recover_reorder_trans( ASSERT(0); return XFS_ERROR(EIO); } - itemq = itemq_next; - } while (first_item != itemq); + } + ASSERT(list_empty(&sort_list)); return 0; } @@ -2242,9 +2184,9 @@ xlog_recover_do_buffer_trans( } mp = log->l_mp; - buf_flags = XFS_BUF_LOCK; + buf_flags = XBF_LOCK; if (!(flags & XFS_BLI_INODE_BUF)) - buf_flags |= XFS_BUF_MAPPED; + buf_flags |= XBF_MAPPED; bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags); if (XFS_BUF_ISERROR(bp)) { @@ -2346,7 +2288,7 @@ xlog_recover_do_inode_trans( } bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, - XFS_BUF_LOCK); + XBF_LOCK); if (XFS_BUF_ISERROR(bp)) { xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, bp, in_f->ilf_blkno); @@ -2814,14 +2756,13 @@ xlog_recover_do_trans( int pass) { int error = 0; - xlog_recover_item_t *item, *first_item; + xlog_recover_item_t *item; error = xlog_recover_reorder_trans(trans); if (error) return error; - first_item = item = trans->r_itemq; - do { + list_for_each_entry(item, &trans->r_itemq, ri_list) { switch (ITEM_TYPE(item)) { case XFS_LI_BUF: error = xlog_recover_do_buffer_trans(log, item, pass); @@ -2854,8 +2795,7 @@ xlog_recover_do_trans( if (error) return error; - item = item->ri_next; - } while (first_item != item); + } return 0; } @@ -2869,21 +2809,18 @@ STATIC void xlog_recover_free_trans( xlog_recover_t *trans) { - xlog_recover_item_t *first_item, *item, *free_item; + xlog_recover_item_t *item, *n; int i; - item = first_item = trans->r_itemq; - do { - free_item = item; - item = item->ri_next; - 
/* Free the regions in the item. */ - for (i = 0; i < free_item->ri_cnt; i++) { - kmem_free(free_item->ri_buf[i].i_addr); - } + list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { + /* Free the regions in the item. */ + list_del(&item->ri_list); + for (i = 0; i < item->ri_cnt; i++) + kmem_free(item->ri_buf[i].i_addr); /* Free the item itself */ - kmem_free(free_item->ri_buf); - kmem_free(free_item); - } while (first_item != item); + kmem_free(item->ri_buf); + kmem_free(item); + } /* Free the transaction recover structure */ kmem_free(trans); } @@ -2891,14 +2828,12 @@ xlog_recover_free_trans( STATIC int xlog_recover_commit_trans( xlog_t *log, - xlog_recover_t **q, xlog_recover_t *trans, int pass) { int error; - if ((error = xlog_recover_unlink_tid(q, trans))) - return error; + hlist_del(&trans->r_list); if ((error = xlog_recover_do_trans(log, trans, pass))) return error; xlog_recover_free_trans(trans); /* no error */ @@ -2926,7 +2861,7 @@ xlog_recover_unmount_trans( STATIC int xlog_recover_process_data( xlog_t *log, - xlog_recover_t *rhash[], + struct hlist_head rhash[], xlog_rec_header_t *rhead, xfs_caddr_t dp, int pass) @@ -2960,7 +2895,7 @@ xlog_recover_process_data( } tid = be32_to_cpu(ohead->oh_tid); hash = XLOG_RHASH(tid); - trans = xlog_recover_find_tid(rhash[hash], tid); + trans = xlog_recover_find_tid(&rhash[hash], tid); if (trans == NULL) { /* not found; add new tid */ if (ohead->oh_flags & XLOG_START_TRANS) xlog_recover_new_tid(&rhash[hash], tid, @@ -2978,7 +2913,7 @@ xlog_recover_process_data( switch (flags) { case XLOG_COMMIT_TRANS: error = xlog_recover_commit_trans(log, - &rhash[hash], trans, pass); + trans, pass); break; case XLOG_UNMOUNT_TRANS: error = xlog_recover_unmount_trans(trans); @@ -3211,7 +3146,7 @@ xlog_recover_process_one_iunlink( /* * Get the on disk inode to find the next inode in the bucket. */ - error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK); + error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK); if (error) goto fail_iput; @@ -3517,7 +3452,7 @@ xlog_do_recovery_pass( int error = 0, h_size; int bblks, split_bblks; int hblks, split_hblks, wrapped_hblks; - xlog_recover_t *rhash[XLOG_RHASH_SIZE]; + struct hlist_head rhash[XLOG_RHASH_SIZE]; ASSERT(head_blk != tail_blk); @@ -3978,8 +3913,7 @@ xlog_recover_finish( * case the unlink transactions would have problems * pushing the EFIs out of the way. */ - xfs_log_force(log->l_mp, (xfs_lsn_t)0, - (XFS_LOG_FORCE | XFS_LOG_SYNC)); + xfs_log_force(log->l_mp, XFS_LOG_SYNC); xlog_recover_process_iunlinks(log); diff --git a/fs/xfs/xfs_log_recover.h b/fs/xfs/xfs_log_recover.h index b2254555530..75d74920725 100644 --- a/fs/xfs/xfs_log_recover.h +++ b/fs/xfs/xfs_log_recover.h @@ -35,22 +35,21 @@ * item headers are in ri_buf[0]. Additional buffers follow. 
*/ typedef struct xlog_recover_item { - struct xlog_recover_item *ri_next; - struct xlog_recover_item *ri_prev; - int ri_type; - int ri_cnt; /* count of regions found */ - int ri_total; /* total regions */ - xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */ + struct list_head ri_list; + int ri_type; + int ri_cnt; /* count of regions found */ + int ri_total; /* total regions */ + xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */ } xlog_recover_item_t; struct xlog_tid; typedef struct xlog_recover { - struct xlog_recover *r_next; - xlog_tid_t r_log_tid; /* log's transaction id */ - xfs_trans_header_t r_theader; /* trans header for partial */ - int r_state; /* not needed */ - xfs_lsn_t r_lsn; /* xact lsn */ - xlog_recover_item_t *r_itemq; /* q for items */ + struct hlist_node r_list; + xlog_tid_t r_log_tid; /* log's transaction id */ + xfs_trans_header_t r_theader; /* trans header for partial */ + int r_state; /* not needed */ + xfs_lsn_t r_lsn; /* xact lsn */ + struct list_head r_itemq; /* q for items */ } xlog_recover_t; #define ITEM_TYPE(i) (*(ushort *)(i)->ri_buf[0].i_addr) diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index eb403b40e12..6afaaeb2950 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -201,6 +201,38 @@ xfs_uuid_unmount( /* + * Reference counting access wrappers to the perag structures. + */ +struct xfs_perag * +xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno) +{ + struct xfs_perag *pag; + int ref = 0; + + spin_lock(&mp->m_perag_lock); + pag = radix_tree_lookup(&mp->m_perag_tree, agno); + if (pag) { + ASSERT(atomic_read(&pag->pag_ref) >= 0); + /* catch leaks in the positive direction during testing */ + ASSERT(atomic_read(&pag->pag_ref) < 1000); + ref = atomic_inc_return(&pag->pag_ref); + } + spin_unlock(&mp->m_perag_lock); + trace_xfs_perag_get(mp, agno, ref, _RET_IP_); + return pag; +} + +void +xfs_perag_put(struct xfs_perag *pag) +{ + int ref; + + ASSERT(atomic_read(&pag->pag_ref) > 0); + ref = atomic_dec_return(&pag->pag_ref); + trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_); +} + +/* * Free up the resources associated with a mount structure. Assume that * the structure was initially zeroed, so we can tell which fields got * initialized. @@ -209,13 +241,16 @@ STATIC void xfs_free_perag( xfs_mount_t *mp) { - if (mp->m_perag) { - int agno; + xfs_agnumber_t agno; + struct xfs_perag *pag; - for (agno = 0; agno < mp->m_maxagi; agno++) - if (mp->m_perag[agno].pagb_list) - kmem_free(mp->m_perag[agno].pagb_list); - kmem_free(mp->m_perag); + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { + spin_lock(&mp->m_perag_lock); + pag = radix_tree_delete(&mp->m_perag_tree, agno); + ASSERT(pag); + ASSERT(atomic_read(&pag->pag_ref) == 0); + spin_unlock(&mp->m_perag_lock); + kmem_free(pag); } } @@ -389,22 +424,57 @@ xfs_initialize_perag_icache( } } -xfs_agnumber_t +int xfs_initialize_perag( xfs_mount_t *mp, - xfs_agnumber_t agcount) + xfs_agnumber_t agcount, + xfs_agnumber_t *maxagi) { xfs_agnumber_t index, max_metadata; + xfs_agnumber_t first_initialised = 0; xfs_perag_t *pag; xfs_agino_t agino; xfs_ino_t ino; xfs_sb_t *sbp = &mp->m_sb; xfs_ino_t max_inum = XFS_MAXINUMBER_32; + int error = -ENOMEM; /* Check to see if the filesystem can overflow 32 bit inodes */ agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); + /* + * Walk the current per-ag tree so we don't try to initialise AGs + * that already exist (growfs case). 
Allocate and insert all the + * AGs we don't find ready for initialisation. + */ + for (index = 0; index < agcount; index++) { + pag = xfs_perag_get(mp, index); + if (pag) { + xfs_perag_put(pag); + continue; + } + if (!first_initialised) + first_initialised = index; + pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL); + if (!pag) + goto out_unwind; + if (radix_tree_preload(GFP_NOFS)) + goto out_unwind; + spin_lock(&mp->m_perag_lock); + if (radix_tree_insert(&mp->m_perag_tree, index, pag)) { + BUG(); + spin_unlock(&mp->m_perag_lock); + radix_tree_preload_end(); + error = -EEXIST; + goto out_unwind; + } + pag->pag_agno = index; + pag->pag_mount = mp; + spin_unlock(&mp->m_perag_lock); + radix_tree_preload_end(); + } + /* Clear the mount flag if no inode can overflow 32 bits * on this filesystem, or if specifically requested.. */ @@ -438,21 +508,33 @@ xfs_initialize_perag( } /* This ag is preferred for inodes */ - pag = &mp->m_perag[index]; + pag = xfs_perag_get(mp, index); pag->pagi_inodeok = 1; if (index < max_metadata) pag->pagf_metadata = 1; xfs_initialize_perag_icache(pag); + xfs_perag_put(pag); } } else { /* Setup default behavior for smaller filesystems */ for (index = 0; index < agcount; index++) { - pag = &mp->m_perag[index]; + pag = xfs_perag_get(mp, index); pag->pagi_inodeok = 1; xfs_initialize_perag_icache(pag); + xfs_perag_put(pag); } } - return index; + if (maxagi) + *maxagi = index; + return 0; + +out_unwind: + kmem_free(pag); + for (; index > first_initialised; index--) { + pag = radix_tree_delete(&mp->m_perag_tree, index); + kmem_free(pag); + } + return error; } void @@ -583,7 +665,7 @@ xfs_readsb(xfs_mount_t *mp, int flags) * access to the superblock. */ sector_size = xfs_getsize_buftarg(mp->m_ddev_targp); - extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED; + extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED; bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size), extra_flags); @@ -731,12 +813,13 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) error = xfs_ialloc_pagi_init(mp, NULL, index); if (error) return error; - pag = &mp->m_perag[index]; + pag = xfs_perag_get(mp, index); ifree += pag->pagi_freecount; ialloc += pag->pagi_count; bfree += pag->pagf_freeblks; bfreelst += pag->pagf_flcount; btree += pag->pagf_btreeblks; + xfs_perag_put(pag); } /* * Overwrite incore superblock counters with just-read data @@ -1008,6 +1091,22 @@ xfs_mount_reset_sbqflags( return xfs_trans_commit(tp, 0); } +__uint64_t +xfs_default_resblks(xfs_mount_t *mp) +{ + __uint64_t resblks; + + /* + * We default to 5% or 1024 fsbs of space reserved, whichever is smaller. + * This may drive us straight to ENOSPC on mount, but that implies + * we were already there on the last unmount. Warn if this occurs. + */ + resblks = mp->m_sb.sb_dblocks; + do_div(resblks, 20); + resblks = min_t(__uint64_t, resblks, 1024); + return resblks; +} + /* * This function does the following on an initial mount of a file system: * - reads the superblock from disk and init the mount struct @@ -1152,13 +1251,13 @@ xfs_mountfs( /* * Allocate and initialize the per-ag data. 
*/ - init_rwsem(&mp->m_peraglock); - mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), - KM_MAYFAIL); - if (!mp->m_perag) + spin_lock_init(&mp->m_perag_lock); + INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS); + error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); + if (error) { + cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error); goto out_remove_uuid; - - mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount); + } if (!sbp->sb_logblocks) { cmn_err(CE_WARN, "XFS: no log defined"); @@ -1318,18 +1417,14 @@ xfs_mountfs( * when at ENOSPC. This is needed for operations like create with * attr, unwritten extent conversion at ENOSPC, etc. Data allocations * are not allowed to use this reserved space. - * - * We default to 5% or 1024 fsbs of space reserved, whichever is smaller. - * This may drive us straight to ENOSPC on mount, but that implies - * we were already there on the last unmount. Warn if this occurs. */ - resblks = mp->m_sb.sb_dblocks; - do_div(resblks, 20); - resblks = min_t(__uint64_t, resblks, 1024); - error = xfs_reserve_blocks(mp, &resblks, NULL); - if (error) - cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. " - "Continuing without a reserve pool."); + if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { + resblks = xfs_default_resblks(mp); + error = xfs_reserve_blocks(mp, &resblks, NULL); + if (error) + cmn_err(CE_WARN, "XFS: Unable to allocate reserve " + "blocks. Continuing without a reserve pool."); + } return 0; @@ -1372,8 +1467,19 @@ xfs_unmountfs( * push out the iclog we will never get that unlocked. hence we * need to force the log first. */ - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); - xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC); + xfs_log_force(mp, XFS_LOG_SYNC); + + /* + * Do a delwri reclaim pass first so that as many dirty inodes are + * queued up for IO as possible. Then flush the buffers before making + * a synchronous pass to catch all the remaining inodes and reclaim + * them. This makes the reclaim process as quick as possible by + * avoiding synchronous writeout and blocking on inodes already in + * the delwri state as much as possible. + */ + xfs_reclaim_inodes(mp, 0); + XFS_bflush(mp->m_ddev_targp); + xfs_reclaim_inodes(mp, SYNC_WAIT); xfs_qm_unmount(mp); @@ -1382,7 +1488,7 @@ xfs_unmountfs( * that nothing is pinned. This is important because bflush() * will skip pinned buffers.
*/ - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); + xfs_log_force(mp, XFS_LOG_SYNC); xfs_binval(mp->m_ddev_targp); if (mp->m_rtdev_targp) { @@ -1548,15 +1654,14 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields); /* find modified range */ + f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); + ASSERT((1LL << f) & XFS_SB_MOD_BITS); + last = xfs_sb_info[f + 1].offset - 1; f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); ASSERT((1LL << f) & XFS_SB_MOD_BITS); first = xfs_sb_info[f].offset; - f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); - ASSERT((1LL << f) & XFS_SB_MOD_BITS); - last = xfs_sb_info[f + 1].offset - 1; - xfs_trans_log_buf(tp, bp, first, last); } @@ -1887,7 +1992,7 @@ xfs_getsb( ASSERT(mp->m_sb_bp != NULL); bp = mp->m_sb_bp; - if (flags & XFS_BUF_TRYLOCK) { + if (flags & XBF_TRYLOCK) { if (!XFS_BUF_CPSEMA(bp)) { return NULL; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 1df7e450296..14dafd60823 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -78,7 +78,8 @@ typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t); typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *, struct xfs_inode *, dm_right_t, struct xfs_inode *, dm_right_t, - const char *, const char *, mode_t, int, int); + const unsigned char *, const unsigned char *, + mode_t, int, int); typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t, char *, char *); typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *, @@ -207,8 +208,8 @@ typedef struct xfs_mount { uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */ uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ uint m_in_maxlevels; /* max inobt btree levels. */ - struct xfs_perag *m_perag; /* per-ag accounting info */ - struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */ + struct radix_tree_root m_perag_tree; /* per-ag accounting info */ + spinlock_t m_perag_lock; /* lock for m_perag_tree */ struct mutex m_growlock; /* growfs mutex */ int m_fixedfsid[2]; /* unchanged for life of FS */ uint m_dmevmask; /* DMI events for this FS */ @@ -224,6 +225,7 @@ typedef struct xfs_mount { __uint64_t m_maxioffset; /* maximum inode offset */ __uint64_t m_resblks; /* total reserved blocks */ __uint64_t m_resblks_avail;/* available reserved blocks */ + __uint64_t m_resblks_save; /* reserved blks @ remount,ro */ int m_dalign; /* stripe unit */ int m_swidth; /* stripe width */ int m_sinoalign; /* stripe unit inode alignment */ @@ -243,7 +245,7 @@ typedef struct xfs_mount { struct xfs_qmops *m_qm_ops; /* vector of XQM ops */ atomic_t m_active_trans; /* number trans frozen */ #ifdef HAVE_PERCPU_SB - xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */ + xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */ unsigned long m_icsb_counters; /* disabled per-cpu counters */ struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */ struct mutex m_icsb_mutex; /* balancer sync lock */ @@ -384,19 +386,10 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d) } /* - * perag get/put wrappers for eventual ref counting + * perag get/put wrappers for ref counting */ -static inline xfs_perag_t * -xfs_get_perag(struct xfs_mount *mp, xfs_ino_t ino) -{ - return &mp->m_perag[XFS_INO_TO_AGNO(mp, ino)]; -} - -static inline void -xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag) -{ - /* nothing to see here, move along */ -} +struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno); +void 
xfs_perag_put(struct xfs_perag *pag); /* * Per-cpu superblock locking functions @@ -428,6 +421,7 @@ typedef struct xfs_mod_sb { } xfs_mod_sb_t; extern int xfs_log_sbcount(xfs_mount_t *, uint); +extern __uint64_t xfs_default_resblks(xfs_mount_t *mp); extern int xfs_mountfs(xfs_mount_t *mp); extern void xfs_unmountfs(xfs_mount_t *); @@ -450,7 +444,8 @@ extern struct xfs_dmops xfs_dmcore_xfs; #endif /* __KERNEL__ */ extern void xfs_mod_sb(struct xfs_trans *, __int64_t); -extern xfs_agnumber_t xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t); +extern int xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t, + xfs_agnumber_t *); extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *); extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t); diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c index 4b0613d99fa..45ce15dc5b2 100644 --- a/fs/xfs/xfs_mru_cache.c +++ b/fs/xfs/xfs_mru_cache.c @@ -398,7 +398,7 @@ exit: * guaranteed that all the free functions for all the elements have finished * executing and the reaper is not running. */ -void +static void xfs_mru_cache_flush( xfs_mru_cache_t *mru) { diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h index 5d439f34b0c..36dd3ec8b4e 100644 --- a/fs/xfs/xfs_mru_cache.h +++ b/fs/xfs/xfs_mru_cache.h @@ -42,7 +42,6 @@ void xfs_mru_cache_uninit(void); int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms, unsigned int grp_count, xfs_mru_cache_free_func_t free_func); -void xfs_mru_cache_flush(xfs_mru_cache_t *mru); void xfs_mru_cache_destroy(struct xfs_mru_cache *mru); int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key, void *value); diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index 91bfd60f4c7..fdcab3f81dd 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h @@ -223,16 +223,9 @@ typedef struct xfs_qoff_logformat { #define XFS_QMOPT_RES_INOS 0x0800000 /* - * flags for dqflush and dqflush_all. - */ -#define XFS_QMOPT_SYNC 0x1000000 -#define XFS_QMOPT_ASYNC 0x2000000 -#define XFS_QMOPT_DELWRI 0x4000000 - -/* * flags for dqalloc. */ -#define XFS_QMOPT_INHERIT 0x8000000 +#define XFS_QMOPT_INHERIT 0x1000000 /* * flags to xfs_trans_mod_dquot. diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 9e15a118536..6be05f756d5 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -1517,6 +1517,8 @@ xfs_rtfree_range( */ error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1, &postblock); + if (error) + return error; /* * If there are blocks not being freed at the front of the * old extent, add summary data for them to be allocated. diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index 5aa07caea5f..e336742a58a 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c @@ -47,48 +47,6 @@ #include "xfs_trace.h" /* - * This is a subroutine for xfs_write() and other writers (xfs_ioctl) - * which clears the setuid and setgid bits when a file is written. 
- */ -int -xfs_write_clear_setuid( - xfs_inode_t *ip) -{ - xfs_mount_t *mp; - xfs_trans_t *tp; - int error; - - mp = ip->i_mount; - tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID); - if ((error = xfs_trans_reserve(tp, 0, - XFS_WRITEID_LOG_RES(mp), - 0, 0, 0))) { - xfs_trans_cancel(tp, 0); - return error; - } - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_ihold(tp, ip); - ip->i_d.di_mode &= ~S_ISUID; - - /* - * Note that we don't have to worry about mandatory - * file locking being disabled here because we only - * clear the S_ISGID bit if the Group execute bit is - * on, but if it was on then mandatory locking wouldn't - * have been enabled. - */ - if (ip->i_d.di_mode & S_IXGRP) { - ip->i_d.di_mode &= ~S_ISGID; - } - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - xfs_trans_set_sync(tp); - error = xfs_trans_commit(tp, 0); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - return 0; -} - -/* * Force a shutdown of the filesystem instantly while keeping * the filesystem consistent. We don't do an unmount here; just shutdown * the shop, make sure that absolutely nothing persistent happens to @@ -153,88 +111,6 @@ xfs_do_force_shutdown( } } - -/* - * Called when we want to stop a buffer from getting written or read. - * We attach the EIO error, muck with its flags, and call biodone - * so that the proper iodone callbacks get called. - */ -int -xfs_bioerror( - xfs_buf_t *bp) -{ - -#ifdef XFSERRORDEBUG - ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); -#endif - - /* - * No need to wait until the buffer is unpinned. - * We aren't flushing it. - */ - XFS_BUF_ERROR(bp, EIO); - /* - * We're calling biodone, so delete B_DONE flag. Either way - * we have to call the iodone callback, and calling biodone - * probably is the best way since it takes care of - * GRIO as well. - */ - XFS_BUF_UNREAD(bp); - XFS_BUF_UNDELAYWRITE(bp); - XFS_BUF_UNDONE(bp); - XFS_BUF_STALE(bp); - - XFS_BUF_CLR_BDSTRAT_FUNC(bp); - xfs_biodone(bp); - - return (EIO); -} - -/* - * Same as xfs_bioerror, except that we are releasing the buffer - * here ourselves, and avoiding the biodone call. - * This is meant for userdata errors; metadata bufs come with - * iodone functions attached, so that we can track down errors. - */ -int -xfs_bioerror_relse( - xfs_buf_t *bp) -{ - int64_t fl; - - ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks); - ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone); - - fl = XFS_BUF_BFLAGS(bp); - /* - * No need to wait until the buffer is unpinned. - * We aren't flushing it. - * - * chunkhold expects B_DONE to be set, whether - * we actually finish the I/O or not. We don't want to - * change that interface. - */ - XFS_BUF_UNREAD(bp); - XFS_BUF_UNDELAYWRITE(bp); - XFS_BUF_DONE(bp); - XFS_BUF_STALE(bp); - XFS_BUF_CLR_IODONE_FUNC(bp); - XFS_BUF_CLR_BDSTRAT_FUNC(bp); - if (!(fl & XFS_B_ASYNC)) { - /* - * Mark b_error and B_ERROR _both_. - * Lot's of chunkcache code assumes that. - * There's no reason to mark error for - * ASYNC buffers. - */ - XFS_BUF_ERROR(bp, EIO); - XFS_BUF_FINISH_IOWAIT(bp); - } else { - xfs_buf_relse(bp); - } - return (EIO); -} - /* * Prints out an ALERT message about I/O error. */ @@ -306,37 +182,6 @@ xfs_read_buf( } /* - * Wrapper around bwrite() so that we can trap - * write errors, and act accordingly. - */ -int -xfs_bwrite( - struct xfs_mount *mp, - struct xfs_buf *bp) -{ - int error; - - /* - * XXXsup how does this work for quotas. 
- */ - XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); - bp->b_mount = mp; - XFS_BUF_WRITE(bp); - - if ((error = XFS_bwrite(bp))) { - ASSERT(mp); - /* - * Cannot put a buftrace here since if the buffer is not - * B_HOLD then we will brelse() the buffer before returning - * from bwrite and we could be tracing a buffer that has - * been reused. - */ - xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); - } - return (error); -} - -/* * helper function to extract extent size hint from inode */ xfs_extlen_t diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h index 571f2174435..11c41ec6ed7 100644 --- a/fs/xfs/xfs_rw.h +++ b/fs/xfs/xfs_rw.h @@ -39,10 +39,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) /* * Prototypes for functions in xfs_rw.c. */ -extern int xfs_write_clear_setuid(struct xfs_inode *ip); -extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp); -extern int xfs_bioerror(struct xfs_buf *bp); -extern int xfs_bioerror_relse(struct xfs_buf *bp); extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp, xfs_daddr_t blkno, int len, uint flags, struct xfs_buf **bpp); diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 237badcbac3..be942d4e332 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -981,9 +981,8 @@ shut_us_down: */ if (sync) { if (!error) { - error = _xfs_log_force(mp, commit_lsn, - XFS_LOG_FORCE | XFS_LOG_SYNC, - log_flushed); + error = _xfs_log_force_lsn(mp, commit_lsn, + XFS_LOG_SYNC, log_flushed); } XFS_STATS_INC(xs_trans_sync); } else { @@ -1121,7 +1120,7 @@ xfs_trans_fill_vecs( tp->t_header.th_num_items = nitems; log_vector->i_addr = (xfs_caddr_t)&tp->t_header; log_vector->i_len = sizeof(xfs_trans_header_t); - XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR); + log_vector->i_type = XLOG_REG_TYPE_TRANSHDR; } diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index ca64f33c63a..c93e3a10285 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -861,8 +861,7 @@ typedef struct xfs_item_ops { #define XFS_ITEM_SUCCESS 0 #define XFS_ITEM_PINNED 1 #define XFS_ITEM_LOCKED 2 -#define XFS_ITEM_FLUSHING 3 -#define XFS_ITEM_PUSHBUF 4 +#define XFS_ITEM_PUSHBUF 3 /* * This structure is used to maintain a list of block ranges that have been diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 2ffc570679b..e799824f724 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -237,14 +237,15 @@ out: } /* - * Function that does the work of pushing on the AIL + * xfsaild_push does the work of pushing on the AIL. Returning a timeout of + * zero indicates that the caller should sleep until woken. */ long xfsaild_push( struct xfs_ail *ailp, xfs_lsn_t *last_lsn) { - long tout = 1000; /* milliseconds */ + long tout = 0; xfs_lsn_t last_pushed_lsn = *last_lsn; xfs_lsn_t target = ailp->xa_target; xfs_lsn_t lsn; @@ -252,6 +253,7 @@ xfsaild_push( int flush_log, count, stuck; xfs_mount_t *mp = ailp->xa_mount; struct xfs_ail_cursor *cur = &ailp->xa_cursors; + int push_xfsbufd = 0; spin_lock(&ailp->xa_lock); xfs_trans_ail_cursor_init(ailp, cur); @@ -262,7 +264,7 @@ xfsaild_push( */ xfs_trans_ail_cursor_done(ailp, cur); spin_unlock(&ailp->xa_lock); - last_pushed_lsn = 0; + *last_lsn = 0; return tout; } @@ -279,7 +281,6 @@ xfsaild_push( * prevents use from spinning when we can't do anything or there is * lots of contention on the AIL lists. 
*/ - tout = 10; lsn = lip->li_lsn; flush_log = stuck = count = 0; while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { @@ -308,6 +309,7 @@ xfsaild_push( XFS_STATS_INC(xs_push_ail_pushbuf); IOP_PUSHBUF(lip); last_pushed_lsn = lsn; + push_xfsbufd = 1; break; case XFS_ITEM_PINNED: @@ -322,12 +324,6 @@ xfsaild_push( stuck++; break; - case XFS_ITEM_FLUSHING: - XFS_STATS_INC(xs_push_ail_flushing); - last_pushed_lsn = lsn; - stuck++; - break; - default: ASSERT(0); break; @@ -371,19 +367,24 @@ xfsaild_push( * move forward in the AIL. */ XFS_STATS_INC(xs_push_ail_flush); - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + xfs_log_force(mp, 0); + } + + if (push_xfsbufd) { + /* we've got delayed write buffers to flush */ + wake_up_process(mp->m_ddev_targp->bt_task); } if (!count) { /* We're past our target or empty, so idle */ - tout = 1000; + last_pushed_lsn = 0; } else if (XFS_LSN_CMP(lsn, target) >= 0) { /* * We reached the target so wait a bit longer for I/O to * complete and remove pushed items from the AIL before we * start the next scan from the start of the AIL. */ - tout += 20; + tout = 50; last_pushed_lsn = 0; } else if ((stuck * 100) / count > 90) { /* @@ -395,11 +396,14 @@ xfsaild_push( * Backoff a bit more to allow some I/O to complete before * continuing from where we were. */ - tout += 10; + tout = 20; + } else { + /* more to do, but wait a short while before continuing */ + tout = 10; } *last_lsn = last_pushed_lsn; return tout; -} /* xfsaild_push */ +} /* diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 49130628d5e..5ffd544434e 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -75,13 +75,14 @@ xfs_trans_get_buf(xfs_trans_t *tp, xfs_buf_log_item_t *bip; if (flags == 0) - flags = XFS_BUF_LOCK | XFS_BUF_MAPPED; + flags = XBF_LOCK | XBF_MAPPED; /* * Default to a normal get_buf() call if the tp is NULL. */ if (tp == NULL) - return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY); + return xfs_buf_get(target_dev, blkno, len, + flags | XBF_DONT_BLOCK); /* * If we find the buffer in the cache with this transaction @@ -117,14 +118,14 @@ xfs_trans_get_buf(xfs_trans_t *tp, } /* - * We always specify the BUF_BUSY flag within a transaction so - * that get_buf does not try to push out a delayed write buffer + * We always specify the XBF_DONT_BLOCK flag within a transaction + * so that get_buf does not try to push out a delayed write buffer * which might cause another transaction to take place (if the * buffer was delayed alloc). Such recursive transactions can * easily deadlock with our current transaction as well as cause * us to run out of stack space. */ - bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY); + bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK); if (bp == NULL) { return NULL; } @@ -290,15 +291,15 @@ xfs_trans_read_buf( int error; if (flags == 0) - flags = XFS_BUF_LOCK | XFS_BUF_MAPPED; + flags = XBF_LOCK | XBF_MAPPED; /* * Default to a normal get_buf() call if the tp is NULL. */ if (tp == NULL) { - bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY); + bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK); if (!bp) - return (flags & XFS_BUF_TRYLOCK) ? + return (flags & XBF_TRYLOCK) ? 
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 49130628d5e..5ffd544434e 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -75,13 +75,14 @@ xfs_trans_get_buf(xfs_trans_t	*tp,
 	xfs_buf_log_item_t	*bip;
 
 	if (flags == 0)
-		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+		flags = XBF_LOCK | XBF_MAPPED;
 
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
 	if (tp == NULL)
-		return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+		return xfs_buf_get(target_dev, blkno, len,
+				   flags | XBF_DONT_BLOCK);
 
 	/*
 	 * If we find the buffer in the cache with this transaction
@@ -117,14 +118,14 @@ xfs_trans_get_buf(xfs_trans_t	*tp,
 	}
 
 	/*
-	 * We always specify the BUF_BUSY flag within a transaction so
-	 * that get_buf does not try to push out a delayed write buffer
+	 * We always specify the XBF_DONT_BLOCK flag within a transaction
+	 * so that get_buf does not try to push out a delayed write buffer
 	 * which might cause another transaction to take place (if the
 	 * buffer was delayed alloc).  Such recursive transactions can
 	 * easily deadlock with our current transaction as well as cause
 	 * us to run out of stack space.
 	 */
-	bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
 	if (bp == NULL) {
 		return NULL;
 	}
@@ -290,15 +291,15 @@ xfs_trans_read_buf(
 	int			error;
 
 	if (flags == 0)
-		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+		flags = XBF_LOCK | XBF_MAPPED;
 
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
 	if (tp == NULL) {
-		bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+		bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
 		if (!bp)
-			return (flags & XFS_BUF_TRYLOCK) ?
+			return (flags & XBF_TRYLOCK) ?
					EAGAIN : XFS_ERROR(ENOMEM);
 
 		if (XFS_BUF_GETERROR(bp) != 0) {
@@ -385,14 +386,14 @@ xfs_trans_read_buf(
 	}
 
 	/*
-	 * We always specify the BUF_BUSY flag within a transaction so
-	 * that get_buf does not try to push out a delayed write buffer
+	 * We always specify the XBF_DONT_BLOCK flag within a transaction
+	 * so that get_buf does not try to push out a delayed write buffer
 	 * which might cause another transaction to take place (if the
 	 * buffer was delayed alloc).  Such recursive transactions can
 	 * easily deadlock with our current transaction as well as cause
 	 * us to run out of stack space.
 	 */
-	bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+	bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
 	if (bp == NULL) {
 		*bpp = NULL;
 		return 0;
@@ -472,8 +473,8 @@ shutdown_abort:
 	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
 		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
 #endif
-	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
-				     (XFS_B_STALE|XFS_B_DELWRI));
+	ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
+				     (XBF_STALE|XBF_DELWRI));
 
 	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
 	xfs_buf_relse(bp);
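The comment kept in both hunks explains the XBF_DONT_BLOCK rule: flushing a delayed-write buffer from inside a transaction can kick off a nested transaction, which can deadlock against the current one or overflow the stack. A toy model of that recursion; every name here is invented, and only the flag's purpose mirrors the kernel:

#include <stdio.h>

#define XBF_DONT_BLOCK	(1u << 0)	/* illustrative value, not the kernel's */

static int depth;	/* nesting depth of "transactions" in this toy model */

static void toy_buf_get(unsigned flags);

/* flushing a delayed-write buffer may itself need a transaction... */
static void toy_flush_delwri(void)
{
	depth++;
	printf("nested transaction, depth=%d (deadlock risk)\n", depth);
	toy_buf_get(0);			/* ...and that transaction needs buffers too */
	depth--;
}

static void toy_buf_get(unsigned flags)
{
	if (depth > 3)			/* cap the toy recursion */
		return;
	if (!(flags & XBF_DONT_BLOCK))
		toy_flush_delwri();	/* memory-pressure path: push a buffer out */
}

int main(void)
{
	puts("get inside a transaction, blocking allowed:");
	toy_buf_get(0);
	puts("get inside a transaction, XBF_DONT_BLOCK: no recursion");
	toy_buf_get(XBF_DONT_BLOCK);
	return 0;
}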
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index d725428c9df..b09904555d0 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -151,8 +151,8 @@ typedef enum {
 } xfs_btnum_t;
 
 struct xfs_name {
-	const char	*name;
-	int		len;
+	const unsigned char	*name;
+	int			len;
 };
 
 #endif	/* __XFS_TYPES_H__ */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 6f268756bf3..ddd2c5d1b85 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -256,7 +256,7 @@ xfs_setattr(
 		    iattr->ia_size > ip->i_d.di_size) {
 			code = xfs_flush_pages(ip,
 					ip->i_d.di_size, iattr->ia_size,
-					XFS_B_ASYNC, FI_NONE);
+					XBF_ASYNC, FI_NONE);
 		}
 
 		/* wait for all I/O to complete */
@@ -597,7 +597,7 @@ xfs_fsync(
 {
 	xfs_trans_t	*tp;
 	int		error = 0;
-	int		log_flushed = 0, changed = 1;
+	int		log_flushed = 0;
 
 	xfs_itrace_entry(ip);
 
@@ -627,19 +627,16 @@ xfs_fsync(
 		 * disk yet, the inode will be still be pinned.  If it is,
 		 * force the log.
 		 */
-
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
 		if (xfs_ipincount(ip)) {
-			error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
-				      XFS_LOG_FORCE | XFS_LOG_SYNC,
-				      &log_flushed);
-		} else {
-			/*
-			 * If the inode is not pinned and nothing has changed
-			 * we don't need to flush the cache.
-			 */
-			changed = 0;
+			if (ip->i_itemp->ili_last_lsn) {
+				error = _xfs_log_force_lsn(ip->i_mount,
+						ip->i_itemp->ili_last_lsn,
+						XFS_LOG_SYNC, &log_flushed);
+			} else {
+				error = _xfs_log_force(ip->i_mount,
+						XFS_LOG_SYNC, &log_flushed);
+			}
 		}
 	} else {
 		/*
@@ -674,7 +671,7 @@ xfs_fsync(
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 	}
 
-	if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) {
+	if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
 		/*
 		 * If the log write didn't issue an ordered tag we need
 		 * to flush the disk cache for the data device now.
@@ -1096,7 +1093,7 @@ xfs_release(
 		 */
 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
 		if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
-			xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
+			xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
 	}
 
 	if (ip->i_d.di_nlink != 0) {
@@ -2199,7 +2196,8 @@ xfs_symlink(
 	if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
 		error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
 					DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
-					link_name->name, target_path, 0, 0, 0);
+					link_name->name,
+					(unsigned char *)target_path, 0, 0, 0);
 		if (error)
 			return error;
 	}
@@ -2395,7 +2393,8 @@ std_return:
 			dp, DM_RIGHT_NULL,
 			error ? NULL : ip,
 			DM_RIGHT_NULL, link_name->name,
-			target_path, 0, error, 0);
+			(unsigned char *)target_path,
+			0, error, 0);
 	}
 
 	if (!error)
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 167a467403a..774f40729ca 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -43,11 +43,11 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd,
 int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
 		struct xfs_inode *src_ip, struct xfs_inode *target_dp,
 		struct xfs_name *target_name, struct xfs_inode *target_ip);
-int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value,
-		int *valuelenp, int flags);
-int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value,
-		int valuelen, int flags);
-int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags);
+int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
+		unsigned char *value, int *valuelenp, int flags);
+int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
+		unsigned char *value, int valuelen, int flags);
+int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
 int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
 		int flags, struct attrlist_cursor_kern *cursor);
 ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb,
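The const unsigned char conversions in xfs_types.h and xfs_vnodeops.h matter because name bytes feed hashing and comparison code: going through plain char sign-extends bytes >= 0x80 on ABIs where char is signed. A standalone sketch of the hazard, using a made-up rotate-and-xor hash (not the actual XFS dirent hash):

#include <stdio.h>

static unsigned int rol32(unsigned int x, int n)
{
	return (x << n) | (x >> (32 - n));
}

static unsigned int hash_unsigned(const unsigned char *p, int len)
{
	unsigned int h = 0;
	while (len--)
		h = *p++ ^ rol32(h, 7);
	return h;
}

static unsigned int hash_plain_char(const char *p, int len)
{
	unsigned int h = 0;
	while (len--)
		h = *p++ ^ rol32(h, 7);	/* *p sign-extends for bytes >= 0x80 */
	return h;
}

int main(void)
{
	const char name[] = "\xc3\xa9";	/* UTF-8 "e-acute": both bytes >= 0x80 */

	/* differs on signed-char ABIs (e.g. x86); unsigned char is stable */
	printf("unsigned: %08x\n", hash_unsigned((const unsigned char *)name, 2));
	printf("signed:   %08x\n", hash_plain_char(name, 2));
	return 0;
}

Pinning the name type to unsigned char makes every consumer agree on the byte values, instead of relying on each call site's char signedness.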