Diffstat (limited to 'fs')
-rw-r--r-- | fs/block_dev.c | 21
-rw-r--r-- | fs/cifs/cifssmb.c | 3
-rw-r--r-- | fs/exec.c | 10
-rw-r--r-- | fs/exportfs/expfs.c | 4
-rw-r--r-- | fs/ext4/balloc.c | 4
-rw-r--r-- | fs/fcntl.c | 7
-rw-r--r-- | fs/inotify.c | 2
-rw-r--r-- | fs/ioctl.c | 12
-rw-r--r-- | fs/lockd/host.c | 3
-rw-r--r-- | fs/lockd/svc.c | 1
-rw-r--r-- | fs/nfsd/nfs4recover.c | 2
-rw-r--r-- | fs/nfsd/nfs4state.c | 1
-rw-r--r-- | fs/ocfs2/ocfs2_fs.h | 8
-rw-r--r-- | fs/ocfs2/xattr.c | 4
-rw-r--r-- | fs/proc/base.c | 2
-rw-r--r-- | fs/proc/task_mmu.c | 4
-rw-r--r-- | fs/ubifs/commit.c | 4
-rw-r--r-- | fs/ubifs/debug.c | 66
-rw-r--r-- | fs/ubifs/dir.c | 5
-rw-r--r-- | fs/ubifs/file.c | 91
-rw-r--r-- | fs/ubifs/journal.c | 8
-rw-r--r-- | fs/ubifs/key.h | 4
-rw-r--r-- | fs/ubifs/lpt_commit.c | 2
-rw-r--r-- | fs/ubifs/orphan.c | 28
-rw-r--r-- | fs/ubifs/recovery.c | 17
-rw-r--r-- | fs/ubifs/replay.c | 2
-rw-r--r-- | fs/ubifs/sb.c | 9
-rw-r--r-- | fs/ubifs/super.c | 70
-rw-r--r-- | fs/ubifs/tnc.c | 12
-rw-r--r-- | fs/ubifs/ubifs.h | 12
-rw-r--r-- | fs/xfs/xfs_rename.c | 2
31 files changed, 289 insertions(+), 131 deletions(-)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index db831efbdbb..99e0ae1a4c7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1135,12 +1135,15 @@ static int blkdev_open(struct inode * inode, struct file * filp)
 	if (res)
 		return res;
 
-	if (!(filp->f_mode & FMODE_EXCL))
-		return 0;
+	if (filp->f_mode & FMODE_EXCL) {
+		res = bd_claim(bdev, filp);
+		if (res)
+			goto out_blkdev_put;
+	}
 
-	if (!(res = bd_claim(bdev, filp)))
-		return 0;
+	return 0;
 
+ out_blkdev_put:
 	blkdev_put(bdev, filp->f_mode);
 	return res;
 }
@@ -1203,8 +1206,16 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
 	struct block_device *bdev = I_BDEV(file->f_mapping->host);
 	fmode_t mode = file->f_mode;
+
+	/*
+	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+	 * to updated it before every ioctl.
+	 */
 	if (file->f_flags & O_NDELAY)
-		mode |= FMODE_NDELAY_NOW;
+		mode |= FMODE_NDELAY;
+	else
+		mode &= ~FMODE_NDELAY;
+
 	return blkdev_ioctl(bdev, mode, cmd, arg);
 }
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2af8626ced4..6d51696dc76 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -3983,7 +3983,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
 		node->flags = le16_to_cpu(pSMBr->DFSFlags);
 		if (is_unicode) {
-			__le16 *tmp = kmalloc(strlen(searchName)*2, GFP_KERNEL);
+			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
+					      GFP_KERNEL);
 			cifsConvertToUCS((__le16 *) tmp, searchName,
 					 PATH_MAX, nls_codepage, remap);
 			node->path_consumed = hostlen_fromUCS(tmp,
diff --git a/fs/exec.c b/fs/exec.c
index 4e834f16d9d..ec5df9a3831 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1159,6 +1159,7 @@ EXPORT_SYMBOL(remove_arg_zero);
  */
 int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 {
+	unsigned int depth = bprm->recursion_depth;
 	int try,retval;
 	struct linux_binfmt *fmt;
 #ifdef __alpha__
@@ -1219,8 +1220,15 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 				continue;
 			read_unlock(&binfmt_lock);
 			retval = fn(bprm, regs);
+			/*
+			 * Restore the depth counter to its starting value
+			 * in this call, so we don't have to rely on every
+			 * load_binary function to restore it on return.
+			 */
+			bprm->recursion_depth = depth;
 			if (retval >= 0) {
-				tracehook_report_exec(fmt, bprm, regs);
+				if (depth == 0)
+					tracehook_report_exec(fmt, bprm, regs);
 				put_binfmt(fmt);
 				allow_write_access(bprm->file);
 				if (bprm->file)
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 80246bad1b7..890e0182881 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -367,6 +367,8 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
 	 * Try to get any dentry for the given file handle from the filesystem.
 	 */
 	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
+	if (!result)
+		result = ERR_PTR(-ESTALE);
 	if (IS_ERR(result))
 		return result;
@@ -420,6 +422,8 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
 
 		target_dir = nop->fh_to_parent(mnt->mnt_sb, fid, fh_len,
 				fileid_type);
+		if (!target_dir)
+			goto err_result;
 		err = PTR_ERR(target_dir);
 		if (IS_ERR(target_dir))
 			goto err_result;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index d2003cdc36a..db35cfdb3c8 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -609,8 +609,8 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
 
 	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
 				EXT4_FREEBLOCKS_WATERMARK) {
-		free_blocks = percpu_counter_sum(fbc);
-		dirty_blocks = percpu_counter_sum(dbc);
+		free_blocks = percpu_counter_sum_positive(fbc);
+		dirty_blocks = percpu_counter_sum_positive(dbc);
 		if (dirty_blocks < 0) {
 			printk(KERN_CRIT "Dirty block accounting "
 					"went wrong %lld\n",
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ac4f7db9f13..549daf8005f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -19,6 +19,7 @@
 #include <linux/signal.h>
 #include <linux/rcupdate.h>
 #include <linux/pid_namespace.h>
+#include <linux/smp_lock.h>
 
 #include <asm/poll.h>
 #include <asm/siginfo.h>
@@ -175,6 +176,11 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 	if (error)
 		return error;
 
+	/*
+	 * We still need a lock here for now to keep multiple FASYNC calls
+	 * from racing with each other.
+	 */
+	lock_kernel();
 	if ((arg ^ filp->f_flags) & FASYNC) {
 		if (filp->f_op && filp->f_op->fasync) {
 			error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
@@ -185,6 +191,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
  out:
+	unlock_kernel();
 	return error;
 }
diff --git a/fs/inotify.c b/fs/inotify.c
index 7bbed1b8982..dae3f28f30d 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -428,11 +428,13 @@ void inotify_unmount_inodes(struct list_head *list)
 		watches = &inode->inotify_watches;
 		list_for_each_entry_safe(watch, next_w, watches, i_list) {
 			struct inotify_handle *ih= watch->ih;
+			get_inotify_watch(watch);
 			mutex_lock(&ih->mutex);
 			ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
 						 NULL, NULL);
 			inotify_remove_watch_locked(ih, watch);
 			mutex_unlock(&ih->mutex);
+			put_inotify_watch(watch);
 		}
 		mutex_unlock(&inode->inotify_mutex);
 		iput(inode);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index d152856c371..43e8b2c0664 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -400,11 +400,9 @@ static int ioctl_fioasync(unsigned int fd, struct file *filp,
 
 	/* Did FASYNC state change ? */
 	if ((flag ^ filp->f_flags) & FASYNC) {
-		if (filp->f_op && filp->f_op->fasync) {
-			lock_kernel();
+		if (filp->f_op && filp->f_op->fasync)
 			error = filp->f_op->fasync(fd, filp, on);
-			unlock_kernel();
-		} else
+		else
 			error = -ENOTTY;
 	}
 	if (error)
@@ -440,11 +438,17 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 		break;
 
 	case FIONBIO:
+		/* BKL needed to avoid races tweaking f_flags */
+		lock_kernel();
 		error = ioctl_fionbio(filp, argp);
+		unlock_kernel();
 		break;
 
 	case FIOASYNC:
+		/* BKL needed to avoid races tweaking f_flags */
+		lock_kernel();
 		error = ioctl_fioasync(fd, filp, argp);
+		unlock_kernel();
 		break;
 
 	case FIOQSIZE:
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 9fd8889097b..70fc63a1727 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -167,7 +167,8 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
 			continue;
 		if (host->h_server != ni->server)
 			continue;
-		if (!nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
+		if (ni->server &&
+		    !nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
 			continue;
 
 		/* Move to head of hash chain. */
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index c631a83931c..56b076736b5 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -181,6 +181,7 @@ lockd(void *vrqstp)
 	}
 	flush_signals(current);
 	cancel_delayed_work_sync(&grace_period_end);
+	locks_end_grace(&lockd_manager);
 	if (nlmsvc_ops)
 		nlmsvc_invalidate_all();
 	nlm_shutdown_hosts();
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index bb93946ace2..b79ec930d9f 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -225,12 +225,12 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
 		return 0;
 
 	nfs4_save_user(&uid, &gid);
+	INIT_LIST_HEAD(dentries);
 
 	filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY);
 	status = PTR_ERR(filp);
 	if (IS_ERR(filp))
 		goto out;
-	INIT_LIST_HEAD(dentries);
 	status = vfs_readdir(filp, nfsd4_build_dentrylist, &dla);
 	fput(filp);
 	while (!list_empty(dentries)) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b0bebc552a1..1a052ac2bde 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3261,6 +3261,7 @@ nfs4_state_shutdown(void)
 {
 	cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work);
 	destroy_workqueue(laundry_wq);
+	locks_end_grace(&nfsd4_manager);
 	nfs4_lock_state();
 	nfs4_release_reclaim();
 	__nfs4_state_shutdown();
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 5f180cf7abb..5e0c0d0aef7 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -86,7 +86,8 @@
 #define OCFS2_CLEAR_INCOMPAT_FEATURE(sb,mask) \
 	OCFS2_SB(sb)->s_feature_incompat &= ~(mask)
 
-#define OCFS2_FEATURE_COMPAT_SUPP	OCFS2_FEATURE_COMPAT_BACKUP_SB
+#define OCFS2_FEATURE_COMPAT_SUPP	(OCFS2_FEATURE_COMPAT_BACKUP_SB \
+					 | OCFS2_FEATURE_COMPAT_JBD2_SB)
 #define OCFS2_FEATURE_INCOMPAT_SUPP	(OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \
 					 | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \
 					 | OCFS2_FEATURE_INCOMPAT_INLINE_DATA \
@@ -153,6 +154,11 @@
 #define OCFS2_FEATURE_COMPAT_BACKUP_SB	0x0001
 
 /*
+ * The filesystem will correctly handle journal feature bits.
+ */
+#define OCFS2_FEATURE_COMPAT_JBD2_SB	0x0002
+
+/*
  * Unwritten extents support.
  */
 #define OCFS2_FEATURE_RO_COMPAT_UNWRITTEN	0x0001
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 054e2efb0b7..74d7367ade1 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -2645,9 +2645,9 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode,
 				return ret;
 			}
-			i = xs->here - old_xh->xh_entries;
-			xs->here = &xs->header->xh_entries[i];
 		}
+		i = xs->here - old_xh->xh_entries;
+		xs->here = &xs->header->xh_entries[i];
 	}
 
 	return ret;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 486cf3fe713..d4677603c88 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -371,7 +371,7 @@ static int lstats_show_proc(struct seq_file *m, void *v)
 			   task->latency_record[i].time,
 			   task->latency_record[i].max);
 		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-			char sym[KSYM_NAME_LEN];
+			char sym[KSYM_SYMBOL_LEN];
 			char *c;
 			if (!task->latency_record[i].backtrace[q])
 				break;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index b770c095e45..3a8bdd7f575 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -557,9 +557,9 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
-static unsigned long pte_to_pagemap_entry(pte_t pte)
+static u64 pte_to_pagemap_entry(pte_t pte)
 {
-	unsigned long pme = 0;
+	u64 pme = 0;
 	if (is_swap_pte(pte))
 		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
 			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 0a6aa2cc78f..b49884c8c10 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -234,8 +234,8 @@ int ubifs_bg_thread(void *info)
 	int err;
 	struct ubifs_info *c = info;
 
-	ubifs_msg("background thread \"%s\" started, PID %d",
-		  c->bgt_name, current->pid);
+	dbg_msg("background thread \"%s\" started, PID %d",
+		c->bgt_name, current->pid);
 	set_freezable();
 
 	while (1) {
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7186400750e..510ffa0bbda 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -101,21 +101,24 @@ static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
 	if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
 		switch (type) {
 		case UBIFS_INO_KEY:
-			sprintf(p, "(%lu, %s)", key_inum(c, key),
+			sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
 				get_key_type(type));
 			break;
 		case UBIFS_DENT_KEY:
 		case UBIFS_XENT_KEY:
-			sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %#08x)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_hash(c, key));
 			break;
 		case UBIFS_DATA_KEY:
-			sprintf(p, "(%lu, %s, %u)", key_inum(c, key),
+			sprintf(p, "(%lu, %s, %u)",
+				(unsigned long)key_inum(c, key),
 				get_key_type(type), key_block(c, key));
 			break;
 		case UBIFS_TRUN_KEY:
 			sprintf(p, "(%lu, %s)",
-				key_inum(c, key), get_key_type(type));
+				(unsigned long)key_inum(c, key),
+				get_key_type(type));
 			break;
 		default:
 			sprintf(p, "(bad key type: %#08x, %#08x)",
@@ -364,8 +367,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
 		       le32_to_cpu(mst->ihead_lnum));
 		printk(KERN_DEBUG "\tihead_offs %u\n",
 		       le32_to_cpu(mst->ihead_offs));
-		printk(KERN_DEBUG "\tindex_size %u\n",
-		       le32_to_cpu(mst->index_size));
+		printk(KERN_DEBUG "\tindex_size %llu\n",
+		       (unsigned long long)le64_to_cpu(mst->index_size));
 		printk(KERN_DEBUG "\tlpt_lnum %u\n",
 		       le32_to_cpu(mst->lpt_lnum));
 		printk(KERN_DEBUG "\tlpt_offs %u\n",
@@ -1589,7 +1592,7 @@ static struct fsck_inode *add_inode(struct ubifs_info *c,
 
 	if (inum > c->highest_inum) {
 		ubifs_err("too high inode number, max. is %lu",
-			  c->highest_inum);
+			  (unsigned long)c->highest_inum);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1668,16 +1671,18 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	ino_key_init(c, &key, inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", inum);
+		ubifs_err("inode %lu not found in index", (unsigned long)inum);
 		return ERR_PTR(-ENOENT);
 	} else if (err < 0) {
-		ubifs_err("error %d while looking up inode %lu", err, inum);
+		ubifs_err("error %d while looking up inode %lu",
+			  err, (unsigned long)inum);
 		return ERR_PTR(err);
 	}
 
 	zbr = &znode->zbranch[n];
 	if (zbr->len < UBIFS_INO_NODE_SZ) {
-		ubifs_err("bad node %lu node length %d", inum, zbr->len);
+		ubifs_err("bad node %lu node length %d",
+			  (unsigned long)inum, zbr->len);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1697,7 +1702,7 @@ static struct fsck_inode *read_add_inode(struct ubifs_info *c,
 	kfree(ino);
 	if (IS_ERR(fscki)) {
 		ubifs_err("error %ld while adding inode %lu node",
-			  PTR_ERR(fscki), inum);
+			  PTR_ERR(fscki), (unsigned long)inum);
 		return fscki;
 	}
@@ -1786,7 +1791,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing data node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
@@ -1819,7 +1825,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		if (IS_ERR(fscki)) {
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
-				  "trying to find inode node %lu", err, inum);
+				  "trying to find inode node %lu",
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
@@ -1832,7 +1839,7 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 			err = PTR_ERR(fscki);
 			ubifs_err("error %d while processing entry node and "
 				  "trying to find parent inode node %lu",
-				  err, inum);
+				  err, (unsigned long)inum);
 			goto out_dump;
 		}
@@ -1923,7 +1930,8 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 1) {
 				ubifs_err("directory inode %lu has %d "
 					  "direntries which refer it, but "
-					  "should be 1", fscki->inum,
+					  "should be 1",
+					  (unsigned long)fscki->inum,
 					  fscki->references);
 				goto out_dump;
 			}
@@ -1931,27 +1939,29 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 			    fscki->references != 0) {
 				ubifs_err("root inode %lu has non-zero (%d) "
 					  "direntries which refer it",
-					  fscki->inum, fscki->references);
+					  (unsigned long)fscki->inum,
+					  fscki->references);
 				goto out_dump;
 			}
 			if (fscki->calc_sz != fscki->size) {
 				ubifs_err("directory inode %lu size is %lld, "
 					  "but calculated size is %lld",
-					  fscki->inum, fscki->size,
-					  fscki->calc_sz);
+					  (unsigned long)fscki->inum,
+					  fscki->size, fscki->calc_sz);
 				goto out_dump;
 			}
 			if (fscki->calc_cnt != fscki->nlink) {
 				ubifs_err("directory inode %lu nlink is %d, "
 					  "but calculated nlink is %d",
-					  fscki->inum, fscki->nlink,
-					  fscki->calc_cnt);
+					  (unsigned long)fscki->inum,
+					  fscki->nlink, fscki->calc_cnt);
 				goto out_dump;
 			}
 		} else {
 			if (fscki->references != fscki->nlink) {
 				ubifs_err("inode %lu nlink is %d, but "
-					  "calculated nlink is %d", fscki->inum,
+					  "calculated nlink is %d",
+					  (unsigned long)fscki->inum,
 					  fscki->nlink, fscki->references);
 				goto out_dump;
 			}
@@ -1959,20 +1969,21 @@ static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
 		if (fscki->xattr_sz != fscki->calc_xsz) {
 			ubifs_err("inode %lu has xattr size %u, but "
 				  "calculated size is %lld",
-				  fscki->inum, fscki->xattr_sz,
+				  (unsigned long)fscki->inum, fscki->xattr_sz,
 				  fscki->calc_xsz);
 			goto out_dump;
 		}
 		if (fscki->xattr_cnt != fscki->calc_xcnt) {
 			ubifs_err("inode %lu has %u xattrs, but "
-				  "calculated count is %lld", fscki->inum,
+				  "calculated count is %lld",
+				  (unsigned long)fscki->inum,
 				  fscki->xattr_cnt, fscki->calc_xcnt);
 			goto out_dump;
 		}
 		if (fscki->xattr_nms != fscki->calc_xnms) {
 			ubifs_err("inode %lu has xattr names' size %u, but "
 				  "calculated names' size is %lld",
-				  fscki->inum, fscki->xattr_nms,
+				  (unsigned long)fscki->inum, fscki->xattr_nms,
 				  fscki->calc_xnms);
 			goto out_dump;
 		}
@@ -1985,11 +1996,12 @@ out_dump:
 	ino_key_init(c, &key, fscki->inum);
 	err = ubifs_lookup_level0(c, &key, &znode, &n);
 	if (!err) {
-		ubifs_err("inode %lu not found in index", fscki->inum);
+		ubifs_err("inode %lu not found in index",
+			  (unsigned long)fscki->inum);
 		return -ENOENT;
 	} else if (err < 0) {
 		ubifs_err("error %d while looking up inode %lu",
-			  err, fscki->inum);
+			  err, (unsigned long)fscki->inum);
 		return err;
 	}
@@ -2007,7 +2019,7 @@ out_dump:
 	}
 
 	ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
-		  fscki->inum, zbr->lnum, zbr->offs);
+		  (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
 	dbg_dump_node(c, ino);
 	kfree(ino);
 	return -EINVAL;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 526c01ec800..0422c98e179 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -161,7 +161,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
 			return ERR_PTR(-EINVAL);
 		}
 		ubifs_warn("running out of inode numbers (current %lu, max %d)",
-			   c->highest_inum, INUM_WATERMARK);
+			   (unsigned long)c->highest_inum, INUM_WATERMARK);
 	}
 
 	inode->i_ino = ++c->highest_inum;
@@ -428,7 +428,8 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 		dbg_gen("feed '%s', ino %llu, new f_pos %#x",
 			dent->name, (unsigned long long)le64_to_cpu(dent->inum),
 			key_hash_flash(c, &dent->key));
-		ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum);
+		ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
+			     ubifs_inode(dir)->creat_sqnum);
 
 		nm.len = le16_to_cpu(dent->nlen);
 		over = filldir(dirent, dent->name, nm.len, file->f_pos,
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 51cf511d44d..2624411d975 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -72,7 +72,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 		return err;
 	}
 
-	ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);
+	ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
 
 	len = le32_to_cpu(dn->size);
 	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
@@ -626,7 +626,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 			dn = bu->buf + (bu->zbranch[nn].offs - offs);
 
-			ubifs_assert(dn->ch.sqnum >
+			ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
 				     ubifs_inode(inode)->creat_sqnum);
 
 			len = le32_to_cpu(dn->size);
@@ -691,32 +691,22 @@ out_err:
 /**
  * ubifs_do_bulk_read - do bulk-read.
  * @c: UBIFS file-system description object
- * @page1: first page
+ * @bu: bulk-read information
+ * @page1: first page to read
  *
  * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
  */
-static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+			      struct page *page1)
 {
 	pgoff_t offset = page1->index, end_index;
 	struct address_space *mapping = page1->mapping;
 	struct inode *inode = mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	struct bu_info *bu;
 	int err, page_idx, page_cnt, ret = 0, n = 0;
+	int allocate = bu->buf ? 0 : 1;
 	loff_t isize;
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
-	if (!bu)
-		return 0;
-
-	bu->buf_len = c->bulk_read_buf_size;
-	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
-	if (!bu->buf)
-		goto out_free;
-
-	data_key_init(c, &bu->key, inode->i_ino,
-		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_tnc_get_bu_keys(c, bu);
 	if (err)
 		goto out_warn;
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 		 * together. If all the pages were like this, bulk-read would
 		 * reduce performance, so we turn it off for a while.
 		 */
-		ui->read_in_a_row = 0;
-		ui->bulk_read = 0;
-		goto out_free;
+		goto out_bu_off;
 	}
 
 	if (bu->cnt) {
+		if (allocate) {
+			/*
+			 * Allocate bulk-read buffer depending on how many data
+			 * nodes we are going to read.
+			 */
+			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+				      bu->zbranch[bu->cnt - 1].len -
+				      bu->zbranch[0].offs;
+			ubifs_assert(bu->buf_len > 0);
+			ubifs_assert(bu->buf_len <= c->leb_size);
+			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+			if (!bu->buf)
+				goto out_bu_off;
+		}
+
 		err = ubifs_tnc_bulk_read(c, bu);
 		if (err)
 			goto out_warn;
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 	ui->last_page_read = offset + page_idx - 1;
 
 out_free:
-	kfree(bu->buf);
-	kfree(bu);
+	if (allocate)
+		kfree(bu->buf);
 	return ret;
 
 out_warn:
 	ubifs_warn("ignoring error %d and skipping bulk-read", err);
 	goto out_free;
+
+out_bu_off:
+	ui->read_in_a_row = ui->bulk_read = 0;
+	goto out_free;
 }
 
 /**
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
-	int ret = 0;
+	struct bu_info *bu;
+	int err = 0, allocated = 0;
 
 	ui->last_page_read = index;
-
 	if (!c->bulk_read)
 		return 0;
+
 	/*
-	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
-	 * don't bother if we cannot lock the mutex.
+	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+	 * so don't bother if we cannot lock the mutex.
 	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
+
 	if (index != last_page_read + 1) {
 		/* Turn off bulk-read if we stop reading sequentially */
 		ui->read_in_a_row = 1;
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page)
 			ui->bulk_read = 0;
 		goto out_unlock;
 	}
+
 	if (!ui->bulk_read) {
 		ui->read_in_a_row += 1;
 		if (ui->read_in_a_row < 3)
@@ -829,10 +839,35 @@ static int ubifs_bulk_read(struct page *page)
 		/* Three reads in a row, so switch on bulk-read */
 		ui->bulk_read = 1;
 	}
-	ret = ubifs_do_bulk_read(c, page);
+
+	/*
+	 * If possible, try to use pre-allocated bulk-read information, which
+	 * is protected by @c->bu_mutex.
+	 */
+	if (mutex_trylock(&c->bu_mutex))
+		bu = &c->bu;
+	else {
+		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+		if (!bu)
+			goto out_unlock;
+
+		bu->buf = NULL;
+		allocated = 1;
+	}
+
+	bu->buf_len = c->max_bu_buf_len;
+	data_key_init(c, &bu->key, inode->i_ino,
+		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+	err = ubifs_do_bulk_read(c, bu, page);
+
+	if (!allocated)
+		mutex_unlock(&c->bu_mutex);
+	else
+		kfree(bu);
+
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
-	return ret;
+	return err;
 }
 
 static int ubifs_readpage(struct file *file, struct page *page)
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 22993f867d1..f91b745908e 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -690,8 +690,9 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 	int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 
-	dbg_jnl("ino %lu, blk %u, len %d, key %s", key_inum(c, key),
-		key_block(c, key), len, DBGKEY(key));
+	dbg_jnl("ino %lu, blk %u, len %d, key %s",
+		(unsigned long)key_inum(c, key), key_block(c, key), len,
+		DBGKEY(key));
 	ubifs_assert(len <= UBIFS_BLOCK_SIZE);
 
 	data = kmalloc(dlen, GFP_NOFS);
@@ -1128,7 +1129,8 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 	ino_t inum = inode->i_ino;
 	unsigned int blk;
 
-	dbg_jnl("ino %lu, size %lld -> %lld", inum, old_size, new_size);
+	dbg_jnl("ino %lu, size %lld -> %lld",
+		(unsigned long)inum, old_size, new_size);
 	ubifs_assert(!ui->data_len);
 	ubifs_assert(S_ISREG(inode->i_mode));
 	ubifs_assert(mutex_is_locked(&ui->ui_mutex));
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index 9ee65086f62..3f1f16bc25c 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -345,7 +345,7 @@ static inline int key_type_flash(const struct ubifs_info *c, const void *k)
 {
 	const union ubifs_key *key = k;
 
-	return le32_to_cpu(key->u32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
+	return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
 }
 
 /**
@@ -416,7 +416,7 @@ static inline unsigned int key_block_flash(const struct ubifs_info *c,
 {
 	const union ubifs_key *key = k;
 
-	return le32_to_cpu(key->u32[1]) & UBIFS_S_KEY_BLOCK_MASK;
+	return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK;
 }
 
 /**
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index eed5a0025d6..a41434b4278 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -571,8 +571,6 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
 		/* We assume here that LEB zero is never an LPT LEB */
 		if (nnode->nbranch[iip].lnum)
 			return ubifs_get_pnode(c, nnode, iip);
-		else
-			return NULL;
 	}
 
 	/* Go up while can't go right */
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 02d3462f4d3..9bd5a43d452 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -105,7 +105,7 @@ int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
 	list_add_tail(&orphan->list, &c->orph_list);
 	list_add_tail(&orphan->new_list, &c->orph_new);
 	spin_unlock(&c->orphan_lock);
-	dbg_gen("ino %lu", inum);
+	dbg_gen("ino %lu", (unsigned long)inum);
 	return 0;
 }
@@ -132,14 +132,16 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
 		else {
 			if (o->dnext) {
 				spin_unlock(&c->orphan_lock);
-				dbg_gen("deleted twice ino %lu", inum);
+				dbg_gen("deleted twice ino %lu",
+					(unsigned long)inum);
 				return;
 			}
 			if (o->cnext) {
 				o->dnext = c->orph_dnext;
 				c->orph_dnext = o;
 				spin_unlock(&c->orphan_lock);
-				dbg_gen("delete later ino %lu", inum);
+				dbg_gen("delete later ino %lu",
+					(unsigned long)inum);
 				return;
 			}
 			rb_erase(p, &c->orph_tree);
@@ -151,12 +153,12 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
 			}
 			spin_unlock(&c->orphan_lock);
 			kfree(o);
-			dbg_gen("inum %lu", inum);
+			dbg_gen("inum %lu", (unsigned long)inum);
 			return;
 		}
 	}
 	spin_unlock(&c->orphan_lock);
-	dbg_err("missing orphan ino %lu", inum);
+	dbg_err("missing orphan ino %lu", (unsigned long)inum);
 	dbg_dump_stack();
 }
@@ -448,7 +450,7 @@ static void erase_deleted(struct ubifs_info *c)
 		rb_erase(&orphan->rb, &c->orph_tree);
 		list_del(&orphan->list);
 		c->tot_orphans -= 1;
-		dbg_gen("deleting orphan ino %lu", orphan->inum);
+		dbg_gen("deleting orphan ino %lu", (unsigned long)orphan->inum);
 		kfree(orphan);
 	}
 	c->orph_dnext = NULL;
@@ -536,8 +538,8 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
 	list_add_tail(&orphan->list, &c->orph_list);
 	orphan->dnext = c->orph_dnext;
 	c->orph_dnext = orphan;
-	dbg_mnt("ino %lu, new %d, tot %d",
-		inum, c->new_orphans, c->tot_orphans);
+	dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
+		c->new_orphans, c->tot_orphans);
 	return 0;
 }
@@ -609,7 +611,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 		n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
 		for (i = 0; i < n; i++) {
 			inum = le64_to_cpu(orph->inos[i]);
-			dbg_rcvry("deleting orphaned inode %lu", inum);
+			dbg_rcvry("deleting orphaned inode %lu",
+				  (unsigned long)inum);
 			err = ubifs_tnc_remove_ino(c, inum);
 			if (err)
 				return err;
@@ -840,8 +843,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 	if (inum != ci->last_ino) {
 		/* Lowest node type is the inode node, so it comes first */
 		if (key_type(c, &zbr->key) != UBIFS_INO_KEY)
-			ubifs_err("found orphan node ino %lu, type %d", inum,
-				  key_type(c, &zbr->key));
+			ubifs_err("found orphan node ino %lu, type %d",
+				  (unsigned long)inum, key_type(c, &zbr->key));
 		ci->last_ino = inum;
 		ci->tot_inos += 1;
 		err = ubifs_tnc_read_node(c, zbr, ci->node);
@@ -853,7 +856,8 @@ static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		/* Must be recorded as an orphan */
 		if (!dbg_find_check_orphan(&ci->root, inum) &&
 		    !dbg_find_orphan(c, inum)) {
-			ubifs_err("missing orphan, ino %lu", inum);
+			ubifs_err("missing orphan, ino %lu",
+				  (unsigned long)inum);
 			ci->missing += 1;
 		}
 	}
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 77d26c141cf..90acac603e6 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -168,12 +168,12 @@ static int write_rcvrd_mst_node(struct ubifs_info *c,
 				struct ubifs_mst_node *mst)
 {
 	int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
-	uint32_t save_flags;
+	__le32 save_flags;
 
 	dbg_rcvry("recovery");
 
 	save_flags = mst->flags;
-	mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY);
+	mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);
 
 	ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
 	err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
@@ -1435,13 +1435,13 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
 	err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
 	if (err)
 		goto out;
-	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", e->inum, lnum, offs,
-		  i_size, e->d_size);
+	dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ",
+		  (unsigned long)e->inum, lnum, offs, i_size, e->d_size);
 	return 0;
 
 out:
 	ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d",
-		   e->inum, e->i_size, e->d_size, err);
+		   (unsigned long)e->inum, e->i_size, e->d_size, err);
 	return err;
 }
@@ -1472,7 +1472,8 @@ int ubifs_recover_size(struct ubifs_info *c)
 					return err;
 				if (err == -ENOENT) {
 					/* Remove data nodes that have no inode */
-					dbg_rcvry("removing ino %lu", e->inum);
+					dbg_rcvry("removing ino %lu",
+						  (unsigned long)e->inum);
 					err = ubifs_tnc_remove_ino(c, e->inum);
 					if (err)
 						return err;
@@ -1493,8 +1494,8 @@ int ubifs_recover_size(struct ubifs_info *c)
 					return PTR_ERR(inode);
 				if (inode->i_size < e->d_size) {
 					dbg_rcvry("ino %lu size %lld -> %lld",
-						  e->inum, e->d_size,
-						  inode->i_size);
+						  (unsigned long)e->inum,
+						  e->d_size, inode->i_size);
 					inode->i_size = e->d_size;
 					ubifs_inode(inode)->ui_size = e->d_size;
 					e->inode = inode;
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 7399692af85..21f7d047c30 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1065,7 +1065,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
 	ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
 	dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
 		"highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
-		c->highest_inum);
+		(unsigned long)c->highest_inum);
 out:
 	destroy_replay_tree(c);
 	destroy_bud_list(c);
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 2bf753b3888..0f392351dc5 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -81,6 +81,7 @@ static int create_default_filesystem(struct ubifs_info *c)
 	int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
 	int min_leb_cnt = UBIFS_MIN_LEB_CNT;
 	uint64_t tmp64, main_bytes;
+	__le64 tmp_le64;
 
 	/* Some functions called from here depend on the @c->key_len filed */
 	c->key_len = UBIFS_SK_LEN;
@@ -295,10 +296,10 @@ static int create_default_filesystem(struct ubifs_info *c)
 	ino->ch.node_type = UBIFS_INO_NODE;
 	ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
 	ino->nlink = cpu_to_le32(2);
-	tmp = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
-	ino->atime_sec = tmp;
-	ino->ctime_sec = tmp;
-	ino->mtime_sec = tmp;
+	tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
+	ino->atime_sec = tmp_le64;
+	ino->ctime_sec = tmp_le64;
+	ino->mtime_sec = tmp_le64;
 	ino->atime_nsec = 0;
 	ino->ctime_nsec = 0;
 	ino->mtime_nsec = 0;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 8780efbf40a..d80b2aef42b 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -36,6 +36,12 @@
 #include <linux/mount.h>
 #include "ubifs.h"
 
+/*
+ * Maximum amount of memory we may 'kmalloc()' without worrying that we are
+ * allocating too much.
+ */
+#define UBIFS_KMALLOC_OK (128*1024)
+
 /* Slab cache for UBIFS inodes */
 struct kmem_cache *ubifs_inode_slab;
@@ -561,18 +567,11 @@ static int init_constants_early(struct ubifs_info *c)
 	 * calculations when reporting free space.
 	 */
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
-	/* Buffer size for bulk-reads */
-	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
-	if (c->bulk_read_buf_size > c->leb_size)
-		c->bulk_read_buf_size = c->leb_size;
-	if (c->bulk_read_buf_size > 128 * 1024) {
-		/* Check if we can kmalloc more than 128KiB */
-		void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);
-		kfree(try);
-		if (!try)
-			c->bulk_read_buf_size = 128 * 1024;
-	}
+
+	/* Buffer size for bulk-reads */
+	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->max_bu_buf_len > c->leb_size)
+		c->max_bu_buf_len = c->leb_size;
 	return 0;
 }
@@ -992,6 +991,34 @@ static void destroy_journal(struct ubifs_info *c)
 }
 
 /**
+ * bu_init - initialize bulk-read information.
+ * @c: UBIFS file-system description object
+ */
+static void bu_init(struct ubifs_info *c)
+{
+	ubifs_assert(c->bulk_read == 1);
+
+	if (c->bu.buf)
+		return; /* Already initialized */
+
+again:
+	c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
+	if (!c->bu.buf) {
+		if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
+			goto again;
+		}
+
+		/* Just disable bulk-read */
+		ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
+			   "disabling it", c->max_bu_buf_len);
+		c->mount_opts.bulk_read = 1;
+		c->bulk_read = 0;
+		return;
+	}
+}
+
+/**
  * mount_ubifs - mount UBIFS file-system.
  * @c: UBIFS file-system description object
 *
@@ -1059,6 +1086,13 @@ static int mount_ubifs(struct ubifs_info *c)
 		goto out_free;
 	}
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+
+	/*
+	 * We have to check all CRCs, even for data nodes, when we mount the FS
+	 * (specifically, when we are replaying).
+	 */
 	c->always_chk_crc = 1;
 
 	err = ubifs_read_superblock(c);
@@ -1289,6 +1323,7 @@ out_cbuf:
 out_dereg:
 	dbg_failure_mode_deregistration(c);
 out_free:
+	kfree(c->bu.buf);
 	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
@@ -1325,10 +1360,11 @@ static void ubifs_umount(struct ubifs_info *c)
 	kfree(c->cbuf);
 	kfree(c->rcvrd_mst_node);
 	kfree(c->mst_node);
+	kfree(c->bu.buf);
+	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
 	UBIFS_DBG(vfree(c->dbg_buf));
-	vfree(c->ileb_buf);
 	dbg_failure_mode_deregistration(c);
 }
@@ -1626,6 +1662,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 		ubifs_err("invalid or unknown remount parameter");
 		return err;
 	}
+
 	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
 		err = ubifs_remount_rw(c);
 		if (err)
@@ -1633,6 +1670,14 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 	} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
 		ubifs_remount_ro(c);
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+	else {
+		dbg_gen("disable bulk-read");
+		kfree(c->bu.buf);
+		c->bu.buf = NULL;
+	}
+
 	return 0;
 }
@@ -1723,6 +1768,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_init(&c->log_mutex);
 	mutex_init(&c->mst_mutex);
 	mutex_init(&c->umount_mutex);
+	mutex_init(&c->bu_mutex);
 	init_waitqueue_head(&c->cmt_wq);
 	c->buds = RB_ROOT;
 	c->old_idx = RB_ROOT;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index d27fd918b9c..6eef5344a14 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1501,7 +1501,12 @@ out:
  * @bu: bulk-read parameters and results
  *
  * Lookup consecutive data node keys for the same inode that reside
- * consecutively in the same LEB.
+ * consecutively in the same LEB. This function returns zero in case of success
+ * and a negative error code in case of failure.
+ *
+ * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
+ * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
+ * maxumum possible amount of nodes for bulk-read.
  */
 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
 {
@@ -2677,7 +2682,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
 	struct ubifs_dent_node *xent, *pxent = NULL;
 	struct qstr nm = { .name = NULL };
 
-	dbg_tnc("ino %lu", inum);
+	dbg_tnc("ino %lu", (unsigned long)inum);
 
 	/*
 	 * Walk all extended attribute entries and remove them together with
@@ -2697,7 +2702,8 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
 		}
 
 		xattr_inum = le64_to_cpu(xent->inum);
-		dbg_tnc("xent '%s', ino %lu", xent->name, xattr_inum);
+		dbg_tnc("xent '%s', ino %lu", xent->name,
+			(unsigned long)xattr_inum);
 
 		nm.name = xent->name;
 		nm.len = le16_to_cpu(xent->nlen);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index a7bd32fa15b..46b172560a0 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -753,7 +753,7 @@ struct ubifs_znode {
 };
 
 /**
- * struct bu_info - bulk-read information
+ * struct bu_info - bulk-read information.
  * @key: first data node key
 * @zbranch: zbranches of data nodes to bulk read
 * @buf: buffer to read into
@@ -969,7 +969,10 @@ struct ubifs_mount_opts {
 * @mst_node: master node
 * @mst_offs: offset of valid master node
 * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
- * @bulk_read_buf_size: buffer size for bulk-reads
+ *
+ * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+ * @bu: pre-allocated bulk-read information
 *
 * @log_lebs: number of logical eraseblocks in the log
 * @log_bytes: log size in bytes
@@ -1217,7 +1220,10 @@ struct ubifs_info {
 	struct ubifs_mst_node *mst_node;
 	int mst_offs;
 	struct mutex mst_mutex;
-	int bulk_read_buf_size;
+
+	int max_bu_buf_len;
+	struct mutex bu_mutex;
+	struct bu_info bu;
 
 	int log_lebs;
 	long long log_bytes;
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index d700dacdb10..c903130be7f 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -212,7 +212,7 @@ xfs_rename(
 	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
 		     (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) {
 		error = XFS_ERROR(EXDEV);
-		xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
+		xfs_rename_unlock4(inodes, XFS_ILOCK_EXCL);
 		xfs_trans_cancel(tp, cancel_flags);
 		goto std_return;
 	}