Diffstat (limited to 'fs')
36 files changed, 269 insertions, 132 deletions
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 4f77f3caee9..af6952e39a1 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -19,6 +19,7 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
 static void afs_fl_release_private(struct file_lock *fl);
 
 static struct workqueue_struct *afs_lock_manager;
+static DEFINE_MUTEX(afs_lock_manager_mutex);
 
 static struct file_lock_operations afs_lock_ops = {
 	.fl_copy_lock		= afs_fl_copy_lock,
@@ -30,12 +31,20 @@ static struct file_lock_operations afs_lock_ops = {
  */
 static int afs_init_lock_manager(void)
 {
+	int ret;
+
+	ret = 0;
 	if (!afs_lock_manager) {
-		afs_lock_manager = create_singlethread_workqueue("kafs_lockd");
-		if (!afs_lock_manager)
-			return -ENOMEM;
+		mutex_lock(&afs_lock_manager_mutex);
+		if (!afs_lock_manager) {
+			afs_lock_manager =
+				create_singlethread_workqueue("kafs_lockd");
+			if (!afs_lock_manager)
+				ret = -ENOMEM;
+		}
+		mutex_unlock(&afs_lock_manager_mutex);
 	}
-	return 0;
+	return ret;
 }
 
 /*
@@ -68,6 +77,29 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
 }
 
 /*
+ * grant one or more locks (readlocks are allowed to jump the queue if the
+ * first lock in the queue is itself a readlock)
+ * - the caller must hold the vnode lock
+ */
+static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
+{
+	struct file_lock *p, *_p;
+
+	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+	if (fl->fl_type == F_RDLCK) {
+		list_for_each_entry_safe(p, _p, &vnode->pending_locks,
+					 fl_u.afs.link) {
+			if (p->fl_type == F_RDLCK) {
+				p->fl_u.afs.state = AFS_LOCK_GRANTED;
+				list_move_tail(&p->fl_u.afs.link,
+					       &vnode->granted_locks);
+				wake_up(&p->fl_wait);
+			}
+		}
+	}
+}
+
+/*
  * do work for a lock, including:
  * - probing for a lock we're waiting on but didn't get immediately
  * - extending a lock that's close to timing out
@@ -172,8 +204,7 @@ void afs_lock_work(struct work_struct *work)
 				   struct file_lock, fl_u.afs.link) == fl) {
 			fl->fl_u.afs.state = ret;
 			if (ret == AFS_LOCK_GRANTED)
-				list_move_tail(&fl->fl_u.afs.link,
-					       &vnode->granted_locks);
+				afs_grant_locks(vnode, fl);
 			else
 				list_del_init(&fl->fl_u.afs.link);
 			wake_up(&fl->fl_wait);
@@ -258,49 +289,50 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
 
 	spin_lock(&vnode->lock);
 
-	if (list_empty(&vnode->pending_locks)) {
-		/* if there's no-one else with a lock on this vnode, then we
-		 * need to ask the server for a lock */
-		if (list_empty(&vnode->granted_locks)) {
-			_debug("not locked");
-			ASSERTCMP(vnode->flags &
-				  ((1 << AFS_VNODE_LOCKING) |
-				   (1 << AFS_VNODE_READLOCKED) |
-				   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
-			list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-			set_bit(AFS_VNODE_LOCKING, &vnode->flags);
-			spin_unlock(&vnode->lock);
+	/* if we've already got a readlock on the server then we can instantly
+	 * grant another readlock, irrespective of whether there are any
+	 * pending writelocks */
+	if (type == AFS_LOCK_READ &&
+	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+		_debug("instant readlock");
+		ASSERTCMP(vnode->flags &
+			  ((1 << AFS_VNODE_LOCKING) |
+			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+		ASSERT(!list_empty(&vnode->granted_locks));
+		goto sharing_existing_lock;
+	}
 
-			ret = afs_vnode_set_lock(vnode, key, type);
-			clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
-			switch (ret) {
-			case 0:
-				goto acquired_server_lock;
-			case -EWOULDBLOCK:
-				spin_lock(&vnode->lock);
-				ASSERT(list_empty(&vnode->granted_locks));
-				ASSERTCMP(vnode->pending_locks.next, ==,
-					  &fl->fl_u.afs.link);
-				goto wait;
-			default:
-				spin_lock(&vnode->lock);
-				list_del_init(&fl->fl_u.afs.link);
-				spin_unlock(&vnode->lock);
-				goto error;
-			}
-		}
+	/* if there's no-one else with a lock on this vnode, then we need to
+	 * ask the server for a lock */
+	if (list_empty(&vnode->pending_locks) &&
+	    list_empty(&vnode->granted_locks)) {
+		_debug("not locked");
+		ASSERTCMP(vnode->flags &
+			  ((1 << AFS_VNODE_LOCKING) |
+			   (1 << AFS_VNODE_READLOCKED) |
+			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
+		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
+		spin_unlock(&vnode->lock);
 
-		/* if we've already got a readlock on the server and no waiting
-		 * writelocks, then we might be able to instantly grant another
-		 * readlock */
-		if (type == AFS_LOCK_READ &&
-		    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
-			_debug("instant readlock");
-			ASSERTCMP(vnode->flags &
-				  ((1 << AFS_VNODE_LOCKING) |
-				   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
-			ASSERT(!list_empty(&vnode->granted_locks));
-			goto sharing_existing_lock;
+		ret = afs_vnode_set_lock(vnode, key, type);
+		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+		switch (ret) {
+		case 0:
+			_debug("acquired");
+			goto acquired_server_lock;
+		case -EWOULDBLOCK:
+			_debug("would block");
+			spin_lock(&vnode->lock);
+			ASSERT(list_empty(&vnode->granted_locks));
+			ASSERTCMP(vnode->pending_locks.next, ==,
+				  &fl->fl_u.afs.link);
+			goto wait;
+		default:
+			spin_lock(&vnode->lock);
+			list_del_init(&fl->fl_u.afs.link);
+			spin_unlock(&vnode->lock);
+			goto error;
 		}
 	}
diff --git a/fs/bio.c b/fs/bio.c
@@ -230,7 +230,7 @@ void bio_put(struct bio *bio)
 	}
 }
 
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -238,7 +238,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
 	return bio->bi_phys_segments;
 }
 
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -257,7 +257,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
  */
 void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+	struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
 
 	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
 		bio_src->bi_max_vecs * sizeof(struct bio_vec));
@@ -303,7 +303,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
  */
 int bio_get_nr_vecs(struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
 	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -315,7 +315,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	return nr_pages;
 }
 
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
 			  unsigned short max_sectors)
 {
@@ -425,7 +425,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
  *	smaller than PAGE_SIZE, so it is always possible to add a single
  *	page to an empty bio. This should only be used by REQ_PC bios.
  */
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
 	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -523,7 +523,7 @@ int bio_uncopy_user(struct bio *bio)
  *	to/from kernel pages as necessary. Must be paired with
  *	call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
 			  unsigned int len, int write_to_vm)
 {
 	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -600,7 +600,7 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      struct sg_iovec *iov, int iov_count,
 				      int write_to_vm)
@@ -712,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 /**
  *	bio_map_user	-	map user address into bio
- *	@q: the request_queue_t for the bio
+ *	@q: the struct request_queue for the bio
  *	@bdev: destination block device
  *	@uaddr: start of user address
  *	@len: length in bytes
@@ -721,7 +721,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
  *	Map the user space address into a bio suitable for io to a block
  *	device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
 	struct sg_iovec iov;
@@ -734,7 +734,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 
 /**
  *	bio_map_user_iov - map user sg_iovec table into bio
- *	@q: the request_queue_t for the bio
+ *	@q: the struct request_queue for the bio
  *	@bdev: destination block device
  *	@iov: the iovec.
  *	@iov_count: number of elements in the iovec
@@ -743,7 +743,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
  *	Map the user space address into a bio suitable for io to a block
  *	device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 			     struct sg_iovec *iov, int iov_count,
 			     int write_to_vm)
 {
@@ -808,7 +808,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
 }
 
 
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
 				  unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long kaddr = (unsigned long)data;
@@ -847,7 +847,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 
 /**
  *	bio_map_kern	-	map kernel address into bio
- *	@q: the request_queue_t for the bio
+ *	@q: the struct request_queue for the bio
  *	@data: pointer to buffer to map
  *	@len: length in bytes
  *	@gfp_mask: allocation flags for bio allocation
@@ -855,7 +855,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
  *	Map the kernel address into a bio suitable for io to a block
  *	device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 			 gfp_t gfp_mask)
 {
 	struct bio *bio;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index e440a7b95d0..2bc1428d621 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -115,6 +115,10 @@
 #include <linux/dvb/video.h>
 #include <linux/lp.h>
 
+#ifdef CONFIG_SPARC
+#include <asm/fbio.h>
+#endif
+
 static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
 			      unsigned long arg, struct file *f)
 {
@@ -3493,6 +3497,22 @@ IGNORE_IOCTL(VFAT_IOCTL_READDIR_SHORT32)
 /* loop */
 IGNORE_IOCTL(LOOP_CLR_FD)
+
+#ifdef CONFIG_SPARC
+/* Sparc framebuffers, handled in sbusfb_compat_ioctl() */
+IGNORE_IOCTL(FBIOGTYPE)
+IGNORE_IOCTL(FBIOSATTR)
+IGNORE_IOCTL(FBIOGATTR)
+IGNORE_IOCTL(FBIOSVIDEO)
+IGNORE_IOCTL(FBIOGVIDEO)
+IGNORE_IOCTL(FBIOSCURPOS)
+IGNORE_IOCTL(FBIOGCURPOS)
+IGNORE_IOCTL(FBIOGCURMAX)
+IGNORE_IOCTL(FBIOPUTCMAP32)
+IGNORE_IOCTL(FBIOGETCMAP32)
+IGNORE_IOCTL(FBIOSCURSOR32)
+IGNORE_IOCTL(FBIOGCURSOR32)
+#endif
 };
 
 #define IOCTL_HASHSIZE 256
diff --git a/fs/dcookies.c b/fs/dcookies.c
index c1208f53bd7..792cbf55fa9 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -20,6 +20,7 @@
 #include <linux/capability.h>
 #include <linux/dcache.h>
 #include <linux/mm.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/dcookies.h>
 #include <linux/mutex.h>
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 68579a0ed3f..639a32c3c9c 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -580,7 +580,7 @@ static int ext2_check_descriptors (struct super_block * sb)
 			return 0;
 		}
 		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
 		    last_block)
 		{
 			ext2_error (sb, "ext2_check_descriptors",
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f0614e3f1fe..22cfdd61c06 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1221,7 +1221,7 @@ static int ext3_check_descriptors (struct super_block * sb)
 			return 0;
 		}
 		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group >
+		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
 		    last_block)
 		{
 			ext3_error (sb, "ext3_check_descriptors",
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 750c46f7d89..78beb096f57 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1544,7 +1544,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
 
 static void
 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
-			__u32 len, __u32 start, int type)
+			__u32 len, ext4_fsblk_t start, int type)
 {
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 75adbb64e02..4550b83ab1c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1283,7 +1283,7 @@ static int ext4_check_descriptors (struct super_block * sb)
 		}
 		inode_table = ext4_inode_table(sb, gdp);
 		if (inode_table < first_block ||
-		    inode_table + sbi->s_itb_per_group > last_block)
+		    inode_table + sbi->s_itb_per_group - 1 > last_block)
 		{
 			ext4_error (sb, "ext4_check_descriptors",
 				    "Inode table for group %d"
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 1a5e8e893d7..77342113011 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -508,7 +508,7 @@ static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
 	 */
 	if (!sdp->sd_args.ar_localflocks)
 		return -EINVAL;
-	return setlease(file, arg, fl);
+	return generic_setlease(file, arg, fl);
 }
 
 /**
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 95c72aa8186..043b470fd3b 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -846,6 +846,15 @@ root_found:
 		goto out_no_root;
 	if (!inode->i_op)
 		goto out_bad_root;
+
+	/* Make sure the root inode is a directory */
+	if (!S_ISDIR(inode->i_mode)) {
+		printk(KERN_WARNING
+			"isofs_fill_super: root inode is not a directory. "
+			"Corrupted media?\n");
+		goto out_iput;
+	}
+
 	/* get the root dentry */
 	s->s_root = d_alloc_root(inode);
 	if (!(s->s_root))
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index b3efa4536cc..a21e4bc5444 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -335,10 +335,10 @@ static void nlmsvc_freegrantargs(struct nlm_rqst *call)
 /*
  * Deferred lock request handling for non-blocking lock
  */
-static u32
+static __be32
 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
 {
-	u32 status = nlm_lck_denied_nolocks;
+	__be32 status = nlm_lck_denied_nolocks;
 
 	block->b_flags |= B_QUEUED;
 
@@ -352,7 +352,7 @@ nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
 		status = nlm_drop_reply;
 	}
 	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
-		block, block->b_flags, status);
+		block, block->b_flags, ntohl(status));
 
 	return status;
 }
diff --git a/fs/locks.c b/fs/locks.c
index 31051063724..50857d2d340 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1324,7 +1324,7 @@ int fcntl_getlease(struct file *filp)
 }
 
 /**
- *	setlease	-	sets a lease on an open file
+ *	generic_setlease	-	sets a lease on an open file
  *	@filp: file pointer
  *	@arg: type of lease to obtain
  *	@flp: input - file_lock to use, output - file_lock inserted
@@ -1334,7 +1334,7 @@ int fcntl_getlease(struct file *filp)
  *
  *	Called with kernel lock held.
  */
-int setlease(struct file *filp, long arg, struct file_lock **flp)
+int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
 	struct file_lock *fl, **before, **my_before = NULL, *lease;
 	struct dentry *dentry = filp->f_path.dentry;
@@ -1419,7 +1419,7 @@ int setlease(struct file *filp, long arg, struct file_lock **flp)
 out:
 	return error;
 }
-EXPORT_SYMBOL(setlease);
+EXPORT_SYMBOL(generic_setlease);
 
 /**
  *	vfs_setlease - sets a lease on an open file
@@ -1456,7 +1456,7 @@ int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
 	if (filp->f_op && filp->f_op->setlease)
 		error = filp->f_op->setlease(filp, arg, lease);
 	else
-		error = setlease(filp, arg, lease);
+		error = generic_setlease(filp, arg, lease);
 	unlock_kernel();
 
 	return error;
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 551e0bac7aa..df6d60bdfcd 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -726,9 +726,6 @@ ncp_del_file_or_subdir2(struct ncp_server *server,
 	__le32 dirent;
 
 	if (!inode) {
-#ifdef CONFIG_NCPFS_DEBUGDENTRY
-		PRINTK("ncpfs: ncpdel2: dentry->d_inode == NULL\n");
-#endif
 		return 0xFF;	/* Any error */
 	}
 	volnum = NCP_FINFO(inode)->volNumber;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 2d295dda4c1..cba899a3494 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -564,9 +564,10 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
 	/* flags */
 	err = get_int(&mesg, &an_int);
-	if (err == -ENOENT)
+	if (err == -ENOENT) {
+		err = 0;
 		set_bit(CACHE_NEGATIVE, &exp.h.flags);
-	else {
+	} else {
 		if (err || an_int < 0)
 			goto out;
 		exp.ex_flags= an_int;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 2cf9a9a2d89..2ccffde81b8 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -138,7 +138,7 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
 	char idstr[11];
 
 	qword_add(bpp, blen, ent->authname);
-	snprintf(idstr, sizeof(idstr), "%d", ent->id);
+	snprintf(idstr, sizeof(idstr), "%u", ent->id);
 	qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user");
 	qword_add(bpp, blen, idstr);
 
@@ -165,7 +165,7 @@ idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
 		return 0;
 	}
 	ent = container_of(h, struct ent, h);
-	seq_printf(m, "%s %s %d", ent->authname,
+	seq_printf(m, "%s %s %u", ent->authname,
 			ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
 			ent->id);
 	if (test_bit(CACHE_VALID, &h->flags))
@@ -349,7 +349,7 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
 			ent->type == IDMAP_TYPE_GROUP ? "group" : "user",
 			ent->name);
 	if (test_bit(CACHE_VALID, &h->flags))
-		seq_printf(m, " %d", ent->id);
+		seq_printf(m, " %u", ent->id);
 	seq_printf(m, "\n");
 	return 0;
 }
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3c627128e20..29b7e63cb32 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -100,7 +100,15 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
 		status = nfsd_create_v3(rqstp, current_fh, open->op_fname.data,
 					open->op_fname.len, &open->op_iattr,
 					&resfh, open->op_createmode,
-					(u32 *)open->op_verf.data, &open->op_truncate, &created);
+					(u32 *)open->op_verf.data,
+					&open->op_truncate, &created);
+
+		/* If we ever decide to use different attrs to store the
+		 * verifier in nfsd_create_v3, then we'll need to change this
+		 */
+		if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
+			open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
+						FATTR4_WORD1_TIME_MODIFY);
 	} else {
 		status = nfsd_lookup(rqstp, current_fh,
 				     open->op_fname.data, open->op_fname.len, &resfh);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b3d55c6747f..8ef0964179b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2450,7 +2450,7 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
 }
 
 static void
-nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, int nfserr,
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
 		     struct nfsd4_secinfo *secinfo)
 {
 	int i = 0;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index ee96a897a29..a0c2b253818 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1309,7 +1309,10 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
 
 	if (createmode == NFS3_CREATE_EXCLUSIVE) {
 		/* solaris7 gets confused (bugid 4218508) if these have
-		 * the high bit set, so just clear the high bits.
+		 * the high bit set, so just clear the high bits. If this is
+		 * ever changed to use different attrs for storing the
+		 * verifier, then do_open_lookup() will also need to be fixed
+		 * accordingly.
 		 */
 		v_mtime = verifier[0]&0x7fffffff;
 		v_atime = verifier[1]&0x7fffffff;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 5727cd18302..c4034f693e7 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2153,7 +2153,7 @@ static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
 	src = buf->ops->map(pipe, buf, 1);
 	dst = kmap_atomic(page, KM_USER1);
 	memcpy(dst + offset, src + buf->offset, count);
-	kunmap_atomic(page, KM_USER1);
+	kunmap_atomic(dst, KM_USER1);
 	buf->ops->unmap(pipe, buf, src);
 
 	copied = ocfs2_write_end(file, file->f_mapping, sd->pos, count, count,
diff --git a/fs/open.c b/fs/open.c
index a6b054edacb..1d9e5e98bf4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -256,24 +256,26 @@ static long do_sys_truncate(const char __user * path, loff_t length)
 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
 		goto dput_and_out;
 
-	/*
-	 * Make sure that there are no leases.
-	 */
-	error = break_lease(inode, FMODE_WRITE);
+	error = get_write_access(inode);
 	if (error)
 		goto dput_and_out;
 
-	error = get_write_access(inode);
+	/*
+	 * Make sure that there are no leases. get_write_access() protects
+	 * against the truncate racing with a lease-granting setlease().
+	 */
+	error = break_lease(inode, FMODE_WRITE);
 	if (error)
-		goto dput_and_out;
+		goto put_write_and_out;
 
 	error = locks_verify_truncate(inode, NULL, length);
 	if (!error) {
 		DQUOT_INIT(inode);
 		error = do_truncate(nd.dentry, length, 0, NULL);
 	}
-	put_write_access(inode);
+put_write_and_out:
+	put_write_access(inode);
 dput_and_out:
 	path_release(&nd);
 out:
@@ -403,7 +405,7 @@ asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
 	if (inode->i_op && inode->i_op->fallocate)
 		ret = inode->i_op->fallocate(inode, mode, offset, len);
 	else
-		ret = -ENOSYS;
+		ret = -EOPNOTSUPP;
 
 out_fput:
 	fput(file);
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 4ccec4cd136..5567ec0d03a 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -203,6 +203,7 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
 	Sector sect;
 	struct solaris_x86_vtoc *v;
 	int i;
+	short max_nparts;
 
 	v = (struct solaris_x86_vtoc *)read_dev_sector(bdev, offset+1, &sect);
 	if (!v)
@@ -218,7 +219,9 @@ parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
 		put_dev_sector(sect);
 		return;
 	}
-	for (i=0; i<SOLARIS_X86_NUMSLICE && state->next<state->limit; i++) {
+	/* Ensure we can handle previous case of VTOC with 8 entries gracefully */
+	max_nparts = le16_to_cpu (v->v_nparts) > 8 ? SOLARIS_X86_NUMSLICE : 8;
+	for (i=0; i<max_nparts && state->next<state->limit; i++) {
 		struct solaris_x86_slice *s = &v->v_slice[i];
 		if (s->s_size == 0)
 			continue;
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
index 123f8b46c8b..794118da4ef 100644
--- a/fs/partitions/sun.c
+++ b/fs/partitions/sun.c
@@ -19,34 +19,47 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
 	Sector sect;
 	struct sun_disklabel {
 		unsigned char info[128];   /* Informative text string */
-		unsigned char spare0[14];
-		struct sun_info {
-			unsigned char spare1;
-			unsigned char id;
-			unsigned char spare2;
-			unsigned char flags;
-		} infos[8];
-		unsigned char spare[246];  /* Boot information etc. */
+		struct sun_vtoc {
+			__be32 version;     /* Layout version */
+			char   volume[8];   /* Volume name */
+			__be16 nparts;      /* Number of partitions */
+			struct sun_info {   /* Partition hdrs, sec 2 */
+				__be16 id;
+				__be16 flags;
+			} infos[8];
+			__be16 padding;     /* Alignment padding */
+			__be32 bootinfo[3]; /* Info needed by mboot */
+			__be32 sanity;      /* To verify vtoc sanity */
+			__be32 reserved[10]; /* Free space */
+			__be32 timestamp[8]; /* Partition timestamp */
+		} vtoc;
+		__be32 write_reinstruct;   /* sectors to skip, writes */
+		__be32 read_reinstruct;    /* sectors to skip, reads */
+		unsigned char spare[148];  /* Padding */
 		__be16 rspeed;     /* Disk rotational speed */
 		__be16 pcylcount;  /* Physical cylinder count */
 		__be16 sparecyl;   /* extra sects per cylinder */
-		unsigned char spare2[4];   /* More magic... */
+		__be16 obs1;       /* gap1 */
+		__be16 obs2;       /* gap2 */
 		__be16 ilfact;     /* Interleave factor */
 		__be16 ncyl;       /* Data cylinder count */
 		__be16 nacyl;      /* Alt. cylinder count */
 		__be16 ntrks;      /* Tracks per cylinder */
 		__be16 nsect;      /* Sectors per track */
-		unsigned char spare3[4];   /* Even more magic... */
+		__be16 obs3;       /* bhead - Label head offset */
+		__be16 obs4;       /* ppart - Physical Partition */
 		struct sun_partition {
 			__be32 start_cylinder;
 			__be32 num_sectors;
 		} partitions[8];
 		__be16 magic;      /* Magic number */
 		__be16 csum;       /* Label xor'd checksum */
-	} * label;
+	} * label;
 	struct sun_partition *p;
 	unsigned long spc;
 	char b[BDEVNAME_SIZE];
+	int use_vtoc;
+	int nparts;
 
 	label = (struct sun_disklabel *)read_dev_sector(bdev, 0, &sect);
 	if (!label)
@@ -70,9 +83,22 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
 		return 0;
 	}
 
-	/* All Sun disks have 8 partition entries */
+	/* Check to see if we can use the VTOC table */
+	use_vtoc = ((be32_to_cpu(label->vtoc.sanity) == SUN_VTOC_SANITY) &&
+		    (be32_to_cpu(label->vtoc.version) == 1) &&
+		    (be16_to_cpu(label->vtoc.nparts) <= 8));
+
+	/* Use 8 partition entries if not specified in validated VTOC */
+	nparts = (use_vtoc) ? be16_to_cpu(label->vtoc.nparts) : 8;
+
+	/*
+	 * So that old Linux-Sun partitions continue to work,
+	 * alow the VTOC to be used under the additional condition ...
+	 */
+	use_vtoc = use_vtoc || !(label->vtoc.sanity |
+				 label->vtoc.version | label->vtoc.nparts);
 	spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
-	for (i = 0; i < 8; i++, p++) {
+	for (i = 0; i < nparts; i++, p++) {
 		unsigned long st_sector;
 		unsigned int num_sectors;
 
@@ -81,10 +107,12 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
 		if (num_sectors) {
 			put_partition(state, slot, st_sector, num_sectors);
 			state->parts[slot].flags = 0;
-			if (label->infos[i].id == LINUX_RAID_PARTITION)
-				state->parts[slot].flags |= ADDPART_FLAG_RAID;
-			if (label->infos[i].id == SUN_WHOLE_DISK)
-				state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;
+			if (use_vtoc) {
+				if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION)
+					state->parts[slot].flags |= ADDPART_FLAG_RAID;
+				else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK)
+					state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;
+			}
 		}
 		slot++;
 	}
diff --git a/fs/partitions/sun.h b/fs/partitions/sun.h
index b1b19fda7b2..7f864d1f86d 100644
--- a/fs/partitions/sun.h
+++ b/fs/partitions/sun.h
@@ -3,5 +3,6 @@
  */
 
 #define SUN_LABEL_MAGIC		0xDABE
+#define SUN_VTOC_SANITY		0x600DDEEE
 
 int sun_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/fs/pipe.c b/fs/pipe.c
index d007830d9c8..6b3d91a691b 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -255,7 +255,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
 
 /**
  * generic_pipe_buf_confirm - verify contents of the pipe buffer
- * @pipe:	the pipe that the buffer belongs to
+ * @info:	the pipe that the buffer belongs to
  * @buf:	the buffer to confirm
  *
  * Description:
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3c77d5a64e7..19489b0d555 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -927,7 +927,7 @@ static const struct file_operations proc_pid_sched_operations = {
 	.read		= seq_read,
 	.write		= sched_write,
 	.llseek		= seq_lseek,
-	.release	= seq_release,
+	.release	= single_release,
 };
 
 #endif
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 94e2c1adf18..a5b0dfd89a1 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -386,6 +386,19 @@ static const struct file_operations proc_reg_file_ops = {
 	.release	= proc_reg_release,
 };
 
+#ifdef CONFIG_COMPAT
+static const struct file_operations proc_reg_file_ops_no_compat = {
+	.llseek		= proc_reg_llseek,
+	.read		= proc_reg_read,
+	.write		= proc_reg_write,
+	.poll		= proc_reg_poll,
+	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
+	.mmap		= proc_reg_mmap,
+	.open		= proc_reg_open,
+	.release	= proc_reg_release,
+};
+#endif
+
 struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
 				struct proc_dir_entry *de)
 {
@@ -413,8 +426,15 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
 		if (de->proc_iops)
 			inode->i_op = de->proc_iops;
 		if (de->proc_fops) {
-			if (S_ISREG(inode->i_mode))
-				inode->i_fop = &proc_reg_file_ops;
+			if (S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_COMPAT
+				if (!de->proc_fops->compat_ioctl)
+					inode->i_fop =
+						&proc_reg_file_ops_no_compat;
+				else
+#endif
+					inode->i_fop = &proc_reg_file_ops;
+			}
 			else
 				inode->i_fop = de->proc_fops;
 		}
diff --git a/fs/quota.c b/fs/quota.c
index e6577ac15a6..99b24b52bfc 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -387,7 +387,7 @@ asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special, qid_t
 	return ret;
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
+#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT)
 /*
  * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
  * and is necessary due to alignment problems.
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index cad2b7ace63..237fe8b8e81 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -295,5 +295,10 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
  */
 int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
+	if (!(vma->vm_flags & VM_SHARED))
+		return -ENOSYS;
+
+	file_accessed(file);
+	vma->vm_ops = &generic_file_vm_ops;
+	return 0;
 }
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index b6f12593c39..981027d1187 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1042,7 +1042,8 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
 		pos = I_UNFM_NUM(&s_ih);
 
 		while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > n_new_file_length) {
-			__u32 *unfm, block;
+			__le32 *unfm;
+			__u32 block;
 
 			/* Each unformatted block deletion may involve one additional
 			 * bitmap block into the transaction, thereby the initial
@@ -1052,7 +1053,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
 				break;
 			}
 
-			unfm = (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1;
+			unfm = (__le32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1;
 			block = get_block_num(unfm, 0);
 
 			if (block != 0) {
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 3b07f26d984..7b941abbcde 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -320,7 +320,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
 
 	if (sizemask != sizeof(sigset_t) ||
 	    copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
-		return error = -EINVAL;
+		return -EINVAL;
 	sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
 	signotset(&sigmask);
 
diff --git a/fs/splice.c b/fs/splice.c
index 0a097321808..c010a72ca2d 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -164,7 +164,7 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
  * @spd:	data to fill
  *
  * Description:
- *    @spd contains a map of pages and len/offset tupples, a long with
+ *    @spd contains a map of pages and len/offset tuples, along with
  *    the struct pipe_buf_operations associated with these pages. This
  *    function will link that data to the pipe.
  *
@@ -1000,7 +1000,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
  * Description:
  *    This is a special case helper to splice directly between two
  *    points, without requiring an explicit pipe. Internally an allocated
- *    pipe is cached in the process, and reused during the life time of
+ *    pipe is cached in the process, and reused during the lifetime of
  *    that process.
  *
  */
diff --git a/fs/timerfd.c b/fs/timerfd.c
index af9eca5c023..61983f3b107 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -95,7 +95,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
 {
 	struct timerfd_ctx *ctx = file->private_data;
 	ssize_t res;
-	u32 ticks = 0;
+	u64 ticks = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
 	if (count < sizeof(ticks))
@@ -130,7 +130,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
 			 * callback to avoid DoS attacks specifying a very
 			 * short timer period.
 			 */
-			ticks = (u32)
+			ticks = (u64)
 				hrtimer_forward(&ctx->tmr,
 						hrtimer_cb_get_time(&ctx->tmr),
 						ctx->tintv);
@@ -140,7 +140,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
 	}
 	spin_unlock_irq(&ctx->wqh.lock);
 	if (ticks)
-		res = put_user(ticks, buf) ? -EFAULT: sizeof(ticks);
+		res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
 	return res;
 }
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 0d2c41666cd..1652b2c665b 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1127,13 +1127,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 	}
 
 	inode->i_uid = le32_to_cpu(fe->uid);
-	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
-					UDF_FLAG_UID_IGNORE))
+	if (inode->i_uid == -1 ||
+	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
+	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
 		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
 
 	inode->i_gid = le32_to_cpu(fe->gid);
-	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
-					UDF_FLAG_GID_IGNORE))
+	if (inode->i_gid == -1 ||
+	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
+	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
 
 	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 7b30964665d..382be7be5ae 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -366,11 +366,13 @@ static int udf_parse_options(char *options, struct udf_options *uopt)
 			if (match_int(args, &option))
 				return 0;
 			uopt->gid = option;
+			uopt->flags |= (1 << UDF_FLAG_GID_SET);
 			break;
 		case Opt_uid:
 			if (match_int(args, &option))
 				return 0;
 			uopt->uid = option;
+			uopt->flags |= (1 << UDF_FLAG_UID_SET);
 			break;
 		case Opt_umask:
 			if (match_octal(args, &option))
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 3e937d3fb8f..3c2982017c6 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -24,6 +24,8 @@
 #define UDF_FLAG_UID_IGNORE		12 /* use sb uid instead of on disk uid */
 #define UDF_FLAG_GID_FORGET		13
 #define UDF_FLAG_GID_IGNORE		14
+#define UDF_FLAG_UID_SET		15
+#define UDF_FLAG_GID_SET		16
 
 #define UDF_PART_FLAG_UNALLOC_BITMAP	0x0001
 #define UDF_PART_FLAG_UNALLOC_TABLE	0x0002
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 141cf15067c..42319d75aaa 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -139,7 +139,7 @@ STATIC int xfs_inumbers_fmt_compat(
 	long		count,
 	long		*written)
 {
-	compat_xfs_inogrp_t *p32 = ubuffer;
+	compat_xfs_inogrp_t __user *p32 = ubuffer;
 	long i;
 
 	for (i = 0; i < count; i++) {
@@ -444,7 +444,7 @@ xfs_compat_ioctl(
 	case XFS_IOC_FSINUMBERS_32:
 		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_bulkreq);
 		return xfs_ioc_bulkstat_compat(XFS_BHVTOI(VNHEAD(vp))->i_mount,
-				cmd, (void*)arg);
+				cmd, (void __user*)arg);
 	case XFS_IOC_FD_TO_HANDLE_32:
 	case XFS_IOC_PATH_TO_HANDLE_32:
 	case XFS_IOC_PATH_TO_FSHANDLE_32:
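
For readers who want the end result of the fs/afs/flock.c initialisation change in one place rather than spread over the interleaved +/- runs above, the post-patch function reassembles to roughly the following. This is a sketch put together from the hunks in this diff; the include lines are assumed for context, everything else is taken from the patch itself.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);

/* Serialised, double-checked creation of the "kafs_lockd" workqueue:
 * the unlocked test is only a fast path, and it is repeated under the
 * mutex so that two racing callers cannot both create the workqueue. */
static int afs_init_lock_manager(void)
{
	int ret;

	ret = 0;
	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		if (!afs_lock_manager) {
			afs_lock_manager =
				create_singlethread_workqueue("kafs_lockd");
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}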