From 66c006a55137cc51f8761549e2024a7593180d27 Mon Sep 17 00:00:00 2001 From: Nishanth Aravamudan Date: Mon, 7 Nov 2005 01:01:17 -0800 Subject: [PATCH] drivers/md: fix-up schedule_timeout() usage Use schedule_timeout_interruptible() instead of set_current_state()/schedule_timeout() to reduce kernel size. Signed-off-by: Nishanth Aravamudan Cc: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 3 +-- drivers/md/raid6main.c | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6497295ebfb..1223e98ecd7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1587,8 +1587,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i /* make sure we don't swamp the stripe cache if someone else * is trying to get access */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); + schedule_timeout_uninterruptible(1); } bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0); spin_lock(&sh->lock); diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 6437a95ffc1..77578694770 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -1746,8 +1746,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i /* make sure we don't swamp the stripe cache if someone else * is trying to get access */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); + schedule_timeout_uninterruptible(1); } bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0); spin_lock(&sh->lock); -- cgit v1.2.3 From 733482e445ca4450cf41381b1c95e2b8c7145114 Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Tue, 8 Nov 2005 21:34:55 -0800 Subject: [PATCH] changing CONFIG_LOCALVERSION rebuilds too much, for no good reason This patch removes almost all inclusions of linux/version.h. The 3 #defines are unused in most of the touched files. A few drivers use the simple KERNEL_VERSION(a,b,c) macro, which is unfortunatly in linux/version.h. There are also lots of #ifdef for long obsolete kernels, this was not touched. In a few places, the linux/version.h include was move to where the LINUX_VERSION_CODE was used. quilt vi `find * -type f -name "*.[ch]"|xargs grep -El '(UTS_RELEASE|LINUX_VERSION_CODE|KERNEL_VERSION|linux/version.h)'|grep -Ev '(/(boot|coda|drm)/|~$)'` search pattern: /UTS_RELEASE\|LINUX_VERSION_CODE\|KERNEL_VERSION\|linux\/\(utsname\|version\).h Signed-off-by: Olaf Hering Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 01654fcabc5..e59694bc575 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -21,7 +21,6 @@ */ #include -#include #include #include #include -- cgit v1.2.3 From 4e5314b56a7ea11c7a5f2b8418992b2f49648a25 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:22 -0800 Subject: [PATCH] md: better handling of readerrors with raid5. This patch changes the behaviour of raid5 when it gets a read error. Instead of just failing the device, it tried to find out what should have been there, and writes it over the bad block. For some media-errors, this has a reasonable chance of fixing the error. If the write succeeds, and a subsequent read succeeds as well, raid5 decided the address is OK and conitnues. Instead of failing a drive on read-error, we attempt to re-write the block, and then re-read. 
If that all works, we allow the device to remain in the array. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1223e98ecd7..8cf1ae8b8a7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -349,7 +349,7 @@ static void shrink_stripes(raid5_conf_t *conf) conf->slab_cache = NULL; } -static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done, +static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, int error) { struct stripe_head *sh = bi->bi_private; @@ -401,10 +401,27 @@ static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done, } #else set_bit(R5_UPTODATE, &sh->dev[i].flags); -#endif +#endif + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + printk("R5: read error corrected!!\n"); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + } } else { - md_error(conf->mddev, conf->disks[i].rdev); clear_bit(R5_UPTODATE, &sh->dev[i].flags); + if (conf->mddev->degraded) { + printk("R5: read error not correctable.\n"); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + md_error(conf->mddev, conf->disks[i].rdev); + } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { + /* Oh, no!!! */ + printk("R5: read error NOT corrected!!\n"); + clear_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReWrite, &sh->dev[i].flags); + md_error(conf->mddev, conf->disks[i].rdev); + } else + set_bit(R5_ReadError, &sh->dev[i].flags); } rdev_dec_pending(conf->disks[i].rdev, conf->mddev); #if 0 @@ -966,6 +983,12 @@ static void handle_stripe(struct stripe_head *sh) if (dev->written) written++; rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ if (!rdev || !rdev->in_sync) { + /* The ReadError flag wil just be confusing now */ + clear_bit(R5_ReadError, &dev->flags); + clear_bit(R5_ReWrite, &dev->flags); + } + if (!rdev || !rdev->in_sync + || test_bit(R5_ReadError, &dev->flags)) { failed++; failed_num = i; } else @@ -980,6 +1003,14 @@ static void handle_stripe(struct stripe_head *sh) if (failed > 1 && to_read+to_write+written) { for (i=disks; i--; ) { int bitmap_end = 0; + + if (test_bit(R5_ReadError, &sh->dev[i].flags)) { + mdk_rdev_t *rdev = conf->disks[i].rdev; + if (rdev && rdev->in_sync) + /* multiple read failures in one stripe */ + md_error(conf->mddev, rdev); + } + spin_lock_irq(&conf->device_lock); /* fail all writes first */ bi = sh->dev[i].towrite; @@ -1015,7 +1046,8 @@ static void handle_stripe(struct stripe_head *sh) } /* fail any reads if this device is non-operational */ - if (!test_bit(R5_Insync, &sh->dev[i].flags)) { + if (!test_bit(R5_Insync, &sh->dev[i].flags) || + test_bit(R5_ReadError, &sh->dev[i].flags)) { bi = sh->dev[i].toread; sh->dev[i].toread = NULL; if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) @@ -1274,7 +1306,26 @@ static void handle_stripe(struct stripe_head *sh) md_done_sync(conf->mddev, STRIPE_SECTORS,1); clear_bit(STRIPE_SYNCING, &sh->state); } - + + /* If the failed drive is just a ReadError, then we might need to progress + * the repair/check process + */ + if (failed == 1 && test_bit(R5_ReadError, &sh->dev[failed_num].flags) + && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags) + && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags) + ) { + dev = &sh->dev[failed_num]; + if 
(!test_bit(R5_ReWrite, &dev->flags)) { + set_bit(R5_Wantwrite, &dev->flags); + set_bit(R5_ReWrite, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } else { + /* let's read it back */ + set_bit(R5_Wantread, &dev->flags); + set_bit(R5_LOCKED, &dev->flags); + } + } + spin_unlock(&sh->lock); while ((bi=return_bi)) { -- cgit v1.2.3 From eae1701fbd264cfc7efbaf7cd4cd999760070e27 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:23 -0800 Subject: [PATCH] md: initial sysfs support for md Start using kobjects in mddevs, and provide a couple of simple attributes (level and disks). Attributes live in /sys/block/mdX/md/attr-name Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 9ecf51ee596..a68ad854732 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -181,7 +181,7 @@ static void mddev_put(mddev_t *mddev) if (!mddev->raid_disks && list_empty(&mddev->disks)) { list_del(&mddev->all_mddevs); blk_put_queue(mddev->queue); - kfree(mddev); + kobject_unregister(&mddev->kobj); } spin_unlock(&all_mddevs_lock); } @@ -1551,6 +1551,85 @@ static void analyze_sbs(mddev_t * mddev) } +struct md_sysfs_entry { + struct attribute attr; + ssize_t (*show)(mddev_t *, char *); + ssize_t (*store)(mddev_t *, const char *, size_t); +}; + +static ssize_t +md_show_level(mddev_t *mddev, char *page) +{ + mdk_personality_t *p = mddev->pers; + if (p == NULL) + return 0; + if (mddev->level >= 0) + return sprintf(page, "RAID-%d\n", mddev->level); + else + return sprintf(page, "%s\n", p->name); +} + +static struct md_sysfs_entry md_level = { + .attr = {.name = "level", .mode = S_IRUGO }, + .show = md_show_level, +}; + +static ssize_t +md_show_rdisks(mddev_t *mddev, char *page) +{ + return sprintf(page, "%d\n", mddev->raid_disks); +} + +static struct md_sysfs_entry md_raid_disks = { + .attr = {.name = "raid_disks", .mode = S_IRUGO }, + .show = md_show_rdisks, +}; + +static struct attribute *md_default_attrs[] = { + &md_level.attr, + &md_raid_disks.attr, + NULL, +}; + +static ssize_t +md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); + mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + + if (!entry->show) + return -EIO; + return entry->show(mddev, page); +} + +static ssize_t +md_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); + mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + + if (!entry->store) + return -EIO; + return entry->store(mddev, page, length); +} + +static void md_free(struct kobject *ko) +{ + mddev_t *mddev = container_of(ko, mddev_t, kobj); + kfree(mddev); +} + +static struct sysfs_ops md_sysfs_ops = { + .show = md_attr_show, + .store = md_attr_store, +}; +static struct kobj_type md_ktype = { + .release = md_free, + .sysfs_ops = &md_sysfs_ops, + .default_attrs = md_default_attrs, +}; + int mdp_major = 0; static struct kobject *md_probe(dev_t dev, int *part, void *data) @@ -1592,6 +1671,11 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) add_disk(disk); mddev->gendisk = disk; up(&disks_sem); + mddev->kobj.parent = kobject_get(&disk->kobj); + mddev->kobj.k_name = NULL; + 
snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); + mddev->kobj.ktype = &md_ktype; + kobject_register(&mddev->kobj); return NULL; } -- cgit v1.2.3 From 86e6ffdd243a06663713e637ee683fb27dce8e0c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:24 -0800 Subject: [PATCH] md: extend md sysfs support to component devices. Each device in an md array how has a corresponding /sys/block/mdX/md/devNN/ directory which can contain attributes. Currently there is only 'state' which summarises the state, nd 'super' which has a copy of the superblock, and 'block' which is a symlink to the block device. Also, /sys/block/mdX/md/rdNN represents slot 'NN' in the array, and is a symlink to the relevant 'devNN'. Obviously spare devices do not have a slot in the array, and so don't have such a symlink. Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 168 +++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 160 insertions(+), 8 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index a68ad854732..74520b50c30 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -711,6 +711,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) */ int i; int active=0, working=0,failed=0,spare=0,nr_disks=0; + unsigned int fixdesc=0; rdev->sb_size = MD_SB_BYTES; @@ -758,16 +759,28 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->disks[0].state = (1<raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) - rdev2->desc_nr = rdev2->raid_disk; + desc_nr = rdev2->raid_disk; else - rdev2->desc_nr = next_spare++; + desc_nr = next_spare++; + if (desc_nr != rdev2->desc_nr) { + fixdesc |= (1 << desc_nr); + rdev2->desc_nr = desc_nr; + if (rdev2->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev2->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + } + sysfs_remove_link(&rdev2->kobj, "block"); + kobject_del(&rdev2->kobj); + } d = &sb->disks[rdev2->desc_nr]; nr_disks++; d->number = rdev2->desc_nr; d->major = MAJOR(rdev2->bdev->bd_dev); d->minor = MINOR(rdev2->bdev->bd_dev); - if (rdev2->raid_disk >= 0 && rdev->in_sync && !rdev2->faulty) + if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) d->raid_disk = rdev2->raid_disk; else d->raid_disk = rdev2->desc_nr; /* compatibility */ @@ -787,7 +800,22 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) if (test_bit(WriteMostly, &rdev2->flags)) d->state |= (1<desc_nr)) { + snprintf(rdev2->kobj.name, KOBJ_NAME_LEN, "dev%d", + rdev2->desc_nr); + kobject_add(&rdev2->kobj); + sysfs_create_link(&rdev2->kobj, + &rdev2->bdev->bd_disk->kobj, + "block"); + if (rdev2->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev2->raid_disk); + sysfs_create_link(&mddev->kobj, + &rdev2->kobj, nm); + } + } /* now set the "removed" and "faulty" bits on any missing devices */ for (i=0 ; i < mddev->raid_disks ; i++) { mdp_disk_t *d = &sb->disks[i]; @@ -1147,6 +1175,13 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) list_add(&rdev->same_set, &mddev->disks); rdev->mddev = mddev; printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b)); + + rdev->kobj.k_name = NULL; + snprintf(rdev->kobj.name, KOBJ_NAME_LEN, "dev%d", rdev->desc_nr); + rdev->kobj.parent = kobject_get(&mddev->kobj); + kobject_add(&rdev->kobj); + + sysfs_create_link(&rdev->kobj, &rdev->bdev->bd_disk->kobj, "block"); return 0; } @@ -1160,6 +1195,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) list_del_init(&rdev->same_set); 
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); rdev->mddev = NULL; + sysfs_remove_link(&rdev->kobj, "block"); + kobject_del(&rdev->kobj); } /* @@ -1215,7 +1252,7 @@ static void export_rdev(mdk_rdev_t * rdev) md_autodetect_dev(rdev->bdev->bd_dev); #endif unlock_rdev(rdev); - kfree(rdev); + kobject_put(&rdev->kobj); } static void kick_rdev_from_array(mdk_rdev_t * rdev) @@ -1414,6 +1451,94 @@ repeat: } +struct rdev_sysfs_entry { + struct attribute attr; + ssize_t (*show)(mdk_rdev_t *, char *); + ssize_t (*store)(mdk_rdev_t *, const char *, size_t); +}; + +static ssize_t +rdev_show_state(mdk_rdev_t *rdev, char *page) +{ + char *sep = ""; + int len=0; + + if (rdev->faulty) { + len+= sprintf(page+len, "%sfaulty",sep); + sep = ","; + } + if (rdev->in_sync) { + len += sprintf(page+len, "%sin_sync",sep); + sep = ","; + } + if (!rdev->faulty && !rdev->in_sync) { + len += sprintf(page+len, "%sspare", sep); + sep = ","; + } + return len+sprintf(page+len, "\n"); +} + +static struct rdev_sysfs_entry rdev_state = { + .attr = {.name = "state", .mode = S_IRUGO }, + .show = rdev_show_state, +}; + +static ssize_t +rdev_show_super(mdk_rdev_t *rdev, char *page) +{ + if (rdev->sb_loaded && rdev->sb_size) { + memcpy(page, page_address(rdev->sb_page), rdev->sb_size); + return rdev->sb_size; + } else + return 0; +} +static struct rdev_sysfs_entry rdev_super = { + .attr = {.name = "super", .mode = S_IRUGO }, + .show = rdev_show_super, +}; +static struct attribute *rdev_default_attrs[] = { + &rdev_state.attr, + &rdev_super.attr, + NULL, +}; +static ssize_t +rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); + mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); + + if (!entry->show) + return -EIO; + return entry->show(rdev, page); +} + +static ssize_t +rdev_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); + mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); + + if (!entry->store) + return -EIO; + return entry->store(rdev, page, length); +} + +static void rdev_free(struct kobject *ko) +{ + mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); + kfree(rdev); +} +static struct sysfs_ops rdev_sysfs_ops = { + .show = rdev_attr_show, + .store = rdev_attr_store, +}; +static struct kobj_type rdev_ktype = { + .release = rdev_free, + .sysfs_ops = &rdev_sysfs_ops, + .default_attrs = rdev_default_attrs, +}; + /* * Import a device. 
If 'super_format' >= 0, then sanity check the superblock * @@ -1445,6 +1570,10 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi if (err) goto abort_free; + rdev->kobj.parent = NULL; + rdev->kobj.ktype = &rdev_ktype; + kobject_init(&rdev->kobj); + rdev->desc_nr = -1; rdev->faulty = 0; rdev->in_sync = 0; @@ -1820,6 +1949,13 @@ static int do_md_run(mddev_t * mddev) mddev->safemode_timer.data = (unsigned long) mddev; mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ mddev->in_sync = 1; + + ITERATE_RDEV(mddev,rdev,tmp) + if (rdev->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); + } set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); @@ -1941,9 +2077,18 @@ static int do_md_stop(mddev_t * mddev, int ro) * Free resources if final stop */ if (!ro) { + mdk_rdev_t *rdev; + struct list_head *tmp; struct gendisk *disk; printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); + ITERATE_RDEV(mddev,rdev,tmp) + if (rdev->raid_disk >= 0) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); + } + export_array(mddev); mddev->array_size = 0; @@ -3962,17 +4107,24 @@ void md_check_recovery(mddev_t *mddev) if (rdev->raid_disk >= 0 && (rdev->faulty || ! rdev->in_sync) && atomic_read(&rdev->nr_pending)==0) { - if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) + if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) { + char nm[20]; + sprintf(nm,"rd%d", rdev->raid_disk); + sysfs_remove_link(&mddev->kobj, nm); rdev->raid_disk = -1; + } } if (mddev->degraded) { ITERATE_RDEV(mddev,rdev,rtmp) if (rdev->raid_disk < 0 && !rdev->faulty) { - if (mddev->pers->hot_add_disk(mddev,rdev)) + if (mddev->pers->hot_add_disk(mddev,rdev)) { + char nm[20]; + sprintf(nm, "rd%d", rdev->raid_disk); + sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); spares++; - else + } else break; } } -- cgit v1.2.3 From 3f294f4fb6f2ba887b717674da26c21f3d57f3fc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:25 -0800 Subject: [PATCH] md: add kobject/sysfs support to raid5 /sys/block/mdX/md/raid5/ contains raid5-related attributes. Currently stripe_cache_size is number of entries in stripe cache, and is settable. stripe_cache_active is number of active entries, and in only readable. Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 183 +++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 151 insertions(+), 32 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8cf1ae8b8a7..121fbaa9ed5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -293,9 +293,31 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector return sh; } -static int grow_stripes(raid5_conf_t *conf, int num) +static int grow_one_stripe(raid5_conf_t *conf) { struct stripe_head *sh; + sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); + if (!sh) + return 0; + memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); + sh->raid_conf = conf; + spin_lock_init(&sh->lock); + + if (grow_buffers(sh, conf->raid_disks)) { + shrink_buffers(sh, conf->raid_disks); + kmem_cache_free(conf->slab_cache, sh); + return 0; + } + /* we just created an active stripe so... 
*/ + atomic_set(&sh->count, 1); + atomic_inc(&conf->active_stripes); + INIT_LIST_HEAD(&sh->lru); + release_stripe(sh); + return 1; +} + +static int grow_stripes(raid5_conf_t *conf, int num) +{ kmem_cache_t *sc; int devs = conf->raid_disks; @@ -308,43 +330,34 @@ static int grow_stripes(raid5_conf_t *conf, int num) return 1; conf->slab_cache = sc; while (num--) { - sh = kmem_cache_alloc(sc, GFP_KERNEL); - if (!sh) - return 1; - memset(sh, 0, sizeof(*sh) + (devs-1)*sizeof(struct r5dev)); - sh->raid_conf = conf; - spin_lock_init(&sh->lock); - - if (grow_buffers(sh, conf->raid_disks)) { - shrink_buffers(sh, conf->raid_disks); - kmem_cache_free(sc, sh); + if (!grow_one_stripe(conf)) return 1; - } - /* we just created an active stripe so... */ - atomic_set(&sh->count, 1); - atomic_inc(&conf->active_stripes); - INIT_LIST_HEAD(&sh->lru); - release_stripe(sh); } return 0; } -static void shrink_stripes(raid5_conf_t *conf) +static int drop_one_stripe(raid5_conf_t *conf) { struct stripe_head *sh; - while (1) { - spin_lock_irq(&conf->device_lock); - sh = get_free_stripe(conf); - spin_unlock_irq(&conf->device_lock); - if (!sh) - break; - if (atomic_read(&sh->count)) - BUG(); - shrink_buffers(sh, conf->raid_disks); - kmem_cache_free(conf->slab_cache, sh); - atomic_dec(&conf->active_stripes); - } + spin_lock_irq(&conf->device_lock); + sh = get_free_stripe(conf); + spin_unlock_irq(&conf->device_lock); + if (!sh) + return 0; + if (atomic_read(&sh->count)) + BUG(); + shrink_buffers(sh, conf->raid_disks); + kmem_cache_free(conf->slab_cache, sh); + atomic_dec(&conf->active_stripes); + return 1; +} + +static void shrink_stripes(raid5_conf_t *conf) +{ + while (drop_one_stripe(conf)) + ; + kmem_cache_destroy(conf->slab_cache); conf->slab_cache = NULL; } @@ -1714,6 +1727,108 @@ static void raid5d (mddev_t *mddev) PRINTK("--- raid5d inactive\n"); } +struct raid5_sysfs_entry { + struct attribute attr; + ssize_t (*show)(raid5_conf_t *, char *); + ssize_t (*store)(raid5_conf_t *, const char *, ssize_t); +}; + +static ssize_t +raid5_show_stripe_cache_size(raid5_conf_t *conf, char *page) +{ + return sprintf(page, "%d\n", conf->max_nr_stripes); +} + +static ssize_t +raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len) +{ + char *end; + int new; + if (len >= PAGE_SIZE) + return -EINVAL; + + new = simple_strtoul(page, &end, 10); + if (!*page || (*end && *end != '\n') ) + return -EINVAL; + if (new <= 16 || new > 32768) + return -EINVAL; + while (new < conf->max_nr_stripes) { + if (drop_one_stripe(conf)) + conf->max_nr_stripes--; + else + break; + } + while (new > conf->max_nr_stripes) { + if (grow_one_stripe(conf)) + conf->max_nr_stripes++; + else break; + } + return len; +} +static struct raid5_sysfs_entry raid5_stripecache_size = { + .attr = {.name = "stripe_cache_size", .mode = S_IRUGO | S_IWUSR }, + .show = raid5_show_stripe_cache_size, + .store = raid5_store_stripe_cache_size, +}; + +static ssize_t +raid5_show_stripe_cache_active(raid5_conf_t *conf, char *page) +{ + return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); +} + +static struct raid5_sysfs_entry raid5_stripecache_active = { + .attr = {.name = "stripe_cache_active", .mode = S_IRUGO}, + .show = raid5_show_stripe_cache_active, +}; + +static struct attribute *raid5_default_attrs[] = { + &raid5_stripecache_size.attr, + &raid5_stripecache_active.attr, + NULL, +}; + +static ssize_t +raid5_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct raid5_sysfs_entry *entry = container_of(attr, struct 
raid5_sysfs_entry, attr); + raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); + + if (!entry->show) + return -EIO; + return entry->show(conf, page); +} + +static ssize_t +raid5_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr); + raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); + + if (!entry->store) + return -EIO; + return entry->store(conf, page, length); +} + +static void raid5_free(struct kobject *ko) +{ + raid5_conf_t *conf = container_of(ko, raid5_conf_t, kobj); + kfree(conf); +} + + +static struct sysfs_ops raid5_sysfs_ops = { + .show = raid5_attr_show, + .store = raid5_attr_store, +}; + +static struct kobj_type raid5_ktype = { + .release = raid5_free, + .sysfs_ops = &raid5_sysfs_ops, + .default_attrs = raid5_default_attrs, +}; + static int run(mddev_t *mddev) { raid5_conf_t *conf; @@ -1855,6 +1970,10 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + } /* Ok, everything is just fine now */ + conf->kobj.parent = kobject_get(&mddev->kobj); + strcpy(conf->kobj.name, "raid5"); + conf->kobj.ktype = &raid5_ktype; + kobject_register(&conf->kobj); if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; @@ -1879,7 +1998,7 @@ abort: -static int stop (mddev_t *mddev) +static int stop(mddev_t *mddev) { raid5_conf_t *conf = (raid5_conf_t *) mddev->private; @@ -1888,7 +2007,7 @@ static int stop (mddev_t *mddev) shrink_stripes(conf); free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - kfree(conf); + kobject_unregister(&conf->kobj); mddev->private = NULL; return 0; } -- cgit v1.2.3 From 24dd469d728dae07f40c5d79ea6dedd38cdf1a30 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:26 -0800 Subject: [PATCH] md: allow a manual resync with md You can trigger a 'check' with echo check > /sys/block/mdX/md/scan_mode or a check-and-repair errors with echo repair > /sys/block/mdX/md/scan_mode and read the current state from the same file. Note: personalities need to know the different between 'check' and 'repair', but don't yet. Until they do, 'check' will be the same as 'repair' and will just do a normal resync pass. 
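For reference, the intended split between the two scan modes is small: both walk the array like a resync, but 'check' must never write. A minimal sketch of the branch a personality would need once it is taught the difference, assuming the MD_RECOVERY_CHECK bit set by this patch is the signal (the raid5 patch later in this series adds a test of exactly this form; parity_ok and recompute_and_write_parity() are illustrative names only, not functions in the tree):

	/* in a personality's sync path, after verifying redundancy for a stripe;
	 * mddev is the array being scrubbed */
	if (!parity_ok) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
			/* 'check': report the mismatch, leave the on-disk data alone */
		} else {
			/* 'repair' or a plain resync: rewrite the bad parity block */
			recompute_and_write_parity();
		}
	}
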
Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 68 insertions(+), 9 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 74520b50c30..37400873b87 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1714,9 +1714,60 @@ static struct md_sysfs_entry md_raid_disks = { .show = md_show_rdisks, }; +static ssize_t +md_show_scan(mddev_t *mddev, char *page) +{ + char *type = "none"; + if (mddev->recovery & + ((1<recovery & (1<recovery)) + type = "resync"; + else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) + type = "check"; + else + type = "repair"; + } else + type = "recover"; + } + return sprintf(page, "%s\n", type); +} + +static ssize_t +md_store_scan(mddev_t *mddev, const char *page, size_t len) +{ + int canscan=0; + if (mddev->recovery & + ((1<reconfig_sem); + if (mddev->pers && mddev->pers->sync_request) + canscan=1; + up(&mddev->reconfig_sem); + if (!canscan) + return -EINVAL; + + if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) + set_bit(MD_RECOVERY_CHECK, &mddev->recovery); + else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) + return -EINVAL; + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + return len; +} + +static struct md_sysfs_entry md_scan_mode = { + .attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR }, + .show = md_show_scan, + .store = md_store_scan, +}; + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, + &md_scan_mode.attr, NULL, }; @@ -3855,7 +3906,8 @@ static void md_do_sync(mddev_t *mddev) is_mddev_idle(mddev); /* this also initializes IO event counters */ /* we don't use the checkpoint if there's a bitmap */ - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap) + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap + && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) j = mddev->recovery_cp; else j = 0; @@ -4093,9 +4145,13 @@ void md_check_recovery(mddev_t *mddev) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); goto unlock; } - if (mddev->recovery) - /* probably just the RECOVERY_NEEDED flag */ - mddev->recovery = 0; + /* Clear some bits that don't mean anything, but + * might be left set + */ + clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_RECOVERY_ERR, &mddev->recovery); + clear_bit(MD_RECOVERY_INTR, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); /* no recovery is running. * remove any failed drives, then @@ -4129,14 +4185,17 @@ void md_check_recovery(mddev_t *mddev) } } - if (!spares && (mddev->recovery_cp == MaxSector )) { - /* nothing we can do ... */ + if (spares) { + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + } else if (mddev->recovery_cp < MaxSector) { + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + /* nothing to be done ... */ goto unlock; - } + if (mddev->pers->sync_request) { set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - if (!spares) - set_bit(MD_RECOVERY_SYNC, &mddev->recovery); if (spares && mddev->bitmap && ! mddev->bitmap->file) { /* We are adding a device or devices to an array * which has the bitmap stored on all devices. 
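Condensed, the reworked tail of the md_check_recovery() hunk above now chooses between the three kinds of work like this (a restatement of that hunk for readability, not additional code):

	if (spares) {
		/* a device can be (re-)added: run recovery, never a parity pass */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	} else if (mddev->recovery_cp < MaxSector) {
		/* array is known dirty: force an ordinary resync */
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* clean, and nothing was requested through scan_mode: idle */
		goto unlock;
	}
	/* otherwise MD_RECOVERY_SYNC was set by md_store_scan(): honour it */
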
-- cgit v1.2.3 From 9d88883e68f404d5581bd391713ceef470ea53a9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:26 -0800 Subject: [PATCH] md: teach raid5 the difference between 'check' and 'repair'. With this, raid5 can be asked to check parity without repairing it. It also keeps a count of the number of incorrect parity blocks found (mismatches) and reports them through sysfs. Signed-off-by: Neil Brown Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 18 ++++++++++++++++-- drivers/md/raid5.c | 5 +++++ 2 files changed, 21 insertions(+), 2 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 37400873b87..e58d61d9f31 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1758,16 +1758,29 @@ md_store_scan(mddev_t *mddev, const char *page, size_t len) return len; } +static ssize_t +md_show_mismatch(mddev_t *mddev, char *page) +{ + return sprintf(page, "%llu\n", + (unsigned long long) mddev->resync_mismatches); +} + static struct md_sysfs_entry md_scan_mode = { .attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR }, .show = md_show_scan, .store = md_store_scan, }; +static struct md_sysfs_entry md_mismatches = { + .attr = {.name = "mismatch_cnt", .mode = S_IRUGO }, + .show = md_show_mismatch, +}; + static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, &md_scan_mode.attr, + &md_mismatches.attr, NULL, }; @@ -3888,12 +3901,13 @@ static void md_do_sync(mddev_t *mddev) } } while (mddev->curr_resync < 2); - if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { /* resync follows the size requested by the personality, * which defaults to physical size, but can be virtual size */ max_sectors = mddev->resync_max_sectors; - else + mddev->resync_mismatches = 0; + } else /* recovery follows the physical size of devices */ max_sectors = mddev->size << 1; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 121fbaa9ed5..ce154553aca 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1292,6 +1292,11 @@ static void handle_stripe(struct stripe_head *sh) !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) { /* parity is correct (on disc, not in buffer any more) */ set_bit(STRIPE_INSYNC, &sh->state); + } else { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! 
*/ + set_bit(STRIPE_INSYNC, &sh->state); } } if (!test_bit(STRIPE_INSYNC, &sh->state)) { -- cgit v1.2.3 From d6065f7bf8bec170c9c56524a250093ce73ca5d9 Mon Sep 17 00:00:00 2001 From: Suzanne Wood Date: Tue, 8 Nov 2005 21:39:27 -0800 Subject: [PATCH] md: provide proper rcu_dereference / rcu_assign_pointer annotations in md Acked-by: Signed-off-by: Suzanne Wood Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/multipath.c | 8 ++++---- drivers/md/raid1.c | 20 ++++++++++---------- drivers/md/raid10.c | 30 ++++++++++++++++-------------- drivers/md/raid5.c | 8 ++++---- drivers/md/raid6main.c | 8 ++++---- 5 files changed, 38 insertions(+), 36 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index c06f4474192..ae2c5fd6105 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -63,7 +63,7 @@ static int multipath_map (multipath_conf_t *conf) rcu_read_lock(); for (i = 0; i < disks; i++) { - mdk_rdev_t *rdev = conf->multipaths[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); if (rdev && rdev->in_sync) { atomic_inc(&rdev->nr_pending); rcu_read_unlock(); @@ -139,7 +139,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { - mdk_rdev_t *rdev = conf->multipaths[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); @@ -224,7 +224,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { - mdk_rdev_t *rdev = conf->multipaths[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); if (rdev && !rdev->faulty) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -331,7 +331,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) conf->working_disks++; rdev->raid_disk = path; rdev->in_sync = 1; - p->rdev = rdev; + rcu_assign_pointer(p->rdev, rdev); found = 1; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e16f473bcf4..f12fc288f25 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -416,10 +416,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* Choose the first operation device, for consistancy */ new_disk = 0; - for (rdev = conf->mirrors[new_disk].rdev; + for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); !rdev || !rdev->in_sync || test_bit(WriteMostly, &rdev->flags); - rdev = conf->mirrors[++new_disk].rdev) { + rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { if (rdev && rdev->in_sync) wonly_disk = new_disk; @@ -434,10 +434,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* make sure the disk is operational */ - for (rdev = conf->mirrors[new_disk].rdev; + for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); !rdev || !rdev->in_sync || test_bit(WriteMostly, &rdev->flags); - rdev = conf->mirrors[new_disk].rdev) { + rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { if (rdev && rdev->in_sync) wonly_disk = new_disk; @@ -474,7 +474,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) disk = conf->raid_disks; disk--; - rdev = conf->mirrors[disk].rdev; + rdev = rcu_dereference(conf->mirrors[disk].rdev); if (!rdev || !rdev->in_sync || @@ -496,7 +496,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) if (new_disk >= 0) { - rdev = conf->mirrors[new_disk].rdev; + rdev = 
rcu_dereference(conf->mirrors[new_disk].rdev); if (!rdev) goto retry; atomic_inc(&rdev->nr_pending); @@ -522,7 +522,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { - mdk_rdev_t *rdev = conf->mirrors[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); @@ -556,7 +556,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { - mdk_rdev_t *rdev = conf->mirrors[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev && !rdev->faulty) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -728,7 +728,7 @@ static int make_request(request_queue_t *q, struct bio * bio) #endif rcu_read_lock(); for (i = 0; i < disks; i++) { - if ((rdev=conf->mirrors[i].rdev) != NULL && + if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL && !rdev->faulty) { atomic_inc(&rdev->nr_pending); if (rdev->faulty) { @@ -954,7 +954,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) found = 1; if (rdev->saved_raid_disk != mirror) conf->fullsync = 1; - p->rdev = rdev; + rcu_assign_pointer(p->rdev, rdev); break; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index bbe40e9cf92..26114f40bde 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -496,6 +496,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) int disk, slot, nslot; const int sectors = r10_bio->sectors; sector_t new_distance, current_distance; + mdk_rdev_t *rdev; raid10_find_phys(conf, r10_bio); rcu_read_lock(); @@ -510,8 +511,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) slot = 0; disk = r10_bio->devs[slot].devnum; - while (!conf->mirrors[disk].rdev || - !conf->mirrors[disk].rdev->in_sync) { + while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + !rdev->in_sync) { slot++; if (slot == conf->copies) { slot = 0; @@ -527,8 +528,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) /* make sure the disk is operational */ slot = 0; disk = r10_bio->devs[slot].devnum; - while (!conf->mirrors[disk].rdev || - !conf->mirrors[disk].rdev->in_sync) { + while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || + !rdev->in_sync) { slot ++; if (slot == conf->copies) { disk = -1; @@ -547,11 +548,11 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) int ndisk = r10_bio->devs[nslot].devnum; - if (!conf->mirrors[ndisk].rdev || - !conf->mirrors[ndisk].rdev->in_sync) + if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || + !rdev->in_sync) continue; - if (!atomic_read(&conf->mirrors[ndisk].rdev->nr_pending)) { + if (!atomic_read(&rdev->nr_pending)) { disk = ndisk; slot = nslot; break; @@ -569,7 +570,7 @@ rb_out: r10_bio->read_slot = slot; /* conf->next_seq_sect = this_sector + sectors;*/ - if (disk >= 0 && conf->mirrors[disk].rdev) + if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL) atomic_inc(&conf->mirrors[disk].rdev->nr_pending); rcu_read_unlock(); @@ -583,7 +584,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { - mdk_rdev_t *rdev = conf->mirrors[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); @@ -614,7 +615,7 @@ static int 
raid10_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { - mdk_rdev_t *rdev = conf->mirrors[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); if (rdev && !rdev->faulty) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -768,9 +769,10 @@ static int make_request(request_queue_t *q, struct bio * bio) rcu_read_lock(); for (i = 0; i < conf->copies; i++) { int d = r10_bio->devs[i].devnum; - if (conf->mirrors[d].rdev && - !conf->mirrors[d].rdev->faulty) { - atomic_inc(&conf->mirrors[d].rdev->nr_pending); + mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); + if (rdev && + !rdev->faulty) { + atomic_inc(&rdev->nr_pending); r10_bio->devs[i].bio = bio; } else r10_bio->devs[i].bio = NULL; @@ -980,7 +982,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) p->head_position = 0; rdev->raid_disk = mirror; found = 1; - p->rdev = rdev; + rcu_assign_pointer(p->rdev, rdev); break; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ce154553aca..061d265ed94 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1374,7 +1374,7 @@ static void handle_stripe(struct stripe_head *sh) bi->bi_end_io = raid5_end_read_request; rcu_read_lock(); - rdev = conf->disks[i].rdev; + rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && rdev->faulty) rdev = NULL; if (rdev) @@ -1448,7 +1448,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { - mdk_rdev_t *rdev = conf->disks[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); @@ -1493,7 +1493,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { - mdk_rdev_t *rdev = conf->disks[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !rdev->faulty) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -2165,7 +2165,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) found = 1; if (rdev->saved_raid_disk != disk) conf->fullsync = 1; - p->rdev = rdev; + rcu_assign_pointer(p->rdev, rdev); break; } print_raid5_conf(conf); diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 77578694770..84f3ee01e4c 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -1464,7 +1464,7 @@ static void handle_stripe(struct stripe_head *sh) bi->bi_end_io = raid6_end_read_request; rcu_read_lock(); - rdev = conf->disks[i].rdev; + rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && rdev->faulty) rdev = NULL; if (rdev) @@ -1538,7 +1538,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { - mdk_rdev_t *rdev = conf->disks[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); @@ -1583,7 +1583,7 @@ static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { - mdk_rdev_t *rdev = conf->disks[i].rdev; + mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !rdev->faulty) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -2154,7 +2154,7 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 
found = 1; if (rdev->saved_raid_disk != disk) conf->fullsync = 1; - p->rdev = rdev; + rcu_assign_pointer(p->rdev, rdev); break; } print_raid6_conf(conf); -- cgit v1.2.3 From 9c79197761b4c181a143dc6a6044f4e47d44bdcc Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:28 -0800 Subject: [PATCH] md: fix ref-counting problems with kobjects in md Thanks Greg. Cc: Greg KH Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 8 ++++++-- drivers/md/raid5.c | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index e58d61d9f31..fe0137a5b00 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -805,7 +805,11 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) if (fixdesc & (1<desc_nr)) { snprintf(rdev2->kobj.name, KOBJ_NAME_LEN, "dev%d", rdev2->desc_nr); + /* kobject_add gets a ref on the parent, so + * we have to drop the one we already have + */ kobject_add(&rdev2->kobj); + kobject_put(rdev->kobj.parent); sysfs_create_link(&rdev2->kobj, &rdev2->bdev->bd_disk->kobj, "block"); @@ -1178,7 +1182,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) rdev->kobj.k_name = NULL; snprintf(rdev->kobj.name, KOBJ_NAME_LEN, "dev%d", rdev->desc_nr); - rdev->kobj.parent = kobject_get(&mddev->kobj); + rdev->kobj.parent = &mddev->kobj; kobject_add(&rdev->kobj); sysfs_create_link(&rdev->kobj, &rdev->bdev->bd_disk->kobj, "block"); @@ -1864,7 +1868,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) add_disk(disk); mddev->gendisk = disk; up(&disks_sem); - mddev->kobj.parent = kobject_get(&disk->kobj); + mddev->kobj.parent = &disk->kobj; mddev->kobj.k_name = NULL; snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); mddev->kobj.ktype = &md_ktype; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 061d265ed94..246c9b1cc4a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1975,7 +1975,7 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + } /* Ok, everything is just fine now */ - conf->kobj.parent = kobject_get(&mddev->kobj); + conf->kobj.parent = &mddev->kobj; strcpy(conf->kobj.name, "raid5"); conf->kobj.ktype = &raid5_ktype; kobject_register(&conf->kobj); -- cgit v1.2.3 From 31399d9e56abeec4d819f07eefc97f30b5d5ed75 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:29 -0800 Subject: [PATCH] md: minor MD fixes 1/ Use reduce stack usage, because 'gcc' apparently doesn't overlay different variables that are in separate scopes... 2/ Use test_bit instead of ( .. & 1<< ..) which in this case is buggy. Thanks to Andrew Morton Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index fe0137a5b00..013f2f27589 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -698,6 +698,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) struct list_head *tmp; mdk_rdev_t *rdev2; int next_spare = mddev->raid_disks; + char nm[20]; /* make rdev->sb match mddev data.. 
* @@ -768,7 +769,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) fixdesc |= (1 << desc_nr); rdev2->desc_nr = desc_nr; if (rdev2->raid_disk >= 0) { - char nm[20]; sprintf(nm, "rd%d", rdev2->raid_disk); sysfs_remove_link(&mddev->kobj, nm); } @@ -814,7 +814,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) &rdev2->bdev->bd_disk->kobj, "block"); if (rdev2->raid_disk >= 0) { - char nm[20]; sprintf(nm, "rd%d", rdev2->raid_disk); sysfs_create_link(&mddev->kobj, &rdev2->kobj, nm); @@ -1722,9 +1721,9 @@ static ssize_t md_show_scan(mddev_t *mddev, char *page) { char *type = "none"; - if (mddev->recovery & - ((1<recovery & (1<recovery) || + test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { + if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) type = "resync"; else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) @@ -1741,8 +1740,9 @@ static ssize_t md_store_scan(mddev_t *mddev, const char *page, size_t len) { int canscan=0; - if (mddev->recovery & - ((1<recovery) || + test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; down(&mddev->reconfig_sem); if (mddev->pers && mddev->pers->sync_request) -- cgit v1.2.3 From 007583c9253fed363a0bd71b039e9b40a0f6855e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:30 -0800 Subject: [PATCH] md: change raid5 sysfs attribute to not create a new directory There isn't really a need for raid5 attributes to be an a subdirectory, so this patch moves them from /sys/block/mdX/md/raid5/attribute to /sys/block/mdX/md/attribute This suggests that all md personalities should co-operate about namespace usage, but that shouldn't be a problem. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 6 ----- drivers/md/raid5.c | 72 ++++++++++++------------------------------------------ 2 files changed, 15 insertions(+), 63 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 013f2f27589..3db5c351307 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1683,12 +1683,6 @@ static void analyze_sbs(mddev_t * mddev) } -struct md_sysfs_entry { - struct attribute attr; - ssize_t (*show)(mddev_t *, char *); - ssize_t (*store)(mddev_t *, const char *, size_t); -}; - static ssize_t md_show_level(mddev_t *mddev, char *page) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 246c9b1cc4a..08a1620b9f8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1732,21 +1732,17 @@ static void raid5d (mddev_t *mddev) PRINTK("--- raid5d inactive\n"); } -struct raid5_sysfs_entry { - struct attribute attr; - ssize_t (*show)(raid5_conf_t *, char *); - ssize_t (*store)(raid5_conf_t *, const char *, ssize_t); -}; - static ssize_t -raid5_show_stripe_cache_size(raid5_conf_t *conf, char *page) +raid5_show_stripe_cache_size(mddev_t *mddev, char *page) { + raid5_conf_t *conf = mddev_to_conf(mddev); return sprintf(page, "%d\n", conf->max_nr_stripes); } static ssize_t -raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len) +raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) { + raid5_conf_t *conf = mddev_to_conf(mddev); char *end; int new; if (len >= PAGE_SIZE) @@ -1770,68 +1766,33 @@ raid5_store_stripe_cache_size(raid5_conf_t *conf, const char *page, ssize_t len) } return len; } -static struct raid5_sysfs_entry raid5_stripecache_size = { + +static struct md_sysfs_entry raid5_stripecache_size = { .attr = {.name = "stripe_cache_size", .mode = 
S_IRUGO | S_IWUSR }, .show = raid5_show_stripe_cache_size, .store = raid5_store_stripe_cache_size, }; static ssize_t -raid5_show_stripe_cache_active(raid5_conf_t *conf, char *page) +raid5_show_stripe_cache_active(mddev_t *mddev, char *page) { + raid5_conf_t *conf = mddev_to_conf(mddev); return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); } -static struct raid5_sysfs_entry raid5_stripecache_active = { +static struct md_sysfs_entry raid5_stripecache_active = { .attr = {.name = "stripe_cache_active", .mode = S_IRUGO}, .show = raid5_show_stripe_cache_active, }; -static struct attribute *raid5_default_attrs[] = { +static struct attribute *raid5_attrs[] = { &raid5_stripecache_size.attr, &raid5_stripecache_active.attr, NULL, }; - -static ssize_t -raid5_attr_show(struct kobject *kobj, struct attribute *attr, char *page) -{ - struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr); - raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); - - if (!entry->show) - return -EIO; - return entry->show(conf, page); -} - -static ssize_t -raid5_attr_store(struct kobject *kobj, struct attribute *attr, - const char *page, size_t length) -{ - struct raid5_sysfs_entry *entry = container_of(attr, struct raid5_sysfs_entry, attr); - raid5_conf_t *conf = container_of(kobj, raid5_conf_t, kobj); - - if (!entry->store) - return -EIO; - return entry->store(conf, page, length); -} - -static void raid5_free(struct kobject *ko) -{ - raid5_conf_t *conf = container_of(ko, raid5_conf_t, kobj); - kfree(conf); -} - - -static struct sysfs_ops raid5_sysfs_ops = { - .show = raid5_attr_show, - .store = raid5_attr_store, -}; - -static struct kobj_type raid5_ktype = { - .release = raid5_free, - .sysfs_ops = &raid5_sysfs_ops, - .default_attrs = raid5_default_attrs, +static struct attribute_group raid5_attrs_group = { + .name = NULL, + .attrs = raid5_attrs, }; static int run(mddev_t *mddev) @@ -1975,10 +1936,7 @@ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + } /* Ok, everything is just fine now */ - conf->kobj.parent = &mddev->kobj; - strcpy(conf->kobj.name, "raid5"); - conf->kobj.ktype = &raid5_ktype; - kobject_register(&conf->kobj); + sysfs_create_group(&mddev->kobj, &raid5_attrs_group); if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ; @@ -2012,7 +1970,7 @@ static int stop(mddev_t *mddev) shrink_stripes(conf); free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ - kobject_unregister(&conf->kobj); + sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); mddev->private = NULL; return 0; } -- cgit v1.2.3 From ba22dcbf106338a5c46d6979f9b19564faae3d49 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:31 -0800 Subject: [PATCH] md: improvements to raid5 handling of read errors Two refinements to the 'attempt-overwrite-on-read-error' mechanism. 1/ If the array is read-only, don't attempt an over-write. 2/ If there are more than max_nr_stripes read errors on a device with no success, fail the drive. This will make sure a dead drive will be eventually kicked even when we aren't trying to rewrite (which would normally kick a dead drive more quickly. 
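Combined with the earlier attempt-rewrite-on-read-error patch, the failure leg of raid5_end_read_request() now decides roughly as follows (a paraphrase of the hunk below, where rdev stands for conf->disks[i].rdev; the per-device read_errors counter is the atomic_t this patch adds, and it is reset to zero whenever a read on that device succeeds):

	/* read of sh->dev[i] failed */
	atomic_inc(&rdev->read_errors);
	if (conf->mddev->degraded ||				/* no redundancy left to rebuild from */
	    test_bit(R5_ReWrite, &sh->dev[i].flags) ||		/* rewrite + re-read already attempted */
	    atomic_read(&rdev->read_errors) > conf->max_nr_stripes)
		md_error(conf->mddev, rdev);			/* give up: fail the device */
	else
		set_bit(R5_ReadError, &sh->dev[i].flags);	/* retry via rewrite and re-read */
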
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 1 + drivers/md/raid5.c | 25 +++++++++++++++++-------- 2 files changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 3db5c351307..3fb80397f8a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1582,6 +1582,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi rdev->in_sync = 0; rdev->data_offset = 0; atomic_set(&rdev->nr_pending, 0); + atomic_set(&rdev->read_errors, 0); size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; if (!size) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 08a1620b9f8..77610b98d4e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -420,21 +420,29 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } + if (atomic_read(&conf->disks[i].rdev->read_errors)) + atomic_set(&conf->disks[i].rdev->read_errors, 0); } else { + int retry = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); - if (conf->mddev->degraded) { + atomic_inc(&conf->disks[i].rdev->read_errors); + if (conf->mddev->degraded) printk("R5: read error not correctable.\n"); - clear_bit(R5_ReadError, &sh->dev[i].flags); - clear_bit(R5_ReWrite, &sh->dev[i].flags); - md_error(conf->mddev, conf->disks[i].rdev); - } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { + else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) /* Oh, no!!! */ printk("R5: read error NOT corrected!!\n"); + else if (atomic_read(&conf->disks[i].rdev->read_errors) + > conf->max_nr_stripes) + printk("raid5: Too many read errors, failing device.\n"); + else + retry = 1; + if (retry) + set_bit(R5_ReadError, &sh->dev[i].flags); + else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); md_error(conf->mddev, conf->disks[i].rdev); - } else - set_bit(R5_ReadError, &sh->dev[i].flags); + } } rdev_dec_pending(conf->disks[i].rdev, conf->mddev); #if 0 @@ -1328,7 +1336,8 @@ static void handle_stripe(struct stripe_head *sh) /* If the failed drive is just a ReadError, then we might need to progress * the repair/check process */ - if (failed == 1 && test_bit(R5_ReadError, &sh->dev[failed_num].flags) + if (failed == 1 && ! conf->mddev->ro && + test_bit(R5_ReadError, &sh->dev[failed_num].flags) && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags) && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags) ) { -- cgit v1.2.3 From b2d444d7ad975d555bb919601bcdc0e58975a40e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:31 -0800 Subject: [PATCH] md: convert 'faulty' and 'in_sync' fields to bits in 'flags' field This has the advantage of removing the confusion caused by 'rdev_t' and 'mddev_t' both having 'in_sync' fields. 
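The conversion itself is mechanical; one representative test shows the shape it takes throughout the files listed below (rdev_usable_old/new are illustrative helpers written for this comparison, not functions from the tree):

	/* before: two independent int fields on mdk_rdev_t */
	static int rdev_usable_old(mdk_rdev_t *rdev)
	{
		return rdev->in_sync && !rdev->faulty;
	}

	/* after: named bits in the single rdev->flags word, tested with the
	 * usual atomic bit helpers, leaving mddev->in_sync as the only
	 * 'in_sync' field in the tree */
	static int rdev_usable_new(mdk_rdev_t *rdev)
	{
		return test_bit(In_sync, &rdev->flags) &&
		       !test_bit(Faulty, &rdev->flags);
	}
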
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 6 ++-- drivers/md/md.c | 92 ++++++++++++++++++++++++-------------------------- drivers/md/multipath.c | 23 +++++++------ drivers/md/raid1.c | 52 ++++++++++++++-------------- drivers/md/raid10.c | 41 +++++++++++----------- drivers/md/raid5.c | 36 ++++++++++---------- drivers/md/raid6main.c | 32 +++++++++--------- 7 files changed, 142 insertions(+), 140 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e59694bc575..c5fa4c2a5af 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -271,7 +271,8 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde return ERR_PTR(-ENOMEM); ITERATE_RDEV(mddev, rdev, tmp) { - if (! rdev->in_sync || rdev->faulty) + if (! test_bit(In_sync, &rdev->flags) + || test_bit(Faulty, &rdev->flags)) continue; target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512); @@ -291,7 +292,8 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai struct list_head *tmp; ITERATE_RDEV(mddev, rdev, tmp) - if (rdev->in_sync && !rdev->faulty) + if (test_bit(In_sync, &rdev->flags) + && !test_bit(Faulty, &rdev->flags)) md_super_write(mddev, rdev, (rdev->sb_offset<<1) + offset + page->index * (PAGE_SIZE/512), diff --git a/drivers/md/md.c b/drivers/md/md.c index 3fb80397f8a..9dfa063d1c6 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -610,7 +610,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); rdev->raid_disk = -1; - rdev->in_sync = 0; + rdev->flags = 0; if (mddev->raid_disks == 0) { mddev->major_version = 0; mddev->minor_version = sb->minor_version; @@ -671,21 +671,19 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) return 0; if (mddev->level != LEVEL_MULTIPATH) { - rdev->faulty = 0; - rdev->flags = 0; desc = sb->disks + rdev->desc_nr; if (desc->state & (1<faulty = 1; + set_bit(Faulty, &rdev->flags); else if (desc->state & (1<raid_disk < mddev->raid_disks) { - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); rdev->raid_disk = desc->raid_disk; } if (desc->state & (1<flags); } else /* MULTIPATH are always insync */ - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); return 0; } @@ -761,7 +759,8 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) ITERATE_RDEV(mddev,rdev2,tmp) { mdp_disk_t *d; int desc_nr; - if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) + if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) + && !test_bit(Faulty, &rdev2->flags)) desc_nr = rdev2->raid_disk; else desc_nr = next_spare++; @@ -780,14 +779,15 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) d->number = rdev2->desc_nr; d->major = MAJOR(rdev2->bdev->bd_dev); d->minor = MINOR(rdev2->bdev->bd_dev); - if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty) + if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) + && !test_bit(Faulty, &rdev2->flags)) d->raid_disk = rdev2->raid_disk; else d->raid_disk = rdev2->desc_nr; /* compatibility */ - if (rdev2->faulty) { + if (test_bit(Faulty, &rdev2->flags)) { d->state = (1<in_sync) { + } else if (test_bit(In_sync, &rdev2->flags)) { d->state = (1<state |= (1<sb_page); rdev->raid_disk = -1; - rdev->in_sync = 0; + rdev->flags = 0; if (mddev->raid_disks == 0) { mddev->major_version = 1; mddev->patch_version = 0; @@ -1027,22 +1027,19 @@ static int super_1_validate(mddev_t *mddev, 
mdk_rdev_t *rdev) role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); switch(role) { case 0xffff: /* spare */ - rdev->faulty = 0; break; case 0xfffe: /* faulty */ - rdev->faulty = 1; + set_bit(Faulty, &rdev->flags); break; default: - rdev->in_sync = 1; - rdev->faulty = 0; + set_bit(In_sync, &rdev->flags); rdev->raid_disk = role; break; } - rdev->flags = 0; if (sb->devflags & WriteMostly1) set_bit(WriteMostly, &rdev->flags); } else /* MULTIPATH are always insync */ - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); return 0; } @@ -1086,9 +1083,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) ITERATE_RDEV(mddev,rdev2,tmp) { i = rdev2->desc_nr; - if (rdev2->faulty) + if (test_bit(Faulty, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(0xfffe); - else if (rdev2->in_sync) + else if (test_bit(In_sync, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else sb->dev_roles[i] = cpu_to_le16(0xffff); @@ -1327,7 +1324,8 @@ static void print_rdev(mdk_rdev_t *rdev) char b[BDEVNAME_SIZE]; printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", bdevname(rdev->bdev,b), (unsigned long long)rdev->size, - rdev->faulty, rdev->in_sync, rdev->desc_nr); + test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), + rdev->desc_nr); if (rdev->sb_loaded) { printk(KERN_INFO "md: rdev superblock:\n"); print_sb((mdp_super_t*)page_address(rdev->sb_page)); @@ -1421,11 +1419,11 @@ repeat: ITERATE_RDEV(mddev,rdev,tmp) { char b[BDEVNAME_SIZE]; dprintk(KERN_INFO "md: "); - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) dprintk("(skipping faulty "); dprintk("%s ", bdevname(rdev->bdev,b)); - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { md_super_write(mddev,rdev, rdev->sb_offset<<1, rdev->sb_size, rdev->sb_page); @@ -1466,15 +1464,16 @@ rdev_show_state(mdk_rdev_t *rdev, char *page) char *sep = ""; int len=0; - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { len+= sprintf(page+len, "%sfaulty",sep); sep = ","; } - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { len += sprintf(page+len, "%sin_sync",sep); sep = ","; } - if (!rdev->faulty && !rdev->in_sync) { + if (!test_bit(Faulty, &rdev->flags) && + !test_bit(In_sync, &rdev->flags)) { len += sprintf(page+len, "%sspare", sep); sep = ","; } @@ -1578,8 +1577,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi kobject_init(&rdev->kobj); rdev->desc_nr = -1; - rdev->faulty = 0; - rdev->in_sync = 0; + rdev->flags = 0; rdev->data_offset = 0; atomic_set(&rdev->nr_pending, 0); atomic_set(&rdev->read_errors, 0); @@ -1670,7 +1668,7 @@ static void analyze_sbs(mddev_t * mddev) if (mddev->level == LEVEL_MULTIPATH) { rdev->desc_nr = i++; rdev->raid_disk = rdev->desc_nr; - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); } } @@ -1939,7 +1937,7 @@ static int do_md_run(mddev_t * mddev) /* devices must have minimum size of one chunk */ ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) continue; if (rdev->size < chunk_size / 1024) { printk(KERN_WARNING @@ -1967,7 +1965,7 @@ static int do_md_run(mddev_t * mddev) * Also find largest hardsector size */ ITERATE_RDEV(mddev,rdev,tmp) { - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) continue; sync_blockdev(rdev->bdev); invalidate_bdev(rdev->bdev, 0); @@ -2304,7 +2302,7 @@ static int autostart_array(dev_t startdev) return err; } - if (start_rdev->faulty) { + if (test_bit(Faulty, &start_rdev->flags)) { printk(KERN_WARNING "md: can not autostart based on faulty %s!\n", 
bdevname(start_rdev->bdev,b)); @@ -2363,11 +2361,11 @@ static int get_array_info(mddev_t * mddev, void __user * arg) nr=working=active=failed=spare=0; ITERATE_RDEV(mddev,rdev,tmp) { nr++; - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) failed++; else { working++; - if (rdev->in_sync) + if (test_bit(In_sync, &rdev->flags)) active++; else spare++; @@ -2458,9 +2456,9 @@ static int get_disk_info(mddev_t * mddev, void __user * arg) info.minor = MINOR(rdev->bdev->bd_dev); info.raid_disk = rdev->raid_disk; info.state = 0; - if (rdev->faulty) + if (test_bit(Faulty, &rdev->flags)) info.state |= (1<in_sync) { + else if (test_bit(In_sync, &rdev->flags)) { info.state |= (1<saved_raid_disk = rdev->raid_disk; - rdev->in_sync = 0; /* just to be sure */ + clear_bit(In_sync, &rdev->flags); /* just to be sure */ if (info->state & (1<flags); @@ -2591,11 +2589,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) else rdev->raid_disk = -1; - rdev->faulty = 0; + rdev->flags = 0; + if (rdev->raid_disk < mddev->raid_disks) - rdev->in_sync = (info->state & (1<in_sync = 0; + if (info->state & (1<flags); if (info->state & (1<flags); @@ -2694,14 +2692,14 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) goto abort_export; } - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { printk(KERN_WARNING "md: can not hot-add faulty %s disk to %s!\n", bdevname(rdev->bdev,b), mdname(mddev)); err = -EINVAL; goto abort_export; } - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); rdev->desc_nr = -1; bind_rdev_to_array(rdev, mddev); @@ -3428,7 +3426,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) return; } - if (!rdev || rdev->faulty) + if (!rdev || test_bit(Faulty, &rdev->flags)) return; /* dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", @@ -3626,7 +3624,7 @@ static int md_seq_show(struct seq_file *seq, void *v) bdevname(rdev->bdev,b), rdev->desc_nr); if (test_bit(WriteMostly, &rdev->flags)) seq_printf(seq, "(W)"); - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { seq_printf(seq, "(F)"); continue; } else if (rdev->raid_disk < 0) @@ -4174,7 +4172,7 @@ void md_check_recovery(mddev_t *mddev) */ ITERATE_RDEV(mddev,rdev,rtmp) if (rdev->raid_disk >= 0 && - (rdev->faulty || ! rdev->in_sync) && + (test_bit(Faulty, &rdev->flags) || ! 
test_bit(In_sync, &rdev->flags)) && atomic_read(&rdev->nr_pending)==0) { if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) { char nm[20]; @@ -4187,7 +4185,7 @@ void md_check_recovery(mddev_t *mddev) if (mddev->degraded) { ITERATE_RDEV(mddev,rdev,rtmp) if (rdev->raid_disk < 0 - && !rdev->faulty) { + && !test_bit(Faulty, &rdev->flags)) { if (mddev->pers->hot_add_disk(mddev,rdev)) { char nm[20]; sprintf(nm, "rd%d", rdev->raid_disk); @@ -4347,7 +4345,7 @@ static void autostart_arrays(int part) if (IS_ERR(rdev)) continue; - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { MD_BUG(); continue; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index ae2c5fd6105..145cdc5ad00 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -64,7 +64,7 @@ static int multipath_map (multipath_conf_t *conf) rcu_read_lock(); for (i = 0; i < disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && rdev->in_sync) { + if (rdev && test_bit(In_sync, &rdev->flags)) { atomic_inc(&rdev->nr_pending); rcu_read_unlock(); return i; @@ -140,7 +140,8 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) + && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -211,7 +212,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", conf->multipaths[i].rdev && - conf->multipaths[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? 
"U" : "_"); seq_printf (seq, "]"); } @@ -225,7 +226,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -265,10 +266,10 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) /* * Mark disk as unusable */ - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { char b[BDEVNAME_SIZE]; - rdev->in_sync = 0; - rdev->faulty = 1; + clear_bit(In_sync, &rdev->flags); + set_bit(Faulty, &rdev->flags); mddev->sb_dirty = 1; conf->working_disks--; printk(KERN_ALERT "multipath: IO failure on %s," @@ -298,7 +299,7 @@ static void print_multipath_conf (multipath_conf_t *conf) tmp = conf->multipaths + i; if (tmp->rdev) printk(" disk%d, o:%d, dev:%s\n", - i,!tmp->rdev->faulty, + i,!test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -330,7 +331,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) conf->working_disks++; rdev->raid_disk = path; - rdev->in_sync = 1; + set_bit(In_sync, &rdev->flags); rcu_assign_pointer(p->rdev, rdev); found = 1; } @@ -350,7 +351,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number) rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { printk(KERN_ERR "hot-remove-disk, slot %d is identified" " but is still operational!\n", number); err = -EBUSY; @@ -482,7 +483,7 @@ static int multipath_run (mddev_t *mddev) mddev->queue->max_sectors > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); - if (!rdev->faulty) + if (!test_bit(Faulty, &rdev->flags)) conf->working_disks++; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f12fc288f25..fb6b866c28f 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -417,11 +417,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) new_disk = 0; for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); - !rdev || !rdev->in_sync + !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { - if (rdev && rdev->in_sync) + if (rdev && test_bit(In_sync, &rdev->flags)) wonly_disk = new_disk; if (new_disk == conf->raid_disks - 1) { @@ -435,11 +435,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) /* make sure the disk is operational */ for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); - !rdev || !rdev->in_sync || + !rdev || !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags); rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { - if (rdev && rdev->in_sync) + if (rdev && test_bit(In_sync, &rdev->flags)) wonly_disk = new_disk; if (new_disk <= 0) @@ -477,7 +477,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) rdev = rcu_dereference(conf->mirrors[disk].rdev); if (!rdev || - !rdev->in_sync || + !test_bit(In_sync, &rdev->flags) || test_bit(WriteMostly, &rdev->flags)) continue; @@ -500,7 +500,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) if (!rdev) goto retry; atomic_inc(&rdev->nr_pending); - if (!rdev->in_sync) { + if (!test_bit(In_sync, &rdev->flags)) { /* cannot risk returning a device that failed * before we inc'ed nr_pending */ @@ -523,7 +523,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; 
iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -557,7 +557,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -729,9 +729,9 @@ static int make_request(request_queue_t *q, struct bio * bio) rcu_read_lock(); for (i = 0; i < disks; i++) { if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL && - !rdev->faulty) { + !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); - if (rdev->faulty) { + if (test_bit(Faulty, &rdev->flags)) { atomic_dec(&rdev->nr_pending); r1_bio->bios[i] = NULL; } else @@ -824,7 +824,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf(seq, "%s", conf->mirrors[i].rdev && - conf->mirrors[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); seq_printf(seq, "]"); } @@ -840,14 +840,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * next level up know. * else mark the drive as failed */ - if (rdev->in_sync + if (test_bit(In_sync, &rdev->flags) && conf->working_disks == 1) /* * Don't fail the drive, act as though we were just a * normal single drive */ return; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { mddev->degraded++; conf->working_disks--; /* @@ -855,8 +855,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->in_sync = 0; - rdev->faulty = 1; + clear_bit(In_sync, &rdev->flags); + set_bit(Faulty, &rdev->flags); mddev->sb_dirty = 1; printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. 
\n" " Operation continuing on %d devices\n", @@ -881,7 +881,7 @@ static void print_conf(conf_t *conf) tmp = conf->mirrors + i; if (tmp->rdev) printk(" disk %d, wo:%d, o:%d, dev:%s\n", - i, !tmp->rdev->in_sync, !tmp->rdev->faulty, + i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -913,11 +913,11 @@ static int raid1_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->mirrors + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { conf->working_disks++; mddev->degraded--; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } @@ -972,7 +972,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) print_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -1282,11 +1282,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i /* make sure disk is operational */ wonly = disk; while (conf->mirrors[disk].rdev == NULL || - !conf->mirrors[disk].rdev->in_sync || + !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) || test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags) ) { if (conf->mirrors[disk].rdev && - conf->mirrors[disk].rdev->in_sync) + test_bit(In_sync, &conf->mirrors[disk].rdev->flags)) wonly = disk; if (disk <= 0) disk = conf->raid_disks; @@ -1333,10 +1333,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i bio->bi_rw = READ; bio->bi_end_io = end_sync_read; } else if (conf->mirrors[i].rdev == NULL || - conf->mirrors[i].rdev->faulty) { + test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { still_degraded = 1; continue; - } else if (!conf->mirrors[i].rdev->in_sync || + } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) || sector_nr + RESYNC_SECTORS > mddev->recovery_cp) { bio->bi_rw = WRITE; bio->bi_end_io = end_sync_write; @@ -1478,7 +1478,7 @@ static int run(mddev_t *mddev) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); disk->head_position = 0; - if (!rdev->faulty && rdev->in_sync) + if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags)) conf->working_disks++; } conf->raid_disks = mddev->raid_disks; @@ -1518,7 +1518,7 @@ static int run(mddev_t *mddev) */ for (j = 0; j < conf->raid_disks && (!conf->mirrors[j].rdev || - !conf->mirrors[j].rdev->in_sync) ; j++) + !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++) /* nothing */; conf->last_used = j; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 26114f40bde..867f06ae33d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -512,7 +512,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) disk = r10_bio->devs[slot].devnum; while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || - !rdev->in_sync) { + !test_bit(In_sync, &rdev->flags)) { slot++; if (slot == conf->copies) { slot = 0; @@ -529,7 +529,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) slot = 0; disk = r10_bio->devs[slot].devnum; while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || - !rdev->in_sync) { + !test_bit(In_sync, &rdev->flags)) { slot ++; if (slot == conf->copies) { disk = -1; @@ -549,7 +549,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || - !rdev->in_sync) + !test_bit(In_sync, &rdev->flags)) continue; 
if (!atomic_read(&rdev->nr_pending)) { @@ -585,7 +585,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -616,7 +616,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -771,7 +771,7 @@ static int make_request(request_queue_t *q, struct bio * bio) int d = r10_bio->devs[i].devnum; mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); if (rdev && - !rdev->faulty) { + !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); r10_bio->devs[i].bio = bio; } else @@ -826,7 +826,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf(seq, "%s", conf->mirrors[i].rdev && - conf->mirrors[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_"); seq_printf(seq, "]"); } @@ -841,7 +841,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * next level up know. * else mark the drive as failed */ - if (rdev->in_sync + if (test_bit(In_sync, &rdev->flags) && conf->working_disks == 1) /* * Don't fail the drive, just return an IO error. @@ -851,7 +851,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * really dead" tests... */ return; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { mddev->degraded++; conf->working_disks--; /* @@ -859,8 +859,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->in_sync = 0; - rdev->faulty = 1; + clear_bit(In_sync, &rdev->flags); + set_bit(Faulty, &rdev->flags); mddev->sb_dirty = 1; printk(KERN_ALERT "raid10: Disk failure on %s, disabling device. 
\n" " Operation continuing on %d devices\n", @@ -885,7 +885,8 @@ static void print_conf(conf_t *conf) tmp = conf->mirrors + i; if (tmp->rdev) printk(" disk %d, wo:%d, o:%d, dev:%s\n", - i, !tmp->rdev->in_sync, !tmp->rdev->faulty, + i, !test_bit(In_sync, &tmp->rdev->flags), + !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -938,11 +939,11 @@ static int raid10_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->mirrors + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { conf->working_disks++; mddev->degraded--; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } @@ -1000,7 +1001,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number) print_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -1416,7 +1417,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (i=0 ; iraid_disks; i++) if (conf->mirrors[i].rdev && - !conf->mirrors[i].rdev->in_sync) { + !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { /* want to reconstruct this device */ r10bio_t *rb2 = r10_bio; @@ -1437,7 +1438,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i for (j=0; jcopies;j++) { int d = r10_bio->devs[j].devnum; if (conf->mirrors[d].rdev && - conf->mirrors[d].rdev->in_sync) { + test_bit(In_sync, &conf->mirrors[d].rdev->flags)) { /* This is where we read from */ bio = r10_bio->devs[0].bio; bio->bi_next = biolist; @@ -1513,7 +1514,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i bio = r10_bio->devs[i].bio; bio->bi_end_io = NULL; if (conf->mirrors[d].rdev == NULL || - conf->mirrors[d].rdev->faulty) + test_bit(Faulty, &conf->mirrors[d].rdev->flags)) continue; atomic_inc(&conf->mirrors[d].rdev->nr_pending); atomic_inc(&r10_bio->remaining); @@ -1699,7 +1700,7 @@ static int run(mddev_t *mddev) mddev->queue->max_sectors = (PAGE_SIZE>>9); disk->head_position = 0; - if (!rdev->faulty && rdev->in_sync) + if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags)) conf->working_disks++; } conf->raid_disks = mddev->raid_disks; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 77610b98d4e..d1c488b008a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -525,19 +525,19 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) raid5_conf_t *conf = (raid5_conf_t *) mddev->private; PRINTK("raid5: error called\n"); - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { mddev->sb_dirty = 1; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { conf->working_disks--; mddev->degraded++; conf->failed_disks++; - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); /* * if recovery was running, make sure it aborts. */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->faulty = 1; + set_bit(Faulty, &rdev->flags); printk (KERN_ALERT "raid5: Disk failure on %s, disabling device." 
" Operation continuing on %d devices\n", @@ -1003,12 +1003,12 @@ static void handle_stripe(struct stripe_head *sh) } if (dev->written) written++; rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ - if (!rdev || !rdev->in_sync) { + if (!rdev || !test_bit(In_sync, &rdev->flags)) { /* The ReadError flag wil just be confusing now */ clear_bit(R5_ReadError, &dev->flags); clear_bit(R5_ReWrite, &dev->flags); } - if (!rdev || !rdev->in_sync + if (!rdev || !test_bit(In_sync, &rdev->flags) || test_bit(R5_ReadError, &dev->flags)) { failed++; failed_num = i; @@ -1027,7 +1027,7 @@ static void handle_stripe(struct stripe_head *sh) if (test_bit(R5_ReadError, &sh->dev[i].flags)) { mdk_rdev_t *rdev = conf->disks[i].rdev; - if (rdev && rdev->in_sync) + if (rdev && test_bit(In_sync, &rdev->flags)) /* multiple read failures in one stripe */ md_error(conf->mddev, rdev); } @@ -1384,7 +1384,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_lock(); rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && rdev->faulty) + if (rdev && test_bit(Faulty, &rdev->flags)) rdev = NULL; if (rdev) atomic_inc(&rdev->nr_pending); @@ -1458,7 +1458,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -1503,7 +1503,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -1850,7 +1850,7 @@ static int run(mddev_t *mddev) disk->rdev = rdev; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { char b[BDEVNAME_SIZE]; printk(KERN_INFO "raid5: device %s operational as raid" " disk %d\n", bdevname(rdev->bdev,b), @@ -2029,7 +2029,7 @@ static void status (struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", conf->disks[i].rdev && - conf->disks[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); seq_printf (seq, "]"); #if RAID5_DEBUG #define D(x) \ @@ -2056,7 +2056,7 @@ static void print_raid5_conf (raid5_conf_t *conf) tmp = conf->disks + i; if (tmp->rdev) printk(" disk %d, o:%d, dev:%s\n", - i, !tmp->rdev->faulty, + i, !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -2070,12 +2070,12 @@ static int raid5_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->disks + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { mddev->degraded--; conf->failed_disks--; conf->working_disks++; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } print_raid5_conf(conf); @@ -2092,7 +2092,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number) print_raid5_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -2127,7 +2127,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) */ for (disk=0; disk < mddev->raid_disks; disk++) if ((p=conf->disks + disk)->rdev == NULL) { - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); rdev->raid_disk = disk; found = 1; if (rdev->saved_raid_disk != disk) diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c index 84f3ee01e4c..eae5a35629c 100644 --- a/drivers/md/raid6main.c +++ b/drivers/md/raid6main.c @@ -507,19 +507,19 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) raid6_conf_t *conf = (raid6_conf_t *) mddev->private; PRINTK("raid6: error called\n"); - if (!rdev->faulty) { + if (!test_bit(Faulty, &rdev->flags)) { mddev->sb_dirty = 1; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { conf->working_disks--; mddev->degraded++; conf->failed_disks++; - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); /* * if recovery was running, make sure it aborts. */ set_bit(MD_RECOVERY_ERR, &mddev->recovery); } - rdev->faulty = 1; + set_bit(Faulty, &rdev->flags); printk (KERN_ALERT "raid6: Disk failure on %s, disabling device." 
" Operation continuing on %d devices\n", @@ -1071,7 +1071,7 @@ static void handle_stripe(struct stripe_head *sh) } if (dev->written) written++; rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ - if (!rdev || !rdev->in_sync) { + if (!rdev || !test_bit(In_sync, &rdev->flags)) { if ( failed < 2 ) failed_num[failed] = i; failed++; @@ -1465,7 +1465,7 @@ static void handle_stripe(struct stripe_head *sh) rcu_read_lock(); rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && rdev->faulty) + if (rdev && test_bit(Faulty, &rdev->flags)) rdev = NULL; if (rdev) atomic_inc(&rdev->nr_pending); @@ -1539,7 +1539,7 @@ static void unplug_slaves(mddev_t *mddev) rcu_read_lock(); for (i=0; iraid_disks; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) { + if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { request_queue_t *r_queue = bdev_get_queue(rdev->bdev); atomic_inc(&rdev->nr_pending); @@ -1584,7 +1584,7 @@ static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk, rcu_read_lock(); for (i=0; iraid_disks && ret == 0; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); - if (rdev && !rdev->faulty) { + if (rdev && !test_bit(Faulty, &rdev->flags)) { struct block_device *bdev = rdev->bdev; request_queue_t *r_queue = bdev_get_queue(bdev); @@ -1868,7 +1868,7 @@ static int run(mddev_t *mddev) disk->rdev = rdev; - if (rdev->in_sync) { + if (test_bit(In_sync, &rdev->flags)) { char b[BDEVNAME_SIZE]; printk(KERN_INFO "raid6: device %s operational as raid" " disk %d\n", bdevname(rdev->bdev,b), @@ -2052,7 +2052,7 @@ static void status (struct seq_file *seq, mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) seq_printf (seq, "%s", conf->disks[i].rdev && - conf->disks[i].rdev->in_sync ? "U" : "_"); + test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); seq_printf (seq, "]"); #if RAID6_DUMPSTATE seq_printf (seq, "\n"); @@ -2078,7 +2078,7 @@ static void print_raid6_conf (raid6_conf_t *conf) tmp = conf->disks + i; if (tmp->rdev) printk(" disk %d, o:%d, dev:%s\n", - i, !tmp->rdev->faulty, + i, !test_bit(Faulty, &tmp->rdev->flags), bdevname(tmp->rdev->bdev,b)); } } @@ -2092,12 +2092,12 @@ static int raid6_spare_active(mddev_t *mddev) for (i = 0; i < conf->raid_disks; i++) { tmp = conf->disks + i; if (tmp->rdev - && !tmp->rdev->faulty - && !tmp->rdev->in_sync) { + && !test_bit(Faulty, &tmp->rdev->flags) + && !test_bit(In_sync, &tmp->rdev->flags)) { mddev->degraded--; conf->failed_disks--; conf->working_disks++; - tmp->rdev->in_sync = 1; + set_bit(In_sync, &tmp->rdev->flags); } } print_raid6_conf(conf); @@ -2114,7 +2114,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number) print_raid6_conf(conf); rdev = p->rdev; if (rdev) { - if (rdev->in_sync || + if (test_bit(In_sync, &rdev->flags) || atomic_read(&rdev->nr_pending)) { err = -EBUSY; goto abort; @@ -2149,7 +2149,7 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) */ for (disk=0; disk < mddev->raid_disks; disk++) if ((p=conf->disks + disk)->rdev == NULL) { - rdev->in_sync = 0; + clear_bit(In_sync, &rdev->flags); rdev->raid_disk = disk; found = 1; if (rdev->saved_raid_disk != disk) -- cgit v1.2.3 From bd926c63b7a6843d3ce2728396c0891e54fce5c4 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:32 -0800 Subject: [PATCH] md: make md on-disk bitmaps not host-endian Current bitmaps use set_bit et.al and so are host-endian, which means not-portable. Oops. 
Define a new version number (4) for which bitmaps are little-endian. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 22 ++++++++++++++++++---- drivers/md/md.c | 2 +- 2 files changed, 19 insertions(+), 5 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index c5fa4c2a5af..220273e81ed 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -482,7 +482,8 @@ static int bitmap_read_sb(struct bitmap *bitmap) /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) reason = "bad magic"; - else if (sb->version != cpu_to_le32(BITMAP_MAJOR)) + else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || + le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) reason = "unrecognized superblock version"; else if (chunksize < 512 || chunksize > (1024 * 1024 * 4)) reason = "bitmap chunksize out of range (512B - 4MB)"; @@ -527,6 +528,8 @@ success: bitmap->daemon_lastrun = jiffies; bitmap->max_write_behind = write_behind; bitmap->flags |= sb->state; + if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) + bitmap->flags |= BITMAP_HOSTENDIAN; bitmap->events_cleared = le64_to_cpu(sb->events_cleared); if (sb->state & BITMAP_STALE) bitmap->events_cleared = bitmap->mddev->events; @@ -764,7 +767,10 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) /* set the bit */ kaddr = kmap_atomic(page, KM_USER0); - set_bit(bit, kaddr); + if (bitmap->flags & BITMAP_HOSTENDIAN) + set_bit(bit, kaddr); + else + ext2_set_bit(bit, kaddr); kunmap_atomic(kaddr, KM_USER0); PRINTK("set file bit %lu page %lu\n", bit, page->index); @@ -891,6 +897,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) oldindex = ~0L; for (i = 0; i < chunks; i++) { + int b; index = file_page_index(i); bit = file_page_offset(i); if (index != oldindex) { /* this is a new page, read it in */ @@ -939,7 +946,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) bitmap->filemap[bitmap->file_pages++] = page; } - if (test_bit(bit, page_address(page))) { + if (bitmap->flags & BITMAP_HOSTENDIAN) + b = test_bit(bit, page_address(page)); + else + b = ext2_test_bit(bit, page_address(page)); + if (b) { /* if the disk bit is set, set the memory bit */ bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap), ((i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= start) @@ -1097,7 +1108,10 @@ int bitmap_daemon_work(struct bitmap *bitmap) -1); /* clear the bit */ - clear_bit(file_page_offset(j), page_address(page)); + if (bitmap->flags & BITMAP_HOSTENDIAN) + clear_bit(file_page_offset(j), page_address(page)); + else + ext2_clear_bit(file_page_offset(j), page_address(page)); } } spin_unlock_irqrestore(&bitmap->lock, flags); diff --git a/drivers/md/md.c b/drivers/md/md.c index 9dfa063d1c6..caa4add00c1 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4281,7 +4281,7 @@ static int __init md_init(void) " MD_SB_DISKS=%d\n", MD_MAJOR_VERSION, MD_MINOR_VERSION, MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS); - printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR, + printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI, BITMAP_MINOR); if (register_blkdev(MAJOR_NR, "md")) -- cgit v1.2.3 From a9701a30470856408d08657eb1bd7ae29a146190 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:34 -0800 Subject: [PATCH] md: support BIO_RW_BARRIER for md/raid1 We can only accept BARRIER requests if all slaves handle barriers, and that can, of course, 
change with time.... So we keep track of whether the whole array seems safe for barriers, and also whether each individual rdev handles barriers. We initially assumes barriers are OK. When writing the superblock we try a barrier, and if that fails, we flag things for no-barriers. This will usually clear the flags fairly quickly. If writing the superblock finds that BIO_RW_BARRIER is -ENOTSUPP, we need to resubmit, so introduce function "md_super_wait" which waits for requests to finish, and retries ENOTSUPP requests without the barrier flag. When writing the real raid1, write requests which were BIO_RW_BARRIER but which aresn't supported need to be retried. So raid1d is enhanced to do this, and when any bio write completes (i.e. no retry needed) we remove it from the r1bio, so that devices needing retry are easy to find. We should hardly ever get -ENOTSUPP errors when writing data to the raid. It should only happen if: 1/ the device used to support BARRIER, but now doesn't. Few devices change like this, though raid1 can! or 2/ the array has no persistent superblock, so there was no opportunity to pre-test for barriers when writing the superblock. Signed-off-by: Neil Brown Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/bitmap.c | 5 +- drivers/md/md.c | 99 +++++++++++++++++++++++++++++++------- drivers/md/raid1.c | 134 ++++++++++++++++++++++++++++++++++++---------------- 3 files changed, 177 insertions(+), 61 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 220273e81ed..51315302a85 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -301,7 +301,7 @@ static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wai page); if (wait) - wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); + md_super_wait(mddev); return 0; } @@ -828,8 +828,7 @@ int bitmap_unplug(struct bitmap *bitmap) wake_up_process(bitmap->writeback_daemon->tsk)); spin_unlock_irq(&bitmap->write_lock); } else - wait_event(bitmap->mddev->sb_wait, - atomic_read(&bitmap->mddev->pending_writes)==0); + md_super_wait(bitmap->mddev); } return 0; } diff --git a/drivers/md/md.c b/drivers/md/md.c index caa4add00c1..199016932de 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -330,18 +330,46 @@ static void free_disk_sb(mdk_rdev_t * rdev) static int super_written(struct bio *bio, unsigned int bytes_done, int error) { mdk_rdev_t *rdev = bio->bi_private; + mddev_t *mddev = rdev->mddev; if (bio->bi_size) return 1; if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) - md_error(rdev->mddev, rdev); + md_error(mddev, rdev); - if (atomic_dec_and_test(&rdev->mddev->pending_writes)) - wake_up(&rdev->mddev->sb_wait); + if (atomic_dec_and_test(&mddev->pending_writes)) + wake_up(&mddev->sb_wait); bio_put(bio); return 0; } +static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) +{ + struct bio *bio2 = bio->bi_private; + mdk_rdev_t *rdev = bio2->bi_private; + mddev_t *mddev = rdev->mddev; + if (bio->bi_size) + return 1; + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && + error == -EOPNOTSUPP) { + unsigned long flags; + /* barriers don't appear to be supported :-( */ + set_bit(BarriersNotsupp, &rdev->flags); + mddev->barriers_work = 0; + spin_lock_irqsave(&mddev->write_lock, flags); + bio2->bi_next = mddev->biolist; + mddev->biolist = bio2; + spin_unlock_irqrestore(&mddev->write_lock, flags); + wake_up(&mddev->sb_wait); + bio_put(bio); + return 0; + } + bio_put(bio2); + 
bio->bi_private = rdev; + return super_written(bio, bytes_done, error); +} + void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page) { @@ -350,16 +378,54 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, * and decrement it on completion, waking up sb_wait * if zero is reached. * If an error occurred, call md_error + * + * As we might need to resubmit the request if BIO_RW_BARRIER + * causes ENOTSUPP, we allocate a spare bio... */ struct bio *bio = bio_alloc(GFP_NOIO, 1); + int rw = (1<bi_bdev = rdev->bdev; bio->bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; + bio->bi_rw = rw; + atomic_inc(&mddev->pending_writes); - submit_bio((1<flags)) { + struct bio *rbio; + rw |= (1<bi_private = bio; + rbio->bi_end_io = super_written_barrier; + submit_bio(rw, rbio); + } else + submit_bio(rw, bio); +} + +void md_super_wait(mddev_t *mddev) +{ + /* wait for all superblock writes that were scheduled to complete. + * if any had to be retried (due to BARRIER problems), retry them + */ + DEFINE_WAIT(wq); + for(;;) { + prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); + if (atomic_read(&mddev->pending_writes)==0) + break; + while (mddev->biolist) { + struct bio *bio; + spin_lock_irq(&mddev->write_lock); + bio = mddev->biolist; + mddev->biolist = bio->bi_next ; + bio->bi_next = NULL; + spin_unlock_irq(&mddev->write_lock); + submit_bio(bio->bi_rw, bio); + } + schedule(); + } + finish_wait(&mddev->sb_wait, &wq); } static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) @@ -1382,7 +1448,7 @@ static void md_update_sb(mddev_t * mddev) int sync_req; repeat: - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); sync_req = mddev->in_sync; mddev->utime = get_seconds(); mddev->events ++; @@ -1405,11 +1471,11 @@ repeat: */ if (!mddev->persistent) { mddev->sb_dirty = 0; - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); wake_up(&mddev->sb_wait); return; } - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); dprintk(KERN_INFO "md: updating %s RAID superblock on device (in sync %d)\n", @@ -1437,17 +1503,17 @@ repeat: /* only need to write one superblock... 
*/ break; } - wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); + md_super_wait(mddev); /* if there was a failure, sb_dirty was set to 1, and we re-write super */ - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); if (mddev->in_sync != sync_req|| mddev->sb_dirty == 1) { /* have to write it out again */ - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); goto repeat; } mddev->sb_dirty = 0; - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); wake_up(&mddev->sb_wait); } @@ -1989,6 +2055,7 @@ static int do_md_run(mddev_t * mddev) mddev->recovery = 0; mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ + mddev->barriers_work = 1; /* before we start the array running, initialise the bitmap */ err = bitmap_create(mddev); @@ -2107,7 +2174,7 @@ static int do_md_stop(mddev_t * mddev, int ro) mddev->ro = 1; } else { bitmap_flush(mddev); - wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); + md_super_wait(mddev); if (mddev->ro) set_disk_ro(disk, 0); blk_queue_make_request(mddev->queue, md_fail_request); @@ -3796,13 +3863,13 @@ void md_write_start(mddev_t *mddev, struct bio *bi) atomic_inc(&mddev->writes_pending); if (mddev->in_sync) { - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); if (mddev->in_sync) { mddev->in_sync = 0; mddev->sb_dirty = 1; md_wakeup_thread(mddev->thread); } - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); } wait_event(mddev->sb_wait, mddev->sb_dirty==0); } @@ -4112,7 +4179,7 @@ void md_check_recovery(mddev_t *mddev) if (mddev_trylock(mddev)==0) { int spares =0; - spin_lock(&mddev->write_lock); + spin_lock_irq(&mddev->write_lock); if (mddev->safemode && !atomic_read(&mddev->writes_pending) && !mddev->in_sync && mddev->recovery_cp == MaxSector) { mddev->in_sync = 1; @@ -4120,7 +4187,7 @@ void md_check_recovery(mddev_t *mddev) } if (mddev->safemode == 1) mddev->safemode = 0; - spin_unlock(&mddev->write_lock); + spin_unlock_irq(&mddev->write_lock); if (mddev->sb_dirty) md_update_sb(mddev); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fb6b866c28f..1cbf51fbd43 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -301,7 +301,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); - int mirror, behind; + int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); conf_t *conf = mddev_to_conf(r1_bio->mddev); if (bio->bi_size) @@ -311,47 +311,54 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int if (r1_bio->bios[mirror] == bio) break; - /* - * this branch is our 'one mirror IO has finished' event handler: - */ - if (!uptodate) { - md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); - /* an I/O failed, we can't clear the bitmap */ - set_bit(R1BIO_Degraded, &r1_bio->state); - } else + if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { + set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); + set_bit(R1BIO_BarrierRetry, &r1_bio->state); + r1_bio->mddev->barriers_work = 0; + } else { /* - * Set R1BIO_Uptodate in our master bio, so that - * we will return a good error code for to the higher - * levels even if IO on some other mirrored buffer fails. - * - * The 'master' represents the composite IO operation to - * user-side. So if something waits for IO, then it will - * wait for the 'master' bio. 
+ * this branch is our 'one mirror IO has finished' event handler: */ - set_bit(R1BIO_Uptodate, &r1_bio->state); - - update_head_pos(mirror, r1_bio); - - behind = test_bit(R1BIO_BehindIO, &r1_bio->state); - if (behind) { - if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) - atomic_dec(&r1_bio->behind_remaining); - - /* In behind mode, we ACK the master bio once the I/O has safely - * reached all non-writemostly disks. Setting the Returned bit - * ensures that this gets done only once -- we don't ever want to - * return -EIO here, instead we'll wait */ - - if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && - test_bit(R1BIO_Uptodate, &r1_bio->state)) { - /* Maybe we can return now */ - if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { - struct bio *mbio = r1_bio->master_bio; - PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", - (unsigned long long) mbio->bi_sector, - (unsigned long long) mbio->bi_sector + - (mbio->bi_size >> 9) - 1); - bio_endio(mbio, mbio->bi_size, 0); + r1_bio->bios[mirror] = NULL; + bio_put(bio); + if (!uptodate) { + md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); + /* an I/O failed, we can't clear the bitmap */ + set_bit(R1BIO_Degraded, &r1_bio->state); + } else + /* + * Set R1BIO_Uptodate in our master bio, so that + * we will return a good error code for to the higher + * levels even if IO on some other mirrored buffer fails. + * + * The 'master' represents the composite IO operation to + * user-side. So if something waits for IO, then it will + * wait for the 'master' bio. + */ + set_bit(R1BIO_Uptodate, &r1_bio->state); + + update_head_pos(mirror, r1_bio); + + if (behind) { + if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) + atomic_dec(&r1_bio->behind_remaining); + + /* In behind mode, we ACK the master bio once the I/O has safely + * reached all non-writemostly disks. Setting the Returned bit + * ensures that this gets done only once -- we don't ever want to + * return -EIO here, instead we'll wait */ + + if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && + test_bit(R1BIO_Uptodate, &r1_bio->state)) { + /* Maybe we can return now */ + if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { + struct bio *mbio = r1_bio->master_bio; + PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", + (unsigned long long) mbio->bi_sector, + (unsigned long long) mbio->bi_sector + + (mbio->bi_size >> 9) - 1); + bio_endio(mbio, mbio->bi_size, 0); + } } } } @@ -361,8 +368,16 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int * already. */ if (atomic_dec_and_test(&r1_bio->remaining)) { + if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { + reschedule_retry(r1_bio); + /* Don't dec_pending yet, we want to hold + * the reference over the retry + */ + return 0; + } if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { /* free extra copy of the data pages */ +/* FIXME bio has been freed!!! 
*/ int i = bio->bi_vcnt; while (i--) __free_page(bio->bi_io_vec[i].bv_page); @@ -648,8 +663,9 @@ static int make_request(request_queue_t *q, struct bio * bio) struct bio_list bl; struct page **behind_pages = NULL; const int rw = bio_data_dir(bio); + int do_barriers; - if (unlikely(bio_barrier(bio))) { + if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { bio_endio(bio, bio->bi_size, -EOPNOTSUPP); return 0; } @@ -759,6 +775,10 @@ static int make_request(request_queue_t *q, struct bio * bio) atomic_set(&r1_bio->remaining, 0); atomic_set(&r1_bio->behind_remaining, 0); + do_barriers = bio->bi_rw & BIO_RW_BARRIER; + if (do_barriers) + set_bit(R1BIO_Barrier, &r1_bio->state); + bio_list_init(&bl); for (i = 0; i < disks; i++) { struct bio *mbio; @@ -771,7 +791,7 @@ static int make_request(request_queue_t *q, struct bio * bio) mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; - mbio->bi_rw = WRITE; + mbio->bi_rw = WRITE | do_barriers; mbio->bi_private = r1_bio; if (behind_pages) { @@ -1153,6 +1173,36 @@ static void raid1d(mddev_t *mddev) if (test_bit(R1BIO_IsSync, &r1_bio->state)) { sync_request_write(mddev, r1_bio); unplug = 1; + } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { + /* some requests in the r1bio were BIO_RW_BARRIER + * requests which failed with -ENOTSUPP. Hohumm.. + * Better resubmit without the barrier. + * We know which devices to resubmit for, because + * all others have had their bios[] entry cleared. + */ + int i; + clear_bit(R1BIO_BarrierRetry, &r1_bio->state); + clear_bit(R1BIO_Barrier, &r1_bio->state); + for (i=0; i < conf->raid_disks; i++) + if (r1_bio->bios[i]) { + struct bio_vec *bvec; + int j; + + bio = bio_clone(r1_bio->master_bio, GFP_NOIO); + /* copy pages from the failed bio, as + * this might be a write-behind device */ + __bio_for_each_segment(bvec, bio, j, 0) + bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page; + bio_put(r1_bio->bios[i]); + bio->bi_sector = r1_bio->sector + + conf->mirrors[i].rdev->data_offset; + bio->bi_bdev = conf->mirrors[i].rdev->bdev; + bio->bi_end_io = raid1_end_write_request; + bio->bi_rw = WRITE; + bio->bi_private = r1_bio; + r1_bio->bios[i] = bio; + generic_make_request(bio); + } } else { int disk; bio = r1_bio->bios[r1_bio->read_disk]; -- cgit v1.2.3 From 19133a4298223422742db7b5f940e8c54c6a1846 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:35 -0800 Subject: [PATCH] md: Remove attempt to use dynamic names in sysfs for component devices on an MD array. With version-0.90 superblock, component devices on an md device to not have any stable name related to the array -(version-1 assigns a fixed index when a device is added to an array, and this remains despit any hot-swap). The intial code for making these devices appear in sysfs used dynamic names, which would change whenever a hot-spare was swapped for a failed or missing device. This turns out not to be practical in sysfs for a number of reasons. This patch changes then naming of component devices to be based on the result of 'bdevname'. This is stable and should be unique. 
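As a concrete illustration of the rename (a toy userspace sketch; the device name, slot number and sysfs prefix are made-up examples), the per-component entry under the array's 'md' sysfs directory goes from an index-based name, which could change when a hot-spare replaced a device, to a bdevname-based one as done in the diff that follows:

#include <stdio.h>

int main(void)
{
	const char *b = "sdb1";	/* example result of bdevname() */
	int desc_nr = 3;	/* old key: superblock slot, not stable across hot-swap */
	char old_name[32], new_name[32];

	snprintf(old_name, sizeof(old_name), "dev%d", desc_nr);	/* old scheme */
	snprintf(new_name, sizeof(new_name), "dev-%s", b);		/* this patch */

	printf("before: .../md/%s\n", old_name);	/* e.g. .../md/dev3     */
	printf("after:  .../md/%s\n", new_name);	/* e.g. .../md/dev-sdb1 */
	return 0;
}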
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 40 ++++++---------------------------------- 1 file changed, 6 insertions(+), 34 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 199016932de..8238c1c0ec0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -762,7 +762,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) struct list_head *tmp; mdk_rdev_t *rdev2; int next_spare = mddev->raid_disks; - char nm[20]; + /* make rdev->sb match mddev data.. * @@ -776,7 +776,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) */ int i; int active=0, working=0,failed=0,spare=0,nr_disks=0; - unsigned int fixdesc=0; rdev->sb_size = MD_SB_BYTES; @@ -830,16 +829,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) desc_nr = rdev2->raid_disk; else desc_nr = next_spare++; - if (desc_nr != rdev2->desc_nr) { - fixdesc |= (1 << desc_nr); - rdev2->desc_nr = desc_nr; - if (rdev2->raid_disk >= 0) { - sprintf(nm, "rd%d", rdev2->raid_disk); - sysfs_remove_link(&mddev->kobj, nm); - } - sysfs_remove_link(&rdev2->kobj, "block"); - kobject_del(&rdev2->kobj); - } + rdev2->desc_nr = desc_nr; d = &sb->disks[rdev2->desc_nr]; nr_disks++; d->number = rdev2->desc_nr; @@ -866,25 +856,6 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) if (test_bit(WriteMostly, &rdev2->flags)) d->state |= (1<desc_nr)) { - snprintf(rdev2->kobj.name, KOBJ_NAME_LEN, "dev%d", - rdev2->desc_nr); - /* kobject_add gets a ref on the parent, so - * we have to drop the one we already have - */ - kobject_add(&rdev2->kobj); - kobject_put(rdev->kobj.parent); - sysfs_create_link(&rdev2->kobj, - &rdev2->bdev->bd_disk->kobj, - "block"); - if (rdev2->raid_disk >= 0) { - sprintf(nm, "rd%d", rdev2->raid_disk); - sysfs_create_link(&mddev->kobj, - &rdev2->kobj, nm); - } - } /* now set the "removed" and "faulty" bits on any missing devices */ for (i=0 ; i < mddev->raid_disks ; i++) { mdp_disk_t *d = &sb->disks[i]; @@ -1237,13 +1208,14 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) if (find_rdev_nr(mddev, rdev->desc_nr)) return -EBUSY; } + bdevname(rdev->bdev,b); + if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0) + return -ENOMEM; list_add(&rdev->same_set, &mddev->disks); rdev->mddev = mddev; - printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b)); + printk(KERN_INFO "md: bind<%s>\n", b); - rdev->kobj.k_name = NULL; - snprintf(rdev->kobj.name, KOBJ_NAME_LEN, "dev%d", rdev->desc_nr); rdev->kobj.parent = &mddev->kobj; kobject_add(&rdev->kobj); -- cgit v1.2.3 From f91de92ed6bfb70a3ff607558c910c7bf34d45e9 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:36 -0800 Subject: [PATCH] md: allow md arrays to be started read-only (module parameter). When an md array is started, the superblock will be written, and resync may commense. This is not good if you want to be completely read-only as, for example, when preparing to resume from a suspend-to-disk image. So introduce a module parameter "start_ro" which can be set to '1' at boot, at module load, or via /sys/module/md_mod/parameters/start_ro When this is set, new arrays get an 'auto-ro' mode, which disables all internal io (superblock updates, resync, recovery) and is automatically switched to 'rw' when the first write request arrives. The array can be set to true 'ro' mode using 'mdadm -r' before the first write request, or resync can be started without a write using 'mdadm -w'. 
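The behaviour described above can be summarised as a small state machine on the 'ro' field. The sketch below is a userspace model, not the kernel code, and the enum names are invented for clarity: 0 means read-write, 1 means truly read-only, and 2 means auto-read-only, which silently switches to read-write on the first write request (at which point superblock updates and any pending resync may begin).

/* Toy model of the auto-read-only state introduced by this patch. */
#include <stdio.h>

enum { MD_RW = 0, MD_RDONLY = 1, MD_AUTO_RO = 2 };	/* illustrative names */

static int array_ro = MD_AUTO_RO;	/* as if assembled with start_ro=1 */

static void write_start(void)
{
	if (array_ro == MD_RDONLY) {
		printf("write rejected: array is truly read-only\n");
		return;
	}
	if (array_ro == MD_AUTO_RO) {
		array_ro = MD_RW;	/* first write flips auto-ro to rw */
		printf("auto-read-only -> read-write; internal io may now start\n");
	}
	printf("write proceeds\n");
}

int main(void)
{
	write_start();	/* first write: switches mode */
	write_start();	/* subsequent writes: ordinary rw path */
	return 0;
}

From userspace the parameter would typically be set before assembly, e.g. by writing 1 to /sys/module/md_mod/parameters/start_ro, with 'mdadm -r' or 'mdadm -w' used afterwards as described above.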
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 8 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 8238c1c0ec0..d002b8301fc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -131,6 +131,8 @@ static ctl_table raid_root_table[] = { static struct block_device_operations md_fops; +static int start_readonly; + /* * Enables to iterate over all existing md arrays * all_mddevs_lock protects this list. @@ -2029,6 +2031,9 @@ static int do_md_run(mddev_t * mddev) mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ mddev->barriers_work = 1; + if (start_readonly) + mddev->ro = 2; /* read-only, but switch on first write */ + /* before we start the array running, initialise the bitmap */ err = bitmap_create(mddev); if (err) @@ -2141,7 +2146,7 @@ static int do_md_stop(mddev_t * mddev, int ro) if (ro) { err = -ENXIO; - if (mddev->ro) + if (mddev->ro==1) goto out; mddev->ro = 1; } else { @@ -3258,12 +3263,22 @@ static int md_ioctl(struct inode *inode, struct file *file, /* * The remaining ioctls are changing the state of the - * superblock, so we do not allow read-only arrays - * here: + * superblock, so we do not allow them on read-only arrays. + * However non-MD ioctls (e.g. get-size) will still come through + * here and hit the 'default' below, so only disallow + * 'md' ioctls, and switch to rw mode if started auto-readonly. */ - if (mddev->ro) { - err = -EROFS; - goto abort_unlock; + if (_IOC_TYPE(cmd) == MD_MAJOR && + mddev->ro && mddev->pers) { + if (mddev->ro == 2) { + mddev->ro = 0; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + + } else { + err = -EROFS; + goto abort_unlock; + } } switch (cmd) @@ -3651,8 +3666,10 @@ static int md_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%s : %sactive", mdname(mddev), mddev->pers ? 
"" : "in"); if (mddev->pers) { - if (mddev->ro) + if (mddev->ro==1) seq_printf(seq, " (read-only)"); + if (mddev->ro==2) + seq_printf(seq, "(auto-read-only)"); seq_printf(seq, " %s", mddev->pers->name); } @@ -3696,7 +3713,9 @@ static int md_seq_show(struct seq_file *seq, void *v) status_resync (seq, mddev); seq_printf(seq, "\n "); } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) - seq_printf(seq, " resync=DELAYED\n "); + seq_printf(seq, "\tresync=DELAYED\n "); + else if (mddev->recovery_cp < MaxSector) + seq_printf(seq, "\tresync=PENDING\n "); } else seq_printf(seq, "\n "); @@ -3833,6 +3852,13 @@ void md_write_start(mddev_t *mddev, struct bio *bi) if (bio_data_dir(bi) != WRITE) return; + BUG_ON(mddev->ro == 1); + if (mddev->ro == 2) { + /* need to switch to read/write */ + mddev->ro = 0; + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } atomic_inc(&mddev->writes_pending); if (mddev->in_sync) { spin_lock_irq(&mddev->write_lock); @@ -4431,6 +4457,23 @@ static __exit void md_exit(void) module_init(md_init) module_exit(md_exit) +static int get_ro(char *buffer, struct kernel_param *kp) +{ + return sprintf(buffer, "%d", start_readonly); +} +static int set_ro(const char *val, struct kernel_param *kp) +{ + char *e; + int num = simple_strtoul(val, &e, 10); + if (*val && (*e == '\0' || *e == '\n')) { + start_readonly = num; + return 0;; + } + return -EINVAL; +} + +module_param_call(start_ro, set_ro, get_ro, NULL, 0600); + EXPORT_SYMBOL(register_md_personality); EXPORT_SYMBOL(unregister_md_personality); EXPORT_SYMBOL(md_error); -- cgit v1.2.3 From f637b9f9fc195e4f4635faf495fd8b462c21b411 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:37 -0800 Subject: [PATCH] md: make sure /block link in /sys/.../md/ goes to correct devices If a block_device is a partition, then it's kobject is bdev->bd_part->kobj otherwise (if it is a full device), the kobject is bdev->bd_disk->kobj As md wants back-links to the correct object (whether partition or not), we need to respect this difference... (Thus current code shows a link to the whole device, whether we are using a partition or not, which is wrong). Signed-off-by: Neil Brown Acked-by: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index d002b8301fc..2b592897609 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1182,6 +1182,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) { mdk_rdev_t *same_pdev; char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; + struct kobject *ko; if (rdev->mddev) { MD_BUG(); @@ -1221,7 +1222,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) rdev->kobj.parent = &mddev->kobj; kobject_add(&rdev->kobj); - sysfs_create_link(&rdev->kobj, &rdev->bdev->bd_disk->kobj, "block"); + if (rdev->bdev->bd_part) + ko = &rdev->bdev->bd_part->kobj; + else + ko = &rdev->bdev->bd_disk->kobj; + sysfs_create_link(&rdev->kobj, ko, "block"); return 0; } -- cgit v1.2.3 From e5de485f00d6e6c15f487869faaf999c708f69b1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:38 -0800 Subject: [PATCH] md: make manual repair work for raid1 Raid1 currently optimises resync using the intent bitmap etc. This optimisation is not wanted when we explicitly request a repair through sysfs, so add appropriate checks. 
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid1.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1cbf51fbd43..2da9d3ba902 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1310,7 +1310,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i * This call the bitmap_start_sync doesn't actually record anything */ if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && - !conf->fullsync) { + !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* We can skip this block, and probably several more */ *skipped = 1; return sync_blocks; @@ -1387,7 +1387,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i still_degraded = 1; continue; } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) || - sector_nr + RESYNC_SECTORS > mddev->recovery_cp) { + sector_nr + RESYNC_SECTORS > mddev->recovery_cp || + test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { bio->bi_rw = WRITE; bio->bi_end_io = end_sync_write; write_targets ++; @@ -1421,8 +1422,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i break; if (sync_blocks == 0) { if (!bitmap_start_sync(mddev->bitmap, sector_nr, - &sync_blocks, still_degraded) && - !conf->fullsync) + &sync_blocks, still_degraded) && + !conf->fullsync && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) break; if (sync_blocks < (PAGE_SIZE>>9)) BUG(); -- cgit v1.2.3 From 3855ad9f398de88a3290f7dd799633c4b73903ea Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:38 -0800 Subject: [PATCH] md: make sure a user-request sync of raid5 ignores intent bitmap A sync of raid5 usually ignores blocks which the bitmap says are in-sync. But a user-requested check or repair should not ignore these. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/raid5.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/md') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d1c488b008a..51003b008de 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1645,6 +1645,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i return rv; } if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { /* we can skip this block, and probably more */ sync_blocks /= STRIPE_SECTORS; -- cgit v1.2.3 From 96de1e663cda65ba9275afb1bc007f34e5068ba1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:39 -0800 Subject: [PATCH] md: fix some locking and module refcounting issues with md's use of sysfs 1/ I really should be using the __ATTR macros for defining attributes, so that the .owner field gets set properly, otherwise modules can be removed while sysfs files are open. This also involves some name changes of _show routines. 2/ Always lock the mddev (against reconfiguration) for all sysfs attribute access. This easily avoids certain races and is completely consistent with other interfaces (ioctl and /proc/mdstat both always lock against reconfiguration). 3/ raid5 attributes must check that the 'conf' structure actually exists (the array could have been stopped while an attribute file was open). 
4/ A missing 'kfree' from when the raid5_conf_t was converted to have a kobject embedded, and then converted back again. Signed-off-by: Neil Brown Acked-by: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 62 ++++++++++++++++++++++++------------------------------ drivers/md/raid5.c | 30 +++++++++++++++----------- 2 files changed, 46 insertions(+), 46 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 2b592897609..b2d8813ed71 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1504,7 +1504,7 @@ struct rdev_sysfs_entry { }; static ssize_t -rdev_show_state(mdk_rdev_t *rdev, char *page) +state_show(mdk_rdev_t *rdev, char *page) { char *sep = ""; int len=0; @@ -1525,13 +1525,11 @@ rdev_show_state(mdk_rdev_t *rdev, char *page) return len+sprintf(page+len, "\n"); } -static struct rdev_sysfs_entry rdev_state = { - .attr = {.name = "state", .mode = S_IRUGO }, - .show = rdev_show_state, -}; +static struct rdev_sysfs_entry +rdev_state = __ATTR_RO(state); static ssize_t -rdev_show_super(mdk_rdev_t *rdev, char *page) +super_show(mdk_rdev_t *rdev, char *page) { if (rdev->sb_loaded && rdev->sb_size) { memcpy(page, page_address(rdev->sb_page), rdev->sb_size); @@ -1539,10 +1537,8 @@ rdev_show_super(mdk_rdev_t *rdev, char *page) } else return 0; } -static struct rdev_sysfs_entry rdev_super = { - .attr = {.name = "super", .mode = S_IRUGO }, - .show = rdev_show_super, -}; +static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); + static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_super.attr, @@ -1728,7 +1724,7 @@ static void analyze_sbs(mddev_t * mddev) } static ssize_t -md_show_level(mddev_t *mddev, char *page) +level_show(mddev_t *mddev, char *page) { mdk_personality_t *p = mddev->pers; if (p == NULL) @@ -1739,21 +1735,15 @@ md_show_level(mddev_t *mddev, char *page) return sprintf(page, "%s\n", p->name); } -static struct md_sysfs_entry md_level = { - .attr = {.name = "level", .mode = S_IRUGO }, - .show = md_show_level, -}; +static struct md_sysfs_entry md_level = __ATTR_RO(level); static ssize_t -md_show_rdisks(mddev_t *mddev, char *page) +raid_disks_show(mddev_t *mddev, char *page) { return sprintf(page, "%d\n", mddev->raid_disks); } -static struct md_sysfs_entry md_raid_disks = { - .attr = {.name = "raid_disks", .mode = S_IRUGO }, - .show = md_show_rdisks, -}; +static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks); static ssize_t md_show_scan(mddev_t *mddev, char *page) @@ -1782,10 +1772,10 @@ md_store_scan(mddev_t *mddev, const char *page, size_t len) if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; - down(&mddev->reconfig_sem); + if (mddev->pers && mddev->pers->sync_request) canscan=1; - up(&mddev->reconfig_sem); + if (!canscan) return -EINVAL; @@ -1801,22 +1791,18 @@ md_store_scan(mddev_t *mddev, const char *page, size_t len) } static ssize_t -md_show_mismatch(mddev_t *mddev, char *page) +mismatch_cnt_show(mddev_t *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long) mddev->resync_mismatches); } -static struct md_sysfs_entry md_scan_mode = { - .attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR }, - .show = md_show_scan, - .store = md_store_scan, -}; +static struct md_sysfs_entry +md_scan_mode = __ATTR(scan_mode, S_IRUGO|S_IWUSR, md_show_scan, md_store_scan); -static struct md_sysfs_entry md_mismatches = { - .attr = {.name = "mismatch_cnt", .mode = S_IRUGO }, - .show = 
md_show_mismatch, -}; + +static struct md_sysfs_entry +md_mismatches = __ATTR_RO(mismatch_cnt); static struct attribute *md_default_attrs[] = { &md_level.attr, @@ -1831,10 +1817,14 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + ssize_t rv; if (!entry->show) return -EIO; - return entry->show(mddev, page); + mddev_lock(mddev); + rv = entry->show(mddev, page); + mddev_unlock(mddev); + return rv; } static ssize_t @@ -1843,10 +1833,14 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, { struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); + ssize_t rv; if (!entry->store) return -EIO; - return entry->store(mddev, page, length); + mddev_lock(mddev); + rv = entry->store(mddev, page, length); + mddev_unlock(mddev); + return rv; } static void md_free(struct kobject *ko) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 51003b008de..e2a40283e32 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1746,7 +1746,10 @@ static ssize_t raid5_show_stripe_cache_size(mddev_t *mddev, char *page) { raid5_conf_t *conf = mddev_to_conf(mddev); - return sprintf(page, "%d\n", conf->max_nr_stripes); + if (conf) + return sprintf(page, "%d\n", conf->max_nr_stripes); + else + return 0; } static ssize_t @@ -1757,6 +1760,8 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) int new; if (len >= PAGE_SIZE) return -EINVAL; + if (!conf) + return -ENODEV; new = simple_strtoul(page, &end, 10); if (!*page || (*end && *end != '\n') ) @@ -1777,23 +1782,23 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) return len; } -static struct md_sysfs_entry raid5_stripecache_size = { - .attr = {.name = "stripe_cache_size", .mode = S_IRUGO | S_IWUSR }, - .show = raid5_show_stripe_cache_size, - .store = raid5_store_stripe_cache_size, -}; +static struct md_sysfs_entry +raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, + raid5_show_stripe_cache_size, + raid5_store_stripe_cache_size); static ssize_t -raid5_show_stripe_cache_active(mddev_t *mddev, char *page) +stripe_cache_active_show(mddev_t *mddev, char *page) { raid5_conf_t *conf = mddev_to_conf(mddev); - return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); + if (conf) + return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); + else + return 0; } -static struct md_sysfs_entry raid5_stripecache_active = { - .attr = {.name = "stripe_cache_active", .mode = S_IRUGO}, - .show = raid5_show_stripe_cache_active, -}; +static struct md_sysfs_entry +raid5_stripecache_active = __ATTR_RO(stripe_cache_active); static struct attribute *raid5_attrs[] = { &raid5_stripecache_size.attr, @@ -1981,6 +1986,7 @@ static int stop(mddev_t *mddev) free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); + kfree(conf); mddev->private = NULL; return 0; } -- cgit v1.2.3 From 411036fa1924f5e5b0f7f9d04ae5d8cdc72fb839 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:40 -0800 Subject: [PATCH] md: split off some md attributes in sysfs to a separate group Some, but not all, md array support data redundancy and hence support checking and restoring that redundancy (resync, rebuild). 
Some attributes apply specifically to functions involving this redundancy, and so should only appear for md arrays for which they are meaningful, i.e. they should not appear for raid0, linear, multipath, or faulty. This patch separates these into a distinct group and creates the group only if the personality supports sync_request. Signed-off-by: Neil Brown Acked-by: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index b2d8813ed71..292dad31d5e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1807,10 +1807,19 @@ md_mismatches = __ATTR_RO(mismatch_cnt); static struct attribute *md_default_attrs[] = { &md_level.attr, &md_raid_disks.attr, + NULL, +}; + +static struct attribute *md_redundancy_attrs[] = { &md_scan_mode.attr, &md_mismatches.attr, NULL, }; +static struct attribute_group md_redundancy_group = { + .name = NULL, + .attrs = md_redundancy_attrs, +}; + static ssize_t md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) @@ -2047,6 +2056,8 @@ static int do_md_run(mddev_t * mddev) bitmap_destroy(mddev); return err; } + if (mddev->pers->sync_request) + sysfs_create_group(&mddev->kobj, &md_redundancy_group); atomic_set(&mddev->writes_pending,0); mddev->safemode = 0; mddev->safemode_timer.function = md_safemode_timeout; @@ -2155,6 +2166,9 @@ static int do_md_stop(mddev_t * mddev, int ro) set_disk_ro(disk, 0); blk_queue_make_request(mddev->queue, md_fail_request); mddev->pers->stop(mddev); + if (mddev->pers->sync_request) + sysfs_remove_group(&mddev->kobj, &md_redundancy_group); + module_put(mddev->pers->owner); mddev->pers = NULL; if (mddev->ro) -- cgit v1.2.3 From 8e1b39d623359e5ef7983a2bd0fb676be45cba31 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:41 -0800 Subject: [PATCH] md: only try to print recovery/resync status for personalities that support recovery The introduction of 'resync=PENDING' (for read-only devices) caused that message to appear for non-syncable arrays like raid0 and linear. The simplest thing is not to try to print any resync info unless the personality clearly supports it. 
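To show the guard in isolation: a stand-alone, user-space sketch of the status logic the patch below implements. The struct and field names are simplified stand-ins for the corresponding mddev fields, not the kernel's real types.

    #include <stdio.h>

    /* Reduced stand-ins for the mddev fields consulted when printing status. */
    struct toy_mddev {
            int has_sync_request;            /* personality provides ->sync_request */
            unsigned long long curr_resync;
            unsigned long long recovery_cp;
            unsigned long long max_sector;
    };

    static void print_resync_status(const struct toy_mddev *m)
    {
            if (!m->has_sync_request)
                    return;                  /* raid0/linear: print nothing at all */
            if (m->curr_resync > 2)
                    printf("\tresync in progress\n");
            else if (m->curr_resync == 1 || m->curr_resync == 2)
                    printf("\tresync=DELAYED\n");
            else if (m->recovery_cp < m->max_sector)
                    printf("\tresync=PENDING\n");
    }

    int main(void)
    {
            struct toy_mddev raid0 = { 0, 0, 0, 1000 };  /* no resync support */
            struct toy_mddev raid1 = { 1, 0, 0, 1000 };  /* auto-read-only, sync pending */

            print_resync_status(&raid0);                 /* prints nothing */
            print_resync_status(&raid1);                 /* prints resync=PENDING */
            return 0;
    }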
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 292dad31d5e..47b8685d4bd 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3722,13 +3722,15 @@ static int md_seq_show(struct seq_file *seq, void *v) if (mddev->pers) { mddev->pers->status (seq, mddev); seq_printf(seq, "\n "); - if (mddev->curr_resync > 2) { - status_resync (seq, mddev); - seq_printf(seq, "\n "); - } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) - seq_printf(seq, "\tresync=DELAYED\n "); - else if (mddev->recovery_cp < MaxSector) - seq_printf(seq, "\tresync=PENDING\n "); + if (mddev->pers->sync_request) { + if (mddev->curr_resync > 2) { + status_resync (seq, mddev); + seq_printf(seq, "\n "); + } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) + seq_printf(seq, "\tresync=DELAYED\n "); + else if (mddev->recovery_cp < MaxSector) + seq_printf(seq, "\tresync=PENDING\n "); + } } else seq_printf(seq, "\n "); -- cgit v1.2.3 From fd9d49cac46f5758d513ccf831b599dd4412546f Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:42 -0800 Subject: [PATCH] md: ignore auto-readonly flag for arrays where it isn't meaningful The 'auto-readonly' flag (which suppresses resync and superblock updates until the first write) is not meaningful for personalities that don't support resync or superblock writes (raid0, linear, etc). So clear the setting early to avoid it confusing anything - e.g. appearing in /proc/mdstat. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 47b8685d4bd..25f2bbfe6a2 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2058,6 +2058,9 @@ static int do_md_run(mddev_t * mddev) } if (mddev->pers->sync_request) sysfs_create_group(&mddev->kobj, &md_redundancy_group); + else if (mddev->ro == 2) /* auto-readonly not meaningful */ + mddev->ro = 0; + atomic_set(&mddev->writes_pending,0); mddev->safemode = 0; mddev->safemode_timer.function = md_safemode_timeout; -- cgit v1.2.3 From 787453c2397edcc3261efebb661739acd8c38547 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:43 -0800 Subject: [PATCH] md: complete conversion of md to use kthreads There are a few loose ends following the conversion of md to use kthreads: - Some fields in mdk_thread_t that aren't needed (the kthread infrastructure does its own completion and manages its own name). - thread->run is now never NULL, so no need to check it - Some tests for signal_pending that aren't needed (as we don't use signals to stop threads any more) - Some flush_signals are not needed - Some waits are interruptible and don't need to be. 
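As background for that cleanup, a minimal self-contained sketch of the plain kthread pattern md now relies on: kthread_run() starts the thread with no completion handshake, kthread_should_stop() controls the loop, and kthread_stop() ends it. All demo_* names are invented for illustration and nothing here is taken from md itself.

    #include <linux/bitops.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/module.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static unsigned long demo_flags;
    #define DEMO_WAKEUP 0

    static struct task_struct *demo_task;

    static int demo_thread(void *unused)
    {
            /* kthread_run() has already started us; no completion handshake needed. */
            while (!kthread_should_stop()) {
                    wait_event_timeout(demo_wq,
                                       test_bit(DEMO_WAKEUP, &demo_flags) ||
                                       kthread_should_stop(),
                                       HZ);
                    clear_bit(DEMO_WAKEUP, &demo_flags);
                    /* periodic work would go here */
            }
            return 0;
    }

    static int __init demo_init(void)
    {
            demo_task = kthread_run(demo_thread, NULL, "demo_thread");
            return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
    }

    static void __exit demo_exit(void)
    {
            kthread_stop(demo_task);  /* makes kthread_should_stop() return true */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");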
Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 25f2bbfe6a2..097ae1b5484 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3424,21 +3424,17 @@ static int md_thread(void * arg) */ allow_signal(SIGKILL); - complete(thread->event); while (!kthread_should_stop()) { - void (*run)(mddev_t *); - wait_event_interruptible_timeout(thread->wqueue, - test_bit(THREAD_WAKEUP, &thread->flags) - || kthread_should_stop(), - thread->timeout); + wait_event_timeout(thread->wqueue, + test_bit(THREAD_WAKEUP, &thread->flags) + || kthread_should_stop(), + thread->timeout); try_to_freeze(); clear_bit(THREAD_WAKEUP, &thread->flags); - run = thread->run; - if (run) - run(thread->mddev); + thread->run(thread->mddev); } return 0; @@ -3457,7 +3453,6 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, const char *name) { mdk_thread_t *thread; - struct completion event; thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL); if (!thread) @@ -3466,18 +3461,14 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, memset(thread, 0, sizeof(mdk_thread_t)); init_waitqueue_head(&thread->wqueue); - init_completion(&event); - thread->event = &event; thread->run = run; thread->mddev = mddev; - thread->name = name; thread->timeout = MAX_SCHEDULE_TIMEOUT; thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); if (IS_ERR(thread->tsk)) { kfree(thread); return NULL; } - wait_for_completion(&event); return thread; } @@ -3941,9 +3932,7 @@ static void md_do_sync(mddev_t *mddev) mddev->curr_resync = 2; try_again: - if (signal_pending(current) || - kthread_should_stop()) { - flush_signals(current); + if (kthread_should_stop()) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); goto skip; } @@ -3963,9 +3952,8 @@ static void md_do_sync(mddev_t *mddev) * time 'round when curr_resync == 2 */ continue; - prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); - if (!signal_pending(current) && - !kthread_should_stop() && + prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE); + if (!kthread_should_stop() && mddev2->curr_resync >= mddev->curr_resync) { printk(KERN_INFO "md: delaying resync of %s" " until %s has finished resync (they" @@ -4074,13 +4062,12 @@ static void md_do_sync(mddev_t *mddev) } - if (signal_pending(current) || kthread_should_stop()) { + if (kthread_should_stop()) { /* * got a signal, exit. */ printk(KERN_INFO "md: md_do_sync() got signal ... exiting\n"); - flush_signals(current); set_bit(MD_RECOVERY_INTR, &mddev->recovery); goto out; } @@ -4102,7 +4089,7 @@ static void md_do_sync(mddev_t *mddev) if (currspeed > sysctl_speed_limit_min) { if ((currspeed > sysctl_speed_limit_max) || !is_mddev_idle(mddev)) { - msleep_interruptible(250); + msleep(250); goto repeat; } } -- cgit v1.2.3 From 7eec314d7512d5281742263cff3853b43df431db Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:44 -0800 Subject: [PATCH] md: improve 'scan_mode' and rename it to 'sync_action' The current sync_action for an array can be one of idle - nothing happening resync - redundancy being recalculated recover - missing device being recovered to a spare check - user-initiated check of redundancy repair - like resync but user-initiated and ignores bitmap optimisation. 
Each of these strings can also be written to the 'sync_action' file to cause that action to happen (if appropriate). While 'sync' is not technically correct, as a recovery is *not* a 'sync', I think it is the most serviceable word here. Also 'action' is a stronger word than 'mode'. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 47 ++++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 097ae1b5484..023aecd0295 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1746,9 +1746,9 @@ raid_disks_show(mddev_t *mddev, char *page) static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks); static ssize_t -md_show_scan(mddev_t *mddev, char *page) +action_show(mddev_t *mddev, char *page) { - char *type = "none"; + char *type = "idle"; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { @@ -1765,27 +1765,36 @@ } static ssize_t -md_store_scan(mddev_t *mddev, const char *page, size_t len) +action_store(mddev_t *mddev, const char *page, size_t len) { - int canscan=0; + if (!mddev->pers || !mddev->pers->sync_request) + return -EINVAL; + + if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) { + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + md_unregister_thread(mddev->sync_thread); + mddev->sync_thread = NULL; + mddev->recovery = 0; + } + return len; + } if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; - - if (mddev->pers && mddev->pers->sync_request) canscan=1; - - if (!canscan) return -EINVAL; - - if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) set_bit(MD_RECOVERY_CHECK, &mddev->recovery); - else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) return -EINVAL; - set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); - set_bit(MD_RECOVERY_SYNC, &mddev->recovery); - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 || strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0) + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + else { + if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) + set_bit(MD_RECOVERY_CHECK, &mddev->recovery); + else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) + return -EINVAL; + set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); + set_bit(MD_RECOVERY_SYNC, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + } md_wakeup_thread(mddev->thread); return len; } @@ -1798,7 +1807,7 @@ mismatch_cnt_show(mddev_t *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long) mddev->resync_mismatches); } static struct md_sysfs_entry -md_scan_mode = __ATTR(scan_mode, S_IRUGO|S_IWUSR, md_show_scan, md_store_scan); +md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); static struct md_sysfs_entry -- cgit v1.2.3 From bb636547b02411ca5eef87b1d030ea3fc090a717 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 8 Nov 2005 21:39:45 -0800 Subject: [PATCH] md: document sysfs usage of md, and make a couple of small refinements Document in Documentation/md.txt the files that now appear in sysfs, and make a couple of small refinements to exactly when 'level' and 'raid_disks' are empty, to make it match the documentation. 
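As a usage illustration only, not part of the patch: a small user-space sketch that reads some of the md sysfs files this series introduces and documents. The array name "md0" and the sysfs mount point are assumptions, and 'sync_action'/'mismatch_cnt' only exist when the personality supports resync, as arranged by the earlier patch in this series.

    #include <stdio.h>

    /* Print one md sysfs attribute of a given array (path layout assumed). */
    static void show_attr(const char *array, const char *attr)
    {
            char path[256], buf[128];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/block/%s/md/%s", array, attr);
            f = fopen(path, "r");
            if (!f) {
                    printf("%-14s <not present>\n", attr);
                    return;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("%-14s %s", attr, buf);   /* buf keeps its trailing newline */
            fclose(f);
    }

    int main(void)
    {
            const char *attrs[] = { "level", "raid_disks", "sync_action", "mismatch_cnt" };
            size_t i;

            for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
                    show_attr("md0", attrs[i]);      /* "md0" is a hypothetical array */
            return 0;
    }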
Signed-off-by: Neil Brown Acked-by: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index 023aecd0295..adf960d8a7c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1727,7 +1727,7 @@ static ssize_t level_show(mddev_t *mddev, char *page) { mdk_personality_t *p = mddev->pers; - if (p == NULL) + if (p == NULL && mddev->raid_disks == 0) return 0; if (mddev->level >= 0) return sprintf(page, "RAID-%d\n", mddev->level); @@ -1740,6 +1740,8 @@ static struct md_sysfs_entry md_level = __ATTR_RO(level); static ssize_t raid_disks_show(mddev_t *mddev, char *page) { + if (mddev->raid_disks == 0) + return 0; return sprintf(page, "%d\n", mddev->raid_disks); } -- cgit v1.2.3 From e8a0033451f7972169b2f375be34d9d805ad8687 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 15 Nov 2005 00:09:11 -0800 Subject: [PATCH] md: mark START_ARRAY deprecated with a date This was marked deprecated "after 2.6" back in the 2.5 days. But now it seems there isn't going to be any "after 2.6", and we deprecate by date now. So set a date. Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index adf960d8a7c..a9f032e341c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3156,7 +3156,7 @@ static int md_ioctl(struct inode *inode, struct file *file, if (cnt > 0 ) { printk(KERN_WARNING "md: %s(pid %d) used deprecated START_ARRAY ioctl. " - "This will not be supported beyond 2.6\n", + "This will not be supported beyond July 2006\n", current->comm, current->pid); cnt--; } -- cgit v1.2.3 From 93588e2284b6be1873cc0bb7fbf0947bdbf72830 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 15 Nov 2005 00:09:12 -0800 Subject: [PATCH] md: make md threads interruptible again Despite the fact that md threads don't need to be signalled, and won't respond to signals anyway, we need to have an 'interruptible' wait, else they stay in 'D' state and add to the load average. (akpm: the signal_pending() test is unneeded - we'll fix that up in the next round. For now, leave it there because that's how the code used to be). Signed-off-by: Neil Brown Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/md/md.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index a9f032e341c..f3fed662f32 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3437,10 +3437,19 @@ static int md_thread(void * arg) allow_signal(SIGKILL); while (!kthread_should_stop()) { - wait_event_timeout(thread->wqueue, - test_bit(THREAD_WAKEUP, &thread->flags) - || kthread_should_stop(), - thread->timeout); + /* We need to wait INTERRUPTIBLE so that + * we don't add to the load-average. + * That means we need to be sure no signals are + * pending + */ + if (signal_pending(current)) + flush_signals(current); + + wait_event_interruptible_timeout + (thread->wqueue, + test_bit(THREAD_WAKEUP, &thread->flags) + || kthread_should_stop(), + thread->timeout); try_to_freeze(); clear_bit(THREAD_WAKEUP, &thread->flags); -- cgit v1.2.3
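To close, a stand-alone sketch (all names invented) of the idiom this last patch restores: wait interruptibly so the thread sleeps in 'S' state and does not inflate the load average, but flush any stray signals first because the thread never acts on them. On 2.6-era kernels signal_pending() and flush_signals() come from <linux/sched.h>; newer trees need <linux/sched/signal.h>.

    #include <linux/bitops.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(idle_wq);
    static unsigned long idle_flags;
    #define IDLE_WAKEUP 0

    static int idle_loop(void *unused)
    {
            while (!kthread_should_stop()) {
                    /* An uninterruptible wait would show as 'D' state and be
                     * counted in the load average, so wait interruptibly ... */
                    if (signal_pending(current))
                            flush_signals(current);  /* ... after discarding stray signals */

                    wait_event_interruptible_timeout(idle_wq,
                                                     test_bit(IDLE_WAKEUP, &idle_flags) ||
                                                     kthread_should_stop(),
                                                     HZ);
                    clear_bit(IDLE_WAKEUP, &idle_flags);
                    /* periodic work would go here */
            }
            return 0;
    }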