Diffstat (limited to 'block')
-rw-r--r--  block/cfq-iosched.c | 46
-rw-r--r--  block/elevator.c    |  7
-rw-r--r--  block/genhd.c       |  2
-rw-r--r--  block/ll_rw_blk.c   |  6
4 files changed, 32 insertions(+), 29 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b6491c020f2..f92ba2a869b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -532,6 +532,12 @@ static void cfq_add_rq_rb(struct request *rq)
if (!cfq_cfqq_on_rr(cfqq))
cfq_add_cfqq_rr(cfqd, cfqq);
+
+ /*
+ * check if this request is a better next-serve candidate
+ */
+ cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+ BUG_ON(!cfqq->next_rq);
}
static inline void
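The block added above is not new logic: it is the next-serve candidate check moved up from cfq_rq_enqueued() (removed further down in this patch) so the candidate is refreshed on every rb-tree insert, not only on the enqueue path. For readers unfamiliar with cfq_choose_req(), it returns whichever of two requests looks cheaper to serve next, roughly by seek distance from the last serviced sector. A standalone sketch of that idea, with hypothetical simplified types rather than the kernel's:

struct rq { unsigned long long sector; };

static unsigned long long seek_dist(unsigned long long a, unsigned long long b)
{
	return a > b ? a - b : b - a;
}

/* Hypothetical, simplified stand-in for cfq_choose_req(): prefer the
 * request starting closest to the last serviced sector.  The real
 * function also weighs seek direction and sync vs. async status. */
static struct rq *choose_req(unsigned long long last_sector,
                             struct rq *a, struct rq *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return seek_dist(a->sector, last_sector) <=
	       seek_dist(b->sector, last_sector) ? a : b;
}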
@@ -986,9 +992,9 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* expire an async queue immediately if it has used up its slice. idle
* queue always expire after 1 dispatch round.
*/
- if ((!cfq_cfqq_sync(cfqq) &&
+ if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq)) {
+ cfq_class_idle(cfqq))) {
cfqq->slice_end = jiffies + 1;
cfq_slice_expired(cfqd, 0, 0);
}
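The busy_queues > 1 guard keeps a lone queue from being expired merely because it exhausted its slice: with nothing else to serve, reselection would hand back the same queue, so forcing expiry is pure bookkeeping overhead. The same guard is applied again in cfq_dispatch_requests() in the next hunk. A minimal sketch of the intent, using hypothetical names rather than the kernel's types:

#include <stddef.h>

struct queue;

struct sched_data {
	int busy_queues;        /* queues that currently have requests */
	struct queue *active;   /* queue being served right now */
};

/* Expiring the active queue only buys fairness when another queue is
 * actually waiting; otherwise keep dispatching from the current one. */
static void maybe_expire_slice(struct sched_data *sd, int slice_used_up)
{
	if (sd->busy_queues > 1 && slice_used_up)
		sd->active = NULL;      /* force reselection next round */
}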
@@ -1051,19 +1057,21 @@ cfq_dispatch_requests(request_queue_t *q, int force)
while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
int max_dispatch;
- /*
- * Don't repeat dispatch from the previous queue.
- */
- if (prev_cfqq == cfqq)
- break;
+ if (cfqd->busy_queues > 1) {
+ /*
+ * Don't repeat dispatch from the previous queue.
+ */
+ if (prev_cfqq == cfqq)
+ break;
- /*
- * So we have dispatched before in this round, if the
- * next queue has idling enabled (must be sync), don't
- * allow it service until the previous have continued.
- */
- if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
- break;
+ /*
+ * So we have dispatched before in this round, if the
+ * next queue has idling enabled (must be sync), don't
+ * allow it service until the previous have continued.
+ */
+ if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+ break;
+ }
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
@@ -1370,7 +1378,9 @@ retry:
atomic_set(&cfqq->ref, 0);
cfqq->cfqd = cfqd;
- cfq_mark_cfqq_idle_window(cfqq);
+ if (key != CFQ_KEY_ASYNC)
+ cfq_mark_cfqq_idle_window(cfqq);
+
cfq_mark_cfqq_prio_changed(cfqq);
cfq_mark_cfqq_queue_new(cfqq);
cfq_init_prio_data(cfqq);
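Idling, i.e. briefly holding the disk in the hope that the same task soon issues another nearby request, only pays off for sync queues; async writeback has no think time to wait out. Hence newly allocated async queues no longer start with the idle-window flag set. A sketch of the flag handling, again with hypothetical names:

enum { KEY_ASYNC = 0 };            /* stand-in for CFQ_KEY_ASYNC */

struct queue { unsigned int flags; };
#define QF_IDLE_WINDOW (1u << 0)

/* Only sync queues may idle-wait for a follow-up request; async
 * queues start (and stay) with the idle window disabled. */
static void init_queue_flags(struct queue *q, int key)
{
	q->flags = 0;
	if (key != KEY_ASYNC)
		q->flags |= QF_IDLE_WINDOW;
}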
@@ -1635,12 +1645,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->meta_pending++;
/*
- * check if this request is a better next-serve candidate
- */
- cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
- BUG_ON(!cfqq->next_rq);
-
- /*
* we never wait for an async request and we don't allow preemption
* of an async request. so just return early
*/
diff --git a/block/elevator.c b/block/elevator.c
index 25f6ef28e3b..96a00c82274 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -964,17 +964,18 @@ void elv_unregister_queue(struct request_queue *q)
int elv_register(struct elevator_type *e)
{
+ char *def = "";
spin_lock_irq(&elv_list_lock);
BUG_ON(elevator_find(e->elevator_name));
list_add_tail(&e->list, &elv_list);
spin_unlock_irq(&elv_list_lock);
- printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
if (!strcmp(e->elevator_name, chosen_elevator) ||
(!*chosen_elevator &&
!strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
- printk(" (default)");
- printk("\n");
+ def = " (default)";
+
+ printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
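The elv_register() change is small but worth noting: building the optional " (default)" suffix first and issuing a single printk replaces a multi-part printk whose continuation pieces carry no log level and can interleave with messages from other CPUs. The same pattern in a userspace sketch:

#include <stdio.h>

/* Decide the optional suffix up front, then emit one message;
 * concurrent writers can no longer split the line in half.
 * (Userspace sketch, not the kernel function.) */
static void announce_scheduler(const char *name, int is_default)
{
	const char *def = is_default ? " (default)" : "";

	printf("io scheduler %s registered%s\n", name, def);
}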
diff --git a/block/genhd.c b/block/genhd.c
index 050a1f0f3a8..441432a142f 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -62,8 +62,6 @@ int register_blkdev(unsigned int major, const char *name)
/* temporary */
if (major == 0) {
for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
- if (is_lanana_major(index))
- continue;
if (major_names[index] == NULL)
break;
}
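This hunk drops the LANANA filter from dynamic major allocation: register_blkdev(0, name) scans the table from the top down and now takes the first free slot, regardless of whether that number appears in the LANANA device registry. A simplified sketch of the loop after the change (hypothetical flat table; the kernel keeps registered names in a small structure per slot):

#include <stddef.h>

#define NR_MAJORS 255

static const char *major_names[NR_MAJORS];

/* Walk downward from the highest major and hand out the first unused
 * number; major 0 is reserved as the "allocate for me" request value. */
static int alloc_dynamic_major(void)
{
	int index;

	for (index = NR_MAJORS - 1; index > 0; index--)
		if (major_names[index] == NULL)
			return index;
	return -1;      /* no free majors left */
}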
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 38c293b987b..3de06953ac3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1221,7 +1221,7 @@ void blk_recount_segments(request_queue_t *q, struct bio *bio)
* considered part of another segment, since that might
* change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+ high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
if (high || highprv)
goto new_hw_segment;
if (cluster) {
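The comparison change here pairs with the blk_dev_init() hunk below: q->bounce_pfn now names the last page frame reachable without bouncing, an inclusive bound, so a page counts as "high" only when its pfn is strictly greater. A sketch of the convention, assuming that inclusive-bound reading:

/* bounce_pfn is the last pfn the device can address directly
 * (inclusive bound), so only pages strictly above it need the
 * bounce-buffer path.  Sketch only, not the kernel helper. */
static int page_needs_bounce(unsigned long pfn, unsigned long bounce_pfn)
{
	return pfn > bounce_pfn;
}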
@@ -3658,8 +3658,8 @@ int __init blk_dev_init(void)
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
register_hotcpu_notifier(&blk_cpu_notifier);
- blk_max_low_pfn = max_low_pfn;
- blk_max_pfn = max_pfn;
+ blk_max_low_pfn = max_low_pfn - 1;
+ blk_max_pfn = max_pfn - 1;
return 0;
}
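max_low_pfn and max_pfn count page frames, i.e. they are one past the highest valid pfn, so the highest directly addressable low-memory frame is max_low_pfn - 1. The subtraction converts that exclusive bound into the inclusive one that the pfn > bounce_pfn test above expects. A toy check of the arithmetic, assuming 4 KiB pages and 896 MiB of low memory:

#include <assert.h>

int main(void)
{
	unsigned long max_low_pfn = (896ul << 20) / 4096;  /* 229376 frames */
	unsigned long bounce_pfn  = max_low_pfn - 1;       /* last low pfn */

	unsigned long last_low_pfn   = max_low_pfn - 1;
	unsigned long first_high_pfn = max_low_pfn;

	assert(!(last_low_pfn > bounce_pfn));   /* stays in low memory */
	assert(first_high_pfn > bounce_pfn);    /* takes the bounce path */
	return 0;
}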