Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 327
1 file changed, 190 insertions(+), 137 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fd7080ed793..aa1e9535e35 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);
@@ -134,13 +134,8 @@ struct cfq_data {
struct rb_root prio_trees[CFQ_PRIO_LISTS];
unsigned int busy_queues;
- /*
- * Used to track any pending rt requests so we can pre-empt current
- * non-RT cfqq in service when this value is non-zero.
- */
- unsigned int busy_rt_queues;
- int rq_in_driver;
+ int rq_in_driver[2];
int sync_flight;
/*
@@ -178,6 +173,7 @@ struct cfq_data {
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
+ unsigned int cfq_latency;
struct list_head cic_list;
@@ -185,13 +181,14 @@ struct cfq_data {
* Fallback dummy cfqq for extreme OOM conditions
*/
struct cfq_queue oom_cfqq;
+
+ unsigned long last_end_sync_rq;
};
enum cfqq_state_flags {
CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
- CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
@@ -199,6 +196,7 @@ enum cfqq_state_flags {
CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
CFQ_CFQQ_FLAG_sync, /* synchronous queue */
CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */
+ CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt */
};
#define CFQ_CFQQ_FNS(name) \
@@ -218,7 +216,6 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
-CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
@@ -226,6 +223,7 @@ CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(coop_preempt);
#undef CFQ_CFQQ_FNS
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
@@ -234,33 +232,35 @@ CFQ_CFQQ_FNS(coop);
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
struct io_context *);
+static inline int rq_in_driver(struct cfq_data *cfqd)
+{
+ return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
+}
+
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
- int is_sync)
+ bool is_sync)
{
- return cic->cfqq[!!is_sync];
+ return cic->cfqq[is_sync];
}
static inline void cic_set_cfqq(struct cfq_io_context *cic,
- struct cfq_queue *cfqq, int is_sync)
+ struct cfq_queue *cfqq, bool is_sync)
{
- cic->cfqq[!!is_sync] = cfqq;
+ cic->cfqq[is_sync] = cfqq;
}
/*
* We regard a request as SYNC, if it's either a read or has the SYNC bit
* set (in which case it could also be direct WRITE).
*/
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
{
- if (bio_data_dir(bio) == READ || bio_sync(bio))
- return 1;
-
- return 0;
+ return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
}
/*
@@ -287,7 +287,7 @@ static int cfq_queue_empty(struct request_queue *q)
* if a queue is marked sync and has sync io queued. A sync queue with async
* io only, should not get full sync slice length.
*/
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
unsigned short prio)
{
const int base_slice = cfqd->cfq_slice[sync];
@@ -315,7 +315,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* isn't valid until the first request from the dispatch is activated
* and the slice time set.
*/
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
if (cfq_cfqq_slice_new(cfqq))
return 0;
@@ -490,7 +490,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
* we will service the queues.
*/
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- int add_front)
+ bool add_front)
{
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
@@ -506,11 +506,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
} else
rb_key += jiffies;
} else if (!add_front) {
+ /*
+ * Get our rb key offset. Subtract any residual slice
+ * value carried from last service. A negative resid
+ * count indicates slice overrun, and this should position
+ * the next service time further away in the tree.
+ */
rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
- rb_key += cfqq->slice_resid;
+ rb_key -= cfqq->slice_resid;
cfqq->slice_resid = 0;
- } else
- rb_key = 0;
+ } else {
+ rb_key = -HZ;
+ __cfqq = cfq_rb_first(&cfqd->service_tree);
+ rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+ }
if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
/*
@@ -544,7 +553,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
n = &(*p)->rb_left;
else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
n = &(*p)->rb_right;
- else if (rb_key < __cfqq->rb_key)
+ else if (time_before(rb_key, __cfqq->rb_key))
n = &(*p)->rb_left;
else
n = &(*p)->rb_right;
@@ -648,8 +657,6 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
- if (cfq_class_rt(cfqq))
- cfqd->busy_rt_queues++;
cfq_resort_rr_list(cfqd, cfqq);
}
@@ -673,8 +680,6 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
- if (cfq_class_rt(cfqq))
- cfqd->busy_rt_queues--;
}
/*
@@ -760,9 +765,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
- cfqd->rq_in_driver++;
+ cfqd->rq_in_driver[rq_is_sync(rq)]++;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
- cfqd->rq_in_driver);
+ rq_in_driver(cfqd));
cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
@@ -770,11 +775,12 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
+ WARN_ON(!cfqd->rq_in_driver[sync]);
+ cfqd->rq_in_driver[sync]--;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
- cfqd->rq_in_driver);
+ rq_in_driver(cfqd));
}
static void cfq_remove_request(struct request *rq)
@@ -827,8 +833,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(next->start_time, rq->start_time))
+ time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
cfq_remove_request(next);
}
@@ -844,7 +852,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
* Disallow merge of a sync bio into an async request.
*/
if (cfq_bio_sync(bio) && !rq_is_sync(rq))
- return 0;
+ return false;
/*
* Lookup the cfqq that this bio will be queued with. Allow
@@ -852,13 +860,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
*/
cic = cfq_cic_lookup(cfqd, current->io_context);
if (!cic)
- return 0;
+ return false;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
- if (cfqq == RQ_CFQQ(rq))
- return 1;
-
- return 0;
+ return cfqq == RQ_CFQQ(rq);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -886,7 +891,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
*/
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- int timed_out)
+ bool timed_out)
{
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
@@ -914,7 +919,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
}
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
struct cfq_queue *cfqq = cfqd->active_queue;
@@ -942,10 +947,13 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
{
if (!cfqq) {
cfqq = cfq_get_next_queue(cfqd);
- if (cfqq)
+ if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
cfq_clear_cfqq_coop(cfqq);
}
+ if (cfqq)
+ cfq_clear_cfqq_coop_preempt(cfqq);
+
__cfq_set_active_queue(cfqd, cfqq);
return cfqq;
}
@@ -1026,7 +1034,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
*/
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
struct cfq_queue *cur_cfqq,
- int probe)
+ bool probe)
{
struct cfq_queue *cfqq;
@@ -1080,7 +1088,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
/*
* still requests with the driver, don't idle
*/
- if (cfqd->rq_in_driver)
+ if (rq_in_driver(cfqd))
return;
/*
@@ -1090,6 +1098,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
if (!cic || !atomic_read(&cic->ioc->nr_tasks))
return;
+ /*
+ * If our average think time is larger than the remaining time
+ * slice, then don't idle. This avoids overrunning the allotted
+ * time slice.
+ */
+ if (sample_valid(cic->ttime_samples) &&
+ (cfqq->slice_end - jiffies < cic->ttime_mean))
+ return;
+
cfq_mark_cfqq_wait_request(cfqq);
/*
@@ -1115,6 +1132,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
+ cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
cfq_remove_request(rq);
cfqq->dispatched++;
elv_dispatch_sort(q, rq);
@@ -1128,9 +1146,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
*/
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
- struct cfq_data *cfqd = cfqq->cfqd;
- struct request *rq;
- int fifo;
+ struct request *rq = NULL;
if (cfq_cfqq_fifo_expire(cfqq))
return NULL;
@@ -1140,13 +1156,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
if (list_empty(&cfqq->fifo))
return NULL;
- fifo = cfq_cfqq_sync(cfqq);
rq = rq_entry_fifo(cfqq->fifo.next);
-
- if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+ if (time_before(jiffies, rq_fifo_time(rq)))
rq = NULL;
- cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+ cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
@@ -1179,20 +1193,6 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
goto expire;
/*
- * If we have a RT cfqq waiting, then we pre-empt the current non-rt
- * cfqq.
- */
- if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
- /*
- * We simulate this as cfqq timed out so that it gets to bank
- * the remaining of its time slice.
- */
- cfq_log_cfqq(cfqd, cfqq, "preempt");
- cfq_slice_expired(cfqd, 1);
- goto new_queue;
- }
-
- /*
* The active queue has requests and isn't expired, allow it to
* dispatch.
*/
@@ -1261,16 +1261,83 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ unsigned int max_dispatch;
+
+ /*
+ * Drain async requests before we start sync IO
+ */
+ if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+ return false;
+
+ /*
+ * If this is an async queue and we have sync IO in flight, let it wait
+ */
+ if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+ return false;
+
+ max_dispatch = cfqd->cfq_quantum;
+ if (cfq_class_idle(cfqq))
+ max_dispatch = 1;
+
+ /*
+ * Does this cfqq already have too much IO in flight?
+ */
+ if (cfqq->dispatched >= max_dispatch) {
+ /*
+ * idle queue must always only have a single IO in flight
+ */
+ if (cfq_class_idle(cfqq))
+ return false;
+
+ /*
+ * We have other queues, don't allow more IO from this one
+ */
+ if (cfqd->busy_queues > 1)
+ return false;
+
+ /*
+ * Sole queue user, allow bigger slice
+ */
+ max_dispatch *= 4;
+ }
+
+ /*
+ * Async queues must wait a bit before being allowed dispatch.
+ * We also ramp up the dispatch depth gradually for async IO,
+ * based on the last sync IO we serviced
+ */
+ if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+ unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+ unsigned int depth;
+
+ depth = last_sync / cfqd->cfq_slice[1];
+ if (!depth && !cfqq->dispatched)
+ depth = 1;
+ if (depth < max_dispatch)
+ max_dispatch = depth;
+ }
+
+ /*
+ * If we're below the current max, allow a dispatch
+ */
+ return cfqq->dispatched < max_dispatch;
+}
+
/*
* Dispatch a request from cfqq, moving them to the request queue
* dispatch list.
*/
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct request *rq;
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+ if (!cfq_may_dispatch(cfqd, cfqq))
+ return false;
+
/*
* follow expired path, else get first next available
*/
@@ -1289,6 +1356,8 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
atomic_long_inc(&cic->ioc->refcount);
cfqd->active_cic = cic;
}
+
+ return true;
}
/*
@@ -1299,7 +1368,6 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
- unsigned int max_dispatch;
if (!cfqd->busy_queues)
return 0;
@@ -1312,42 +1380,11 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
return 0;
/*
- * If this is an async queue and we have sync IO in flight, let it wait
+ * Dispatch a request from this cfqq, if it is allowed
*/
- if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+ if (!cfq_dispatch_request(cfqd, cfqq))
return 0;
- max_dispatch = cfqd->cfq_quantum;
- if (cfq_class_idle(cfqq))
- max_dispatch = 1;
-
- /*
- * Does this cfqq already have too much IO in flight?
- */
- if (cfqq->dispatched >= max_dispatch) {
- /*
- * idle queue must always only have a single IO in flight
- */
- if (cfq_class_idle(cfqq))
- return 0;
-
- /*
- * We have other queues, don't allow more IO from this one
- */
- if (cfqd->busy_queues > 1)
- return 0;
-
- /*
- * we are the only queue, allow up to 4 times of 'quantum'
- */
- if (cfqq->dispatched >= 4 * max_dispatch)
- return 0;
- }
-
- /*
- * Dispatch a request from this cfqq
- */
- cfq_dispatch_request(cfqd, cfqq);
cfqq->slice_dispatch++;
cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1362,7 +1399,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
cfq_slice_expired(cfqd, 0);
}
- cfq_log(cfqd, "dispatched a request");
+ cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
return 1;
}
@@ -1427,7 +1464,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
cic = container_of(head, struct cfq_io_context, rcu_head);
kmem_cache_free(cfq_ioc_pool, cic);
- elv_ioc_count_dec(ioc_count);
+ elv_ioc_count_dec(cfq_ioc_count);
if (ioc_gone) {
/*
@@ -1436,7 +1473,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
* complete ioc_gone and set it back to NULL
*/
spin_lock(&ioc_gone_lock);
- if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+ if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
complete(ioc_gone);
ioc_gone = NULL;
}
@@ -1562,7 +1599,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
INIT_HLIST_NODE(&cic->cic_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- elv_ioc_count_inc(ioc_count);
+ elv_ioc_count_inc(cfq_ioc_count);
}
return cic;
@@ -1647,7 +1684,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- pid_t pid, int is_sync)
+ pid_t pid, bool is_sync)
{
RB_CLEAR_NODE(&cfqq->rb_node);
RB_CLEAR_NODE(&cfqq->p_node);
@@ -1667,7 +1704,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
struct io_context *ioc, gfp_t gfp_mask)
{
struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1731,7 +1768,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}
static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
gfp_t gfp_mask)
{
const int ioprio = task_ioprio(ioc);
@@ -1963,10 +2000,13 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
- (cfqd->hw_tag && CIC_SEEKY(cic)))
+ (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
- if (cic->ttime_mean > cfqd->cfq_slice_idle)
+ unsigned int slice_idle = cfqd->cfq_slice_idle;
+ if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+ slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+ if (cic->ttime_mean > slice_idle)
enable_idle = 0;
else
enable_idle = 1;
@@ -1985,7 +2025,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* Check if new_cfqq should preempt the currently active queue. Return 0 for
* no or if we aren't sure, a 1 will cause a preempt.
*/
-static int
+static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
struct request *rq)
{
@@ -1993,48 +2033,56 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
cfqq = cfqd->active_queue;
if (!cfqq)
- return 0;
+ return false;
if (cfq_slice_used(cfqq))
- return 1;
+ return true;
if (cfq_class_idle(new_cfqq))
- return 0;
+ return false;
if (cfq_class_idle(cfqq))
- return 1;
+ return true;
/*
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
- return 1;
+ return true;
/*
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
if (rq_is_meta(rq) && !cfqq->meta_pending)
- return 1;
+ return true;
/*
* Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
*/
if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
- return 1;
+ return true;
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
- return 0;
+ return false;
/*
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
- if (cfq_rq_close(cfqd, rq))
- return 1;
+ if (cfq_rq_close(cfqd, rq) && (!cfq_cfqq_coop(new_cfqq) ||
+ cfqd->busy_queues == 1)) {
+ /*
+ * Mark new queue coop_preempt, so its coop flag will not be
+ * cleared when new queue gets scheduled at the very first time
+ */
+ cfq_mark_cfqq_coop_preempt(new_cfqq);
+ cfq_mark_cfqq_coop(new_cfqq);
+ return true;
+ }
- return 0;
+ return false;
}
/*
@@ -2119,6 +2167,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_add_rq_rb(rq);
+ rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2130,11 +2179,11 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
*/
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
- if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
- cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+ if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
+ cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
- cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+ rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
return;
if (cfqd->hw_tag_samples++ < 50)
@@ -2161,16 +2210,18 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_update_hw_tag(cfqd);
- WARN_ON(!cfqd->rq_in_driver);
+ WARN_ON(!cfqd->rq_in_driver[sync]);
WARN_ON(!cfqq->dispatched);
- cfqd->rq_in_driver--;
+ cfqd->rq_in_driver[sync]--;
cfqq->dispatched--;
if (cfq_cfqq_sync(cfqq))
cfqd->sync_flight--;
- if (sync)
+ if (sync) {
RQ_CIC(rq)->last_end_request = now;
+ cfqd->last_end_sync_rq = now;
+ }
/*
* If this is the active queue, check if it needs to be expired,
@@ -2197,7 +2248,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
cfq_arm_slice_timer(cfqd);
}
- if (!cfqd->rq_in_driver)
+ if (!rq_in_driver(cfqd))
cfq_schedule_dispatch(cfqd);
}
@@ -2229,8 +2280,7 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
- if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
- !cfq_cfqq_must_alloc_slice(cfqq)) {
+ if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
cfq_mark_cfqq_must_alloc_slice(cfqq);
return ELV_MQUEUE_MUST;
}
@@ -2297,7 +2347,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
- const int is_sync = rq_is_sync(rq);
+ const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
unsigned long flags;
@@ -2317,7 +2367,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
}
cfqq->allocated[rw]++;
- cfq_clear_cfqq_must_alloc(cfqq);
atomic_inc(&cfqq->ref);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2494,8 +2543,9 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->cfq_latency = 1;
cfqd->hw_tag = 1;
-
+ cfqd->last_end_sync_rq = jiffies;
return cfqd;
}
@@ -2563,6 +2613,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -2594,6 +2645,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
@@ -2609,6 +2661,7 @@ static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
+ CFQ_ATTR(low_latency),
__ATTR_NULL
};
@@ -2668,7 +2721,7 @@ static void __exit cfq_exit(void)
* this also protects us from entering cfq_slab_kill() with
* pending RCU callbacks
*/
- if (elv_ioc_count_read(ioc_count))
+ if (elv_ioc_count_read(cfq_ioc_count))
wait_for_completion(&all_gone);
cfq_slab_kill();
}