Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/blkdev.h       | 114
-rw-r--r--  include/linux/init_task.h    |   1
-rw-r--r--  include/linux/iocontext.h    |  95
-rw-r--r--  include/linux/ioprio.h       |  13
-rw-r--r--  include/linux/mv643xx.h      |  10
-rw-r--r--  include/linux/mv643xx_i2c.h  |  22
-rw-r--r--  include/linux/scatterlist.h  | 126
-rw-r--r--  include/linux/sched.h        |   2
8 files changed, 178 insertions, 205 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e542c8fd921..71e7a847dff 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,83 +34,10 @@ struct sg_io_hdr;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
- spinlock_t lock;
-
- void (*dtor)(struct as_io_context *aic); /* destructor */
- void (*exit)(struct as_io_context *aic); /* called on task exit */
-
- unsigned long state;
- atomic_t nr_queued; /* queued reads & sync writes */
- atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
- /* IO History tracking */
- /* Thinktime */
- unsigned long last_end_request;
- unsigned long ttime_total;
- unsigned long ttime_samples;
- unsigned long ttime_mean;
- /* Layout pattern */
- unsigned int seek_samples;
- sector_t last_request_pos;
- u64 seek_total;
- sector_t seek_mean;
-};
-
-struct cfq_queue;
-struct cfq_io_context {
- struct rb_node rb_node;
- void *key;
-
- struct cfq_queue *cfqq[2];
-
- struct io_context *ioc;
-
- unsigned long last_end_request;
- sector_t last_request_pos;
-
- unsigned long ttime_total;
- unsigned long ttime_samples;
- unsigned long ttime_mean;
-
- unsigned int seek_samples;
- u64 seek_total;
- sector_t seek_mean;
-
- struct list_head queue_list;
-
- void (*dtor)(struct io_context *); /* destructor */
- void (*exit)(struct io_context *); /* called on task exit */
-};
-
-/*
- * This is the per-process I/O subsystem state. It is refcounted and
- * kmalloc'ed. Currently all fields are modified in process io context
- * (apart from the atomic refcount), so require no locking.
- */
-struct io_context {
- atomic_t refcount;
- struct task_struct *task;
-
- unsigned int ioprio_changed;
-
- /*
- * For request batching
- */
- unsigned long last_waited; /* Time last woken after wait for request */
- int nr_batch_requests; /* Number of requests left in the batch */
-
- struct as_io_context *aic;
- struct rb_root cic_root;
- void *ioc_data;
-};
-
-void put_io_context(struct io_context *ioc);
+int put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
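[Note: put_io_context() now returns int rather than void, and a new
alloc_io_context() is exported alongside get_io_context(). A minimal
sketch of a caller of the reworked interface follows; the return-value
convention (1 once the context is fully released, matching the
!CONFIG_BLOCK stub later in this file) is inferred, not stated here:]

	#include <linux/kernel.h>
	#include <linux/blkdev.h>

	static void ioc_usage_sketch(void)
	{
		struct io_context *ioc;

		ioc = get_io_context(GFP_KERNEL, -1);	/* -1 == any NUMA node */
		if (!ioc)
			return;

		/* ... inspect or attach per-process I/O state here ... */

		/* Assumed: returns 1 once the last reference is gone. */
		if (put_io_context(ioc))
			printk(KERN_DEBUG "io_context freed\n");
	}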
@@ -539,6 +466,8 @@ enum {
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
+/* rq->queuelist of dequeued request must be list_empty() */
+#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
@@ -718,29 +647,32 @@ static inline void blk_run_address_space(struct address_space *mapping)
}
/*
- * end_request() and friends. Must be called with the request queue spinlock
- * acquired. All functions called within end_request() _must_be_ atomic.
+ * blk_end_request() and friends.
+ * __blk_end_request() and end_request() must be called with
+ * the request queue spinlock acquired.
*
* Several drivers define their own end_request and call
- * end_that_request_first() and end_that_request_last()
- * for parts of the original function. This prevents
- * code duplication in drivers.
+ * blk_end_request() for parts of the original function.
+ * This prevents code duplication in drivers.
*/
-extern int end_that_request_first(struct request *, int, int);
-extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *, int);
+extern int blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
+extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
+ int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
+extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
+ int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
/*
- * end_that_request_first/chunk() takes an uptodate argument. we account
- * any value <= as an io error. 0 means -EIO for compatability reasons,
- * any other < 0 value is the direct error type. An uptodate value of
- * 1 indicates successful io completion
+ * blk_end_request() takes bytes instead of sectors as a complete size.
+ * blk_rq_bytes() returns bytes left to complete in the entire request.
+ * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
*/
-#define end_io_error(uptodate) (unlikely((uptodate) <= 0))
+extern unsigned int blk_rq_bytes(struct request *rq);
+extern unsigned int blk_rq_cur_bytes(struct request *rq);
static inline void blkdev_dequeue_request(struct request *req)
{
@@ -893,6 +825,12 @@ static inline void exit_io_context(void)
{
}
+static inline int put_io_context(struct io_context *ioc)
+{
+ return 1;
+}
+
+
#endif /* CONFIG_BLOCK */
#endif
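[Note: per the updated comment above, blk_end_request() does not need
the queue lock, while __blk_end_request() and end_request() still do.
A hypothetical driver conversion from the removed helpers, assuming the
common convention that a zero return means the request completed in
full:]

	#include <linux/blkdev.h>

	static void my_driver_complete(struct request *rq, int error,
				       int nr_bytes)
	{
		/* Old style (removed by this patch):
		 *	if (!end_that_request_first(rq, !error, rq->hard_nr_sectors))
		 *		end_that_request_last(rq, !error);
		 */
		if (blk_end_request(rq, error, nr_bytes))
			printk(KERN_DEBUG "request still has bytes pending\n");
	}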
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 796019b22b6..e6b3f708067 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -137,7 +137,6 @@ extern struct group_info init_groups;
.time_slice = HZ, \
.nr_cpus_allowed = NR_CPUS, \
}, \
- .ioprio = 0, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
.ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
new file mode 100644
index 00000000000..593b222d9dc
--- /dev/null
+++ b/include/linux/iocontext.h
@@ -0,0 +1,95 @@
+#ifndef IOCONTEXT_H
+#define IOCONTEXT_H
+
+#include <linux/radix-tree.h>
+
+/*
+ * This is the per-process anticipatory I/O scheduler state.
+ */
+struct as_io_context {
+ spinlock_t lock;
+
+ void (*dtor)(struct as_io_context *aic); /* destructor */
+ void (*exit)(struct as_io_context *aic); /* called on task exit */
+
+ unsigned long state;
+ atomic_t nr_queued; /* queued reads & sync writes */
+ atomic_t nr_dispatched; /* number of requests gone to the drivers */
+
+ /* IO History tracking */
+ /* Thinktime */
+ unsigned long last_end_request;
+ unsigned long ttime_total;
+ unsigned long ttime_samples;
+ unsigned long ttime_mean;
+ /* Layout pattern */
+ unsigned int seek_samples;
+ sector_t last_request_pos;
+ u64 seek_total;
+ sector_t seek_mean;
+};
+
+struct cfq_queue;
+struct cfq_io_context {
+ void *key;
+ unsigned long dead_key;
+
+ struct cfq_queue *cfqq[2];
+
+ struct io_context *ioc;
+
+ unsigned long last_end_request;
+ sector_t last_request_pos;
+
+ unsigned long ttime_total;
+ unsigned long ttime_samples;
+ unsigned long ttime_mean;
+
+ unsigned int seek_samples;
+ u64 seek_total;
+ sector_t seek_mean;
+
+ struct list_head queue_list;
+
+ void (*dtor)(struct io_context *); /* destructor */
+ void (*exit)(struct io_context *); /* called on task exit */
+};
+
+/*
+ * I/O subsystem state of the associated processes. It is refcounted
+ * and kmalloc'ed. These could be shared between processes.
+ */
+struct io_context {
+ atomic_t refcount;
+ atomic_t nr_tasks;
+
+ /* all the fields below are protected by this lock */
+ spinlock_t lock;
+
+ unsigned short ioprio;
+ unsigned short ioprio_changed;
+
+ /*
+ * For request batching
+ */
+ unsigned long last_waited; /* Time last woken after wait for request */
+ int nr_batch_requests; /* Number of requests left in the batch */
+
+ struct as_io_context *aic;
+ struct radix_tree_root radix_root;
+ void *ioc_data;
+};
+
+static inline struct io_context *ioc_task_link(struct io_context *ioc)
+{
+ /*
+ * if ref count is zero, don't allow sharing (ioc is going away, it's
+ * a race).
+ */
+ if (ioc && atomic_inc_not_zero(&ioc->refcount))
+ return ioc;
+
+ return NULL;
+}
+
+#endif
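[Note: ioc_task_link() exists so a forking task can share its
io_context with a child. A sketch of fork-time usage follows; the
task_struct->io_context member is assumed here, since it is introduced
elsewhere in this series rather than in this header:]

	#include <linux/sched.h>
	#include <linux/ioprio.h>

	static int copy_io_sketch(unsigned long clone_flags,
				  struct task_struct *tsk)
	{
		struct io_context *ioc = current->io_context;	/* assumed field */

		if (!ioc)
			return 0;

		if (clone_flags & CLONE_IO) {
			/* Share: fails only on a race with the last put. */
			tsk->io_context = ioc_task_link(ioc);
			if (!tsk->io_context)
				return -ENOMEM;
		} else if (ioprio_valid(ioc->ioprio)) {
			/* Inherit the priority in a fresh, private context. */
			tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
			if (!tsk->io_context)
				return -ENOMEM;
			tsk->io_context->ioprio = ioc->ioprio;
		}
		return 0;
	}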
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index baf29387cab..2a3bb1bb743 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -2,6 +2,7 @@
#define IOPRIO_H
#include <linux/sched.h>
+#include <linux/iocontext.h>
/*
* Gives us 8 prio classes with 13-bits of data for each class
@@ -45,18 +46,18 @@ enum {
* the cpu scheduler nice value to an io priority
*/
#define IOPRIO_NORM (4)
-static inline int task_ioprio(struct task_struct *task)
+static inline int task_ioprio(struct io_context *ioc)
{
- if (ioprio_valid(task->ioprio))
- return IOPRIO_PRIO_DATA(task->ioprio);
+ if (ioprio_valid(ioc->ioprio))
+ return IOPRIO_PRIO_DATA(ioc->ioprio);
return IOPRIO_NORM;
}
-static inline int task_ioprio_class(struct task_struct *task)
+static inline int task_ioprio_class(struct io_context *ioc)
{
- if (ioprio_valid(task->ioprio))
- return IOPRIO_PRIO_CLASS(task->ioprio);
+ if (ioprio_valid(ioc->ioprio))
+ return IOPRIO_PRIO_CLASS(ioc->ioprio);
return IOPRIO_CLASS_BE;
}
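[Note: the helpers now take an io_context instead of a task_struct. A
short illustration of the encoding described above — a 3-bit class above
a 13-bit data field; the locking and the ioprio_changed handshake are
assumptions based on the struct comments in iocontext.h:]

	#include <linux/ioprio.h>

	static void ioprio_sketch(struct io_context *ioc)
	{
		/* Pack class BE, level 4 (the default, IOPRIO_NORM). */
		int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

		spin_lock_irq(&ioc->lock);	/* ioc fields are lock-protected */
		ioc->ioprio = prio;
		ioc->ioprio_changed = 1;	/* schedulers must re-read it */
		spin_unlock_irq(&ioc->lock);

		/* Unpack again via the reworked helpers. */
		WARN_ON(task_ioprio(ioc) != IOPRIO_NORM);
		WARN_ON(task_ioprio_class(ioc) != IOPRIO_CLASS_BE);
	}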
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
index d2ae6185f03..69327b7b4ce 100644
--- a/include/linux/mv643xx.h
+++ b/include/linux/mv643xx.h
@@ -15,6 +15,7 @@
#include <asm/types.h>
#include <linux/mv643xx_eth.h>
+#include <linux/mv643xx_i2c.h>
/****************************************/
/* Processor Address Space */
@@ -863,7 +864,6 @@
/* I2C Registers */
/****************************************/
-#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c"
#define MV64XXX_I2C_OFFSET 0xc000
#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020
@@ -968,14 +968,6 @@ struct mpsc_pdata {
u32 brg_clk_freq;
};
-/* i2c Platform Device, Driver Data */
-struct mv64xxx_i2c_pdata {
- u32 freq_m;
- u32 freq_n;
- u32 timeout; /* In milliseconds */
- u32 retries;
-};
-
/* Watchdog Platform Device, Driver Data */
#define MV64x60_WDT_NAME "mv64x60_wdt"
diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h
new file mode 100644
index 00000000000..5db5152e9de
--- /dev/null
+++ b/include/linux/mv643xx_i2c.h
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _MV64XXX_I2C_H_
+#define _MV64XXX_I2C_H_
+
+#include <linux/types.h>
+
+#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c"
+
+/* i2c Platform Device, Driver Data */
+struct mv64xxx_i2c_pdata {
+ u32 freq_m;
+ u32 freq_n;
+ u32 timeout; /* In milliseconds */
+};
+
+#endif /*_MV64XXX_I2C_H_*/
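[Note: the platform data moved here from mv643xx.h, dropping the old
'retries' field along the way. A hypothetical board-code registration,
with purely illustrative clock-divisor values:]

	#include <linux/platform_device.h>
	#include <linux/mv643xx_i2c.h>

	static struct mv64xxx_i2c_pdata board_i2c_pdata = {
		.freq_m  = 8,		/* illustrative divisors */
		.freq_n  = 3,
		.timeout = 1000,	/* ms */
	};

	static struct platform_device board_i2c_dev = {
		.name = MV64XXX_I2C_CTLR_NAME,
		.id   = 0,
		.dev  = {
			.platform_data = &board_i2c_pdata,
		},
	};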
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index e3ff21dbac5..a3d567a974e 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -7,6 +7,12 @@
#include <linux/string.h>
#include <asm/io.h>
+struct sg_table {
+ struct scatterlist *sgl; /* the list */
+ unsigned int nents; /* number of mapped entries */
+ unsigned int orig_nents; /* original size of list */
+};
+
/*
* Notes on SG table design.
*
@@ -106,31 +112,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
-/**
- * sg_next - return the next scatterlist entry in a list
- * @sg: The current sg entry
- *
- * Description:
- * Usually the next entry will be @sg@ + 1, but if this sg element is part
- * of a chained scatterlist, it could jump to the start of a new
- * scatterlist array.
- *
- **/
-static inline struct scatterlist *sg_next(struct scatterlist *sg)
-{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
- if (sg_is_last(sg))
- return NULL;
-
- sg++;
- if (unlikely(sg_is_chain(sg)))
- sg = sg_chain_ptr(sg);
-
- return sg;
-}
-
/*
* Loop over each sg element, following the pointer to a new list if necessary
*/
@@ -138,40 +119,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
/**
- * sg_last - return the last scatterlist entry in a list
- * @sgl: First entry in the scatterlist
- * @nents: Number of entries in the scatterlist
- *
- * Description:
- * Should only be used casually, it (currently) scan the entire list
- * to get the last entry.
- *
- * Note that the @sgl@ pointer passed in need not be the first one,
- * the important bit is that @nents@ denotes the number of entries that
- * exist from @sgl@.
- *
- **/
-static inline struct scatterlist *sg_last(struct scatterlist *sgl,
- unsigned int nents)
-{
-#ifndef ARCH_HAS_SG_CHAIN
- struct scatterlist *ret = &sgl[nents - 1];
-#else
- struct scatterlist *sg, *ret = NULL;
- unsigned int i;
-
- for_each_sg(sgl, sg, nents, i)
- ret = sg;
-
-#endif
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sgl[0].sg_magic != SG_MAGIC);
- BUG_ON(!sg_is_last(ret));
-#endif
- return ret;
-}
-
-/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
* @prv_nents: Number of entries in prv
@@ -223,47 +170,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
}
/**
- * sg_init_table - Initialize SG table
- * @sgl: The SG table
- * @nents: Number of entries in table
- *
- * Notes:
- * If this is part of a chained sg table, sg_mark_end() should be
- * used only on the last table part.
- *
- **/
-static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
-{
- memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
- {
- unsigned int i;
- for (i = 0; i < nents; i++)
- sgl[i].sg_magic = SG_MAGIC;
- }
-#endif
- sg_mark_end(&sgl[nents - 1]);
-}
-
-/**
- * sg_init_one - Initialize a single entry sg list
- * @sg: SG entry
- * @buf: Virtual address for IO
- * @buflen: IO length
- *
- * Notes:
- * This should not be used on a single entry that is part of a larger
- * table. Use sg_init_table() for that.
- *
- **/
-static inline void sg_init_one(struct scatterlist *sg, const void *buf,
- unsigned int buflen)
-{
- sg_init_table(sg, 1);
- sg_set_buf(sg, buf, buflen);
-}
-
-/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
*
@@ -293,4 +199,24 @@ static inline void *sg_virt(struct scatterlist *sg)
return page_address(sg_page(sg)) + sg->offset;
}
+struct scatterlist *sg_next(struct scatterlist *);
+struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
+void sg_init_table(struct scatterlist *, unsigned int);
+void sg_init_one(struct scatterlist *, const void *, unsigned int);
+
+typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
+typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
+
+void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
+void sg_free_table(struct sg_table *);
+int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
+ sg_alloc_fn *);
+int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
+
+/*
+ * Maximum number of entries that will be allocated in one piece, if
+ * a list larger than this is required then chaining will be utilized.
+ */
+#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
+
#endif /* _LINUX_SCATTERLIST_H */
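[Note: sg_next(), sg_last(), sg_init_table() and sg_init_one() move out
of line, and the new sg_table allocators chain transparently once a list
exceeds SG_MAX_SINGLE_ALLOC entries. A sketch of the new API, assuming a
caller that already holds an array of pages:]

	#include <linux/scatterlist.h>

	static int sg_table_sketch(struct page **pages, unsigned int npages)
	{
		struct sg_table table;
		struct scatterlist *sg;
		unsigned int i;
		int ret;

		/* Chains automatically when npages > SG_MAX_SINGLE_ALLOC. */
		ret = sg_alloc_table(&table, npages, GFP_KERNEL);
		if (ret)
			return ret;

		for_each_sg(table.sgl, sg, npages, i)
			sg_set_page(sg, pages[i], PAGE_SIZE, 0);

		/* ... dma_map_sg(), do I/O, dma_unmap_sg() ... */

		sg_free_table(&table);
		return 0;
	}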
diff --git a/include/linux/sched.h b/include/linux/sched.h
index df5b24ee80b..2d0546e884e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -27,6 +27,7 @@
#define CLONE_NEWUSER 0x10000000 /* New user namespace */
#define CLONE_NEWPID 0x20000000 /* New pid namespace */
#define CLONE_NEWNET 0x40000000 /* New network namespace */
+#define CLONE_IO 0x80000000 /* Clone io context */
/*
* Scheduling policies
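[Note: a hypothetical userspace use of the new flag — a clone(2) child
that shares the parent's I/O context, and with it the io priority and
CFQ queues. Only kernels carrying this patch honour CLONE_IO:]

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stddef.h>

	#ifndef CLONE_IO
	#define CLONE_IO 0x80000000	/* matches the definition added above */
	#endif

	static int io_worker(void *arg)
	{
		/* ... I/O issued here is accounted to the shared context ... */
		return 0;
	}

	static int spawn_io_sibling(char *stack_top)
	{
		return clone(io_worker, stack_top,
			     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_IO, NULL);
	}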
@@ -975,7 +976,6 @@ struct task_struct {
struct hlist_head preempt_notifiers;
#endif
- unsigned short ioprio;
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu