Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/Kbuild             1
-rw-r--r--  include/linux/ata.h             15
-rw-r--r--  include/linux/bio.h            108
-rw-r--r--  include/linux/blkdev.h         151
-rw-r--r--  include/linux/blktrace_api.h    62
-rw-r--r--  include/linux/completion.h      41
-rw-r--r--  include/linux/cpu.h              1
-rw-r--r--  include/linux/crypto.h          35
-rw-r--r--  include/linux/device-mapper.h   18
-rw-r--r--  include/linux/device.h          14
-rw-r--r--  include/linux/dlm.h              5
-rw-r--r--  include/linux/dlm_device.h       2
-rw-r--r--  include/linux/elevator.h         9
-rw-r--r--  include/linux/fd.h               8
-rw-r--r--  include/linux/fs.h               9
-rw-r--r--  include/linux/genhd.h          363
-rw-r--r--  include/linux/gfs2_ondisk.h      6
-rw-r--r--  include/linux/klist.h            3
-rw-r--r--  include/linux/libata.h          66
-rw-r--r--  include/linux/major.h            2
-rw-r--r--  include/linux/mtd/blktrans.h     2
-rw-r--r--  include/linux/notifier.h        10
-rw-r--r--  include/linux/proportions.h      2
-rw-r--r--  include/linux/sched.h            9
-rw-r--r--  include/linux/security.h        54
-rw-r--r--  include/linux/string_helpers.h  16
26 files changed, 618 insertions, 394 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b68ec09399b..31474e89c59 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -180,6 +180,7 @@ unifdef-y += audit.h
unifdef-y += auto_fs.h
unifdef-y += auxvec.h
unifdef-y += binfmts.h
+unifdef-y += blktrace_api.h
unifdef-y += capability.h
unifdef-y += capi.h
unifdef-y += cciss_ioctl.h
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 8a12d718c16..be00973d1a8 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -88,6 +88,7 @@ enum {
ATA_ID_DLF = 128,
ATA_ID_CSFO = 129,
ATA_ID_CFA_POWER = 160,
+ ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),
ATA_ID_SERNO_LEN = 20,
@@ -667,6 +668,15 @@ static inline int ata_id_has_dword_io(const u16 *id)
return 0;
}
+static inline int ata_id_has_unload(const u16 *id)
+{
+ if (ata_id_major_version(id) >= 7 &&
+ (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
+ id[ATA_ID_CFSSE] & (1 << 13))
+ return 1;
+ return 0;
+}
+
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -691,6 +701,11 @@ static inline int ata_id_is_cfa(const u16 *id)
return 0;
}
+static inline int ata_id_is_ssd(const u16 *id)
+{
+ return id[ATA_ID_ROT_SPEED] == 0x01;
+}
+
static inline int ata_drive_40wire(const u16 *dev_id)
{
if (ata_id_is_sata(dev_id))
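
A minimal sketch of how a low-level driver might consult the two new identify-data helpers above; the hook function and its call site are assumptions for illustration, not part of this patch:

/* Hypothetical probe-time hook: "id" points at the IDENTIFY DEVICE words. */
static void example_ata_configure(struct request_queue *q, const u16 *id)
{
	if (ata_id_is_ssd(id))		/* word 217 == 1: non-rotating media */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

	if (ata_id_has_unload(id))	/* head unload supported */
		pr_debug("ATA device supports head unload\n");
}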
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0933a14e641..ff5b4cf9e2d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -26,21 +26,8 @@
#ifdef CONFIG_BLOCK
-/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>
-#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
-#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
-#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE)
-#else
-#define BIOVEC_VIRT_START_SIZE(x) 0
-#define BIOVEC_VIRT_OVERSIZE(x) 0
-#endif
-
-#ifndef BIO_VMERGE_BOUNDARY
-#define BIO_VMERGE_BOUNDARY 0
-#endif
-
#define BIO_DEBUG
#ifdef BIO_DEBUG
@@ -88,25 +75,14 @@ struct bio {
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
- unsigned short bi_phys_segments;
-
- /* Number of segments after physical and DMA remapping
- * hardware coalescing is performed.
- */
- unsigned short bi_hw_segments;
+ unsigned int bi_phys_segments;
unsigned int bi_size; /* residual I/O count */
- /*
- * To keep track of the max hw size, we account for the
- * sizes of the first and last virtually mergeable segments
- * in this bio
- */
- unsigned int bi_hw_front_size;
- unsigned int bi_hw_back_size;
-
unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
+ unsigned int bi_comp_cpu; /* completion CPU */
+
struct bio_vec *bi_io_vec; /* the actual vec list */
bio_end_io_t *bi_end_io;
@@ -126,11 +102,14 @@ struct bio {
#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
#define BIO_EOF 2 /* out-of-bounds error */
-#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
+#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
#define BIO_USER_MAPPED 6 /* contains user pages */
#define BIO_EOPNOTSUPP 7 /* not supported */
+#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
+#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
+#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
@@ -144,18 +123,31 @@ struct bio {
/*
* bio bi_rw flags
*
- * bit 0 -- read (not set) or write (set)
+ * bit 0 -- data direction
+ * If not set, bio is a read from device. If set, it's a write to device.
* bit 1 -- rw-ahead when set
* bit 2 -- barrier
+ * Insert a serialization point in the IO queue, forcing previously
+ * submitted IO to be completed before this one is issued.
* bit 3 -- fail fast, don't want low level driver retries
* bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ * Note that this does NOT indicate that the IO itself is sync, just
+ * that the block layer will not postpone issue of this IO by plugging.
+ * bit 5 -- metadata request
+ * Used for tracing to differentiate metadata and data IO. May also
+ * get some preferential treatment in the IO scheduler
+ * bit 6 -- discard sectors
+ * Informs the lower level device that this range of sectors is no longer
+ * used by the file system and may thus be freed by the device. Used
+ * for flash based storage.
*/
-#define BIO_RW 0
-#define BIO_RW_AHEAD 1
+#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
+#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
#define BIO_RW_FAILFAST 3
#define BIO_RW_SYNC 4
#define BIO_RW_META 5
+#define BIO_RW_DISCARD 6
/*
* upper 16 bits of bi_rw define the io priority of this bio
@@ -185,14 +177,15 @@ struct bio {
#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
-#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size)
+#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
+#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
static inline unsigned int bio_cur_sectors(struct bio *bio)
{
if (bio->bi_vcnt)
return bio_iovec(bio)->bv_len >> 9;
-
- return 0;
+ else /* dataless requests such as discard */
+ return bio->bi_size >> 9;
}
static inline void *bio_data(struct bio *bio)
@@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio)
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#endif
-#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
- ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -319,15 +310,14 @@ struct bio_pair {
atomic_t cnt;
int error;
};
-extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
- int first_sectors);
-extern mempool_t *bio_split_pool;
+extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int);
+extern struct bio *bio_kmalloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
@@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *);
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern int bio_hw_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);
@@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
+extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
- unsigned long, unsigned int, int);
+ unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
+struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
struct block_device *,
- struct sg_iovec *, int, int);
+ struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);
@@ -359,15 +350,25 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
-extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
- int, int);
+extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
+ unsigned long, unsigned int, int, gfp_t);
+extern struct bio *bio_copy_user_iov(struct request_queue *,
+ struct rq_map_data *, struct sg_iovec *,
+ int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
+ * Allow queuer to specify a completion CPU for this bio
+ */
+static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
+{
+ bio->bi_comp_cpu = cpu;
+}
+
+/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
* These memory pools in turn all allocate from the bio_slab
@@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline int bio_has_data(struct bio *bio)
+{
+ return bio && bio->bi_io_vec != NULL;
+}
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
@@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
#define bip_for_each_vec(bvl, bip, i) \
__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
-static inline int bio_integrity(struct bio *bio)
-{
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- return bio->bi_integrity != NULL;
-#else
- return 0;
-#endif
-}
+#define bio_integrity(bio) (bio->bi_integrity != NULL)
extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
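
A hedged sketch of a consumer using the new bio helpers above; the function and its policy are illustrative only:

static void example_inspect_bio(struct bio *bio)
{
	/* Dataless bios (discards, empty barriers) carry no bvec to map. */
	if (!bio_has_data(bio)) {
		if (bio_discard(bio))
			pr_debug("discard covering %u sectors\n",
				 bio_cur_sectors(bio));
		return;
	}

	/* Hint that this bio should be completed on the submitting CPU. */
	bio_set_completion_cpu(bio, get_cpu());
	put_cpu();
}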
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 53ea933cf60..a92d9e4ea96 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -16,7 +16,9 @@
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
+#include <linux/gfp.h>
#include <linux/bsg.h>
+#include <linux/smp.h>
#include <asm/scatterlist.h>
@@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
REQ_TYPE_PM_SUSPEND, /* suspend request */
REQ_TYPE_PM_RESUME, /* resume request */
REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
- REQ_TYPE_FLUSH, /* flush request */
REQ_TYPE_SPECIAL, /* driver defined type */
REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
/*
@@ -76,19 +77,18 @@ enum rq_cmd_type_bits {
*
*/
enum {
- /*
- * just examples for now
- */
REQ_LB_OP_EJECT = 0x40, /* eject request */
- REQ_LB_OP_FLUSH = 0x41, /* flush device */
+ REQ_LB_OP_FLUSH = 0x41, /* flush request */
+ REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
};
/*
- * request type modified bits. first three bits match BIO_RW* bits, important
+ * request type modified bits. first two bits match BIO_RW* bits, important
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST, /* no low level driver retries */
+ __REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
@@ -111,6 +111,7 @@ enum rq_flag_bits {
};
#define REQ_RW (1 << __REQ_RW)
+#define REQ_DISCARD (1 << __REQ_DISCARD)
#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
@@ -140,12 +141,14 @@ enum rq_flag_bits {
*/
struct request {
struct list_head queuelist;
- struct list_head donelist;
+ struct call_single_data csd;
+ int cpu;
struct request_queue *q;
unsigned int cmd_flags;
enum rq_cmd_type_bits cmd_type;
+ unsigned long atomic_flags;
/* Maintain bio traversal state for part by part I/O submission.
* hard_* are block layer internals, no driver should touch them!
@@ -190,13 +193,6 @@ struct request {
*/
unsigned short nr_phys_segments;
- /* Number of scatter-gather addr+len pairs after
- * physical and DMA remapping hardware coalescing is performed.
- * This is the number of scatter-gather entries the driver
- * will actually have to deal with after DMA mapping is done.
- */
- unsigned short nr_hw_segments;
-
unsigned short ioprio;
void *special;
@@ -220,6 +216,8 @@ struct request {
void *data;
void *sense;
+ unsigned long deadline;
+ struct list_head timeout_list;
unsigned int timeout;
int retries;
@@ -233,6 +231,11 @@ struct request {
struct request *next_rq;
};
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ return req->ioprio;
+}
+
/*
* State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
* requests. Some step values could eventually be made generic.
@@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
+typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
struct bio_vec;
struct bvec_merge_data {
@@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
+typedef int (lld_busy_fn) (struct request_queue *q);
+
+enum blk_eh_timer_return {
+ BLK_EH_NOT_HANDLED,
+ BLK_EH_HANDLED,
+ BLK_EH_RESET_TIMER,
+};
+
+typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
enum blk_queue_state {
Queue_down,
@@ -307,10 +320,13 @@ struct request_queue
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unplug_fn *unplug_fn;
+ prepare_discard_fn *prepare_discard_fn;
merge_bvec_fn *merge_bvec_fn;
prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
+ rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
+ lld_busy_fn *lld_busy_fn;
/*
* Dispatch queue sorting
@@ -385,6 +401,10 @@ struct request_queue
unsigned int nr_sorted;
unsigned int in_flight;
+ unsigned int rq_timeout;
+ struct timer_list timeout;
+ struct list_head timeout_list;
+
/*
* sg stuff
*/
@@ -421,6 +441,10 @@ struct request_queue
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
static inline int queue_is_locked(struct request_queue *q)
{
@@ -526,7 +550,10 @@ enum {
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
+#define blk_queue_stackable(q) \
+ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -536,16 +563,18 @@ enum {
#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
-#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq))
+#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq) \
(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
+#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
+#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
@@ -592,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
#define RQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq) \
- (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+ (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
+ (blk_discard_rq(rq) || blk_fs_request((rq))))
/*
* q->prep_rq_fn return values
@@ -637,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
}
#endif /* CONFIG_MMU */
+struct rq_map_data {
+ struct page **pages;
+ int page_order;
+ int nr_entries;
+};
+
struct req_iterator {
int i;
struct bio *bio;
@@ -664,6 +700,10 @@ extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
+extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_insert_cloned_request(struct request_queue *q,
+ struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
@@ -705,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
-extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
+extern int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long,
+ gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct sg_iovec *, int, unsigned int);
+ struct rq_map_data *, struct sg_iovec *, int,
+ unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@ -750,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error,
extern int blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes);
extern void end_request(struct request *, int);
-extern void end_queued_request(struct request *, int);
-extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error,
unsigned int nr_bytes,
int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
+extern void __blk_complete_request(struct request *);
+extern void blk_abort_request(struct request *);
+extern void blk_abort_queue(struct request_queue *);
+extern void blk_update_request(struct request *rq, int error,
+ unsigned int nr_bytes);
/*
* blk_end_request() takes bytes instead of sectors as a complete size.
@@ -790,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
+extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
+extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
+extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
@@ -837,6 +887,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
}
extern int blkdev_issue_flush(struct block_device *, sector_t *);
+extern int blkdev_issue_discard(struct block_device *,
+ sector_t sector, sector_t nr_sects, gfp_t);
+
+static inline int sb_issue_discard(struct super_block *sb,
+ sector_t block, sector_t nr_blocks)
+{
+ block <<= (sb->s_blocksize_bits - 9);
+ nr_blocks <<= (sb->s_blocksize_bits - 9);
+ return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
+}
/*
* command filter functions
@@ -874,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
return q ? q->dma_alignment : 511;
}
+static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+ unsigned int len)
+{
+ unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ return !((unsigned long)addr & alignment) && !(len & alignment);
+}
+
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
@@ -900,7 +967,7 @@ static inline void put_dev_sector(Sector p)
}
struct work_struct;
-int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -945,49 +1012,19 @@ struct blk_integrity {
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct block_device *, struct block_device *);
+extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);
-static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
-{
- if (bi)
- return bi->tuple_size;
-
- return 0;
-}
-
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
return bdev->bd_disk->integrity;
}
-static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
- struct blk_integrity *bi = bdev_get_integrity(bdev);
-
- if (bi)
- return bi->tag_size;
-
- return 0;
-}
-
-static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
-{
- struct blk_integrity *bi = bdev_get_integrity(bdev);
-
- if (bi == NULL)
- return 0;
-
- if (rw == READ && bi->verify_fn != NULL &&
- (bi->flags & INTEGRITY_FLAG_READ))
- return 1;
-
- if (rw == WRITE && bi->generate_fn != NULL &&
- (bi->flags & INTEGRITY_FLAG_WRITE))
- return 1;
-
- return 0;
+ return disk->integrity;
}
static inline int blk_integrity_rq(struct request *rq)
@@ -1004,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq)
#define blk_rq_count_integrity_sg(a) (0)
#define blk_rq_map_integrity_sg(a, b) (0)
#define bdev_get_integrity(a) (0)
-#define bdev_get_tag_size(a) (0)
+#define blk_get_integrity(a) (0)
#define blk_integrity_compare(a, b) (0)
#define blk_integrity_register(a, b) (0)
#define blk_integrity_unregister(a) do { } while (0);
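
The request-timeout hooks added above are wired up per queue; a minimal sketch of a driver using them (the retry policy is an assumption, not an existing driver):

static enum blk_eh_timer_return example_rq_timed_out(struct request *rq)
{
	/* Grant one extra interval, then report the timeout as unhandled. */
	if (!rq->retries++)
		return BLK_EH_RESET_TIMER;
	return BLK_EH_NOT_HANDLED;
}

static void example_init_queue(struct request_queue *q)
{
	blk_queue_rq_timed_out(q, example_rq_timed_out);
	blk_queue_rq_timeout(q, 30 * HZ);	/* deadline of 30 seconds */
}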
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d084b8d227a..3a31eb50616 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -1,8 +1,10 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
+#ifdef __KERNEL__
#include <linux/blkdev.h>
#include <linux/relay.h>
+#endif
/*
* Trace categories
@@ -21,6 +23,7 @@ enum blktrace_cat {
BLK_TC_NOTIFY = 1 << 10, /* special message */
BLK_TC_AHEAD = 1 << 11, /* readahead */
BLK_TC_META = 1 << 12, /* metadata */
+ BLK_TC_DISCARD = 1 << 13, /* discard requests */
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
@@ -47,6 +50,7 @@ enum blktrace_act {
__BLK_TA_SPLIT, /* bio was split */
__BLK_TA_BOUNCE, /* bio was bounced */
__BLK_TA_REMAP, /* bio was remapped */
+ __BLK_TA_ABORT, /* request aborted */
};
/*
@@ -77,6 +81,7 @@ enum blktrace_notify {
#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@@ -89,17 +94,17 @@ enum blktrace_notify {
* The trace itself
*/
struct blk_io_trace {
- u32 magic; /* MAGIC << 8 | version */
- u32 sequence; /* event number */
- u64 time; /* in microseconds */
- u64 sector; /* disk offset */
- u32 bytes; /* transfer length */
- u32 action; /* what happened */
- u32 pid; /* who did it */
- u32 device; /* device number */
- u32 cpu; /* on what cpu did it happen */
- u16 error; /* completion error */
- u16 pdu_len; /* length of data after this trace */
+ __u32 magic; /* MAGIC << 8 | version */
+ __u32 sequence; /* event number */
+ __u64 time; /* in microseconds */
+ __u64 sector; /* disk offset */
+ __u32 bytes; /* transfer length */
+ __u32 action; /* what happened */
+ __u32 pid; /* who did it */
+ __u32 device; /* device number */
+ __u32 cpu; /* on what cpu did it happen */
+ __u16 error; /* completion error */
+ __u16 pdu_len; /* length of data after this trace */
};
/*
@@ -117,6 +122,23 @@ enum {
Blktrace_stopped,
};
+#define BLKTRACE_BDEV_SIZE 32
+
+/*
+ * User setup structure passed with BLKTRACESTART
+ */
+struct blk_user_trace_setup {
+ char name[BLKTRACE_BDEV_SIZE]; /* output */
+ __u16 act_mask; /* input */
+ __u32 buf_size; /* input */
+ __u32 buf_nr; /* input */
+ __u64 start_lba;
+ __u64 end_lba;
+ __u32 pid;
+};
+
+#ifdef __KERNEL__
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
struct blk_trace {
int trace_state;
struct rchan *rchan;
@@ -133,21 +155,6 @@ struct blk_trace {
atomic_t dropped;
};
-/*
- * User setup structure passed with BLKTRACESTART
- */
-struct blk_user_trace_setup {
- char name[BDEVNAME_SIZE]; /* output */
- u16 act_mask; /* input */
- u32 buf_size; /* input */
- u32 buf_nr; /* input */
- u64 start_lba;
- u64 end_lba;
- u32 pid;
-};
-
-#ifdef __KERNEL__
-#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
@@ -195,6 +202,9 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (likely(!bt))
return;
+ if (blk_discard_rq(rq))
+ rw |= (1 << BIO_RW_DISCARD);
+
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
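
Exporting struct blk_user_trace_setup lets user space configure a trace directly; a hedged user-space sketch (the buffer sizes and device path are illustrative, and the ioctl numbers come from linux/fs.h):

#include <linux/blktrace_api.h>
#include <linux/fs.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>

int example_start_blktrace(const char *dev)
{
	struct blk_user_trace_setup buts;
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&buts, 0, sizeof(buts));
	buts.act_mask = 0xffff;		/* all trace categories */
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size */
	buts.buf_nr = 4;		/* number of sub-buffers */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0 ||
	    ioctl(fd, BLKTRACESTART) < 0)
		return -1;

	/* buts.name is filled in by the kernel and names the trace directory */
	return fd;
}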
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 02ef8835999..4a6b604ef7e 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,6 +10,18 @@
#include <linux/wait.h>
+/**
+ * struct completion - structure used to maintain state for a "completion"
+ *
+ * This is the opaque structure used to maintain the state for a "completion".
+ * Completions currently use a FIFO to queue threads that have to wait for
+ * the "completion" event.
+ *
+ * See also: complete(), wait_for_completion() (and friends _timeout,
+ * _interruptible, _interruptible_timeout, and _killable), init_completion(),
+ * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
+ * INIT_COMPLETION().
+ */
struct completion {
unsigned int done;
wait_queue_head_t wait;
@@ -21,6 +33,14 @@ struct completion {
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
+/**
+ * DECLARE_COMPLETION: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure. Generally used
+ * for static declarations. You should use the _ONSTACK variant for automatic
+ * variables.
+ */
#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)
@@ -29,6 +49,13 @@ struct completion {
* completions - so we use the _ONSTACK() variant for those that
* are on the kernel stack:
*/
+/**
+ * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure on the kernel
+ * stack.
+ */
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
@@ -36,6 +63,13 @@ struct completion {
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
#endif
+/**
+ * init_completion: - Initialize a dynamically allocated completion
+ * @x: completion structure that is to be initialized
+ *
+ * This inline function will initialize a dynamically created completion
+ * structure.
+ */
static inline void init_completion(struct completion *x)
{
x->done = 0;
@@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
+/**
+ * INIT_COMPLETION: - reinitialize a completion structure
+ * @x: completion structure to be reinitialized
+ *
+ * This macro should be used to reinitialize a completion structure so it can
+ * be reused. This is especially important after complete_all() is used.
+ */
#define INIT_COMPLETION(x) ((x).done = 0)
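
The documented pattern above typically looks like this in practice (a minimal sketch; the worker function is illustrative):

static DECLARE_COMPLETION(example_done);

static int example_worker(void *data)
{
	/* ... perform the work the waiter depends on ... */
	complete(&example_done);		/* wake up one waiter */
	return 0;
}

static void example_wait(void)
{
	wait_for_completion(&example_done);
	INIT_COMPLETION(example_done);		/* make it reusable next time */
}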
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d7faf880849..c2747ac2ae4 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
#endif
int cpu_up(unsigned int cpu);
+void notify_cpu_starting(unsigned int cpu);
extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index c43dc47fdf7..3d2317e4af2 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -38,6 +38,7 @@
#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
#define CRYPTO_ALG_TYPE_HASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
+#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -61,6 +62,14 @@
#define CRYPTO_ALG_GENIV 0x00000200
/*
+ * Set if the algorithm has passed automated run-time testing. Note that
+ * if there is no run-time testing for a given algorithm it is considered
+ * to have passed.
+ */
+
+#define CRYPTO_ALG_TESTED 0x00000400
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -105,6 +114,7 @@ struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_ahash;
+struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
@@ -290,6 +300,15 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
+struct rng_alg {
+ int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen);
+ int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
+
+ unsigned int seedsize;
+};
+
+
#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
@@ -298,6 +317,7 @@ struct compress_alg {
#define cra_hash cra_u.hash
#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
+#define cra_rng cra_u.rng
struct crypto_alg {
struct list_head cra_list;
@@ -325,6 +345,7 @@ struct crypto_alg {
struct hash_alg hash;
struct ahash_alg ahash;
struct compress_alg compress;
+ struct rng_alg rng;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -430,6 +451,12 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
+struct rng_tfm {
+ int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen);
+ int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
+};
+
#define crt_ablkcipher crt_u.ablkcipher
#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
@@ -437,6 +464,7 @@ struct compress_tfm {
#define crt_hash crt_u.hash
#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
+#define crt_rng crt_u.rng
struct crypto_tfm {
@@ -450,6 +478,7 @@ struct crypto_tfm {
struct hash_tfm hash;
struct ahash_tfm ahash;
struct compress_tfm compress;
+ struct rng_tfm rng;
} crt_u;
struct crypto_alg *__crt_alg;
@@ -481,6 +510,10 @@ struct crypto_hash {
struct crypto_tfm base;
};
+struct crypto_rng {
+ struct crypto_tfm base;
+};
+
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -515,6 +548,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_free_tfm(struct crypto_tfm *tfm);
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+
/*
* Transform helpers which query the underlying algorithm.
*/
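
Consumers of the new RNG transform type normally go through the wrapper API in crypto/rng.h rather than crt_rng directly; a hedged sketch under that assumption ("stdrng" is the conventional algorithm alias):

#include <crypto/rng.h>

static int example_fill_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return err;
}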
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a90222e3297..08d783592b7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -13,7 +13,6 @@
struct dm_target;
struct dm_table;
-struct dm_dev;
struct mapped_device;
struct bio_vec;
@@ -84,6 +83,12 @@ void dm_error(const char *message);
*/
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
+struct dm_dev {
+ struct block_device *bdev;
+ int mode;
+ char name[16];
+};
+
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
@@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
+union map_info *dm_get_mapinfo(struct bio *bio);
/*
* Geometry functions.
@@ -232,6 +238,11 @@ int dm_table_add_target(struct dm_table *t, const char *type,
int dm_table_complete(struct dm_table *t);
/*
+ * Unplug all devices in a table.
+ */
+void dm_table_unplug_all(struct dm_table *t);
+
+/*
* Table reference counting.
*/
struct dm_table *dm_get_table(struct mapped_device *md);
@@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t);
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+/*
+ * A wrapper around vmalloc.
+ */
+void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+
/*-----------------------------------------------------------------
* Macros.
*---------------------------------------------------------------*/
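
With struct dm_dev defined here and dm_vcalloc() exported, a target constructor can keep per-device state in a vmalloc-backed, zeroed array; a small sketch (the state structure is an assumption):

struct example_dev_state {
	struct dm_dev *dev;
	sector_t start;
};

static struct example_dev_state *example_alloc_states(unsigned int nr)
{
	return dm_vcalloc(nr, sizeof(struct example_dev_state));
}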
diff --git a/include/linux/device.h b/include/linux/device.h
index 4d8372d135d..246937c9cbc 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -199,6 +199,11 @@ struct class {
struct class_private *p;
};
+struct class_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
extern struct kobject *sysfs_dev_block_kobj;
extern struct kobject *sysfs_dev_char_kobj;
extern int __must_check __class_register(struct class *class,
@@ -213,6 +218,13 @@ extern void class_unregister(struct class *class);
__class_register(class, &__key); \
})
+extern void class_dev_iter_init(struct class_dev_iter *iter,
+ struct class *class,
+ struct device *start,
+ const struct device_type *type);
+extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
+extern void class_dev_iter_exit(struct class_dev_iter *iter);
+
extern int class_for_each_device(struct class *class, struct device *start,
void *data,
int (*fn)(struct device *dev, void *data));
@@ -396,7 +408,7 @@ struct device {
spinlock_t devres_lock;
struct list_head devres_head;
- struct list_head node;
+ struct klist_node knode_class;
struct class *class;
dev_t devt; /* dev_t, creates the sysfs "dev" */
struct attribute_group **groups; /* optional groups */
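
A sketch of the new class device iterator in use (the caller and the message are illustrative; passing NULL for the type matches every device in the class):

static void example_for_each_class_device(struct class *cls)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, cls, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		dev_info(dev, "visited by iterator\n");
	class_dev_iter_exit(&iter);
}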
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index 203a025e30e..b9cd38603fd 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -65,9 +65,12 @@ struct dlm_lksb {
char * sb_lvbptr;
};
+/* dlm_new_lockspace() flags */
+
#define DLM_LSFL_NODIR 0x00000001
#define DLM_LSFL_TIMEWARN 0x00000002
#define DLM_LSFL_FS 0x00000004
+#define DLM_LSFL_NEWEXCL 0x00000008
#ifdef __KERNEL__
diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h
index c6034508fed..3060783c419 100644
--- a/include/linux/dlm_device.h
+++ b/include/linux/dlm_device.h
@@ -26,7 +26,7 @@
/* Version of the device interface */
#define DLM_DEVICE_VERSION_MAJOR 6
#define DLM_DEVICE_VERSION_MINOR 0
-#define DLM_DEVICE_VERSION_PATCH 0
+#define DLM_DEVICE_VERSION_PATCH 1
/* struct passed to the lock write */
struct dlm_lock_params {
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 639624b55fb..92f6f634e3e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -112,6 +112,7 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
extern int elv_register_queue(struct request_queue *q);
extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, int);
+extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern void elv_put_request(struct request_queue *, struct request *);
@@ -173,15 +174,15 @@ enum {
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
/*
- * Hack to reuse the donelist list_head as the fifo time holder while
+ * Hack to reuse the csd.list list_head as the fifo time holder while
* the request is in the io scheduler. Saves an unsigned long in rq.
*/
-#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next)
-#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp))
+#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
+#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) do { \
list_del_init(&(rq)->queuelist); \
- INIT_LIST_HEAD(&(rq)->donelist); \
+ INIT_LIST_HEAD(&(rq)->csd.list); \
} while (0)
/*
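
The fifo helpers above mirror how a deadline-style scheduler tracks request age; a hedged sketch with an illustrative fifo list and a fixed five-second expiry:

static LIST_HEAD(example_fifo);

static void example_add_request(struct request *rq)
{
	rq_set_fifo_time(rq, jiffies + 5 * HZ);
	list_add_tail(&rq->queuelist, &example_fifo);
}

static struct request *example_expired_request(void)
{
	struct request *rq;

	if (list_empty(&example_fifo))
		return NULL;

	rq = rq_entry_fifo(example_fifo.next);
	return time_after_eq(jiffies, rq_fifo_time(rq)) ? rq : NULL;
}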
diff --git a/include/linux/fd.h b/include/linux/fd.h
index b6bd41d2b46..f5d194af07a 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -15,10 +15,16 @@ struct floppy_struct {
sect, /* sectors per track */
head, /* nr of heads */
track, /* nr of tracks */
- stretch; /* !=0 means double track steps */
+ stretch; /* bit 0 !=0 means double track steps */
+ /* bit 1 != 0 means swap sides */
+ /* bits 2..9 give the first sector */
+ /* number (the LSB is flipped) */
#define FD_STRETCH 1
#define FD_SWAPSIDES 2
#define FD_ZEROBASED 4
+#define FD_SECTBASEMASK 0x3FC
+#define FD_MKSECTBASE(s) (((s) ^ 1) << 2)
+#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1)
unsigned char gap, /* gap1 size */
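
A short worked example of the new first-sector encoding: the stored value is the sector number with its least significant bit flipped, shifted into bits 2..9, so a first sector of 1 encodes as 0 (keeping old images valid) and 0 encodes as 0x4. The helper below is illustrative only:

static void example_set_first_sector(struct floppy_struct *f, unsigned int first)
{
	f->stretch = (f->stretch & ~FD_SECTBASEMASK) | FD_MKSECTBASE(first);
	WARN_ON(FD_SECTBASE(f) != first);	/* round-trips for 0..255 */
}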
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 580b513668f..32477e8872d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -86,7 +86,9 @@ extern int dir_notify_enable;
#define READ_META (READ | (1 << BIO_RW_META))
#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC))
-#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
+#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
+#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
+#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
#define SEL_IN 1
#define SEL_OUT 2
@@ -222,6 +224,7 @@ extern int dir_notify_enable;
#define BLKTRACESTART _IO(0x12,116)
#define BLKTRACESTOP _IO(0x12,117)
#define BLKTRACETEARDOWN _IO(0x12,118)
+#define BLKDISCARD _IO(0x12,119)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
@@ -1682,6 +1685,7 @@ extern void chrdev_show(struct seq_file *,off_t);
/* fs/block_dev.c */
#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_HASH_SIZE 255
@@ -1718,6 +1722,9 @@ extern int fs_may_remount_ro(struct super_block *);
*/
#define bio_data_dir(bio) ((bio)->bi_rw & 1)
+extern void check_disk_size_change(struct gendisk *disk,
+ struct block_device *bdev);
+extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *);
extern int invalidate_partition(struct gendisk *, int);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index be4f5e5bfe0..206cdf96c3a 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -11,12 +11,15 @@
#include <linux/types.h>
#include <linux/kdev_t.h>
+#include <linux/rcupdate.h>
#ifdef CONFIG_BLOCK
-#define kobj_to_dev(k) container_of(k, struct device, kobj)
-#define dev_to_disk(device) container_of(device, struct gendisk, dev)
-#define dev_to_part(device) container_of(device, struct hd_struct, dev)
+#define kobj_to_dev(k) container_of((k), struct device, kobj)
+#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
+#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
+#define disk_to_dev(disk) (&(disk)->part0.__dev)
+#define part_to_dev(part) (&((part)->__dev))
extern struct device_type part_type;
extern struct kobject *block_depr;
@@ -55,6 +58,9 @@ enum {
UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
};
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
+
#include <linux/major.h>
#include <linux/device.h>
#include <linux/smp.h>
@@ -87,7 +93,7 @@ struct disk_stats {
struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
- struct device dev;
+ struct device __dev;
struct kobject *holder_dir;
int policy, partno;
#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -100,6 +106,7 @@ struct hd_struct {
#else
struct disk_stats dkstats;
#endif
+ struct rcu_head rcu_head;
};
#define GENHD_FL_REMOVABLE 1
@@ -108,100 +115,148 @@ struct hd_struct {
#define GENHD_FL_CD 8
#define GENHD_FL_UP 16
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
-#define GENHD_FL_FAIL 64
+#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
+
+#define BLK_SCSI_MAX_CMDS (256)
+#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+
+struct blk_scsi_cmd_filter {
+ unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+ unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+ struct kobject kobj;
+};
+
+struct disk_part_tbl {
+ struct rcu_head rcu_head;
+ int len;
+ struct hd_struct *part[];
+};
struct gendisk {
+ /* major, first_minor and minors are input parameters only,
+ * don't use directly. Use disk_devt() and disk_max_parts().
+ */
int major; /* major number of driver */
int first_minor;
int minors; /* maximum number of minors, =1 for
* disks that can't be partitioned. */
- char disk_name[32]; /* name of major driver */
- struct hd_struct **part; /* [indexed by minor] */
+
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
+
+ /* Array of pointers to partitions indexed by partno.
+ * Protected with matching bdev lock but stat and other
+ * non-critical accesses use RCU. Always access through
+ * helpers.
+ */
+ struct disk_part_tbl *part_tbl;
+ struct hd_struct part0;
+
struct block_device_operations *fops;
struct request_queue *queue;
void *private_data;
- sector_t capacity;
int flags;
struct device *driverfs_dev; // FIXME: remove
- struct device dev;
- struct kobject *holder_dir;
struct kobject *slave_dir;
struct timer_rand_state *random;
- int policy;
atomic_t sync_io; /* RAID */
- unsigned long stamp;
- int in_flight;
-#ifdef CONFIG_SMP
- struct disk_stats *dkstats;
-#else
- struct disk_stats dkstats;
-#endif
struct work_struct async_notify;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *integrity;
#endif
+ int node_id;
};
-/*
- * Macros to operate on percpu disk statistics:
- *
- * The __ variants should only be called in critical sections. The full
- * variants disable/enable preemption.
- */
-static inline struct hd_struct *get_part(struct gendisk *gendiskp,
- sector_t sector)
+static inline struct gendisk *part_to_disk(struct hd_struct *part)
{
- struct hd_struct *part;
- int i;
- for (i = 0; i < gendiskp->minors - 1; i++) {
- part = gendiskp->part[i];
- if (part && part->start_sect <= sector
- && sector < part->start_sect + part->nr_sects)
- return part;
+ if (likely(part)) {
+ if (part->partno)
+ return dev_to_disk(part_to_dev(part)->parent);
+ else
+ return dev_to_disk(part_to_dev(part));
}
return NULL;
}
-#ifdef CONFIG_SMP
-#define __disk_stat_add(gendiskp, field, addnd) \
- (per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd)
+static inline int disk_max_parts(struct gendisk *disk)
+{
+ if (disk->flags & GENHD_FL_EXT_DEVT)
+ return DISK_MAX_PARTS;
+ return disk->minors;
+}
-#define disk_stat_read(gendiskp, field) \
-({ \
- typeof(gendiskp->dkstats->field) res = 0; \
- int i; \
- for_each_possible_cpu(i) \
- res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
- res; \
-})
+static inline bool disk_partitionable(struct gendisk *disk)
+{
+ return disk_max_parts(disk) > 1;
+}
-static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
- int i;
+static inline dev_t disk_devt(struct gendisk *disk)
+{
+ return disk_to_dev(disk)->devt;
+}
- for_each_possible_cpu(i)
- memset(per_cpu_ptr(gendiskp->dkstats, i), value,
- sizeof(struct disk_stats));
-}
+static inline dev_t part_devt(struct hd_struct *part)
+{
+ return part_to_dev(part)->devt;
+}
-#define __part_stat_add(part, field, addnd) \
- (per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd)
+extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
-#define __all_stat_add(gendiskp, part, field, addnd, sector) \
-({ \
- if (part) \
- __part_stat_add(part, field, addnd); \
- __disk_stat_add(gendiskp, field, addnd); \
-})
+static inline void disk_put_part(struct hd_struct *part)
+{
+ if (likely(part))
+ put_device(part_to_dev(part));
+}
+
+/*
+ * Smarter partition iterator without context limits.
+ */
+#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
+#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
+#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
+
+struct disk_part_iter {
+ struct gendisk *disk;
+ struct hd_struct *part;
+ int idx;
+ unsigned int flags;
+};
+
+extern void disk_part_iter_init(struct disk_part_iter *piter,
+ struct gendisk *disk, unsigned int flags);
+extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
+extern void disk_part_iter_exit(struct disk_part_iter *piter);
+
+extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
+ sector_t sector);
+
+/*
+ * Macros to operate on percpu disk statistics:
+ *
+ * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
+ * and should be called between disk_stat_lock() and
+ * disk_stat_unlock().
+ *
+ * part_stat_read() can be called at any time.
+ *
+ * part_stat_{add|set_all}() and {init|free}_part_stats are for
+ * internal use only.
+ */
+#ifdef CONFIG_SMP
+#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
+#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
+
+#define __part_stat_add(cpu, part, field, addnd) \
+ (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
#define part_stat_read(part, field) \
({ \
- typeof(part->dkstats->field) res = 0; \
+ typeof((part)->dkstats->field) res = 0; \
int i; \
for_each_possible_cpu(i) \
- res += per_cpu_ptr(part->dkstats, i)->field; \
+ res += per_cpu_ptr((part)->dkstats, i)->field; \
res; \
})
@@ -213,171 +268,107 @@ static inline void part_stat_set_all(struct hd_struct *part, int value)
memset(per_cpu_ptr(part->dkstats, i), value,
sizeof(struct disk_stats));
}
-
-#else /* !CONFIG_SMP */
-#define __disk_stat_add(gendiskp, field, addnd) \
- (gendiskp->dkstats.field += addnd)
-#define disk_stat_read(gendiskp, field) (gendiskp->dkstats.field)
-static inline void disk_stat_set_all(struct gendisk *gendiskp, int value)
+static inline int init_part_stats(struct hd_struct *part)
{
- memset(&gendiskp->dkstats, value, sizeof (struct disk_stats));
+ part->dkstats = alloc_percpu(struct disk_stats);
+ if (!part->dkstats)
+ return 0;
+ return 1;
}
-#define __part_stat_add(part, field, addnd) \
- (part->dkstats.field += addnd)
-
-#define __all_stat_add(gendiskp, part, field, addnd, sector) \
-({ \
- if (part) \
- part->dkstats.field += addnd; \
- __disk_stat_add(gendiskp, field, addnd); \
-})
-
-#define part_stat_read(part, field) (part->dkstats.field)
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
+static inline void free_part_stats(struct hd_struct *part)
{
- memset(&part->dkstats, value, sizeof(struct disk_stats));
+ free_percpu(part->dkstats);
}
-#endif /* CONFIG_SMP */
+#else /* !CONFIG_SMP */
+#define part_stat_lock() ({ rcu_read_lock(); 0; })
+#define part_stat_unlock() rcu_read_unlock()
-#define disk_stat_add(gendiskp, field, addnd) \
- do { \
- preempt_disable(); \
- __disk_stat_add(gendiskp, field, addnd); \
- preempt_enable(); \
- } while (0)
-
-#define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1)
-#define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1)
-
-#define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1)
-#define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1)
-
-#define __disk_stat_sub(gendiskp, field, subnd) \
- __disk_stat_add(gendiskp, field, -subnd)
-#define disk_stat_sub(gendiskp, field, subnd) \
- disk_stat_add(gendiskp, field, -subnd)
-
-#define part_stat_add(gendiskp, field, addnd) \
- do { \
- preempt_disable(); \
- __part_stat_add(gendiskp, field, addnd);\
- preempt_enable(); \
- } while (0)
-
-#define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1)
-#define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1)
-
-#define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1)
-#define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1)
-
-#define __part_stat_sub(gendiskp, field, subnd) \
- __part_stat_add(gendiskp, field, -subnd)
-#define part_stat_sub(gendiskp, field, subnd) \
- part_stat_add(gendiskp, field, -subnd)
-
-#define all_stat_add(gendiskp, part, field, addnd, sector) \
- do { \
- preempt_disable(); \
- __all_stat_add(gendiskp, part, field, addnd, sector); \
- preempt_enable(); \
- } while (0)
-
-#define __all_stat_dec(gendiskp, field, sector) \
- __all_stat_add(gendiskp, field, -1, sector)
-#define all_stat_dec(gendiskp, field, sector) \
- all_stat_add(gendiskp, field, -1, sector)
-
-#define __all_stat_inc(gendiskp, part, field, sector) \
- __all_stat_add(gendiskp, part, field, 1, sector)
-#define all_stat_inc(gendiskp, part, field, sector) \
- all_stat_add(gendiskp, part, field, 1, sector)
-
-#define __all_stat_sub(gendiskp, part, field, subnd, sector) \
- __all_stat_add(gendiskp, part, field, -subnd, sector)
-#define all_stat_sub(gendiskp, part, field, subnd, sector) \
- all_stat_add(gendiskp, part, field, -subnd, sector)
-
-/* Inlines to alloc and free disk stats in struct gendisk */
-#ifdef CONFIG_SMP
-static inline int init_disk_stats(struct gendisk *disk)
-{
- disk->dkstats = alloc_percpu(struct disk_stats);
- if (!disk->dkstats)
- return 0;
- return 1;
-}
+#define __part_stat_add(cpu, part, field, addnd) \
+ ((part)->dkstats.field += addnd)
+
+#define part_stat_read(part, field) ((part)->dkstats.field)
-static inline void free_disk_stats(struct gendisk *disk)
+static inline void part_stat_set_all(struct hd_struct *part, int value)
{
- free_percpu(disk->dkstats);
+ memset(&part->dkstats, value, sizeof(struct disk_stats));
}
static inline int init_part_stats(struct hd_struct *part)
{
- part->dkstats = alloc_percpu(struct disk_stats);
- if (!part->dkstats)
- return 0;
return 1;
}
static inline void free_part_stats(struct hd_struct *part)
{
- free_percpu(part->dkstats);
-}
-
-#else /* CONFIG_SMP */
-static inline int init_disk_stats(struct gendisk *disk)
-{
- return 1;
}
-static inline void free_disk_stats(struct gendisk *disk)
-{
-}
+#endif /* CONFIG_SMP */
-static inline int init_part_stats(struct hd_struct *part)
+#define part_stat_add(cpu, part, field, addnd) do { \
+ __part_stat_add((cpu), (part), field, addnd); \
+ if ((part)->partno) \
+ __part_stat_add((cpu), &part_to_disk((part))->part0, \
+ field, addnd); \
+} while (0)
+
+#define part_stat_dec(cpu, gendiskp, field) \
+ part_stat_add(cpu, gendiskp, field, -1)
+#define part_stat_inc(cpu, gendiskp, field) \
+ part_stat_add(cpu, gendiskp, field, 1)
+#define part_stat_sub(cpu, gendiskp, field, subnd) \
+ part_stat_add(cpu, gendiskp, field, -subnd)
+
+static inline void part_inc_in_flight(struct hd_struct *part)
{
- return 1;
+ part->in_flight++;
+ if (part->partno)
+ part_to_disk(part)->part0.in_flight++;
}
-static inline void free_part_stats(struct hd_struct *part)
+static inline void part_dec_in_flight(struct hd_struct *part)
{
+ part->in_flight--;
+ if (part->partno)
+ part_to_disk(part)->part0.in_flight--;
}
-#endif /* CONFIG_SMP */
/* drivers/block/ll_rw_blk.c */
-extern void disk_round_stats(struct gendisk *disk);
-extern void part_round_stats(struct hd_struct *part);
+extern void part_round_stats(int cpu, struct hd_struct *part);
/* drivers/block/genhd.c */
extern int get_blkdev_list(char *, int);
extern void add_disk(struct gendisk *disk);
extern void del_gendisk(struct gendisk *gp);
extern void unlink_gendisk(struct gendisk *gp);
-extern struct gendisk *get_gendisk(dev_t dev, int *part);
+extern struct gendisk *get_gendisk(dev_t dev, int *partno);
+extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
extern void set_device_ro(struct block_device *bdev, int flag);
extern void set_disk_ro(struct gendisk *disk, int flag);
+static inline int get_disk_ro(struct gendisk *disk)
+{
+ return disk->part0.policy;
+}
+
/* drivers/char/random.c */
extern void add_disk_randomness(struct gendisk *disk);
extern void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
{
- return bdev->bd_contains == bdev ? 0 : bdev->bd_part->start_sect;
+ return bdev->bd_part->start_sect;
}
static inline sector_t get_capacity(struct gendisk *disk)
{
- return disk->capacity;
+ return disk->part0.nr_sects;
}
static inline void set_capacity(struct gendisk *disk, sector_t size)
{
- disk->capacity = size;
+ disk->part0.nr_sects = size;
}
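
Illustration only, not part of the patch: with the statistics now embedded in each hd_struct, a completion-side accounting path might look roughly like the sketch below. The function name is invented, the cpu value is a placeholder for whatever per-cpu stat locking helper the rest of this header provides, and the ios[]/sectors[] counters are assumed members of struct disk_stats indexed by read/write direction.

static void my_account_completion(struct hd_struct *part, int rw,
				  unsigned int nr_sectors)
{
	int cpu = 0;	/* placeholder; real callers obtain this from the
			 * per-cpu stat lock helpers, not a constant */

	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], nr_sectors);

	/* assumes part_inc_in_flight() was called when the request started */
	part_dec_in_flight(part);
}

Because part_stat_add() and part_dec_in_flight() forward to part0 whenever partno is non-zero, the whole-disk totals stay consistent without a separate disk-level call.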
#ifdef CONFIG_SOLARIS_X86_PARTITION
@@ -527,9 +518,12 @@ struct unixware_disklabel {
#define ADDPART_FLAG_RAID 1
#define ADDPART_FLAG_WHOLEDISK 2
-extern dev_t blk_lookup_devt(const char *name, int part);
-extern char *disk_name (struct gendisk *hd, int part, char *buf);
+extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
+extern void blk_free_devt(dev_t devt);
+extern dev_t blk_lookup_devt(const char *name, int partno);
+extern char *disk_name (struct gendisk *hd, int partno, char *buf);
+extern int disk_expand_part_tbl(struct gendisk *disk, int target);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int);
extern void delete_partition(struct gendisk *, int);
@@ -546,16 +540,23 @@ extern void blk_register_region(dev_t devt, unsigned long range,
void *data);
extern void blk_unregister_region(dev_t devt, unsigned long range);
-static inline struct block_device *bdget_disk(struct gendisk *disk, int index)
-{
- return bdget(MKDEV(disk->major, disk->first_minor) + index);
-}
+extern ssize_t part_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t part_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+extern ssize_t part_fail_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t part_fail_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
#else /* CONFIG_BLOCK */
static inline void printk_all_partitions(void) { }
-static inline dev_t blk_lookup_devt(const char *name, int part)
+static inline dev_t blk_lookup_devt(const char *name, int partno)
{
dev_t devt = MKDEV(0, 0);
return devt;
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index c3c19f926e6..14d0df0b574 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -118,7 +118,11 @@ struct gfs2_sb {
char sb_lockproto[GFS2_LOCKNAME_LEN];
char sb_locktable[GFS2_LOCKNAME_LEN];
- /* In gfs1, quota and license dinodes followed */
+
+ struct gfs2_inum __pad3; /* Was quota inode in gfs1 */
+ struct gfs2_inum __pad4; /* Was licence inode in gfs1 */
+#define GFS2_HAS_UUID 1
+ __u8 sb_uuid[16]; /* The UUID, maybe 0 for backwards compat */
};
/*
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 06c338ef7f1..8ea98db223e 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -38,7 +38,7 @@ extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *));
struct klist_node {
- struct klist *n_klist;
+ void *n_klist; /* never access directly */
struct list_head n_node;
struct kref n_ref;
struct completion n_removed;
@@ -57,7 +57,6 @@ extern int klist_node_attached(struct klist_node *n);
struct klist_iter {
struct klist *i_klist;
- struct list_head *i_head;
struct klist_node *i_cur;
};
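
For context, iteration only needs the current node, which is why i_head can go away. A minimal sketch of walking a klist with the iterator API (the list pointer comes from the caller; the walker function is hypothetical):

static void my_walk(struct klist *k)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(k, &iter);
	while ((n = klist_next(&iter)))
		pr_info("visiting klist node %p\n", n);
	klist_iter_exit(&iter);
}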
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 225bfc5bd9e..947cf84e555 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -146,6 +146,7 @@ enum {
ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
+ ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@@ -244,6 +245,7 @@ enum {
ATA_TMOUT_BOOT = 30000, /* heuristic */
ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
ATA_TMOUT_INTERNAL_QUICK = 5000,
+ ATA_TMOUT_MAX_PARK = 30000,
/* FIXME: GoVault needs 2s but we can't afford that without
* parallel probing. 800ms is enough for iVDR disk
@@ -319,8 +321,11 @@ enum {
ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_LPM = (1 << 4), /* link power management action */
+ ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
+ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
+ ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
+ ATA_EH_ENABLE_LINK | ATA_EH_LPM,
/* ata_eh_info->flags */
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
@@ -452,6 +457,7 @@ enum link_pm {
MEDIUM_POWER,
};
extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_unload_heads;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
@@ -554,8 +560,8 @@ struct ata_ering {
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned long flags; /* ATA_DFLAG_xxx */
unsigned int horkage; /* List of broken features */
+ unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
#ifdef CONFIG_ATA_ACPI
acpi_handle acpi_handle;
@@ -564,6 +570,7 @@ struct ata_device {
/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
u64 n_sectors; /* size of device, if ATA */
unsigned int class; /* ATA_DEV_xxx */
+ unsigned long unpark_deadline;
u8 pio_mode;
u8 dma_mode;
@@ -621,6 +628,7 @@ struct ata_eh_context {
[ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
unsigned int classes[ATA_MAX_DEVICES];
unsigned int did_probe_mask;
+ unsigned int unloaded_mask;
unsigned int saved_ncq_enabled;
u8 saved_xfer_mode[ATA_MAX_DEVICES];
/* timestamp for the last reset attempt or success */
@@ -688,7 +696,8 @@ struct ata_port {
unsigned int qc_active;
int nr_active_links; /* #links with active qcs */
- struct ata_link link; /* host default link */
+ struct ata_link link; /* host default link */
+ struct ata_link *slave_link; /* see ata_slave_link_init() */
int nr_pmp_links; /* nr of available PMP links */
struct ata_link *pmp_link; /* array of PMP links */
@@ -709,6 +718,7 @@ struct ata_port {
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
+ struct completion park_req_pending;
pm_message_t pm_mesg;
int *pm_result;
@@ -772,8 +782,8 @@ struct ata_port_operations {
/*
* Optional features
*/
- int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val);
- int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val);
+ int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
void (*pmp_attach)(struct ata_port *ap);
void (*pmp_detach)(struct ata_port *ap);
int (*enable_pm)(struct ata_port *ap, enum link_pm policy);
@@ -895,6 +905,7 @@ extern void ata_port_disable(struct ata_port *);
extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
+extern int ata_slave_link_init(struct ata_port *ap);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
struct scsi_host_template *sht);
@@ -920,8 +931,8 @@ extern int sata_scr_valid(struct ata_link *link);
extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
-extern int ata_link_online(struct ata_link *link);
-extern int ata_link_offline(struct ata_link *link);
+extern bool ata_link_online(struct ata_link *link);
+extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
@@ -1098,6 +1109,7 @@ extern void ata_std_error_handler(struct ata_port *ap);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
+extern struct device_attribute *ata_common_sdev_attrs[];
#define ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
@@ -1112,7 +1124,8 @@ extern const struct ata_port_operations sata_port_ops;
.proc_name = drv_name, \
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
- .bios_param = ata_std_bios_param
+ .bios_param = ata_std_bios_param, \
+ .sdev_attrs = ata_common_sdev_attrs
#define ATA_NCQ_SHT(drv_name) \
ATA_BASE_SHT(drv_name), \
@@ -1134,7 +1147,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
static inline int ata_is_host_link(const struct ata_link *link)
{
- return link == &link->ap->link;
+ return link == &link->ap->link || link == link->ap->slave_link;
}
#else /* CONFIG_SATA_PMP */
static inline bool sata_pmp_supported(struct ata_port *ap)
@@ -1167,7 +1180,7 @@ static inline int sata_srst_pmp(struct ata_link *link)
printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
#define ata_link_printk(link, lv, fmt, args...) do { \
- if (sata_pmp_attached((link)->ap)) \
+ if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \
printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \
(link)->pmp , ##args); \
else \
@@ -1265,34 +1278,17 @@ static inline int ata_link_active(struct ata_link *link)
return ata_tag_valid(link->active_tag) || link->sactive;
}
-static inline struct ata_link *ata_port_first_link(struct ata_port *ap)
-{
- if (sata_pmp_attached(ap))
- return ap->pmp_link;
- return &ap->link;
-}
-
-static inline struct ata_link *ata_port_next_link(struct ata_link *link)
-{
- struct ata_port *ap = link->ap;
-
- if (ata_is_host_link(link)) {
- if (!sata_pmp_attached(ap))
- return NULL;
- return ap->pmp_link;
- }
-
- if (++link < ap->nr_pmp_links + ap->pmp_link)
- return link;
- return NULL;
-}
+extern struct ata_link *__ata_port_next_link(struct ata_port *ap,
+ struct ata_link *link,
+ bool dev_only);
-#define __ata_port_for_each_link(lk, ap) \
- for ((lk) = &(ap)->link; (lk); (lk) = ata_port_next_link(lk))
+#define __ata_port_for_each_link(link, ap) \
+ for ((link) = __ata_port_next_link((ap), NULL, false); (link); \
+ (link) = __ata_port_next_link((ap), (link), false))
#define ata_port_for_each_link(link, ap) \
- for ((link) = ata_port_first_link(ap); (link); \
- (link) = ata_port_next_link(link))
+ for ((link) = __ata_port_next_link((ap), NULL, true); (link); \
+ (link) = __ata_port_next_link((ap), (link), true))
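
Illustration only, not part of the patch: the reworked iterators are used exactly as before; a hypothetical helper that walks every link and enabled device on a port could look like this.

static void my_dump_port(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	__ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			if (ata_dev_enabled(dev))
				ata_dev_printk(dev, KERN_INFO,
					       "class %u\n", dev->class);
		}
	}
}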
#define ata_link_for_each_dev(dev, link) \
for ((dev) = (link)->device; \
diff --git a/include/linux/major.h b/include/linux/major.h
index 53d5fafd85c..88249452b93 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -170,4 +170,6 @@
#define VIOTAPE_MAJOR 230
+#define BLOCK_EXT_MAJOR 259
+
#endif
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 310e6160641..8b4aa0523db 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -41,6 +41,8 @@ struct mtd_blktrans_ops {
unsigned long block, char *buffer);
int (*writesect)(struct mtd_blktrans_dev *dev,
unsigned long block, char *buffer);
+ int (*discard)(struct mtd_blktrans_dev *dev,
+ unsigned long block, unsigned nr_blocks);
/* Block layer ioctls */
int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);
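
Illustration only: a translation layer that can reclaim sectors might wire up the new hook along these lines and point .discard at it in its mtd_blktrans_ops. The mydev_* names and the block-map helper are invented for the sketch.

static int mydev_discard(struct mtd_blktrans_dev *dev,
			 unsigned long block, unsigned nr_blocks)
{
	unsigned long i;

	for (i = 0; i < nr_blocks; i++)
		mydev_free_block(dev, block + i);	/* hypothetical helper that
							 * marks the block unused */
	return 0;
}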
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index da2698b0fdd..b86fa2ffca0 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret)
#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
- * not handling interrupts, soon dead */
+ * not handling interrupts, soon dead.
+ * Called on the dying cpu, interrupts
+ * are already disabled. Must not
+ * sleep, must not fail */
#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
* lock is dropped */
+#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
+ * Called on the new cpu, just before
+ * enabling interrupts. Must not sleep,
+ * must not fail */
/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
* operation in progress
@@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret)
#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
+#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
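
Illustration only: a hotplug notifier that needs to run on the incoming CPU before interrupts are enabled could handle the new event as sketched below; the callback and the per-cpu setup helper are hypothetical names.

static int __cpuinit my_cpu_callback(struct notifier_block *nb,
				     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		/* runs on the new cpu with irqs still off: must not sleep
		 * and must not fail */
		my_setup_percpu_state(cpu);	/* hypothetical */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}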
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5afc1b23346..cf793bbbd05 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -104,8 +104,8 @@ struct prop_local_single {
* snapshot of the last seen global state
* and a lock protecting this state
*/
- int shift;
unsigned long period;
+ int shift;
spinlock_t lock; /* protect the snapshot state */
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad1..5d0819ee442 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -451,8 +451,8 @@ struct signal_struct {
* - everyone except group_exit_task is stopped during signal delivery
* of fatal signals, group_exit_task processes the signal.
*/
- struct task_struct *group_exit_task;
int notify_count;
+ struct task_struct *group_exit_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
@@ -824,6 +824,9 @@ struct sched_domain {
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;
#endif
+#ifdef CONFIG_SCHED_DEBUG
+ char *name;
+#endif
};
extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
@@ -897,7 +900,7 @@ struct sched_class {
void (*yield_task) (struct rq *rq);
int (*select_task_rq)(struct task_struct *p, int sync);
- void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
struct task_struct * (*pick_next_task) (struct rq *rq);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@@ -1010,8 +1013,8 @@ struct sched_entity {
struct sched_rt_entity {
struct list_head run_list;
- unsigned int time_slice;
unsigned long timeout;
+ unsigned int time_slice;
int nr_cpus_allowed;
struct sched_rt_entity *back;
diff --git a/include/linux/security.h b/include/linux/security.h
index 80c4d002864..f5c4a51eb42 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1560,11 +1560,6 @@ struct security_operations {
extern int security_init(void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
-extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
-extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
-extern void securityfs_remove(struct dentry *dentry);
/* Security operations */
int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
@@ -2424,25 +2419,6 @@ static inline int security_netlink_recv(struct sk_buff *skb, int cap)
return cap_netlink_recv(skb, cap);
}
-static inline struct dentry *securityfs_create_dir(const char *name,
- struct dentry *parent)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct dentry *securityfs_create_file(const char *name,
- mode_t mode,
- struct dentry *parent,
- void *data,
- const struct file_operations *fops)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline void securityfs_remove(struct dentry *dentry)
-{
-}
-
static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return -EOPNOTSUPP;
@@ -2806,5 +2782,35 @@ static inline void security_audit_rule_free(void *lsmrule)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_AUDIT */
+#ifdef CONFIG_SECURITYFS
+
+extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
+extern void securityfs_remove(struct dentry *dentry);
+
+#else /* CONFIG_SECURITYFS */
+
+static inline struct dentry *securityfs_create_dir(const char *name,
+ struct dentry *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *securityfs_create_file(const char *name,
+ mode_t mode,
+ struct dentry *parent,
+ void *data,
+ const struct file_operations *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void securityfs_remove(struct dentry *dentry)
+{}
+
+#endif
+
#endif /* ! __LINUX_SECURITY_H */
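
Illustration only: with the declarations now gated on CONFIG_SECURITYFS, a consumer creates and removes its nodes exactly as before; a minimal sketch, with hypothetical directory/file names and an empty placeholder fops:

static const struct file_operations my_policy_fops;	/* hypothetical, left empty */
static struct dentry *my_dir, *my_file;

static int __init my_fs_init(void)
{
	my_dir = securityfs_create_dir("mylsm", NULL);
	if (IS_ERR(my_dir))
		return PTR_ERR(my_dir);

	my_file = securityfs_create_file("policy", 0600, my_dir, NULL,
					 &my_policy_fops);
	if (IS_ERR(my_file)) {
		securityfs_remove(my_dir);
		return PTR_ERR(my_file);
	}
	return 0;
}

When CONFIG_SECURITYFS is not set, the stubs return ERR_PTR(-ENODEV), so callers written this way degrade gracefully.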
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
new file mode 100644
index 00000000000..a3eb2f65b65
--- /dev/null
+++ b/include/linux/string_helpers.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STRING_HELPERS_H_
+#define _LINUX_STRING_HELPERS_H_
+
+#include <linux/types.h>
+
+/* Descriptions of the types of units to
+ * print in */
+enum string_size_units {
+ STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
+ STRING_UNITS_2, /* use binary powers of 2^10 */
+};
+
+int string_get_size(u64 size, enum string_size_units units,
+ char *buf, int len);
+
+#endif
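
Illustration only: a caller formatting a device capacity for a log message might use the new helper as below, assuming the size argument is given in bytes; the surrounding function is hypothetical.

static void my_report_capacity(u64 bytes)
{
	char cap_str[32];

	string_get_size(bytes, STRING_UNITS_10, cap_str, sizeof(cap_str));
	printk(KERN_INFO "capacity: %s\n", cap_str);
}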