Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bitops/sched.h         |  21
-rw-r--r--  include/asm-mips/mach-au1x00/au1xxx_ide.h  |  28
-rw-r--r--  include/asm-s390/atomic.h                  |   4
-rw-r--r--  include/asm-s390/cmb.h                     |   1
-rw-r--r--  include/asm-s390/processor.h               |   4
-rw-r--r--  include/asm-s390/sclp.h                    |  47
-rw-r--r--  include/asm-s390/sfp-machine.h             |   6
-rw-r--r--  include/asm-s390/sfp-util.h                |  11
-rw-r--r--  include/linux/Kbuild                       |   1
-rw-r--r--  include/linux/blkdev.h                     |   5
-rw-r--r--  include/linux/dlm.h                        |  14
-rw-r--r--  include/linux/dlm_device.h                 |  22
-rw-r--r--  include/linux/dlm_netlink.h                |  56
-rw-r--r--  include/linux/eeprom_93cx6.h               |  72
-rw-r--r--  include/linux/firewire-cdev.h              | 297
-rw-r--r--  include/linux/fs.h                         |   7
-rw-r--r--  include/linux/gfs2_ondisk.h                | 142
-rw-r--r--  include/linux/gpio_mouse.h                 |  61
-rw-r--r--  include/linux/hardirq.h                    |  13
-rw-r--r--  include/linux/hid.h                        |  56
-rw-r--r--  include/linux/ide.h                        |  18
-rw-r--r--  include/linux/input.h                      |  20
-rw-r--r--  include/linux/ioprio.h                     |   6
-rw-r--r--  include/linux/pipe_fs_i.h                  | 117
-rw-r--r--  include/linux/sched.h                      | 251
-rw-r--r--  include/linux/splice.h                     |  73
-rw-r--r--  include/linux/sunrpc/svc.h                 |   2
-rw-r--r--  include/linux/topology.h                   |  12
-rw-r--r--  include/linux/usb.h                        |  16
-rw-r--r--  include/linux/wait.h                       |  16
-rw-r--r--  include/pcmcia/ciscode.h                   |   2
31 files changed, 989 insertions, 412 deletions
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
index 815bb014806..604fab7031a 100644
--- a/include/asm-generic/bitops/sched.h
+++ b/include/asm-generic/bitops/sched.h
@@ -6,28 +6,23 @@
/*
* Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is cleared.
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
- if (unlikely(b[0]))
+ if (b[0])
return __ffs(b[0]);
- if (likely(b[1]))
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
+ return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
- if (unlikely(b[0]))
+ if (b[0])
return __ffs(b[0]);
- if (unlikely(b[1]))
+ if (b[1])
return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
+ if (b[2])
return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
+ return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
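
As a quick check of what the simplified 64-bit branch computes, here is a self-contained sketch (plain userspace C, with __ffs() approximated by __builtin_ctzl(); not kernel code):

#include <stdio.h>

/* Scan a 100-bit bitmap stored in two unsigned longs and return the
 * index of the first set bit, mirroring the BITS_PER_LONG == 64 case
 * above.  The caller guarantees at least one bit is set. */
static int find_first_bit_100(const unsigned long *b)
{
        if (b[0])
                return __builtin_ctzl(b[0]);
        return __builtin_ctzl(b[1]) + 64;
}

int main(void)
{
        unsigned long b[2] = { 0, 1UL << 8 };   /* only bit 72 set */

        printf("%d\n", find_first_bit_100(b));  /* prints 72 */
        return 0;
}
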
diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h
index 8fcae21adbd..4663e8b415c 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_ide.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h
@@ -88,26 +88,26 @@ static const struct drive_list_entry dma_white_list [] = {
/*
* Hitachi
*/
- { "HITACHI_DK14FA-20" , "ALL" },
- { "HTS726060M9AT00" , "ALL" },
+ { "HITACHI_DK14FA-20" , NULL },
+ { "HTS726060M9AT00" , NULL },
/*
* Maxtor
*/
- { "Maxtor 6E040L0" , "ALL" },
- { "Maxtor 6Y080P0" , "ALL" },
- { "Maxtor 6Y160P0" , "ALL" },
+ { "Maxtor 6E040L0" , NULL },
+ { "Maxtor 6Y080P0" , NULL },
+ { "Maxtor 6Y160P0" , NULL },
/*
* Seagate
*/
- { "ST3120026A" , "ALL" },
- { "ST320014A" , "ALL" },
- { "ST94011A" , "ALL" },
- { "ST340016A" , "ALL" },
+ { "ST3120026A" , NULL },
+ { "ST320014A" , NULL },
+ { "ST94011A" , NULL },
+ { "ST340016A" , NULL },
/*
* Western Digital
*/
- { "WDC WD400UE-00HCT0" , "ALL" },
- { "WDC WD400JB-00JJC0" , "ALL" },
+ { "WDC WD400UE-00HCT0" , NULL },
+ { "WDC WD400JB-00JJC0" , NULL },
{ NULL , NULL }
};
@@ -116,9 +116,9 @@ static const struct drive_list_entry dma_black_list [] = {
/*
* Western Digital
*/
- { "WDC WD100EB-00CGH0" , "ALL" },
- { "WDC WD200BB-00AUA1" , "ALL" },
- { "WDC AC24300L" , "ALL" },
+ { "WDC WD100EB-00CGH0" , NULL },
+ { "WDC WD200BB-00AUA1" , NULL },
+ { "WDC AC24300L" , NULL },
{ NULL , NULL }
};
#endif
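
After this change a NULL firmware entry acts as a wildcard, so those drives are matched on the model string alone. A self-contained sketch of such a lookup (the helper name and matching details are illustrative, not the driver's exact code):

#include <string.h>

struct drive_list_entry {
        const char *id_model;
        const char *id_firmware;
};

/* Return 1 if the drive's model/firmware pair appears in the table.
 * A NULL id_firmware matches any firmware revision. */
static int in_drive_list(const char *model, const char *firmware,
                         const struct drive_list_entry *table)
{
        for ( ; table->id_model; table++)
                if (strcmp(table->id_model, model) == 0 &&
                    (table->id_firmware == NULL ||
                     strstr(firmware, table->id_firmware)))
                        return 1;
        return 0;
}
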
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index c17bdbf2206..ea486952f77 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -24,7 +24,7 @@
*/
typedef struct {
- volatile int counter;
+ int counter;
} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i) { (i) }
@@ -141,7 +141,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#ifdef __s390x__
typedef struct {
- volatile long long counter;
+ long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
diff --git a/include/asm-s390/cmb.h b/include/asm-s390/cmb.h
index 241756f80df..021e7c3223e 100644
--- a/include/asm-s390/cmb.h
+++ b/include/asm-s390/cmb.h
@@ -88,7 +88,6 @@ extern u64 cmf_read(struct ccw_device *cdev, int index);
* any
**/
extern int cmf_readall(struct ccw_device *cdev, struct cmbdata*data);
-extern void cmf_reset(struct ccw_device *cdev);
#endif /* __KERNEL__ */
#endif /* S390_CMB_H */
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 5cb480af65d..3b972d4c6b2 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -357,8 +357,8 @@ extern void (*s390_base_ext_handler_fn)(void);
/*
* CPU idle notifier chain.
*/
-#define CPU_IDLE 0
-#define CPU_NOT_IDLE 1
+#define S390_CPU_IDLE 0
+#define S390_CPU_NOT_IDLE 1
struct notifier_block;
int register_idle_notifier(struct notifier_block *nb);
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index 21ed6477321..cb9faf1ea5c 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -11,29 +11,6 @@
#include <linux/types.h>
#include <asm/chpid.h>
-struct sccb_header {
- u16 length;
- u8 function_code;
- u8 control_mask[3];
- u16 response_code;
-} __attribute__((packed));
-
-#define LOADPARM_LEN 8
-
-struct sclp_readinfo_sccb {
- struct sccb_header header; /* 0-7 */
- u16 rnmax; /* 8-9 */
- u8 rnsize; /* 10 */
- u8 _reserved0[24 - 11]; /* 11-23 */
- u8 loadparm[LOADPARM_LEN]; /* 24-31 */
- u8 _reserved1[91 - 32]; /* 32-90 */
- u8 flags; /* 91 */
- u8 _reserved2[100 - 92]; /* 92-99 */
- u32 rnsize2; /* 100-103 */
- u64 rnmax2; /* 104-111 */
- u8 _reserved3[4096 - 112]; /* 112-4095 */
-} __attribute__((packed, aligned(4096)));
-
#define SCLP_CHP_INFO_MASK_SIZE 32
struct sclp_chp_info {
@@ -42,12 +19,22 @@ struct sclp_chp_info {
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
};
-extern struct sclp_readinfo_sccb s390_readinfo_sccb;
-extern void sclp_readinfo_early(void);
-extern int sclp_sdias_blk_count(void);
-extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
-extern int sclp_chp_configure(struct chp_id chpid);
-extern int sclp_chp_deconfigure(struct chp_id chpid);
-extern int sclp_chp_read_info(struct sclp_chp_info *info);
+#define LOADPARM_LEN 8
+
+struct sclp_ipl_info {
+ int is_valid;
+ int has_dump;
+ char loadparm[LOADPARM_LEN];
+};
+
+void sclp_readinfo_early(void);
+void sclp_facilities_detect(void);
+unsigned long long sclp_memory_detect(void);
+int sclp_sdias_blk_count(void);
+int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
+int sclp_chp_configure(struct chp_id chpid);
+int sclp_chp_deconfigure(struct chp_id chpid);
+int sclp_chp_read_info(struct sclp_chp_info *info);
+void sclp_get_ipl_info(struct sclp_ipl_info *info);
#endif /* _ASM_S390_SCLP_H */
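
The SCCB layout is now private to the sclp driver; callers obtain IPL data through the new sclp_ipl_info interface instead. A minimal sketch of a consumer (illustrative only):

#include <linux/string.h>
#include <asm/sclp.h>

/* Copy the loadparm into a NUL-terminated buffer, falling back to
 * blanks when the SCLP read-info data is not valid. */
static void copy_loadparm(char buf[LOADPARM_LEN + 1])
{
        struct sclp_ipl_info info;

        sclp_get_ipl_info(&info);
        if (info.is_valid)
                memcpy(buf, info.loadparm, LOADPARM_LEN);
        else
                memset(buf, ' ', LOADPARM_LEN);
        buf[LOADPARM_LEN] = '\0';
}
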
diff --git a/include/asm-s390/sfp-machine.h b/include/asm-s390/sfp-machine.h
index 8ca8c77b2d0..4e16aede4b0 100644
--- a/include/asm-s390/sfp-machine.h
+++ b/include/asm-s390/sfp-machine.h
@@ -27,9 +27,9 @@
#define _FP_W_TYPE_SIZE 32
-#define _FP_W_TYPE unsigned long
-#define _FP_WS_TYPE signed long
-#define _FP_I_TYPE long
+#define _FP_W_TYPE unsigned int
+#define _FP_WS_TYPE signed int
+#define _FP_I_TYPE int
#define _FP_MUL_MEAT_S(R,X,Y) \
_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
diff --git a/include/asm-s390/sfp-util.h b/include/asm-s390/sfp-util.h
index 8cabcd23d97..0addc6466d9 100644
--- a/include/asm-s390/sfp-util.h
+++ b/include/asm-s390/sfp-util.h
@@ -51,6 +51,16 @@
wl = __wl; \
})
+#ifdef __s390x__
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { unsigned long __n; \
+ unsigned int __r, __d; \
+ __n = ((unsigned long)(n1) << 32) + n0; \
+ __d = (d); \
+ (q) = __n / __d; \
+ (r) = __n % __d; \
+ } while (0)
+#else
#define udiv_qrnnd(q, r, n1, n0, d) \
do { unsigned int __r; \
(q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
@@ -58,6 +68,7 @@
} while (0)
extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
unsigned int , unsigned int);
+#endif
#define UDIV_NEEDS_NORMALIZATION 0
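
The new __s390x__ branch simply forms the 64-bit dividend and lets the hardware divide do the work. A standalone worked example of what the macro computes (userspace C for clarity):

#include <stdio.h>

int main(void)
{
        /* Callers guarantee n1 < d, so the quotient fits in 32 bits. */
        unsigned int n1 = 2, n0 = 5, d = 3, q, r;
        unsigned long long n = ((unsigned long long)n1 << 32) + n0;

        q = n / d;      /* (2 * 2^32 + 5) / 3 = 0xaaaaaaac */
        r = n % d;      /* remainder 1 */

        printf("q=%#x r=%u\n", q, r);
        return 0;
}
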
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f317c270d4b..afae306b177 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -49,6 +49,7 @@ header-y += consolemap.h
header-y += const.h
header-y += cycx_cfm.h
header-y += dlm_device.h
+header-y += dlm_netlink.h
header-y += dm-ioctl.h
header-y += dn.h
header-y += dqblk_v1.h
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index db5b00a792f..fae138bd220 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -868,11 +868,6 @@ void kblockd_flush_work(struct work_struct *work);
*/
#define buffer_heads_over_limit 0
-static inline long blk_congestion_wait(int rw, long timeout)
-{
- return io_schedule_timeout(timeout);
-}
-
static inline long nr_blockdev_pages(void)
{
return 0;
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index 1b1dcb9a40b..be9d278761e 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -85,7 +85,11 @@
* Only relevant to locks originating in userspace. A persistent lock will not
* be removed if the process holding the lock exits.
*
- * DLM_LKF_NODLKWT
+ * DLM_LKF_NODLCKWT
+ *
+ * Do not cancel the lock if it gets into conversion deadlock.
+ * Exclude this lock from being monitored due to DLM_LSFL_TIMEWARN.
+ *
* DLM_LKF_NODLCKBLK
*
* net yet implemented
@@ -149,6 +153,7 @@
#define DLM_LKF_ALTPR 0x00008000
#define DLM_LKF_ALTCW 0x00010000
#define DLM_LKF_FORCEUNLOCK 0x00020000
+#define DLM_LKF_TIMEOUT 0x00040000
/*
* Some return codes that are not in errno.h
@@ -199,11 +204,12 @@ struct dlm_lksb {
char * sb_lvbptr;
};
+#define DLM_LSFL_NODIR 0x00000001
+#define DLM_LSFL_TIMEWARN 0x00000002
+#define DLM_LSFL_FS 0x00000004
#ifdef __KERNEL__
-#define DLM_LSFL_NODIR 0x00000001
-
/*
* dlm_new_lockspace
*
diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h
index c2735cab2eb..9642277a152 100644
--- a/include/linux/dlm_device.h
+++ b/include/linux/dlm_device.h
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -18,21 +18,24 @@
#define DLM_USER_LVB_LEN 32
/* Version of the device interface */
-#define DLM_DEVICE_VERSION_MAJOR 5
-#define DLM_DEVICE_VERSION_MINOR 1
+#define DLM_DEVICE_VERSION_MAJOR 6
+#define DLM_DEVICE_VERSION_MINOR 0
#define DLM_DEVICE_VERSION_PATCH 0
/* struct passed to the lock write */
struct dlm_lock_params {
__u8 mode;
__u8 namelen;
- __u16 flags;
+ __u16 unused;
+ __u32 flags;
__u32 lkid;
__u32 parent;
- void __user *castparam;
+ __u64 xid;
+ __u64 timeout;
+ void __user *castparam;
void __user *castaddr;
void __user *bastparam;
- void __user *bastaddr;
+ void __user *bastaddr;
struct dlm_lksb __user *lksb;
char lvb[DLM_USER_LVB_LEN];
char name[0];
@@ -62,9 +65,15 @@ struct dlm_write_request {
} i;
};
+struct dlm_device_version {
+ __u32 version[3];
+};
+
/* struct read from the "device" fd,
consists mainly of userspace pointers for the library to use */
+
struct dlm_lock_result {
+ __u32 version[3];
__u32 length;
void __user * user_astaddr;
void __user * user_astparam;
@@ -83,6 +92,7 @@ struct dlm_lock_result {
#define DLM_USER_CREATE_LOCKSPACE 4
#define DLM_USER_REMOVE_LOCKSPACE 5
#define DLM_USER_PURGE 6
+#define DLM_USER_DEADLOCK 7
/* Arbitrary length restriction */
#define MAX_LS_NAME_LEN 64
diff --git a/include/linux/dlm_netlink.h b/include/linux/dlm_netlink.h
new file mode 100644
index 00000000000..19276332707
--- /dev/null
+++ b/include/linux/dlm_netlink.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ */
+
+#ifndef _DLM_NETLINK_H
+#define _DLM_NETLINK_H
+
+enum {
+ DLM_STATUS_WAITING = 1,
+ DLM_STATUS_GRANTED = 2,
+ DLM_STATUS_CONVERT = 3,
+};
+
+#define DLM_LOCK_DATA_VERSION 1
+
+struct dlm_lock_data {
+ uint16_t version;
+ uint32_t lockspace_id;
+ int nodeid;
+ int ownpid;
+ uint32_t id;
+ uint32_t remid;
+ uint64_t xid;
+ int8_t status;
+ int8_t grmode;
+ int8_t rqmode;
+ unsigned long timestamp;
+ int resource_namelen;
+ char resource_name[DLM_RESNAME_MAXLEN];
+};
+
+enum {
+ DLM_CMD_UNSPEC = 0,
+ DLM_CMD_HELLO, /* user->kernel */
+ DLM_CMD_TIMEOUT, /* kernel->user */
+ __DLM_CMD_MAX,
+};
+
+#define DLM_CMD_MAX (__DLM_CMD_MAX - 1)
+
+enum {
+ DLM_TYPE_UNSPEC = 0,
+ DLM_TYPE_LOCK,
+ __DLM_TYPE_MAX,
+};
+
+#define DLM_TYPE_MAX (__DLM_TYPE_MAX - 1)
+
+#define DLM_GENL_VERSION 0x1
+#define DLM_GENL_NAME "DLM"
+
+#endif /* _DLM_NETLINK_H */
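
Consumers of the netlink payload are expected to check the version field before interpreting the rest of the structure. A minimal userspace sketch (the netlink plumbing is omitted; DLM_RESNAME_MAXLEN comes from the other dlm headers):

#include <stdio.h>
#include <linux/dlm.h>
#include <linux/dlm_netlink.h>

/* Handle one dlm_lock_data blob received over the generic netlink
 * socket. */
static void handle_lock_data(const struct dlm_lock_data *data)
{
        if (data->version != DLM_LOCK_DATA_VERSION) {
                fprintf(stderr, "unexpected dlm_lock_data version %u\n",
                        data->version);
                return;
        }
        printf("lock %x on node %d (pid %d), status %d\n",
               data->id, data->nodeid, data->ownpid, data->status);
}
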
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
new file mode 100644
index 00000000000..d774b7778c9
--- /dev/null
+++ b/include/linux/eeprom_93cx6.h
@@ -0,0 +1,72 @@
+/*
+ Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: eeprom_93cx6
+ Abstract: EEPROM reader datastructures for 93cx6 chipsets.
+ Supported chipsets: 93c46 & 93c66.
+ */
+
+/*
+ * EEPROM operation defines.
+ */
+#define PCI_EEPROM_WIDTH_93C46 6
+#define PCI_EEPROM_WIDTH_93C66 8
+#define PCI_EEPROM_WIDTH_OPCODE 3
+#define PCI_EEPROM_WRITE_OPCODE 0x05
+#define PCI_EEPROM_READ_OPCODE 0x06
+#define PCI_EEPROM_EWDS_OPCODE 0x10
+#define PCI_EEPROM_EWEN_OPCODE 0x13
+
+/**
+ * struct eeprom_93cx6 - control structure for setting the commands
+ * for reading the eeprom data.
+ * @data: private pointer for the driver.
+ * @register_read(struct eeprom_93cx6 *eeprom): handler to
+ * read the eeprom register, this function should set all reg_* fields.
+ * @register_write(struct eeprom_93cx6 *eeprom): handler to
+ * write to the eeprom register by using all reg_* fields.
+ * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @reg_data_in: register field to indicate data input
+ * @reg_data_out: register field to indicate data output
+ * @reg_data_clock: register field to set the data clock
+ * @reg_chip_select: register field to set the chip select
+ *
+ * This structure is used for the communication between the driver
+ * and the eeprom_93cx6 handlers for reading the eeprom.
+ */
+struct eeprom_93cx6 {
+ void *data;
+
+ void (*register_read)(struct eeprom_93cx6 *eeprom);
+ void (*register_write)(struct eeprom_93cx6 *eeprom);
+
+ int width;
+
+ char reg_data_in;
+ char reg_data_out;
+ char reg_data_clock;
+ char reg_chip_select;
+};
+
+extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
+ const u8 word, u16 *data);
+extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
+ const u8 word, __le16 *data, const u16 words);
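
A driver embeds the structure, supplies the two register handlers, and then calls the read helpers. A sketch of the intended usage (the mydev_* device, its CSR accessors and the MYDEV_* bits are hypothetical):

#include <linux/eeprom_93cx6.h>

/* Hypothetical handlers: translate between the reg_* fields and a
 * device-specific control register. */
static void mydev_eepromregister_read(struct eeprom_93cx6 *eeprom)
{
        struct mydev *dev = eeprom->data;
        u32 reg = mydev_read_csr(dev);

        eeprom->reg_data_in = !!(reg & MYDEV_EEPROM_DATA_IN);
        eeprom->reg_data_out = !!(reg & MYDEV_EEPROM_DATA_OUT);
        eeprom->reg_data_clock = !!(reg & MYDEV_EEPROM_CLOCK);
        eeprom->reg_chip_select = !!(reg & MYDEV_EEPROM_CHIP_SELECT);
}

static void mydev_eepromregister_write(struct eeprom_93cx6 *eeprom)
{
        struct mydev *dev = eeprom->data;
        u32 reg = 0;

        if (eeprom->reg_data_in)
                reg |= MYDEV_EEPROM_DATA_IN;
        if (eeprom->reg_data_out)
                reg |= MYDEV_EEPROM_DATA_OUT;
        if (eeprom->reg_data_clock)
                reg |= MYDEV_EEPROM_CLOCK;
        if (eeprom->reg_chip_select)
                reg |= MYDEV_EEPROM_CHIP_SELECT;

        mydev_write_csr(dev, reg);
}

/* Read 'words' 16-bit words starting at word 0 of a 93C46 part. */
static void mydev_read_eeprom(struct mydev *dev, __le16 *buf, u16 words)
{
        struct eeprom_93cx6 eeprom = {
                .data           = dev,
                .register_read  = mydev_eepromregister_read,
                .register_write = mydev_eepromregister_write,
                .width          = PCI_EEPROM_WIDTH_93C46,
        };

        eeprom_93cx6_multiread(&eeprom, 0, buf, words);
}
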
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index efbe1fda1a2..1a45d6f41b0 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -30,16 +30,38 @@
#define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
-/* The 'closure' fields are for user space to use. Data passed in the
- * 'closure' field for a request will be returned in the corresponding
- * event. It's a 64-bit type so that it's a fixed size type big
- * enough to hold a pointer on all platforms. */
-
+/**
+ * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
+ * @closure: For arbitrary use by userspace
+ * @type: Discriminates the fw_cdev_event_ types
+ *
+ * This struct may be used to access generic members of all fw_cdev_event_
+ * types regardless of the specific type.
+ *
+ * Data passed in the @closure field for a request will be returned in the
+ * corresponding event. It is big enough to hold a pointer on all platforms.
+ * The ioctl used to set @closure depends on the @type of event.
+ */
struct fw_cdev_event_common {
__u64 closure;
__u32 type;
};
+/**
+ * struct fw_cdev_event_bus_reset - Sent when a bus reset occurred
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_GET_INFO ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_BUS_RESET
+ * @node_id: New node ID of this node
+ * @local_node_id: Node ID of the local node, i.e. of the controller
+ * @bm_node_id: Node ID of the bus manager
+ * @irm_node_id: Node ID of the iso resource manager
+ * @root_node_id: Node ID of the root node
+ * @generation: New bus generation
+ *
+ * This event is sent when the bus the device belongs to goes through a bus
+ * reset. It provides information about the new bus configuration, such as
+ * new node ID for this device, new root ID, and others.
+ */
struct fw_cdev_event_bus_reset {
__u64 closure;
__u32 type;
@@ -51,6 +73,20 @@ struct fw_cdev_event_bus_reset {
__u32 generation;
};
+/**
+ * struct fw_cdev_event_response - Sent when a response packet was received
+ * @closure: See &fw_cdev_event_common;
+ * set by %FW_CDEV_IOC_SEND_REQUEST ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE
+ * @rcode: Response code returned by the remote node
+ * @length: Data length, i.e. the response's payload size in bytes
+ * @data: Payload data, if any
+ *
+ * This event is sent when the stack receives a response to an outgoing request
+ * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
+ * carrying data (read and lock responses) follows immediately and can be
+ * accessed through the @data field.
+ */
struct fw_cdev_event_response {
__u64 closure;
__u32 type;
@@ -59,6 +95,25 @@ struct fw_cdev_event_response {
__u32 data[0];
};
+/**
+ * struct fw_cdev_event_request - Sent on incoming request to an address region
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST
+ * @tcode: Transaction code of the incoming request
+ * @offset: The offset into the 48-bit per-node address space
+ * @handle: Reference to the kernel-side pending request
+ * @length: Data length, i.e. the request's payload size in bytes
+ * @data: Incoming data, if any
+ *
+ * This event is sent when the stack receives an incoming request to an address
+ * region registered using the %FW_CDEV_IOC_ALLOCATE ioctl. The request is
+ * guaranteed to be completely contained in the specified region. Userspace is
+ * responsible for sending the response by %FW_CDEV_IOC_SEND_RESPONSE ioctl,
+ * using the same @handle.
+ *
+ * The payload data for requests carrying data (write and lock requests)
+ * follows immediately and can be accessed through the @data field.
+ */
struct fw_cdev_event_request {
__u64 closure;
__u32 type;
@@ -69,14 +124,39 @@ struct fw_cdev_event_request {
__u32 data[0];
};
+/**
+ * struct fw_cdev_event_iso_interrupt - Sent when an iso packet was completed
+ * @closure: See &fw_cdev_event_common;
+ * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_ISO_INTERRUPT
+ * @cycle: Cycle counter of the interrupt packet
+ * @header_length: Total length of following headers, in bytes
+ * @header: Stripped headers, if any
+ *
+ * This event is sent when the controller has completed an &fw_cdev_iso_packet
+ * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
+ * stripped of all packets up until and including the interrupt packet are
+ * returned in the @header field.
+ */
struct fw_cdev_event_iso_interrupt {
__u64 closure;
__u32 type;
__u32 cycle;
- __u32 header_length; /* Length in bytes of following headers. */
+ __u32 header_length;
__u32 header[0];
};
+/**
+ * union fw_cdev_event - Convenience union of fw_cdev_event_ types
+ * @common: Valid for all types
+ * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
+ * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
+ * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
+ * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
+ *
+ * Convenience union for userspace use. Events could be read(2) into a char
+ * buffer and then cast to this union for further processing.
+ */
union fw_cdev_event {
struct fw_cdev_event_common common;
struct fw_cdev_event_bus_reset bus_reset;
@@ -105,35 +185,47 @@ union fw_cdev_event {
*/
#define FW_CDEV_VERSION 1
+/**
+ * struct fw_cdev_get_info - General purpose information ioctl
+ * @version: The version field is just a running serial number.
+ * We never break backwards compatibility, but may add more
+ * structs and ioctls in later revisions.
+ * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration
+ * ROM will be copied into that user space address. In either
+ * case, @rom_length is updated with the actual length of the
+ * configuration ROM.
+ * @rom: If non-zero, address of a buffer to be filled by a copy of the
+ * local node's configuration ROM
+ * @bus_reset: If non-zero, address of a buffer to be filled by a
+ * &struct fw_cdev_event_bus_reset with the current state
+ * of the bus. This does not cause a bus reset to happen.
+ * @bus_reset_closure: Value of &closure in this and subsequent bus reset events
+ * @card: The index of the card this device belongs to
+ */
struct fw_cdev_get_info {
- /* The version field is just a running serial number. We
- * never break backwards compatibility. Userspace passes in
- * the version it expects and the kernel passes back the
- * highest version it can provide. Even if the structs in
- * this interface are extended in a later version, the kernel
- * will not copy back more data than what was present in the
- * interface version userspace expects. */
__u32 version;
-
- /* If non-zero, at most rom_length bytes of config rom will be
- * copied into that user space address. In either case,
- * rom_length is updated with the actual length of the config
- * rom. */
__u32 rom_length;
__u64 rom;
-
- /* If non-zero, a fw_cdev_event_bus_reset struct will be
- * copied here with the current state of the bus. This does
- * not cause a bus reset to happen. The value of closure in
- * this and sub-sequent bus reset events is set to
- * bus_reset_closure. */
__u64 bus_reset;
__u64 bus_reset_closure;
-
- /* The index of the card this devices belongs to. */
__u32 card;
};
+/**
+ * struct fw_cdev_send_request - Send an asynchronous request packet
+ * @tcode: Transaction code of the request
+ * @length: Length of outgoing payload, in bytes
+ * @offset: 48-bit offset at destination node
+ * @closure: Passed back to userspace in the response event
+ * @data: Userspace pointer to payload
+ * @generation: The bus generation where packet is valid
+ *
+ * Send a request to the device. This ioctl implements all outgoing requests.
+ * Both quadlet and block request specify the payload as a pointer to the data
+ * in the @data field. Once the transaction completes, the kernel writes an
+ * &fw_cdev_event_request event back. The @closure field is passed back to
+ * user space in the response event.
+ */
struct fw_cdev_send_request {
__u32 tcode;
__u32 length;
@@ -143,6 +235,19 @@ struct fw_cdev_send_request {
__u32 generation;
};
+/**
+ * struct fw_cdev_send_response - Send an asynchronous response packet
+ * @rcode: Response code as determined by the userspace handler
+ * @length: Length of outgoing payload, in bytes
+ * @data: Userspace pointer to payload
+ * @handle: The handle from the &fw_cdev_event_request
+ *
+ * Send a response to an incoming request. By setting up an address range using
+ * the %FW_CDEV_IOC_ALLOCATE ioctl, userspace can listen for incoming requests. An
+ * incoming request will generate an %FW_CDEV_EVENT_REQUEST, and userspace must
+ * send a reply using this ioctl. The event has a handle to the kernel-side
+ * pending transaction, which should be used with this ioctl.
+ */
struct fw_cdev_send_response {
__u32 rcode;
__u32 length;
@@ -150,6 +255,21 @@ struct fw_cdev_send_response {
__u32 handle;
};
+/**
+ * struct fw_cdev_allocate - Allocate a CSR address range
+ * @offset: Start offset of the address range
+ * @closure: To be passed back to userspace in request events
+ * @length: Length of the address range, in bytes
+ * @handle: Handle to the allocation, written by the kernel
+ *
+ * Allocate an address range in the 48-bit address space on the local node
+ * (the controller). This allows userspace to listen for requests with an
+ * offset within that address range. When the kernel receives a request
+ * within the range, an &fw_cdev_event_request event will be written back.
+ * The @closure field is passed back to userspace in the response event.
+ * The @handle field is an out parameter, returning a handle to the allocated
+ * range to be used for later deallocation of the range.
+ */
struct fw_cdev_allocate {
__u64 offset;
__u64 closure;
@@ -157,6 +277,11 @@ struct fw_cdev_allocate {
__u32 handle;
};
+/**
+ * struct fw_cdev_deallocate - Free an address range allocation
+ * @handle: Handle to the address range, as returned by the kernel when the
+ * range was allocated
+ */
struct fw_cdev_deallocate {
__u32 handle;
};
@@ -164,10 +289,41 @@ struct fw_cdev_deallocate {
#define FW_CDEV_LONG_RESET 0
#define FW_CDEV_SHORT_RESET 1
+/**
+ * struct fw_cdev_initiate_bus_reset - Initiate a bus reset
+ * @type: %FW_CDEV_SHORT_RESET or %FW_CDEV_LONG_RESET
+ *
+ * Initiate a bus reset for the bus this device is on. The bus reset can be
+ * either the original (long) bus reset or the arbitrated (short) bus reset
+ * introduced in 1394a-2000.
+ */
struct fw_cdev_initiate_bus_reset {
- __u32 type;
+ __u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */
};
+/**
+ * struct fw_cdev_add_descriptor - Add contents to the local node's config ROM
+ * @immediate: If non-zero, immediate key to insert before pointer
+ * @key: Upper 8 bits of root directory pointer
+ * @data: Userspace pointer to contents of descriptor block
+ * @length: Length of descriptor block data, in bytes
+ * @handle: Handle to the descriptor, written by the kernel
+ *
+ * Add a descriptor block and optionally a preceding immediate key to the local
+ * node's configuration ROM.
+ *
+ * The @key field specifies the upper 8 bits of the descriptor root directory
+ * pointer and the @data and @length fields specify the contents. The @key
+ * should be of the form 0xXX000000. The offset part of the root directory entry
+ * will be filled in by the kernel.
+ *
+ * If not 0, the @immediate field specifies an immediate key which will be
+ * inserted before the root directory pointer.
+ *
+ * If successful, the kernel adds the descriptor and writes back a handle to the
+ * kernel-side object to be used for later removal of the descriptor block and
+ * immediate key.
+ */
struct fw_cdev_add_descriptor {
__u32 immediate;
__u32 key;
@@ -176,6 +332,14 @@ struct fw_cdev_add_descriptor {
__u32 handle;
};
+/**
+ * struct fw_cdev_remove_descriptor - Remove contents from the configuration ROM
+ * @handle: Handle to the descriptor, as returned by the kernel when the
+ * descriptor was added
+ *
+ * Remove a descriptor block and accompanying immediate key from the local
+ * node's configuration ROM.
+ */
struct fw_cdev_remove_descriptor {
__u32 handle;
};
@@ -183,12 +347,24 @@ struct fw_cdev_remove_descriptor {
#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
#define FW_CDEV_ISO_CONTEXT_RECEIVE 1
-#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1
-#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2
-#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4
-#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8
-#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15
-
+/**
+ * struct fw_cdev_create_iso_context - Create a context for isochronous IO
+ * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
+ * @header_size: Header size to strip for receive contexts
+ * @channel: Channel to bind to
+ * @speed: Speed to transmit at
+ * @closure: To be returned in &fw_cdev_event_iso_interrupt
+ * @handle: Handle to context, written back by kernel
+ *
+ * Prior to sending or receiving isochronous I/O, a context must be created.
+ * The context records information about the transmit or receive configuration
+ * and typically maps to an underlying hardware resource. A context is set up
+ * for either sending or receiving. It is bound to a specific isochronous
+ * channel.
+ *
+ * If a context was successfully created, the kernel writes back a handle to the
+ * context, which must be passed in for subsequent operations on that context.
+ */
struct fw_cdev_create_iso_context {
__u32 type;
__u32 header_size;
@@ -201,15 +377,49 @@ struct fw_cdev_create_iso_context {
#define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v)
#define FW_CDEV_ISO_INTERRUPT (1 << 16)
#define FW_CDEV_ISO_SKIP (1 << 17)
+#define FW_CDEV_ISO_SYNC (1 << 17)
#define FW_CDEV_ISO_TAG(v) ((v) << 18)
#define FW_CDEV_ISO_SY(v) ((v) << 20)
#define FW_CDEV_ISO_HEADER_LENGTH(v) ((v) << 24)
+/**
+ * struct fw_cdev_iso_packet - Isochronous packet
+ * @control: Contains the header length (8 uppermost bits), the sy field
+ * (4 bits), the tag field (2 bits), a sync flag (1 bit),
+ * a skip flag (1 bit), an interrupt flag (1 bit), and the
+ * payload length (16 lowermost bits)
+ * @header: Header and payload
+ *
+ * &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
+ *
+ * Use the FW_CDEV_ISO_ macros to fill in @control. The sy and tag fields are
+ * specified by IEEE 1394a and IEC 61883.
+ *
+ * FIXME - finish this documentation
+ */
struct fw_cdev_iso_packet {
__u32 control;
__u32 header[0];
};
+/**
+ * struct fw_cdev_queue_iso - Queue isochronous packets for I/O
+ * @packets: Userspace pointer to packet data
+ * @data: Pointer into mmap()'ed payload buffer
+ * @size: Size of packet data in bytes
+ * @handle: Isochronous context handle
+ *
+ * Queue a number of isochronous packets for reception or transmission.
+ * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs,
+ * which describe how to transmit from or receive into a contiguous region
+ * of a mmap()'ed payload buffer. As part of the packet descriptors,
+ * a series of headers can be supplied, which will be prepended to the
+ * payload during DMA.
+ *
+ * The kernel may or may not queue all packets, but will write back updated
+ * values of the @packets, @data and @size fields, so the ioctl can be
+ * resubmitted easily.
+ */
struct fw_cdev_queue_iso {
__u64 packets;
__u64 data;
@@ -217,6 +427,23 @@ struct fw_cdev_queue_iso {
__u32 handle;
};
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG0 1
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG1 2
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG2 4
+#define FW_CDEV_ISO_CONTEXT_MATCH_TAG3 8
+#define FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS 15
+
+/**
+ * struct fw_cdev_start_iso - Start an isochronous transmission or reception
+ * @cycle: Cycle in which to start I/O. If @cycle is greater than or
+ * equal to 0, the I/O will start on that cycle.
+ * @sync: Determines the value to wait for for receive packets that have
+ * the %FW_CDEV_ISO_SYNC bit set
+ * @tags: Tag filter bit mask. Only valid for isochronous reception.
+ * Determines the tag values for which packets will be accepted.
+ * Use FW_CDEV_ISO_CONTEXT_MATCH_ macros to set @tags.
+ * @handle: Isochronous context handle within which to transmit or receive
+ */
struct fw_cdev_start_iso {
__s32 cycle;
__u32 sync;
@@ -224,6 +451,10 @@ struct fw_cdev_start_iso {
__u32 handle;
};
+/**
+ * struct fw_cdev_stop_iso - Stop an isochronous transmission or reception
+ * @handle: Handle of isochronous context to stop
+ */
struct fw_cdev_stop_iso {
__u32 handle;
};
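
The union documented near the top of the file is meant for exactly the read(2)-and-cast pattern described there. A minimal userspace sketch (the /dev/fw0 path is an example; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

int main(void)
{
        static char buf[4096] __attribute__((aligned(8)));
        union fw_cdev_event *event = (union fw_cdev_event *)buf;
        int fd = open("/dev/fw0", O_RDWR);

        if (fd < 0)
                return 1;

        /* Block until the kernel queues an event, then dispatch on type. */
        while (read(fd, buf, sizeof(buf)) > 0) {
                switch (event->common.type) {
                case FW_CDEV_EVENT_BUS_RESET:
                        printf("bus reset, generation %u\n",
                               event->bus_reset.generation);
                        break;
                case FW_CDEV_EVENT_RESPONSE:
                        printf("response, rcode %u, %u bytes\n",
                               event->response.rcode,
                               event->response.length);
                        break;
                default:
                        break;
                }
        }
        return 0;
}
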
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6a41f4cab14..4f0b3bf5983 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1054,7 +1054,7 @@ struct block_device_operations {
};
/*
- * "descriptor" for what we're up to with a read for sendfile().
+ * "descriptor" for what we're up to with a read.
* This allows us to use the same read code yet
* have multiple different users of the data that
* we read from a file.
@@ -1105,7 +1105,6 @@ struct file_operations {
int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
- ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
@@ -1762,7 +1761,6 @@ extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
unsigned long, loff_t, loff_t *, size_t, ssize_t);
extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
-extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
extern void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *, struct file *,
loff_t *, read_descriptor_t *, read_actor_t);
@@ -1792,9 +1790,6 @@ extern int nonseekable_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_FS_XIP
extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
loff_t *ppos);
-extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
- size_t count, read_actor_t actor,
- void *target);
extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
size_t len, loff_t *ppos);
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index 8b7e4c1e32a..a44a6a078f0 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -54,18 +54,6 @@ struct gfs2_inum {
__be64 no_addr;
};
-struct gfs2_inum_host {
- __u64 no_formal_ino;
- __u64 no_addr;
-};
-
-static inline int gfs2_inum_equal(const struct gfs2_inum_host *ino1,
- const struct gfs2_inum_host *ino2)
-{
- return ino1->no_formal_ino == ino2->no_formal_ino &&
- ino1->no_addr == ino2->no_addr;
-}
-
/*
* Generic metadata head structure
* Every inplace buffer logged in the journal must start with this.
@@ -94,12 +82,6 @@ struct gfs2_meta_header {
__be32 __pad1; /* Was incarnation number in gfs1 */
};
-struct gfs2_meta_header_host {
- __u32 mh_magic;
- __u32 mh_type;
- __u32 mh_format;
-};
-
/*
* super-block structure
*
@@ -139,23 +121,6 @@ struct gfs2_sb {
/* In gfs1, quota and license dinodes followed */
};
-struct gfs2_sb_host {
- struct gfs2_meta_header_host sb_header;
-
- __u32 sb_fs_format;
- __u32 sb_multihost_format;
-
- __u32 sb_bsize;
- __u32 sb_bsize_shift;
-
- struct gfs2_inum_host sb_master_dir; /* Was jindex dinode in gfs1 */
- struct gfs2_inum_host sb_root_dir;
-
- char sb_lockproto[GFS2_LOCKNAME_LEN];
- char sb_locktable[GFS2_LOCKNAME_LEN];
- /* In gfs1, quota and license dinodes followed */
-};
-
/*
* resource index structure
*/
@@ -173,14 +138,6 @@ struct gfs2_rindex {
__u8 ri_reserved[64];
};
-struct gfs2_rindex_host {
- __u64 ri_addr; /* grp block disk address */
- __u64 ri_data0; /* first data location */
- __u32 ri_length; /* length of rgrp header in fs blocks */
- __u32 ri_data; /* num of data blocks in rgrp */
- __u32 ri_bitbytes; /* number of bytes in data bitmaps */
-};
-
/*
* resource group header structure
*/
@@ -212,13 +169,6 @@ struct gfs2_rgrp {
__u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
};
-struct gfs2_rgrp_host {
- __u32 rg_flags;
- __u32 rg_free;
- __u32 rg_dinodes;
- __u64 rg_igeneration;
-};
-
/*
* quota structure
*/
@@ -230,12 +180,6 @@ struct gfs2_quota {
__u8 qu_reserved[64];
};
-struct gfs2_quota_host {
- __u64 qu_limit;
- __u64 qu_warn;
- __u64 qu_value;
-};
-
/*
* dinode structure
*/
@@ -315,29 +259,11 @@ struct gfs2_dinode {
struct gfs2_inum __pad4; /* Unused even in current gfs1 */
__be64 di_eattr; /* extended attribute block number */
+ __be32 di_atime_nsec; /* nsec portion of atime */
+ __be32 di_mtime_nsec; /* nsec portion of mtime */
+ __be32 di_ctime_nsec; /* nsec portion of ctime */
- __u8 di_reserved[56];
-};
-
-struct gfs2_dinode_host {
- __u64 di_size; /* number of bytes in file */
- __u64 di_blocks; /* number of blocks in file */
-
- /* This section varies from gfs1. Padding added to align with
- * remainder of dinode
- */
- __u64 di_goal_meta; /* rgrp to alloc from next */
- __u64 di_goal_data; /* data block goal */
- __u64 di_generation; /* generation number for NFS */
-
- __u32 di_flags; /* GFS2_DIF_... */
- __u16 di_height; /* height of metadata */
-
- /* These only apply to directories */
- __u16 di_depth; /* Number of bits in the table */
- __u32 di_entries; /* The number of entries in the directory */
-
- __u64 di_eattr; /* extended attribute block number */
+ __u8 di_reserved[44];
};
/*
@@ -414,16 +340,6 @@ struct gfs2_log_header {
__be32 lh_hash;
};
-struct gfs2_log_header_host {
- struct gfs2_meta_header_host lh_header;
-
- __u64 lh_sequence; /* Sequence number of this transaction */
- __u32 lh_flags; /* GFS2_LOG_HEAD_... */
- __u32 lh_tail; /* Block number of log tail */
- __u32 lh_blkno;
- __u32 lh_hash;
-};
-
/*
* Log type descriptor
*/
@@ -464,11 +380,6 @@ struct gfs2_inum_range {
__be64 ir_length;
};
-struct gfs2_inum_range_host {
- __u64 ir_start;
- __u64 ir_length;
-};
-
/*
* Statfs change
* Describes an change to the pool of free and allocated
@@ -481,12 +392,6 @@ struct gfs2_statfs_change {
__be64 sc_dinodes;
};
-struct gfs2_statfs_change_host {
- __u64 sc_total;
- __u64 sc_free;
- __u64 sc_dinodes;
-};
-
/*
* Quota change
* Describes an allocation change for a particular
@@ -501,39 +406,12 @@ struct gfs2_quota_change {
__be32 qc_id;
};
-struct gfs2_quota_change_host {
- __u64 qc_change;
- __u32 qc_flags; /* GFS2_QCF_... */
- __u32 qc_id;
+struct gfs2_quota_lvb {
+ __be32 qb_magic;
+ __u32 __pad;
+ __be64 qb_limit; /* Hard limit of # blocks to alloc */
+ __be64 qb_warn; /* Warn user when alloc is above this # */
+ __be64 qb_value; /* Current # blocks allocated */
};
-#ifdef __KERNEL__
-/* Translation functions */
-
-extern void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf);
-extern void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf);
-extern void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf);
-extern void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf);
-extern void gfs2_rindex_out(const struct gfs2_rindex_host *ri, void *buf);
-extern void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf);
-extern void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf);
-extern void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf);
-struct gfs2_inode;
-extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
-extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf);
-extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf);
-extern void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf);
-extern void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf);
-extern void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf);
-extern void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf);
-
-/* Printing functions */
-
-extern void gfs2_rindex_print(const struct gfs2_rindex_host *ri);
-extern void gfs2_dinode_print(const struct gfs2_inode *ip);
-
-#endif /* __KERNEL__ */
-
#endif /* __GFS2_ONDISK_DOT_H__ */
diff --git a/include/linux/gpio_mouse.h b/include/linux/gpio_mouse.h
new file mode 100644
index 00000000000..44ed7aa14d8
--- /dev/null
+++ b/include/linux/gpio_mouse.h
@@ -0,0 +1,61 @@
+/*
+ * Driver for simulating a mouse on GPIO lines.
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _GPIO_MOUSE_H
+#define _GPIO_MOUSE_H
+
+#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00
+#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01
+
+#define GPIO_MOUSE_PIN_UP 0
+#define GPIO_MOUSE_PIN_DOWN 1
+#define GPIO_MOUSE_PIN_LEFT 2
+#define GPIO_MOUSE_PIN_RIGHT 3
+#define GPIO_MOUSE_PIN_BLEFT 4
+#define GPIO_MOUSE_PIN_BMIDDLE 5
+#define GPIO_MOUSE_PIN_BRIGHT 6
+#define GPIO_MOUSE_PIN_MAX 7
+
+/**
+ * struct gpio_mouse_platform_data
+ * @scan_ms: integer in ms specifying the scan periode.
+ * @polarity: Pin polarity, active high or low.
+ * @up: GPIO line for up value.
+ * @down: GPIO line for down value.
+ * @left: GPIO line for left value.
+ * @right: GPIO line for right value.
+ * @bleft: GPIO line for left button.
+ * @bmiddle: GPIO line for middle button.
+ * @bright: GPIO line for right button.
+ *
+ * This struct must be added to the platform_device in the board code.
+ * It is used by the gpio_mouse driver to setup GPIO lines and to
+ * calculate mouse movement.
+ */
+struct gpio_mouse_platform_data {
+ int scan_ms;
+ int polarity;
+
+ union {
+ struct {
+ int up;
+ int down;
+ int left;
+ int right;
+
+ int bleft;
+ int bmiddle;
+ int bright;
+ };
+ int pins[GPIO_MOUSE_PIN_MAX];
+ };
+};
+
+#endif /* _GPIO_MOUSE_H */
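
Board code fills in the platform data and attaches it to a platform device for the gpio_mouse driver to pick up. A sketch (the GPIO numbers are made up, and -1 is assumed here to mean an unused pin):

#include <linux/platform_device.h>
#include <linux/gpio_mouse.h>

static struct gpio_mouse_platform_data mouse_pdata = {
        .scan_ms  = 10,
        .polarity = GPIO_MOUSE_POLARITY_ACT_LOW,
        .up       = 32,
        .down     = 33,
        .left     = 34,
        .right    = 35,
        .bleft    = 36,
        .bmiddle  = -1,         /* no middle button wired up */
        .bright   = -1,         /* no right button wired up */
};

static struct platform_device mouse_device = {
        .name = "gpio_mouse",
        .id   = -1,
        .dev  = {
                .platform_data = &mouse_pdata,
        },
};

/* Registered from the board's init code with
 * platform_device_register(&mouse_device); */
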
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 7803014f3a1..8d302298a16 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -79,6 +79,19 @@
#endif
#ifdef CONFIG_PREEMPT
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
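
in_atomic_preempt_off() expresses "the only preemption disable in effect should be the one the scheduler itself just did": with CONFIG_PREEMPT the expected count is 1, without it 0. A hedged sketch of the kind of debug check this enables (not the scheduler's actual code):

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/sched.h>

/* Called after the scheduler has done its own preempt_disable();
 * any extra count means the caller scheduled while atomic. */
static void debug_check_atomic_sleep(void)
{
        if (unlikely(in_atomic_preempt_off())) {
                printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
                       current->comm, preempt_count(), current->pid);
                dump_stack();
        }
}
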
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 827ee748fd4..898103b401f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -263,19 +263,28 @@ struct hid_item {
#define HID_QUIRK_2WHEEL_MOUSE_HACK_5 0x00000100
#define HID_QUIRK_2WHEEL_MOUSE_HACK_ON 0x00000200
#define HID_QUIRK_MIGHTYMOUSE 0x00000400
-#define HID_QUIRK_CYMOTION 0x00000800
-#define HID_QUIRK_POWERBOOK_HAS_FN 0x00001000
-#define HID_QUIRK_POWERBOOK_FN_ON 0x00002000
-#define HID_QUIRK_INVERT_HWHEEL 0x00004000
-#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00008000
-#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00010000
-#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00020000
-#define HID_QUIRK_IGNORE_MOUSE 0x00040000
-#define HID_QUIRK_SONY_PS3_CONTROLLER 0x00080000
-#define HID_QUIRK_LOGITECH_DESCRIPTOR 0x00100000
-#define HID_QUIRK_DUPLICATE_USAGES 0x00200000
-#define HID_QUIRK_RESET_LEDS 0x00400000
-#define HID_QUIRK_SWAPPED_MIN_MAX 0x00800000
+#define HID_QUIRK_POWERBOOK_HAS_FN 0x00000800
+#define HID_QUIRK_POWERBOOK_FN_ON 0x00001000
+#define HID_QUIRK_INVERT_HWHEEL 0x00002000
+#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00004000
+#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00008000
+#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
+#define HID_QUIRK_IGNORE_MOUSE 0x00020000
+#define HID_QUIRK_SONY_PS3_CONTROLLER 0x00040000
+#define HID_QUIRK_DUPLICATE_USAGES 0x00080000
+#define HID_QUIRK_RESET_LEDS 0x00100000
+#define HID_QUIRK_HIDINPUT 0x00200000
+#define HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL 0x00400000
+#define HID_QUIRK_LOGITECH_EXPANDED_KEYMAP 0x00800000
+
+/*
+ * Separate quirks for runtime report descriptor fixup
+ */
+
+#define HID_QUIRK_RDESC_CYMOTION 0x00000001
+#define HID_QUIRK_RDESC_LOGITECH 0x00000002
+#define HID_QUIRK_RDESC_SWAPPED_MIN_MAX 0x00000004
+#define HID_QUIRK_RDESC_PETALYNX 0x00000008
/*
* This is the global environment of the parser. This information is
@@ -488,6 +497,11 @@ struct hid_descriptor {
#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001))
/* HID core API */
+
+#ifdef CONFIG_HID_DEBUG
+extern int hid_debug;
+#endif
+
extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct hid_usage *, __s32);
extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report);
extern int hidinput_connect(struct hid_device *);
@@ -506,6 +520,7 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct, const u32 quirks);
int usbhid_quirks_init(char **quirks_param);
void usbhid_quirks_exit(void);
+void usbhid_fixup_report_descriptor(const u16, const u16, char *, unsigned, char **);
#ifdef CONFIG_HID_FF
int hid_ff_init(struct hid_device *hid);
@@ -523,14 +538,19 @@ static inline int hid_pidff_init(struct hid_device *hid) { return -ENODEV; }
#else
static inline int hid_ff_init(struct hid_device *hid) { return -1; }
#endif
-#ifdef DEBUG
-#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , \
- __FILE__ , ## arg)
+
+#ifdef CONFIG_HID_DEBUG
+#define dbg_hid(format, arg...) if (hid_debug) \
+ printk(KERN_DEBUG "%s: " format ,\
+ __FILE__ , ## arg)
+#define dbg_hid_line(format, arg...) if (hid_debug) \
+ printk(format, ## arg)
#else
-#define dbg(format, arg...) do {} while (0)
+#define dbg_hid(format, arg...) do {} while (0)
+#define dbg_hid_line dbg_hid
#endif
-#define err(format, arg...) printk(KERN_ERR "%s: " format "\n" , \
+#define err_hid(format, arg...) printk(KERN_ERR "%s: " format "\n" , \
__FILE__ , ## arg)
#endif
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 1e365acdd36..19ab2580405 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -25,6 +25,7 @@
#include <asm/system.h>
#include <asm/io.h>
#include <asm/semaphore.h>
+#include <asm/mutex.h>
/******************************************************************************
* IDE driver configuration options (play with these as desired):
@@ -685,6 +686,8 @@ typedef struct hwif_s {
u8 mwdma_mask;
u8 swdma_mask;
+ u8 cbl; /* cable type */
+
hwif_chipset_t chipset; /* sub-module for tuning.. */
struct pci_dev *pci_dev; /* for pci chipsets */
@@ -735,8 +738,8 @@ typedef struct hwif_s {
void (*ide_dma_clear_irq)(ide_drive_t *drive);
void (*dma_host_on)(ide_drive_t *drive);
void (*dma_host_off)(ide_drive_t *drive);
- int (*ide_dma_lostirq)(ide_drive_t *drive);
- int (*ide_dma_timeout)(ide_drive_t *drive);
+ void (*dma_lost_irq)(ide_drive_t *drive);
+ void (*dma_timeout)(ide_drive_t *drive);
void (*OUTB)(u8 addr, unsigned long port);
void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
@@ -791,7 +794,6 @@ typedef struct hwif_s {
unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
unsigned reset : 1; /* reset after probe */
unsigned autodma : 1; /* auto-attempt using DMA at boot */
- unsigned udma_four : 1; /* 1=ATA-66 capable, 0=default */
unsigned no_lba48 : 1; /* 1 = cannot do LBA48 */
unsigned no_lba48_dma : 1; /* 1 = cannot do LBA48 DMA */
unsigned auto_poll : 1; /* supports nop auto-poll */
@@ -863,7 +865,7 @@ typedef struct hwgroup_s {
typedef struct ide_driver_s ide_driver_t;
-extern struct semaphore ide_setting_sem;
+extern struct mutex ide_setting_mtx;
int set_io_32bit(ide_drive_t *, int);
int set_pio_mode(ide_drive_t *, int);
@@ -1304,8 +1306,8 @@ extern int __ide_dma_check(ide_drive_t *);
extern int ide_dma_setup(ide_drive_t *);
extern void ide_dma_start(ide_drive_t *);
extern int __ide_dma_end(ide_drive_t *);
-extern int __ide_dma_lostirq(ide_drive_t *);
-extern int __ide_dma_timeout(ide_drive_t *);
+extern void ide_dma_lost_irq(ide_drive_t *);
+extern void ide_dma_timeout(ide_drive_t *);
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
#else
@@ -1382,11 +1384,11 @@ extern const ide_pio_timings_t ide_pio_timings[6];
extern spinlock_t ide_lock;
-extern struct semaphore ide_cfg_sem;
+extern struct mutex ide_cfg_mtx;
/*
* Structure locking:
*
- * ide_cfg_sem and ide_lock together protect changes to
+ * ide_cfg_mtx and ide_lock together protect changes to
* ide_hwif_t->{next,hwgroup}
* ide_drive_t->next
*
diff --git a/include/linux/input.h b/include/linux/input.h
index d8521c72f69..18c98b54303 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -981,15 +981,15 @@ struct input_dev {
struct mutex mutex; /* serializes open and close operations */
unsigned int users;
- struct class_device cdev;
+ struct device dev;
union { /* temporarily so while we switching to struct device */
- struct device *parent;
- } dev;
+ struct device *dev;
+ } cdev;
struct list_head h_list;
struct list_head node;
};
-#define to_input_dev(d) container_of(d, struct input_dev, cdev)
+#define to_input_dev(d) container_of(d, struct input_dev, dev)
/*
* Verify that we are in sync with input_device_id mod_devicetable.h #defines
@@ -1096,22 +1096,22 @@ struct input_handle {
struct list_head h_node;
};
-#define to_dev(n) container_of(n,struct input_dev,node)
-#define to_handler(n) container_of(n,struct input_handler,node)
-#define to_handle(n) container_of(n,struct input_handle,d_node)
-#define to_handle_h(n) container_of(n,struct input_handle,h_node)
+#define to_dev(n) container_of(n, struct input_dev, node)
+#define to_handler(n) container_of(n, struct input_handler, node)
+#define to_handle(n) container_of(n, struct input_handle, d_node)
+#define to_handle_h(n) container_of(n, struct input_handle, h_node)
struct input_dev *input_allocate_device(void);
void input_free_device(struct input_dev *dev);
static inline struct input_dev *input_get_device(struct input_dev *dev)
{
- return to_input_dev(class_device_get(&dev->cdev));
+ return to_input_dev(get_device(&dev->dev));
}
static inline void input_put_device(struct input_dev *dev)
{
- class_device_put(&dev->cdev);
+ put_device(&dev->dev);
}
static inline void *input_get_drvdata(struct input_dev *dev)
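
With the embedded struct device, get/put now go through the driver core rather than the class device. A minimal sketch of pinning a device across some use (illustrative):

#include <linux/input.h>

static void use_input_dev(struct input_dev *dev)
{
        input_get_device(dev);          /* take a reference */

        /* ... dev cannot be freed while we hold the reference ... */

        input_put_device(dev);          /* drop it again */
}
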
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 8e2042b9d47..2eaa142cd06 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -47,8 +47,10 @@ enum {
#define IOPRIO_NORM (4)
static inline int task_ioprio(struct task_struct *task)
{
- WARN_ON(!ioprio_valid(task->ioprio));
- return IOPRIO_PRIO_DATA(task->ioprio);
+ if (ioprio_valid(task->ioprio))
+ return IOPRIO_PRIO_DATA(task->ioprio);
+
+ return IOPRIO_NORM;
}
static inline int task_nice_ioprio(struct task_struct *task)
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c8884f97122..8e4120285f7 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -9,13 +9,39 @@
#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
+/**
+ * struct pipe_buffer - a linux kernel pipe buffer
+ * @page: the page containing the data for the pipe buffer
+ * @offset: offset of data inside the @page
+ * @len: length of data inside the @page
+ * @ops: operations associated with this buffer. See @pipe_buf_operations.
+ * @flags: pipe buffer flags. See above.
+ * @private: private data owned by the ops.
+ **/
struct pipe_buffer {
struct page *page;
unsigned int offset, len;
const struct pipe_buf_operations *ops;
unsigned int flags;
+ unsigned long private;
};
+/**
+ * struct pipe_inode_info - a linux kernel pipe
+ * @wait: reader/writer wait point in case of empty/full pipe
+ * @nrbufs: the number of non-empty pipe buffers in this pipe
+ * @curbuf: the current pipe buffer entry
+ * @tmp_page: cached released page
+ * @readers: number of current readers of this pipe
+ * @writers: number of current writers of this pipe
+ * @waiting_writers: number of writers blocked waiting for room
+ * @r_counter: reader counter
+ * @w_counter: writer counter
+ * @fasync_readers: reader side fasync
+ * @fasync_writers: writer side fasync
+ * @inode: inode this pipe is attached to
+ * @bufs: the circular array of pipe buffers
+ **/
struct pipe_inode_info {
wait_queue_head_t wait;
unsigned int nrbufs, curbuf;
@@ -34,22 +60,73 @@ struct pipe_inode_info {
/*
* Note on the nesting of these functions:
*
- * ->pin()
+ * ->confirm()
* ->steal()
* ...
* ->map()
* ...
* ->unmap()
*
- * That is, ->map() must be called on a pinned buffer, same goes for ->steal().
+ * That is, ->map() must be called on a confirmed buffer,
+ * same goes for ->steal(). See below for the meaning of each
+ * operation. Also see kerneldoc in fs/pipe.c for the pipe
+ * and generic variants of these hooks.
*/
struct pipe_buf_operations {
+ /*
+ * This is set to 1, if the generic pipe read/write may coalesce
+ * data into an existing buffer. If this is set to 0, a new pipe
+ * page segment is always used for new data.
+ */
int can_merge;
+
+ /*
+ * ->map() returns a virtual address mapping of the pipe buffer.
+ * The last integer flag reflects whether this should be an atomic
+ * mapping or not. The atomic map is faster, however you can't take
+ * page faults before calling ->unmap() again. So if you need to eg
+ * access user data through copy_to/from_user(), then you must get
+ * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
+ * atomic maps, so you can't map more than one pipe_buffer at once
+ * and you have to be careful if mapping another page as source
+ * or destination for a copy (IOW, it has to use something else
+ * than KM_USER0).
+ */
void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
+
+ /*
+ * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
+ */
void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
- int (*pin)(struct pipe_inode_info *, struct pipe_buffer *);
+
+ /*
+ * ->confirm() verifies that the data in the pipe buffer is there
+ * and that the contents are good. If the pages in the pipe belong
+ * to a file system, we may need to wait for IO completion in this
+ * hook. Returns 0 for good, or a negative error value in case of
+ * error.
+ */
+ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+
+ /*
+ * When the contents of this pipe buffer has been completely
+ * consumed by a reader, ->release() is called.
+ */
void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+
+ /*
+ * Attempt to take ownership of the pipe buffer and its contents.
+ * ->steal() returns 0 for success, in which case the contents
+ * of the pipe (the buf->page) is locked and now completely owned
+ * by the caller. The page may then be transferred to a different
+ * mapping, the most often used case is insertion into different
+ * file address space cache.
+ */
int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+
+ /*
+ * Get a reference to the pipe buffer.
+ */
void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
@@ -68,39 +145,7 @@ void __free_pipe_info(struct pipe_inode_info *);
void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
-int generic_pipe_buf_pin(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
-/*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
- */
-#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
-#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
- /* we may still block on the fd we splice */
- /* from/to, of course */
-#define SPLICE_F_MORE (0x04) /* expect more data */
-#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
-
-/*
- * Passed to the actors
- */
-struct splice_desc {
- unsigned int len, total_len; /* current and remaining length */
- unsigned int flags; /* splice flags */
- struct file *file; /* file to read/write */
- loff_t pos; /* file position */
-};
-
-typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
- struct splice_desc *);
-
-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-
-extern ssize_t __splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 693f0e6c54d..cfb680585ab 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,8 @@
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
+/* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_IDLE 5
#ifdef __KERNEL__
@@ -130,6 +132,26 @@ extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
extern unsigned long weighted_cpuload(const int cpu);
+struct seq_file;
+struct cfs_rq;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq, u64 now)
+{
+}
+#endif
/*
* Task state bitmask. NOTE! These bits are also
@@ -193,6 +215,7 @@ struct task_struct;
extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -479,7 +502,7 @@ struct signal_struct {
* from jiffies_to_ns(utime + stime) if sched_clock uses something
* other than jiffies.)
*/
- unsigned long long sched_time;
+ unsigned long long sum_sched_runtime;
/*
* We don't bother to synchronize most readers of this at all,
@@ -521,31 +544,6 @@ struct signal_struct {
#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
-
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
-
-#define MAX_PRIO (MAX_RT_PRIO + 40)
-
-#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
-#define rt_task(p) rt_prio((p)->prio)
-#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
-#define is_rt_policy(p) ((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
-#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
-
/*
* Some day this will be a full-fledged user tracking system..
*/
@@ -583,13 +581,13 @@ struct reclaim_state;
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
/* cumulative counters */
- unsigned long cpu_time, /* time spent on the cpu */
- run_delay, /* time spent waiting on a runqueue */
- pcnt; /* # of timeslices run on this cpu */
+ unsigned long pcnt; /* # of times run on this cpu */
+ unsigned long long cpu_time, /* time spent on the cpu */
+ run_delay; /* time spent waiting on a runqueue */
/* timestamps */
- unsigned long last_arrival, /* when we last ran on a cpu */
- last_queued; /* when we were last queued to run */
+ unsigned long long last_arrival,/* when we last ran on a cpu */
+ last_queued; /* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -639,18 +637,24 @@ static inline int sched_info_on(void)
#endif
}
-enum idle_type
-{
- SCHED_IDLE,
- NOT_IDLE,
- NEWLY_IDLE,
- MAX_IDLE_TYPES
+enum cpu_idle_type {
+ CPU_IDLE,
+ CPU_NOT_IDLE,
+ CPU_NEWLY_IDLE,
+ CPU_MAX_IDLE_TYPES
};
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT 10
+#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ (SCHED_LOAD_SCALE >> 5)
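
For orientation, the constants above expand to the following fixed-point values (editorial arithmetic, not part of the patch):

	/*
	 * SCHED_LOAD_SCALE      == 1L << 10  == 1024
	 * SCHED_LOAD_SCALE_FUZZ == 1024 >> 5 ==   32	(~3% of the scale)
	 */
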
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
@@ -719,14 +723,14 @@ struct sched_domain {
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
- unsigned long lb_cnt[MAX_IDLE_TYPES];
- unsigned long lb_failed[MAX_IDLE_TYPES];
- unsigned long lb_balanced[MAX_IDLE_TYPES];
- unsigned long lb_imbalance[MAX_IDLE_TYPES];
- unsigned long lb_gained[MAX_IDLE_TYPES];
- unsigned long lb_hot_gained[MAX_IDLE_TYPES];
- unsigned long lb_nobusyg[MAX_IDLE_TYPES];
- unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+ unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+ unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
/* Active load balancing */
unsigned long alb_cnt;
@@ -753,12 +757,6 @@ struct sched_domain {
extern int partition_sched_domains(cpumask_t *partition1,
cpumask_t *partition2);
-/*
- * Maximum cache size the migration-costs auto-tuning code will
- * search from:
- */
-extern unsigned int max_cache_size;
-
#endif /* CONFIG_SMP */
@@ -809,14 +807,86 @@ struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;
-enum sleep_type {
- SLEEP_NORMAL,
- SLEEP_NONINTERACTIVE,
- SLEEP_INTERACTIVE,
- SLEEP_INTERRUPTED,
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+ struct sched_class *next;
+
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p,
+ int wakeup, u64 now);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p,
+ int sleep, u64 now);
+ void (*yield_task) (struct rq *rq, struct task_struct *p);
+
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+
+ struct task_struct * (*pick_next_task) (struct rq *rq, u64 now);
+ void (*put_prev_task) (struct rq *rq, struct task_struct *p, u64 now);
+
+ int (*load_balance) (struct rq *this_rq, int this_cpu,
+ struct rq *busiest,
+ unsigned long max_nr_move, unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, unsigned long *total_load_moved);
+
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
};
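
As a rough illustration of how this structure is meant to be used (editorial sketch; the identifiers below are placeholders, not symbols introduced by this patch), a scheduling class fills in the hooks and is chained to the next, lower-priority class via ->next:

	static void example_enqueue_task(struct rq *rq, struct task_struct *p,
					 int wakeup, u64 now)
	{
		/* insert p into this class's own runqueue data structure */
	}

	static struct task_struct *example_pick_next_task(struct rq *rq, u64 now)
	{
		return NULL;		/* nothing runnable in this class */
	}

	static struct sched_class example_sched_class = {
		.next		= NULL,	/* next lower-priority class, if any */
		.enqueue_task	= example_enqueue_task,
		.pick_next_task	= example_pick_next_task,
		/* remaining hooks left out purely for illustration */
	};
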
-struct prio_array;
+struct load_weight {
+ unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ * 4 se->block_start
+ * 4 se->run_node
+ * 4 se->sleep_start
+ * 4 se->sleep_start_fair
+ * 6 se->load.weight
+ * 7 se->delta_fair
+ * 15 se->wait_runtime
+ */
+struct sched_entity {
+ long wait_runtime;
+ unsigned long delta_fair_run;
+ unsigned long delta_fair_sleep;
+ unsigned long delta_exec;
+ s64 fair_key;
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ unsigned int on_rq;
+
+ u64 wait_start_fair;
+ u64 wait_start;
+ u64 exec_start;
+ u64 sleep_start;
+ u64 sleep_start_fair;
+ u64 block_start;
+ u64 sleep_max;
+ u64 block_max;
+ u64 exec_max;
+ u64 wait_max;
+ u64 last_ran;
+
+ u64 sum_exec_runtime;
+ s64 sum_wait_runtime;
+ s64 sum_sleep_runtime;
+ unsigned long wait_runtime_overruns;
+ unsigned long wait_runtime_underruns;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+ struct cfs_rq *cfs_rq;
+ /* rq "owned" by this entity/group: */
+ struct cfs_rq *my_q;
+#endif
+};
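
The inv_weight field of struct load_weight exists so that divisions by an entity's weight can be replaced by a multiply and shift on hot paths; a rough editorial sketch of that idea (the helper name and the 32-bit reciprocal are assumptions, not part of the patch):

	static inline u64 example_div_by_weight(u64 delta,
						const struct load_weight *lw)
	{
		/*
		 * assuming inv_weight was precomputed as 2^32 / weight;
		 * overflow handling is omitted for clarity
		 */
		return (delta * lw->inv_weight) >> 32;
	}
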
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -832,23 +902,20 @@ struct task_struct {
int oncpu;
#endif
#endif
- int load_weight; /* for niceness load balancing purposes */
+
int prio, static_prio, normal_prio;
struct list_head run_list;
- struct prio_array *array;
+ struct sched_class *sched_class;
+ struct sched_entity se;
unsigned short ioprio;
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
- unsigned long sleep_avg;
- unsigned long long timestamp, last_ran;
- unsigned long long sched_time; /* sched_clock time spent running */
- enum sleep_type sleep_type;
unsigned int policy;
cpumask_t cpus_allowed;
- unsigned int time_slice, first_time_slice;
+ unsigned int time_slice;
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
@@ -1078,6 +1145,37 @@ struct task_struct {
#endif
};
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO 100
+#define MAX_RT_PRIO MAX_USER_RT_PRIO
+
+#define MAX_PRIO (MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+ if (unlikely(prio < MAX_RT_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+ return rt_prio(p->prio);
+}
+
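
For reference, the 40 non-realtime priority levels correspond to nice values -20..19, so a conversion along the following lines is commonly used (editorial sketch; the helper names are placeholders, not part of this patch):

	static inline int example_nice_to_prio(int nice)
	{
		return MAX_RT_PRIO + nice + 20;	/* nice 0 -> DEFAULT_PRIO (120) */
	}

	static inline int example_prio_to_nice(int prio)
	{
		return prio - MAX_RT_PRIO - 20;
	}
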
static inline pid_t process_group(struct task_struct *tsk)
{
return tsk->signal->pgrp;
@@ -1223,7 +1321,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
extern unsigned long long sched_clock(void);
extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
@@ -1232,6 +1330,8 @@ extern void sched_exec(void);
#define sched_exec() {}
#endif
+extern void sched_clock_unstable_event(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
@@ -1240,6 +1340,14 @@ static inline void idle_task_exit(void) {}
extern void sched_idle_next(void);
+extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_batch_wakeup_granularity;
+extern unsigned int sysctl_sched_stat_granularity;
+extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -1317,8 +1425,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
-extern void FASTCALL(sched_exit(struct task_struct * p));
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
@@ -1406,7 +1514,7 @@ extern struct mm_struct * mm_alloc(void);
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
- if (atomic_dec_and_test(&mm->mm_count))
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
@@ -1638,10 +1746,7 @@ static inline unsigned int task_cpu(const struct task_struct *p)
return task_thread_info(p)->cpu;
}
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
- task_thread_info(p)->cpu = cpu;
-}
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
diff --git a/include/linux/splice.h b/include/linux/splice.h
new file mode 100644
index 00000000000..33e447f98a5
--- /dev/null
+++ b/include/linux/splice.h
@@ -0,0 +1,73 @@
+/*
+ * Function declarations and data structures related to the splice
+ * implementation.
+ *
+ * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
+ *
+ */
+#ifndef SPLICE_H
+#define SPLICE_H
+
+#include <linux/pipe_fs_i.h>
+
+/*
+ * splice is tied to pipes as a transport (at least for now), so we'll just
+ * add the splice flags here.
+ */
+#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
+#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+ /* we may still block on the fd we splice */
+ /* from/to, of course */
+#define SPLICE_F_MORE (0x04) /* expect more data */
+#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
+
+/*
+ * Passed to the actors
+ */
+struct splice_desc {
+ unsigned int len, total_len; /* current and remaining length */
+ unsigned int flags; /* splice flags */
+ /*
+ * actor() private data
+ */
+ union {
+ void __user *userptr; /* memory to write to */
+ struct file *file; /* file to read/write */
+ void *data; /* cookie */
+ } u;
+ loff_t pos; /* file position */
+};
+
+struct partial_page {
+ unsigned int offset;
+ unsigned int len;
+ unsigned long private;
+};
+
+/*
+ * Passed to splice_to_pipe
+ */
+struct splice_pipe_desc {
+ struct page **pages; /* page map */
+ struct partial_page *partial; /* pages[] may not be contig */
+ int nr_pages; /* number of pages in map */
+ unsigned int flags; /* splice flags */
+ const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+};
+
+typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
+ struct splice_desc *);
+typedef int (splice_direct_actor)(struct pipe_inode_info *,
+ struct splice_desc *);
+
+extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
+ loff_t *, size_t, unsigned int,
+ splice_actor *);
+extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
+ struct splice_desc *, splice_actor *);
+extern ssize_t splice_to_pipe(struct pipe_inode_info *,
+ struct splice_pipe_desc *);
+extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+ splice_direct_actor *);
+
+#endif
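
A minimal editorial sketch of the actor contract (not part of this patch; the actor name is a placeholder): an actor is called once per pipe buffer and returns how many bytes of that buffer it consumed, or a negative error.

	static int example_discard_actor(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf,
					 struct splice_desc *sd)
	{
		/*
		 * A real actor would confirm the buffer (e.g. via
		 * generic_pipe_buf_confirm()) and then map and copy its
		 * page; here we simply claim to have consumed the data.
		 */
		return sd->len < buf->len ? sd->len : buf->len;
	}

	/* used roughly as: ret = __splice_from_pipe(pipe, &sd, example_discard_actor); */
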
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 4a7ae8ab6eb..129d50f2225 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -253,7 +253,7 @@ struct svc_rqst {
* determine what device number
* to report (real or virtual)
*/
- int rq_sendfile_ok; /* turned off in gss privacy
+ int rq_splice_ok; /* turned off in gss privacy
* to prevent encrypting page
* cache pages */
wait_queue_head_t rq_wait; /* synchronization */
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a9d1f049cc1..da6c39b2d05 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -98,7 +98,7 @@
.cache_nice_tries = 0, \
.busy_idx = 0, \
.idle_idx = 0, \
- .newidle_idx = 1, \
+ .newidle_idx = 0, \
.wake_idx = 0, \
.forkexec_idx = 0, \
.flags = SD_LOAD_BALANCE \
@@ -128,14 +128,15 @@
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
.busy_idx = 2, \
- .idle_idx = 1, \
- .newidle_idx = 2, \
+ .idle_idx = 0, \
+ .newidle_idx = 0, \
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
| SD_WAKE_AFFINE \
+ | SD_WAKE_IDLE \
| SD_SHARE_PKG_RESOURCES\
| BALANCE_FOR_MC_POWER, \
.last_balance = jiffies, \
@@ -158,14 +159,15 @@
.imbalance_pct = 125, \
.cache_nice_tries = 1, \
.busy_idx = 2, \
- .idle_idx = 1, \
- .newidle_idx = 2, \
+ .idle_idx = 0, \
+ .newidle_idx = 0, \
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
| SD_WAKE_AFFINE \
+ | SD_WAKE_IDLE \
| BALANCE_FOR_PKG_POWER,\
.last_balance = jiffies, \
.balance_interval = 1, \
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 94bd38a6d94..56aa2ee21f1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -729,6 +729,22 @@ static inline int usb_endpoint_is_isoc_out(const struct usb_endpoint_descriptor
.bcdDevice_lo = (lo), .bcdDevice_hi = (hi)
/**
+ * USB_DEVICE_INTERFACE_PROTOCOL - macro used to describe a usb
+ * device with a specific interface protocol
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches
+ * devices with a specific interface protocol.
+ */
+#define USB_DEVICE_INTERFACE_PROTOCOL(vend,prod,pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_PROTOCOL, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceProtocol = (pr)
+
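
A hypothetical usage sketch of the macro above (not part of this patch; the vendor, product, and protocol values are placeholders, not a real device):

	static struct usb_device_id example_id_table[] = {
		{ USB_DEVICE_INTERFACE_PROTOCOL(0x1234, 0x5678, 0x01) },
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);
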
+/**
* USB_DEVICE_INFO - macro used to describe a class of usb devices
* @cl: bDeviceClass value
* @sc: bDeviceSubClass value
diff --git a/include/linux/wait.h b/include/linux/wait.h
index e820d00e138..0e686280450 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -366,15 +366,15 @@ static inline void remove_wait_queue_locked(wait_queue_head_t *q,
/*
* These are the old interfaces to sleep waiting for an event.
- * They are racy. DO NOT use them, use the wait_event* interfaces above.
- * We plan to remove these interfaces during 2.7.
+ * They are racy.  DO NOT use them; use the wait_event* interfaces above.
+ * We plan to remove these interfaces.
*/
-extern void FASTCALL(sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout));
-extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
-extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout));
+extern void sleep_on(wait_queue_head_t *q);
+extern long sleep_on_timeout(wait_queue_head_t *q,
+ signed long timeout);
+extern void interruptible_sleep_on(wait_queue_head_t *q);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
+ signed long timeout);
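
As the comment above recommends, the wait_event* interfaces replace these; an editorial sketch of the pattern (identifiers are placeholders, not part of this patch):

	static DECLARE_WAIT_QUEUE_HEAD(example_wq);
	static int example_done;

	static int example_wait(void)
	{
		/* sleeps until example_done becomes non-zero; -ERESTARTSYS on signal */
		return wait_event_interruptible(example_wq, example_done != 0);
	}

	static void example_finish(void)
	{
		example_done = 1;
		wake_up(&example_wq);
	}
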
/*
* Waitqueues which are removed from the waitqueue_head at wakeup time
diff --git a/include/pcmcia/ciscode.h b/include/pcmcia/ciscode.h
index eae7e2e8449..ad6e278ba7f 100644
--- a/include/pcmcia/ciscode.h
+++ b/include/pcmcia/ciscode.h
@@ -126,4 +126,6 @@
#define MANFID_POSSIO 0x030c
#define PRODID_POSSIO_GCC 0x0003
+#define MANFID_NEC 0x0010
+
#endif /* _LINUX_CISCODE_H */