author | Ingo Molnar <mingo@elte.hu> | 2008-10-13 11:05:51 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-13 11:05:51 +0200
commit | accba5f3965d6a9d1bf7c1e1a7995d17e9d521b6 (patch)
tree | 8fb40782e79472ed882ff2098d4dd295557278ee /include/linux
parent | 6852fd9b86d05063c6ef49d2e12e061cc7f6a105 (diff)
parent | 4480f15b3306f43bbb0310d461142b4e897ca45b (diff)
Merge branch 'linus' into oprofile-v2
Conflicts:
arch/x86/kernel/apic_32.c
arch/x86/oprofile/nmi_int.c
include/linux/pci_ids.h
Diffstat (limited to 'include/linux')
247 files changed, 8009 insertions, 1997 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 4c4142c5aa6..282a504bd1d 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -97,6 +97,7 @@ header-y += ioctl.h header-y += ip6_tunnel.h header-y += ipmi_msgdefs.h header-y += ipsec.h +header-y += ip_vs.h header-y += ipx.h header-y += irda.h header-y += iso_fs.h @@ -125,6 +126,7 @@ header-y += pci_regs.h header-y += pfkeyv2.h header-y += pg.h header-y += phantom.h +header-y += phonet.h header-y += pkt_cls.h header-y += pkt_sched.h header-y += posix_types.h @@ -166,7 +168,8 @@ unifdef-y += acct.h unifdef-y += adb.h unifdef-y += adfs_fs.h unifdef-y += agpgart.h -ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),) +ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \ + $(srctree)/include/asm-$(SRCARCH)/a.out.h),) unifdef-y += a.out.h endif unifdef-y += apm_bios.h @@ -178,6 +181,7 @@ unifdef-y += audit.h unifdef-y += auto_fs.h unifdef-y += auxvec.h unifdef-y += binfmts.h +unifdef-y += blktrace_api.h unifdef-y += capability.h unifdef-y += capi.h unifdef-y += cciss_ioctl.h @@ -230,6 +234,7 @@ unifdef-y += if_fddi.h unifdef-y += if_frad.h unifdef-y += if_ltalk.h unifdef-y += if_link.h +unifdef-y += if_phonet.h unifdef-y += if_pppol2tp.h unifdef-y += if_pppox.h unifdef-y += if_tr.h @@ -249,13 +254,16 @@ unifdef-y += isdn.h unifdef-y += isdnif.h unifdef-y += isdn_divertif.h unifdef-y += isdn_ppp.h +unifdef-y += ivtv.h +unifdef-y += ivtvfb.h unifdef-y += joystick.h unifdef-y += kdev_t.h unifdef-y += kd.h unifdef-y += kernelcapi.h unifdef-y += kernel.h unifdef-y += keyboard.h -ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),) +ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \ + $(srctree)/include/asm-$(SRCARCH)/kvm.h),) unifdef-y += kvm.h endif unifdef-y += llc.h @@ -294,7 +302,6 @@ unifdef-y += parport.h unifdef-y += patchkey.h unifdef-y += pci.h unifdef-y += personality.h -unifdef-y += pim.h unifdef-y += pktcdvd.h unifdef-y += pmu.h unifdef-y += poll.h @@ -355,6 +362,7 @@ unifdef-y += virtio_balloon.h unifdef-y += virtio_console.h unifdef-y += virtio_pci.h unifdef-y += virtio_ring.h +unifdef-y += virtio_rng.h unifdef-y += vt.h unifdef-y += wait.h unifdef-y += wanrouter.h diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 972b12bcfb3..2b8df8b420f 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -30,6 +30,8 @@ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 +#include <linux/list.h> + enum chipset_type { NOT_SUPPORTED, SUPPORTED, @@ -78,6 +80,8 @@ struct agp_memory { bool is_bound; bool is_flushed; bool vmalloc_flag; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; }; #define AGP_NORMAL_MEMORY 0 @@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); +extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); diff --git a/include/linux/aio.h b/include/linux/aio.h index b51ddd28444..09b276c3522 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -7,7 +7,6 @@ #include <linux/uio.h> #include <asm/atomic.h> -#include <linux/uio.h> #define AIO_MAXSEGS 4 #define AIO_KIOGRP_NR_ATOMIC 8 diff --git 
a/include/linux/ata.h b/include/linux/ata.h index 1c622e2b050..a53318b8cbd 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -30,6 +30,7 @@ #define __LINUX_ATA_H__ #include <linux/types.h> +#include <asm/byteorder.h> /* defines only for the constants which don't work well as enums */ #define ATA_DMA_BOUNDARY 0xffffUL @@ -46,18 +47,49 @@ enum { ATA_MAX_SECTORS_TAPE = 65535, ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, ATA_ID_FW_REV = 23, ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_CAPABILITY = 49, ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, ATA_ID_MWDMA_MODES = 63, ATA_ID_PIO_MODES = 64, ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, ATA_ID_EIDE_PIO = 67, ATA_ID_EIDE_PIO_IORDY = 68, - ATA_ID_UDMA_MODES = 88, + ATA_ID_QUEUE_DEPTH = 75, ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, + ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = (1 << 1), ATA_ID_SERNO_LEN = 20, @@ -123,13 +155,26 @@ enum { ATA_BUSY = (1 << 7), /* BSY status bit */ ATA_DRDY = (1 << 6), /* device ready */ ATA_DF = (1 << 5), /* device fault */ + ATA_DSC = (1 << 4), /* drive seek complete */ ATA_DRQ = (1 << 3), /* data request i/o */ + ATA_CORR = (1 << 2), /* corrected data error */ + ATA_IDX = (1 << 1), /* index */ ATA_ERR = (1 << 0), /* have an error */ ATA_SRST = (1 << 2), /* software reset */ ATA_ICRC = (1 << 7), /* interface CRC error */ + ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */ ATA_UNC = (1 << 6), /* uncorrectable media error */ + ATA_MC = (1 << 5), /* media changed */ ATA_IDNF = (1 << 4), /* ID not found */ + ATA_MCR = (1 << 3), /* media change requested */ ATA_ABORTED = (1 << 2), /* command aborted */ + ATA_TRK0NF = (1 << 1), /* track 0 not found */ + ATA_AMNF = (1 << 0), /* address mark not found */ + ATAPI_LFS = 0xF0, /* last failed sense */ + ATAPI_EOM = ATA_TRK0NF, /* end of media */ + ATAPI_ILI = ATA_AMNF, /* illegal length indication */ + ATAPI_IO = (1 << 1), + ATAPI_COD = (1 << 0), /* ATA command block registers */ ATA_REG_DATA = 0x00, @@ -192,6 +237,13 @@ enum { ATA_CMD_PMP_WRITE = 0xE8, ATA_CMD_CONF_OVERLAY = 0xB1, ATA_CMD_SEC_FREEZE_LOCK = 0xF5, + ATA_CMD_SMART = 0xB0, + ATA_CMD_MEDIA_LOCK = 0xDE, + ATA_CMD_MEDIA_UNLOCK = 0xDF, + /* marked obsolete in the ATA/ATAPI-7 spec */ + ATA_CMD_RESTORE = 0x10, + /* EXABYTE specific */ + ATA_EXABYTE_ENABLE_NEST = 0xF0, /* READ_LOG_EXT pages */ ATA_LOG_SATA_NCQ = 0x10, @@ -232,6 +284,10 @@ enum { SETFEATURES_WC_ON = 0x02, /* Enable write cache */ SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ + /* Enable/Disable Automatic Acoustic Management */ + SETFEATURES_AAM_ON = 0x42, + SETFEATURES_AAM_OFF = 0xC2, + SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ @@ -254,6 +310,15 @@ enum { ATA_DCO_IDENTIFY = 0xC2, ATA_DCO_SET = 0xC3, + /* feature values for SMART */ + ATA_SMART_ENABLE = 0xD8, + ATA_SMART_READ_VALUES = 0xD0, + 
ATA_SMART_READ_THRESHOLDS = 0xD1, + + /* password used in LBA Mid / LBA High for executing SMART commands */ + ATA_SMART_LBAM_PASS = 0x4F, + ATA_SMART_LBAH_PASS = 0xC2, + /* ATAPI stuff */ ATAPI_PKT_DMA = (1 << 0), ATAPI_DMADIR = (1 << 2), /* ATAPI data dir: @@ -438,17 +503,17 @@ static inline int ata_is_data(u8 prot) /* * id tests */ -#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0) -#define ata_id_has_lba(id) ((id)[49] & (1 << 9)) -#define ata_id_has_dma(id) ((id)[49] & (1 << 8)) +#define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0) +#define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9)) +#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) #define ata_id_has_ncq(id) ((id)[76] & (1 << 8)) -#define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1) -#define ata_id_removeable(id) ((id)[0] & (1 << 7)) +#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) +#define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) #define ata_id_has_atapi_AN(id) \ ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ ((id)[78] & (1 << 5)) ) -#define ata_id_iordy_disable(id) ((id)[49] & (1 << 10)) -#define ata_id_has_iordy(id) ((id)[49] & (1 << 11)) +#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) +#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) #define ata_id_u32(id,n) \ (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) #define ata_id_u64(id,n) \ @@ -457,7 +522,7 @@ static inline int ata_is_data(u8 prot) ((u64) (id)[(n) + 1] << 16) | \ ((u64) (id)[(n) + 0]) ) -#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) +#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) static inline bool ata_id_has_hipm(const u16 *id) { @@ -482,75 +547,106 @@ static inline bool ata_id_has_dipm(const u16 *id) static inline int ata_id_has_fua(const u16 *id) { - if ((id[84] & 0xC000) != 0x4000) + if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) return 0; - return id[84] & (1 << 6); + return id[ATA_ID_CFSSE] & (1 << 6); } static inline int ata_id_has_flush(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) + return 0; + return id[ATA_ID_COMMAND_SET_2] & (1 << 12); +} + +static inline int ata_id_flush_enabled(const u16 *id) +{ + if (ata_id_has_flush(id) == 0) return 0; - return id[83] & (1 << 12); + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return 0; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 12); } static inline int ata_id_has_flush_ext(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - return id[83] & (1 << 13); + return id[ATA_ID_COMMAND_SET_2] & (1 << 13); +} + +static inline int ata_id_flush_ext_enabled(const u16 *id) +{ + if (ata_id_has_flush_ext(id) == 0) + return 0; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return 0; + /* + * some Maxtor disks have bit 13 defined incorrectly + * so check bit 10 too + */ + return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; } static inline int ata_id_has_lba48(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - if (!ata_id_u64(id, 100)) + if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2)) return 0; - return id[83] & (1 << 10); + return id[ATA_ID_COMMAND_SET_2] & (1 << 10); +} + +static inline int ata_id_lba48_enabled(const u16 *id) +{ + if (ata_id_has_lba48(id) == 0) + return 0; + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) + return 0; + return id[ATA_ID_CFS_ENABLE_2] & (1 << 10); } 
static inline int ata_id_hpa_enabled(const u16 *id) { /* Yes children, word 83 valid bits cover word 82 data */ - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; /* And 87 covers 85-87 */ - if ((id[87] & 0xC000) != 0x4000) + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return 0; /* Check command sets enabled as well as supported */ - if ((id[85] & ( 1 << 10)) == 0) + if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0) return 0; - return id[82] & (1 << 10); + return id[ATA_ID_COMMAND_SET_1] & (1 << 10); } static inline int ata_id_has_wcache(const u16 *id) { /* Yes children, word 83 valid bits cover word 82 data */ - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - return id[82] & (1 << 5); + return id[ATA_ID_COMMAND_SET_1] & (1 << 5); } static inline int ata_id_has_pm(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - return id[82] & (1 << 3); + return id[ATA_ID_COMMAND_SET_1] & (1 << 3); } static inline int ata_id_rahead_enabled(const u16 *id) { - if ((id[87] & 0xC000) != 0x4000) + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return 0; - return id[85] & (1 << 6); + return id[ATA_ID_CFS_ENABLE_1] & (1 << 6); } static inline int ata_id_wcache_enabled(const u16 *id) { - if ((id[87] & 0xC000) != 0x4000) + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return 0; - return id[85] & (1 << 5); + return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); } /** @@ -581,7 +677,15 @@ static inline unsigned int ata_id_major_version(const u16 *id) static inline int ata_id_is_sata(const u16 *id) { - return ata_id_major_version(id) >= 5 && id[93] == 0; + /* + * See if word 93 is 0 AND drive is at least ATA-5 compatible + * verifying that word 80 by casting it to a signed type -- + * this trick allows us to filter out the reserved values of + * 0x0000 and 0xffff along with the earlier ATA revisions... + */ + if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020) + return 1; + return 0; } static inline int ata_id_has_tpm(const u16 *id) @@ -599,7 +703,16 @@ static inline int ata_id_has_dword_io(const u16 *id) /* ATA 8 reuses this flag for "trusted" computing */ if (ata_id_major_version(id) > 7) return 0; - if (id[48] & (1 << 0)) + if (id[ATA_ID_DWORD_IO] & (1 << 0)) + return 1; + return 0; +} + +static inline int ata_id_has_unload(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 && + id[ATA_ID_CFSSE] & (1 << 13)) return 1; return 0; } @@ -608,45 +721,50 @@ static inline int ata_id_current_chs_valid(const u16 *id) { /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command has not been issued to the device then the values of - id[54] to id[56] are vendor specific. */ - return (id[53] & 0x01) && /* Current translation valid */ - id[54] && /* cylinders in current translation */ - id[55] && /* heads in current translation */ - id[55] <= 16 && - id[56]; /* sectors in current translation */ + id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. 
*/ + return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */ + id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */ + id[ATA_ID_CUR_HEADS] && /* heads in current translation */ + id[ATA_ID_CUR_HEADS] <= 16 && + id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ } static inline int ata_id_is_cfa(const u16 *id) { - u16 v = id[0]; - if (v == 0x848A) /* Standard CF */ + if (id[ATA_ID_CONFIG] == 0x848A) /* Standard CF */ return 1; /* Could be CF hiding as standard ATA */ - if (ata_id_major_version(id) >= 3 && id[82] != 0xFFFF && - (id[82] & ( 1 << 2))) + if (ata_id_major_version(id) >= 3 && + id[ATA_ID_COMMAND_SET_1] != 0xFFFF && + (id[ATA_ID_COMMAND_SET_1] & (1 << 2))) return 1; return 0; } +static inline int ata_id_is_ssd(const u16 *id) +{ + return id[ATA_ID_ROT_SPEED] == 0x01; +} + static inline int ata_drive_40wire(const u16 *dev_id) { if (ata_id_is_sata(dev_id)) return 0; /* SATA */ - if ((dev_id[93] & 0xE000) == 0x6000) + if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000) return 0; /* 80 wire */ return 1; } static inline int ata_drive_40wire_relaxed(const u16 *dev_id) { - if ((dev_id[93] & 0x2000) == 0x2000) + if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000) return 0; /* 80 wire */ return 1; } static inline int atapi_cdb_len(const u16 *dev_id) { - u16 tmp = dev_id[0] & 0x3; + u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3; switch (tmp) { case 0: return 12; case 1: return 16; @@ -656,7 +774,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) static inline int atapi_command_packet_set(const u16 *dev_id) { - return (dev_id[0] >> 8) & 0x1f; + return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; } static inline int atapi_id_dmadir(const u16 *dev_id) @@ -664,6 +782,76 @@ static inline int atapi_id_dmadir(const u16 *dev_id) return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000); } +/* + * ata_id_is_lba_capacity_ok() performs a sanity check on + * the claimed LBA capacity value for the device. + * + * Returns 1 if LBA capacity looks sensible, 0 otherwise. + * + * It is called only once for each device. + */ +static inline int ata_id_is_lba_capacity_ok(u16 *id) +{ + unsigned long lba_sects, chs_sects, head, tail; + + /* No non-LBA info .. so valid! */ + if (id[ATA_ID_CYLS] == 0) + return 1; + + lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + + /* + * The ATA spec tells large drives to return + * C/H/S = 16383/16/63 independent of their size. + * Some drives can be jumpered to use 15 heads instead of 16. + * Some drives can be jumpered to use 4092 cyls instead of 16383. 
+ */ + if ((id[ATA_ID_CYLS] == 16383 || + (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) && + id[ATA_ID_SECTORS] == 63 && + (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) && + (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS])) + return 1; + + chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS]; + + /* perform a rough sanity check on lba_sects: within 10% is OK */ + if (lba_sects - chs_sects < chs_sects/10) + return 1; + + /* some drives have the word order reversed */ + head = (lba_sects >> 16) & 0xffff; + tail = lba_sects & 0xffff; + lba_sects = head | (tail << 16); + + if (lba_sects - chs_sects < chs_sects/10) { + *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects); + return 1; /* LBA capacity is (now) good */ + } + + return 0; /* LBA capacity value may be bad */ +} + +static inline void ata_id_to_hd_driveid(u16 *id) +{ +#ifdef __BIG_ENDIAN + /* accessed in struct hd_driveid as 8-bit values */ + id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]); + id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]); + id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]); + id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]); + id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]); + + /* as 32-bit values */ + *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY); + *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG); + + /* as 64-bit value */ + *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] = + ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); +#endif +} + static inline int is_multi_taskfile(struct ata_taskfile *tf) { return (tf->command == ATA_CMD_READ_MULTI) || @@ -682,7 +870,7 @@ static inline int ata_ok(u8 status) static inline int lba_28_ok(u64 block, u32 n_block) { /* check the ending block number */ - return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256); + return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256); } static inline int lba_48_ok(u64 block, u32 n_block) diff --git a/include/linux/bio.h b/include/linux/bio.h index 0933a14e641..ff5b4cf9e2d 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -26,21 +26,8 @@ #ifdef CONFIG_BLOCK -/* Platforms may set this to teach the BIO layer about IOMMU hardware. */ #include <asm/io.h> -#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY) -#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1)) -#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE) -#else -#define BIOVEC_VIRT_START_SIZE(x) 0 -#define BIOVEC_VIRT_OVERSIZE(x) 0 -#endif - -#ifndef BIO_VMERGE_BOUNDARY -#define BIO_VMERGE_BOUNDARY 0 -#endif - #define BIO_DEBUG #ifdef BIO_DEBUG @@ -88,25 +75,14 @@ struct bio { /* Number of segments in this BIO after * physical address coalescing is performed. */ - unsigned short bi_phys_segments; - - /* Number of segments after physical and DMA remapping - * hardware coalescing is performed. 
- */ - unsigned short bi_hw_segments; + unsigned int bi_phys_segments; unsigned int bi_size; /* residual I/O count */ - /* - * To keep track of the max hw size, we account for the - * sizes of the first and last virtually mergeable segments - * in this bio - */ - unsigned int bi_hw_front_size; - unsigned int bi_hw_back_size; - unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ + unsigned int bi_comp_cpu; /* completion CPU */ + struct bio_vec *bi_io_vec; /* the actual vec list */ bio_end_io_t *bi_end_io; @@ -126,11 +102,14 @@ struct bio { #define BIO_UPTODATE 0 /* ok after I/O completion */ #define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ #define BIO_EOF 2 /* out-out-bounds error */ -#define BIO_SEG_VALID 3 /* nr_hw_seg valid */ +#define BIO_SEG_VALID 3 /* bi_phys_segments valid */ #define BIO_CLONED 4 /* doesn't own data */ #define BIO_BOUNCED 5 /* bio is a bounce bio */ #define BIO_USER_MAPPED 6 /* contains user pages */ #define BIO_EOPNOTSUPP 7 /* not supported */ +#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ +#define BIO_NULL_MAPPED 9 /* contains invalid user pages */ +#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) /* @@ -144,18 +123,31 @@ struct bio { /* * bio bi_rw flags * - * bit 0 -- read (not set) or write (set) + * bit 0 -- data direction + * If not set, bio is a read from device. If set, it's a write to device. * bit 1 -- rw-ahead when set * bit 2 -- barrier + * Insert a serialization point in the IO queue, forcing previously + * submitted IO to be completed before this oen is issued. * bit 3 -- fail fast, don't want low level driver retries * bit 4 -- synchronous I/O hint: the block layer will unplug immediately + * Note that this does NOT indicate that the IO itself is sync, just + * that the block layer will not postpone issue of this IO by plugging. + * bit 5 -- metadata request + * Used for tracing to differentiate metadata and data IO. May also + * get some preferential treatment in the IO scheduler + * bit 6 -- discard sectors + * Informs the lower level device that this range of sectors is no longer + * used by the file system and may thus be freed by the device. Used + * for flash based storage. 
*/ -#define BIO_RW 0 -#define BIO_RW_AHEAD 1 +#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ +#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ #define BIO_RW_BARRIER 2 #define BIO_RW_FAILFAST 3 #define BIO_RW_SYNC 4 #define BIO_RW_META 5 +#define BIO_RW_DISCARD 6 /* * upper 16 bits of bi_rw define the io priority of this bio @@ -185,14 +177,15 @@ struct bio { #define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST)) #define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD)) #define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META)) -#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size) +#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD)) +#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) static inline unsigned int bio_cur_sectors(struct bio *bio) { if (bio->bi_vcnt) return bio_iovec(bio)->bv_len >> 9; - - return 0; + else /* dataless requests such as discard */ + return bio->bi_size >> 9; } static inline void *bio_data(struct bio *bio) @@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio) ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) #endif -#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \ - ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0) #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ (((addr1) | (mask)) == (((addr2) - 1) | (mask))) #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ @@ -319,15 +310,14 @@ struct bio_pair { atomic_t cnt; int error; }; -extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, - int first_sectors); -extern mempool_t *bio_split_pool; +extern struct bio_pair *bio_split(struct bio *bi, int first_sectors); extern void bio_pair_release(struct bio_pair *dbio); extern struct bio_set *bioset_create(int, int); extern void bioset_free(struct bio_set *); extern struct bio *bio_alloc(gfp_t, int); +extern struct bio *bio_kmalloc(gfp_t, int); extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern void bio_put(struct bio *); extern void bio_free(struct bio *, struct bio_set *); @@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *); extern void bio_endio(struct bio *, int); struct request_queue; extern int bio_phys_segments(struct request_queue *, struct bio *); -extern int bio_hw_segments(struct request_queue *, struct bio *); extern void __bio_clone(struct bio *, struct bio *); extern struct bio *bio_clone(struct bio *, gfp_t); @@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); extern int bio_get_nr_vecs(struct block_device *); +extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int); extern struct bio *bio_map_user(struct request_queue *, struct block_device *, - unsigned long, unsigned int, int); + unsigned long, unsigned int, int, gfp_t); struct sg_iovec; +struct rq_map_data; extern struct bio *bio_map_user_iov(struct request_queue *, struct block_device *, - struct sg_iovec *, int, int); + struct sg_iovec *, int, int, gfp_t); extern void bio_unmap_user(struct bio *); extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, gfp_t); @@ -359,15 +350,25 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, gfp_t, int); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); 
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); -extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *, - int, int); +extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *, + unsigned long, unsigned int, int, gfp_t); +extern struct bio *bio_copy_user_iov(struct request_queue *, + struct rq_map_data *, struct sg_iovec *, + int, int, gfp_t); extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); extern unsigned int bvec_nr_vecs(unsigned short idx); /* + * Allow queuer to specify a completion CPU for this bio + */ +static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) +{ + bio->bi_comp_cpu = cpu; +} + +/* * bio_set is used to allow other portions of the IO system to * allocate their own private memory pools for bio and iovec structures. * These memory pools in turn all allocate from the bio_slab @@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) #define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) +/* + * Check whether this bio carries any data or not. A NULL bio is allowed. + */ +static inline int bio_has_data(struct bio *bio) +{ + return bio && bio->bi_io_vec != NULL; +} + #if defined(CONFIG_BLK_DEV_INTEGRITY) #define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) @@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, #define bip_for_each_vec(bvl, bip, i) \ __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) -static inline int bio_integrity(struct bio *bio) -{ -#if defined(CONFIG_BLK_DEV_INTEGRITY) - return bio->bi_integrity != NULL; -#else - return 0; -#endif -} +#define bio_integrity(bio) (bio->bi_integrity != NULL) extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1abfe664c44..89781fd4885 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); +extern int bitmap_scnprintf_len(unsigned int nr_bits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 88d68081a0f..a92d9e4ea96 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -16,7 +16,9 @@ #include <linux/bio.h> #include <linux/module.h> #include <linux/stringify.h> +#include <linux/gfp.h> #include <linux/bsg.h> +#include <linux/smp.h> #include <asm/scatterlist.h> @@ -54,7 +56,6 @@ enum rq_cmd_type_bits { REQ_TYPE_PM_SUSPEND, /* suspend request */ REQ_TYPE_PM_RESUME, /* resume request */ REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ - REQ_TYPE_FLUSH, /* flush request */ REQ_TYPE_SPECIAL, /* driver defined type */ REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ /* @@ -76,19 +77,18 @@ enum rq_cmd_type_bits { * */ enum { - /* - * just examples for now - */ REQ_LB_OP_EJECT = 0x40, /* eject request */ - REQ_LB_OP_FLUSH = 0x41, /* flush device */ + REQ_LB_OP_FLUSH = 
0x41, /* flush request */ + REQ_LB_OP_DISCARD = 0x42, /* discard sectors */ }; /* - * request type modified bits. first three bits match BIO_RW* bits, important + * request type modified bits. first two bits match BIO_RW* bits, important */ enum rq_flag_bits { __REQ_RW, /* not set, read. set, write */ __REQ_FAILFAST, /* no low level driver retries */ + __REQ_DISCARD, /* request to discard sectors */ __REQ_SORTED, /* elevator knows about this request */ __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ __REQ_HARDBARRIER, /* may not be passed by drive either */ @@ -111,6 +111,7 @@ enum rq_flag_bits { }; #define REQ_RW (1 << __REQ_RW) +#define REQ_DISCARD (1 << __REQ_DISCARD) #define REQ_FAILFAST (1 << __REQ_FAILFAST) #define REQ_SORTED (1 << __REQ_SORTED) #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) @@ -140,12 +141,14 @@ enum rq_flag_bits { */ struct request { struct list_head queuelist; - struct list_head donelist; + struct call_single_data csd; + int cpu; struct request_queue *q; unsigned int cmd_flags; enum rq_cmd_type_bits cmd_type; + unsigned long atomic_flags; /* Maintain bio traversal state for part by part I/O submission. * hard_* are block layer internals, no driver should touch them! @@ -190,13 +193,6 @@ struct request { */ unsigned short nr_phys_segments; - /* Number of scatter-gather addr+len pairs after - * physical and DMA remapping hardware coalescing is performed. - * This is the number of scatter-gather entries the driver - * will actually have to deal with after DMA mapping is done. - */ - unsigned short nr_hw_segments; - unsigned short ioprio; void *special; @@ -220,6 +216,8 @@ struct request { void *data; void *sense; + unsigned long deadline; + struct list_head timeout_list; unsigned int timeout; int retries; @@ -233,6 +231,11 @@ struct request { struct request *next_rq; }; +static inline unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} + /* * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME * requests. Some step values could eventually be made generic. 
@@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q); typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); typedef int (prep_rq_fn) (struct request_queue *, struct request *); typedef void (unplug_fn) (struct request_queue *); +typedef int (prepare_discard_fn) (struct request_queue *, struct request *); struct bio_vec; struct bvec_merge_data { @@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, typedef void (prepare_flush_fn) (struct request_queue *, struct request *); typedef void (softirq_done_fn)(struct request *); typedef int (dma_drain_needed_fn)(struct request *); +typedef int (lld_busy_fn) (struct request_queue *q); + +enum blk_eh_timer_return { + BLK_EH_NOT_HANDLED, + BLK_EH_HANDLED, + BLK_EH_RESET_TIMER, +}; + +typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); enum blk_queue_state { Queue_down, @@ -280,6 +293,15 @@ struct blk_queue_tag { atomic_t refcnt; /* map can be shared */ }; +#define BLK_SCSI_MAX_CMDS (256) +#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) + +struct blk_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; + struct kobject kobj; +}; + struct request_queue { /* @@ -298,10 +320,13 @@ struct request_queue make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; unplug_fn *unplug_fn; + prepare_discard_fn *prepare_discard_fn; merge_bvec_fn *merge_bvec_fn; prepare_flush_fn *prepare_flush_fn; softirq_done_fn *softirq_done_fn; + rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; + lld_busy_fn *lld_busy_fn; /* * Dispatch queue sorting @@ -376,6 +401,10 @@ struct request_queue unsigned int nr_sorted; unsigned int in_flight; + unsigned int rq_timeout; + struct timer_list timeout; + struct list_head timeout_list; + /* * sg stuff */ @@ -398,6 +427,7 @@ struct request_queue #if defined(CONFIG_BLK_DEV_BSG) struct bsg_class_device bsg_dev; #endif + struct blk_cmd_filter cmd_filter; }; #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ @@ -411,6 +441,10 @@ struct request_queue #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ #define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ +#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */ +#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ +#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ +#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ static inline int queue_is_locked(struct request_queue *q) { @@ -516,7 +550,10 @@ enum { #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) +#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) +#define blk_queue_stackable(q) \ + test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) @@ -526,16 +563,18 @@ enum { #define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) -#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) +#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || 
blk_discard_rq(rq))) #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) #define blk_pm_request(rq) \ (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) +#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) +#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) #define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) /* rq->queuelist of dequeued request must be list_empty() */ @@ -582,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw) #define RQ_NOMERGE_FLAGS \ (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) #define rq_mergeable(rq) \ - (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) + (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ + (blk_discard_rq(rq) || blk_fs_request((rq)))) /* * q->prep_rq_fn return values @@ -627,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) } #endif /* CONFIG_MMU */ +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; +}; + struct req_iterator { int i; struct bio *bio; @@ -654,7 +700,12 @@ extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); +extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); +extern int blk_lld_busy(struct request_queue *q); +extern int blk_insert_cloned_request(struct request_queue *q, + struct request *rq); extern void blk_plug_device(struct request_queue *); +extern void blk_plug_device_unlocked(struct request_queue *); extern int blk_remove_plug(struct request_queue *); extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_cmd_ioctl(struct file *, struct request_queue *, @@ -694,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q); extern void __blk_run_queue(struct request_queue *); extern void blk_run_queue(struct request_queue *); extern void blk_start_queueing(struct request_queue *); -extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); +extern int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void __user *, unsigned long, + gfp_t); extern int blk_rq_unmap_user(struct bio *); extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); extern int blk_rq_map_user_iov(struct request_queue *, struct request *, - struct sg_iovec *, int, unsigned int); + struct rq_map_data *, struct sg_iovec *, int, + unsigned int, gfp_t); extern int blk_execute_rq(struct request_queue *, struct gendisk *, struct request *, int); extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, @@ -739,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error, extern int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes); extern void end_request(struct request *, int); -extern void end_queued_request(struct request *, int); -extern void 
end_dequeued_request(struct request *, int); extern int blk_end_request_callback(struct request *rq, int error, unsigned int nr_bytes, int (drv_callback)(struct request *)); extern void blk_complete_request(struct request *); +extern void __blk_complete_request(struct request *); +extern void blk_abort_request(struct request *); +extern void blk_abort_queue(struct request_queue *); +extern void blk_update_request(struct request *rq, int error, + unsigned int nr_bytes); /* * blk_end_request() takes bytes instead of sectors as a complete size. @@ -779,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size); +extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); +extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); +extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); extern int blk_do_ordered(struct request_queue *, struct request **); @@ -806,8 +867,6 @@ extern void blk_put_queue(struct request_queue *); /* * tag stuff */ -#define blk_queue_tag_depth(q) ((q)->queue_tags->busy) -#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); @@ -828,15 +887,23 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, } extern int blkdev_issue_flush(struct block_device *, sector_t *); +extern int blkdev_issue_discard(struct block_device *, + sector_t sector, sector_t nr_sects, gfp_t); + +static inline int sb_issue_discard(struct super_block *sb, + sector_t block, sector_t nr_blocks) +{ + block <<= (sb->s_blocksize_bits - 9); + nr_blocks <<= (sb->s_blocksize_bits - 9); + return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL); +} /* * command filter functions */ -extern int blk_verify_command(struct file *file, unsigned char *cmd); -extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter, - unsigned char *cmd, mode_t *f_mode); -extern int blk_register_filter(struct gendisk *disk); -extern void blk_unregister_filter(struct gendisk *disk); +extern int blk_verify_command(struct blk_cmd_filter *filter, + unsigned char *cmd, int has_write_perm); +extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); #define MAX_PHYS_SEGMENTS 128 #define MAX_HW_SEGMENTS 128 @@ -867,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q) return q ? 
q->dma_alignment : 511; } +static inline int blk_rq_aligned(struct request_queue *q, void *addr, + unsigned int len) +{ + unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; + return !((unsigned long)addr & alignment) && !(len & alignment); +} + /* assumes size > 256 */ static inline unsigned int blksize_bits(unsigned int size) { @@ -893,7 +967,7 @@ static inline void put_dev_sector(Sector p) } struct work_struct; -int kblockd_schedule_work(struct work_struct *work); +int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); void kblockd_flush_work(struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ @@ -938,49 +1012,19 @@ struct blk_integrity { extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); extern void blk_integrity_unregister(struct gendisk *); -extern int blk_integrity_compare(struct block_device *, struct block_device *); +extern int blk_integrity_compare(struct gendisk *, struct gendisk *); extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); extern int blk_rq_count_integrity_sg(struct request *); -static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) -{ - if (bi) - return bi->tuple_size; - - return 0; -} - -static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev) +static inline +struct blk_integrity *bdev_get_integrity(struct block_device *bdev) { return bdev->bd_disk->integrity; } -static inline unsigned int bdev_get_tag_size(struct block_device *bdev) +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { - struct blk_integrity *bi = bdev_get_integrity(bdev); - - if (bi) - return bi->tag_size; - - return 0; -} - -static inline int bdev_integrity_enabled(struct block_device *bdev, int rw) -{ - struct blk_integrity *bi = bdev_get_integrity(bdev); - - if (bi == NULL) - return 0; - - if (rw == READ && bi->verify_fn != NULL && - (bi->flags & INTEGRITY_FLAG_READ)) - return 1; - - if (rw == WRITE && bi->generate_fn != NULL && - (bi->flags & INTEGRITY_FLAG_WRITE)) - return 1; - - return 0; + return disk->integrity; } static inline int blk_integrity_rq(struct request *rq) @@ -997,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq) #define blk_rq_count_integrity_sg(a) (0) #define blk_rq_map_integrity_sg(a, b) (0) #define bdev_get_integrity(a) (0) -#define bdev_get_tag_size(a) (0) +#define blk_get_integrity(a) (0) #define blk_integrity_compare(a, b) (0) #define blk_integrity_register(a, b) (0) #define blk_integrity_unregister(a) do { } while (0); diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index d084b8d227a..3a31eb50616 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -1,8 +1,10 @@ #ifndef BLKTRACE_H #define BLKTRACE_H +#ifdef __KERNEL__ #include <linux/blkdev.h> #include <linux/relay.h> +#endif /* * Trace categories @@ -21,6 +23,7 @@ enum blktrace_cat { BLK_TC_NOTIFY = 1 << 10, /* special message */ BLK_TC_AHEAD = 1 << 11, /* readahead */ BLK_TC_META = 1 << 12, /* metadata */ + BLK_TC_DISCARD = 1 << 13, /* discard requests */ BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ }; @@ -47,6 +50,7 @@ enum blktrace_act { __BLK_TA_SPLIT, /* bio was split */ __BLK_TA_BOUNCE, /* bio was bounced */ __BLK_TA_REMAP, /* bio was remapped */ + __BLK_TA_ABORT, /* request aborted */ }; /* @@ -77,6 +81,7 @@ enum blktrace_notify { #define BLK_TA_SPLIT (__BLK_TA_SPLIT) #define BLK_TA_BOUNCE (__BLK_TA_BOUNCE) #define BLK_TA_REMAP (__BLK_TA_REMAP | 
BLK_TC_ACT(BLK_TC_QUEUE)) +#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE)) #define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY)) #define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY)) @@ -89,17 +94,17 @@ enum blktrace_notify { * The trace itself */ struct blk_io_trace { - u32 magic; /* MAGIC << 8 | version */ - u32 sequence; /* event number */ - u64 time; /* in microseconds */ - u64 sector; /* disk offset */ - u32 bytes; /* transfer length */ - u32 action; /* what happened */ - u32 pid; /* who did it */ - u32 device; /* device number */ - u32 cpu; /* on what cpu did it happen */ - u16 error; /* completion error */ - u16 pdu_len; /* length of data after this trace */ + __u32 magic; /* MAGIC << 8 | version */ + __u32 sequence; /* event number */ + __u64 time; /* in microseconds */ + __u64 sector; /* disk offset */ + __u32 bytes; /* transfer length */ + __u32 action; /* what happened */ + __u32 pid; /* who did it */ + __u32 device; /* device number */ + __u32 cpu; /* on what cpu did it happen */ + __u16 error; /* completion error */ + __u16 pdu_len; /* length of data after this trace */ }; /* @@ -117,6 +122,23 @@ enum { Blktrace_stopped, }; +#define BLKTRACE_BDEV_SIZE 32 + +/* + * User setup structure passed with BLKTRACESTART + */ +struct blk_user_trace_setup { + char name[BLKTRACE_BDEV_SIZE]; /* output */ + __u16 act_mask; /* input */ + __u32 buf_size; /* input */ + __u32 buf_nr; /* input */ + __u64 start_lba; + __u64 end_lba; + __u32 pid; +}; + +#ifdef __KERNEL__ +#if defined(CONFIG_BLK_DEV_IO_TRACE) struct blk_trace { int trace_state; struct rchan *rchan; @@ -133,21 +155,6 @@ struct blk_trace { atomic_t dropped; }; -/* - * User setup structure passed with BLKTRACESTART - */ -struct blk_user_trace_setup { - char name[BDEVNAME_SIZE]; /* output */ - u16 act_mask; /* input */ - u32 buf_size; /* input */ - u32 buf_nr; /* input */ - u64 start_lba; - u64 end_lba; - u32 pid; -}; - -#ifdef __KERNEL__ -#if defined(CONFIG_BLK_DEV_IO_TRACE) extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); @@ -195,6 +202,9 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, if (likely(!bt)) return; + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + if (blk_pc_request(rq)) { what |= BLK_TC_ACT(BLK_TC_PC); __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 652470b687c..95837bfb525 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -97,10 +97,14 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 82aa36c53ea..eadaab44015 100644 --- a/include/linux/buffer_head.h 
+++ b/include/linux/buffer_head.h @@ -115,7 +115,6 @@ BUFFER_FNS(Uptodate, uptodate) BUFFER_FNS(Dirty, dirty) TAS_BUFFER_FNS(Dirty, dirty) BUFFER_FNS(Lock, locked) -TAS_BUFFER_FNS(Lock, locked) BUFFER_FNS(Req, req) TAS_BUFFER_FNS(Req, req) BUFFER_FNS(Mapped, mapped) @@ -205,6 +204,8 @@ void block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int block_read_full_page(struct page*, get_block_t*); +int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, + unsigned long from); int block_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t*); @@ -319,10 +320,15 @@ static inline void wait_on_buffer(struct buffer_head *bh) __wait_on_buffer(bh); } +static inline int trylock_buffer(struct buffer_head *bh) +{ + return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); +} + static inline void lock_buffer(struct buffer_head *bh) { might_sleep(); - if (test_set_buffer_locked(bh)) + if (!trylock_buffer(bh)) __lock_buffer(bh); } diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h new file mode 100644 index 00000000000..29f002d73d9 --- /dev/null +++ b/include/linux/byteorder.h @@ -0,0 +1,372 @@ +#ifndef _LINUX_BYTEORDER_H +#define _LINUX_BYTEORDER_H + +#include <linux/types.h> +#include <linux/swab.h> + +#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define one endianness +#endif + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define arch endianness +#endif + +#ifdef __LITTLE_ENDIAN +# undef __LITTLE_ENDIAN +# define __LITTLE_ENDIAN 1234 +#endif + +#ifdef __BIG_ENDIAN +# undef __BIG_ENDIAN +# define __BIG_ENDIAN 4321 +#endif + +#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD) +# define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD) +# define __BIG_ENDIAN_BITFIELD +#endif + +#ifdef __LITTLE_ENDIAN +# define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +# define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +# define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) + +# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)__swab16(x)) +# define __cpu_to_be32(x) ((__force __be32)__swab32(x)) +# define __cpu_to_be64(x) ((__force __be64)__swab64(x)) +#endif + +#ifdef __BIG_ENDIAN +# define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) +# define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +# define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) + +# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)__swab16(x)) +# define __cpu_to_le32(x) ((__force __le32)__swab32(x)) +# define __cpu_to_le64(x) ((__force __le64)__swab64(x)) +#endif + +/* + * These helpers could be phased out 
over time as the base version + * handles constant folding. + */ +#define __constant_htonl(x) __cpu_to_be32(x) +#define __constant_ntohl(x) __be32_to_cpu(x) +#define __constant_htons(x) __cpu_to_be16(x) +#define __constant_ntohs(x) __be16_to_cpu(x) + +#define __constant_le16_to_cpu(x) __le16_to_cpu(x) +#define __constant_le32_to_cpu(x) __le32_to_cpu(x) +#define __constant_le64_to_cpu(x) __le64_to_cpu(x) +#define __constant_be16_to_cpu(x) __be16_to_cpu(x) +#define __constant_be32_to_cpu(x) __be32_to_cpu(x) +#define __constant_be64_to_cpu(x) __be64_to_cpu(x) + +#define __constant_cpu_to_le16(x) __cpu_to_le16(x) +#define __constant_cpu_to_le32(x) __cpu_to_le32(x) +#define __constant_cpu_to_le64(x) __cpu_to_le64(x) +#define __constant_cpu_to_be16(x) __cpu_to_be16(x) +#define __constant_cpu_to_be32(x) __cpu_to_be32(x) +#define __constant_cpu_to_be64(x) __cpu_to_be64(x) + +static inline void __le16_to_cpus(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_le16s(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __le32_to_cpus(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_le32s(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __le64_to_cpus(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_le64s(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __be16_to_cpus(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_be16s(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __be32_to_cpus(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_be32s(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __be64_to_cpus(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_be64s(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline __u16 __le16_to_cpup(const __le16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __le32_to_cpup(const __le32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __le64_to_cpup(const __le64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le16)*p; +#else + return (__force __le16)__swab16p(p); +#endif +} + +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le32)*p; +#else + return (__force __le32)__swab32p(p); +#endif +} + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le64)*p; +#else + return (__force __le64)__swab64p(p); +#endif +} + +static inline __u16 __be16_to_cpup(const __be16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __be32_to_cpup(const __be32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __be64_to_cpup(const __be64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force 
__u64 *)p); +#endif +} + +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be16)*p; +#else + return (__force __be16)__swab16p(p); +#endif +} + +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)*p; +#else + return (__force __be32)__swab32p(p); +#endif +} + +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)*p; +#else + return (__force __be64)__swab64p(p); +#endif +} + +#ifdef __KERNEL__ + +# define le16_to_cpu __le16_to_cpu +# define le32_to_cpu __le32_to_cpu +# define le64_to_cpu __le64_to_cpu +# define be16_to_cpu __be16_to_cpu +# define be32_to_cpu __be32_to_cpu +# define be64_to_cpu __be64_to_cpu +# define cpu_to_le16 __cpu_to_le16 +# define cpu_to_le32 __cpu_to_le32 +# define cpu_to_le64 __cpu_to_le64 +# define cpu_to_be16 __cpu_to_be16 +# define cpu_to_be32 __cpu_to_be32 +# define cpu_to_be64 __cpu_to_be64 + +# define le16_to_cpup __le16_to_cpup +# define le32_to_cpup __le32_to_cpup +# define le64_to_cpup __le64_to_cpup +# define be16_to_cpup __be16_to_cpup +# define be32_to_cpup __be32_to_cpup +# define be64_to_cpup __be64_to_cpup +# define cpu_to_le16p __cpu_to_le16p +# define cpu_to_le32p __cpu_to_le32p +# define cpu_to_le64p __cpu_to_le64p +# define cpu_to_be16p __cpu_to_be16p +# define cpu_to_be32p __cpu_to_be32p +# define cpu_to_be64p __cpu_to_be64p + +# define le16_to_cpus __le16_to_cpus +# define le32_to_cpus __le32_to_cpus +# define le64_to_cpus __le64_to_cpus +# define be16_to_cpus __be16_to_cpus +# define be32_to_cpus __be32_to_cpus +# define be64_to_cpus __be64_to_cpus +# define cpu_to_le16s __cpu_to_le16s +# define cpu_to_le32s __cpu_to_le32s +# define cpu_to_le64s __cpu_to_le64s +# define cpu_to_be16s __cpu_to_be16s +# define cpu_to_be32s __cpu_to_be32s +# define cpu_to_be64s __cpu_to_be64s + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
+ */ +# undef ntohl +# undef ntohs +# undef htonl +# undef htons + +# define ___htonl(x) __cpu_to_be32(x) +# define ___htons(x) __cpu_to_be16(x) +# define ___ntohl(x) __be32_to_cpu(x) +# define ___ntohs(x) __be16_to_cpu(x) + +# define htonl(x) ___htonl(x) +# define ntohl(x) ___ntohl(x) +# define htons(x) ___htons(x) +# define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpup(var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpup(var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpup(var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpup(var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpup(var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpup(var) + val); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_BYTEORDER_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index 02673846d20..9d1fe30b6f6 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -503,8 +503,19 @@ extern const kernel_cap_t __cap_init_eff_set; kernel_cap_t cap_set_effective(const kernel_cap_t pE_new); -int capable(int cap); -int __capable(struct task_struct *t, int cap); +/** + * has_capability - Determine if a task has a superior capability available + * @t: The task in question + * @cap: The capability to be tested for + * + * Return true if the specified task has the given superior capability + * currently in effect, false if not. + * + * Note that this does not set PF_SUPERPRIV on the task. + */ +#define has_capability(t, cap) (security_capable((t), (cap)) == 0) + +extern int capable(int cap); #endif /* __KERNEL__ */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index c33b0dc28e4..ed3a5d473e5 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -127,6 +127,8 @@ extern int clockevents_register_notifier(struct notifier_block *nb); extern int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, ktime_t now); +extern void clockevents_handle_noop(struct clock_event_device *dev); + #ifdef CONFIG_GENERIC_CLOCKEVENTS extern void clockevents_notify(unsigned long reason, void *arg); #else diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h new file mode 100644 index 00000000000..8c0f9505b48 --- /dev/null +++ b/include/linux/cnt32_to_63.h @@ -0,0 +1,80 @@ +/* + * Extend a 32-bit counter to 63 bits + * + * Author: Nicolas Pitre + * Created: December 3, 2006 + * Copyright: MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ */ + +#ifndef __LINUX_CNT32_TO_63_H__ +#define __LINUX_CNT32_TO_63_H__ + +#include <linux/compiler.h> +#include <linux/types.h> +#include <asm/byteorder.h> + +/* this is used only to give gcc a clue about good code generation */ +union cnt32_to_63 { + struct { +#if defined(__LITTLE_ENDIAN) + u32 lo, hi; +#elif defined(__BIG_ENDIAN) + u32 hi, lo; +#endif + }; + u64 val; +}; + + +/** + * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter + * @cnt_lo: The low part of the counter + * + * Many hardware clock counters are only 32 bits wide and therefore have + * a relatively short period making wrap-arounds rather frequent. This + * is a problem when implementing sched_clock() for example, where a 64-bit + * non-wrapping monotonic value is expected to be returned. + * + * To overcome that limitation, let's extend a 32-bit counter to 63 bits + * in a completely lock free fashion. Bits 0 to 31 of the clock are provided + * by the hardware while bits 32 to 62 are stored in memory. The top bit in + * memory is used to synchronize with the hardware clock half-period. When + * the top bit of both counters (hardware and in memory) differ then the + * memory is updated with a new value, incrementing it when the hardware + * counter wraps around. + * + * Because a word store in memory is atomic then the incremented value will + * always be in synch with the top bit indicating to any potential concurrent + * reader if the value in memory is up to date or not with regards to the + * needed increment. And any race in updating the value in memory is harmless + * as the same value would simply be stored more than once. + * + * The only restriction for the algorithm to work properly is that this + * code must be executed at least once per each half period of the 32-bit + * counter to properly update the state bit in memory. This is usually not a + * problem in practice, but if it is then a kernel timer could be scheduled + * to manage for this code to be executed often enough. + * + * Note that the top bit (bit 63) in the returned value should be considered + * as garbage. It is not cleared here because callers are likely to use a + * multiplier on the returned value which can get rid of the top bit + * implicitly by making the multiplier even, therefore saving on a runtime + * clear-bit instruction. Otherwise caller must remember to clear the top + * bit explicitly. 
+ */ +#define cnt32_to_63(cnt_lo) \ +({ \ + static volatile u32 __m_cnt_hi; \ + union cnt32_to_63 __x; \ + __x.hi = __m_cnt_hi; \ + __x.lo = (cnt_lo); \ + if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ + __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ + __x.val; \ +}) + +#endif diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index 31b75311e2c..dcc228aa335 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h @@ -37,7 +37,7 @@ extern const struct file_operations coda_ioctl_operations; /* operations shared over more than one file */ int coda_open(struct inode *i, struct file *f); int coda_release(struct inode *i, struct file *f); -int coda_permission(struct inode *inode, int mask, struct nameidata *nd); +int coda_permission(struct inode *inode, int mask); int coda_revalidate_inode(struct dentry *); int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); int coda_setattr(struct dentry *, struct iattr *); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c8bd2daf95e..8322141ee48 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *); * ACCESS_ONCE() in different C statements. * * This macro does absolutely -nothing- to prevent the CPU from reordering, - * merging, or refetching absolutely anything at any time. + * merging, or refetching absolutely anything at any time. Its main intended + * use is to mediate communication between process-level code and irq/NMI + * handlers, all running on the same CPU. */ #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) diff --git a/include/linux/completion.h b/include/linux/completion.h index d2961b66d53..4a6b604ef7e 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -10,6 +10,18 @@ #include <linux/wait.h> +/** + * struct completion - structure used to maintain state for a "completion" + * + * This is the opaque structure used to maintain the state for a "completion". + * Completions currently use a FIFO to queue threads that have to wait for + * the "completion" event. + * + * See also: complete(), wait_for_completion() (and friends _timeout, + * _interruptible, _interruptible_timeout, and _killable), init_completion(), + * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and + * INIT_COMPLETION(). + */ struct completion { unsigned int done; wait_queue_head_t wait; @@ -21,6 +33,14 @@ struct completion { #define COMPLETION_INITIALIZER_ONSTACK(work) \ ({ init_completion(&work); work; }) +/** + * DECLARE_COMPLETION: - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure. Generally used + * for static declarations. You should use the _ONSTACK variant for automatic + * variables. + */ #define DECLARE_COMPLETION(work) \ struct completion work = COMPLETION_INITIALIZER(work) @@ -29,6 +49,13 @@ struct completion { * completions - so we use the _ONSTACK() variant for those that * are on the kernel stack: */ +/** + * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure on the kernel + * stack. 
+ */ #ifdef CONFIG_LOCKDEP # define DECLARE_COMPLETION_ONSTACK(work) \ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) @@ -36,6 +63,13 @@ struct completion { # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) #endif +/** + * init_completion: - Initialize a dynamically allocated completion + * @x: completion structure that is to be initialized + * + * This inline function will initialize a dynamically created completion + * structure. + */ static inline void init_completion(struct completion *x) { x->done = 0; @@ -49,10 +83,20 @@ extern unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); extern unsigned long wait_for_completion_interruptible_timeout( struct completion *x, unsigned long timeout); +extern bool try_wait_for_completion(struct completion *x); +extern bool completion_done(struct completion *x); extern void complete(struct completion *); extern void complete_all(struct completion *); +/** + * INIT_COMPLETION: - reinitialize a completion structure + * @x: completion structure to be reinitialized + * + * This macro should be used to reinitialize a completion structure so it can + * be reused. This is especially important after complete_all() is used. + */ #define INIT_COMPLETION(x) ((x).done = 0) + #endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index d62c19ff041..7f627775c94 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -40,6 +40,7 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/err.h> #include <asm/atomic.h> @@ -129,8 +130,25 @@ struct configfs_attribute { /* * Users often need to create attribute structures for their configurable * attributes, containing a configfs_attribute member and function pointers - * for the show() and store() operations on that attribute. They can use - * this macro (similar to sysfs' __ATTR) to make defining attributes easier. + * for the show() and store() operations on that attribute. If they don't + * need anything else on the extended attribute structure, they can use + * this macro to define it The argument _item is the name of the + * config_item structure. + */ +#define CONFIGFS_ATTR_STRUCT(_item) \ +struct _item##_attribute { \ + struct configfs_attribute attr; \ + ssize_t (*show)(struct _item *, char *); \ + ssize_t (*store)(struct _item *, const char *, size_t); \ +} + +/* + * With the extended attribute structure, users can use this macro + * (similar to sysfs' __ATTR) to make defining attributes easier. + * An example: + * #define MYITEM_ATTR(_name, _mode, _show, _store) \ + * struct myitem_attribute childless_attr_##_name = \ + * __CONFIGFS_ATTR(_name, _mode, _show, _store) */ #define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ { \ @@ -142,6 +160,52 @@ struct configfs_attribute { .show = _show, \ .store = _store, \ } +/* Here is a readonly version, only requiring a show() operation */ +#define __CONFIGFS_ATTR_RO(_name, _show) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = 0444, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ +} + +/* + * With these extended attributes, the simple show_attribute() and + * store_attribute() operations need to call the show() and store() of the + * attributes. This is a common pattern, so we provide a macro to define + * them. The argument _item is the name of the config_item structure. 
+ * This macro expects the attributes to be named "struct <name>_attribute" + * and the function to_<name>() to exist; + */ +#define CONFIGFS_ATTR_OPS(_item) \ +static ssize_t _item##_attr_show(struct config_item *item, \ + struct configfs_attribute *attr, \ + char *page) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = 0; \ + \ + if (_item##_attr->show) \ + ret = _item##_attr->show(_item, page); \ + return ret; \ +} \ +static ssize_t _item##_attr_store(struct config_item *item, \ + struct configfs_attribute *attr, \ + const char *page, size_t count) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = -EINVAL; \ + \ + if (_item##_attr->store) \ + ret = _item##_attr->store(_item, page, count); \ + return ret; \ +} /* * If allow_link() exists, the item can symlink(2) out to other diff --git a/include/linux/connector.h b/include/linux/connector.h index 96a89d3d672..5c7f9468f75 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -38,8 +38,9 @@ #define CN_W1_VAL 0x1 #define CN_IDX_V86D 0x4 #define CN_VAL_V86D_UVESAFB 0x1 +#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ -#define CN_NETLINK_USERS 5 +#define CN_NETLINK_USERS 6 /* * Maximum connector's message size. diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d7faf880849..c2747ac2ae4 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) #endif int cpu_up(unsigned int cpu); +void notify_cpu_starting(unsigned int cpu); extern void cpu_hotplug_init(void); extern void cpu_maps_update_begin(void); extern void cpu_maps_update_done(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2270ca5ec63..1ee608fd7b7 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -106,6 +106,7 @@ struct cpufreq_policy { #define CPUFREQ_ADJUST (0) #define CPUFREQ_INCOMPATIBLE (1) #define CPUFREQ_NOTIFY (2) +#define CPUFREQ_START (3) #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ @@ -186,7 +187,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int relation); -extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy); +extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, + unsigned int cpu); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); @@ -225,7 +227,9 @@ struct cpufreq_driver { unsigned int (*get) (unsigned int cpu); /* optional */ - unsigned int (*getavg) (unsigned int cpu); + unsigned int (*getavg) (struct cpufreq_policy *policy, + unsigned int cpu); + int (*exit) (struct cpufreq_policy *policy); int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); int (*resume) (struct cpufreq_policy *policy); diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 1b5c98e7fef..d3219d73f8e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -62,15 +62,7 @@ * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set - *ifdef CONFIG_HAS_CPUMASK_OF_CPU - * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v - * cpumask_of_cpu_ptr_next(v, cpu) 
Sets v = &cpumask_of_cpu_map[cpu] - * cpumask_of_cpu_ptr(v, cpu) Combines above two operations - *else - * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v - * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu) - * cpumask_of_cpu_ptr(v, cpu) Combines above two operations - *endif + * (can be used as an lvalue) * CPU_MASK_ALL Initializer - all bits set * CPU_MASK_NONE Initializer - no bits set * unsigned long *cpus_addr(mask) Array of unsigned long's in mask @@ -273,37 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. + */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const cpumask_t *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return (const cpumask_t *)p; +} + +/* + * In cases where we take the address of the cpumask immediately, + * gcc optimizes it out (it's a constant) and there's no huge stack + * variable created: + */ +#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) -#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP -extern cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) -#define cpumask_of_cpu_ptr(v, cpu) \ - const cpumask_t *v = &cpumask_of_cpu(cpu) -#define cpumask_of_cpu_ptr_declare(v) \ - const cpumask_t *v -#define cpumask_of_cpu_ptr_next(v, cpu) \ - v = &cpumask_of_cpu(cpu) -#else -#define cpumask_of_cpu(cpu) \ -({ \ - typeof(_unused_cpumask_arg_) m; \ - if (sizeof(m) == sizeof(unsigned long)) { \ - m.bits[0] = 1UL<<(cpu); \ - } else { \ - cpus_clear(m); \ - cpu_set((cpu), m); \ - } \ - m; \ -}) -#define cpumask_of_cpu_ptr(v, cpu) \ - cpumask_t _##v = cpumask_of_cpu(cpu); \ - const cpumask_t *v = &_##v -#define cpumask_of_cpu_ptr_declare(v) \ - cpumask_t _##v; \ - const cpumask_t *v = &_##v -#define cpumask_of_cpu_ptr_next(v, cpu) \ - _##v = cpumask_of_cpu(cpu) -#endif #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index e8f450c499b..2691926fb50 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -160,7 +160,7 @@ static inline int current_cpuset_is_being_rebound(void) static inline void rebuild_sched_domains(void) { - partition_sched_domains(0, NULL, NULL); + partition_sched_domains(1, NULL, NULL); } #endif /* !CONFIG_CPUSETS */ diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 6cd39a927e1..025e4f57510 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -8,7 +8,13 @@ #include <linux/proc_fs.h> #define ELFCORE_ADDR_MAX (-1ULL) + +#ifdef CONFIG_PROC_VMCORE extern unsigned long long elfcorehdr_addr; +#else +static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; +#endif + extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); extern const struct file_operations proc_vmcore_operations; diff --git a/include/linux/cred.h b/include/linux/cred.h new file mode 100644 index 00000000000..b69222cc1fd --- /dev/null +++ b/include/linux/cred.h @@ -0,0 +1,50 @@ +/* Credentials management + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_CRED_H +#define _LINUX_CRED_H + +#define get_current_user() (get_uid(current->user)) + +#define task_uid(task) ((task)->uid) +#define task_gid(task) ((task)->gid) +#define task_euid(task) ((task)->euid) +#define task_egid(task) ((task)->egid) + +#define current_uid() (current->uid) +#define current_gid() (current->gid) +#define current_euid() (current->euid) +#define current_egid() (current->egid) +#define current_suid() (current->suid) +#define current_sgid() (current->sgid) +#define current_fsuid() (current->fsuid) +#define current_fsgid() (current->fsgid) +#define current_cap() (current->cap_effective) + +#define current_uid_gid(_uid, _gid) \ +do { \ + *(_uid) = current->uid; \ + *(_gid) = current->gid; \ +} while(0) + +#define current_euid_egid(_uid, _gid) \ +do { \ + *(_uid) = current->euid; \ + *(_gid) = current->egid; \ +} while(0) + +#define current_fsuid_fsgid(_uid, _gid) \ +do { \ + *(_uid) = current->fsuid; \ + *(_gid) = current->fsgid; \ +} while(0) + +#endif /* _LINUX_CRED_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index c43dc47fdf7..3d2317e4af2 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -38,6 +38,7 @@ #define CRYPTO_ALG_TYPE_DIGEST 0x00000008 #define CRYPTO_ALG_TYPE_HASH 0x00000009 #define CRYPTO_ALG_TYPE_AHASH 0x0000000a +#define CRYPTO_ALG_TYPE_RNG 0x0000000c #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c @@ -61,6 +62,14 @@ #define CRYPTO_ALG_GENIV 0x00000200 /* + * Set if the algorithm has passed automated run-time testing. Note that + * if there is no run-time testing for a given algorithm it is considered + * to have passed. + */ + +#define CRYPTO_ALG_TESTED 0x00000400 + +/* * Transform masks and values (for crt_flags). 
*/ #define CRYPTO_TFM_REQ_MASK 0x000fff00 @@ -105,6 +114,7 @@ struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; struct crypto_ahash; +struct crypto_rng; struct crypto_tfm; struct crypto_type; struct aead_givcrypt_request; @@ -290,6 +300,15 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +struct rng_alg { + int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); + + unsigned int seedsize; +}; + + #define cra_ablkcipher cra_u.ablkcipher #define cra_aead cra_u.aead #define cra_blkcipher cra_u.blkcipher @@ -298,6 +317,7 @@ struct compress_alg { #define cra_hash cra_u.hash #define cra_ahash cra_u.ahash #define cra_compress cra_u.compress +#define cra_rng cra_u.rng struct crypto_alg { struct list_head cra_list; @@ -325,6 +345,7 @@ struct crypto_alg { struct hash_alg hash; struct ahash_alg ahash; struct compress_alg compress; + struct rng_alg rng; } cra_u; int (*cra_init)(struct crypto_tfm *tfm); @@ -430,6 +451,12 @@ struct compress_tfm { u8 *dst, unsigned int *dlen); }; +struct rng_tfm { + int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, + unsigned int dlen); + int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); +}; + #define crt_ablkcipher crt_u.ablkcipher #define crt_aead crt_u.aead #define crt_blkcipher crt_u.blkcipher @@ -437,6 +464,7 @@ struct compress_tfm { #define crt_hash crt_u.hash #define crt_ahash crt_u.ahash #define crt_compress crt_u.compress +#define crt_rng crt_u.rng struct crypto_tfm { @@ -450,6 +478,7 @@ struct crypto_tfm { struct hash_tfm hash; struct ahash_tfm ahash; struct compress_tfm compress; + struct rng_tfm rng; } crt_u; struct crypto_alg *__crt_alg; @@ -481,6 +510,10 @@ struct crypto_hash { struct crypto_tfm base; }; +struct crypto_rng { + struct crypto_tfm base; +}; + enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, @@ -515,6 +548,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags); struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); void crypto_free_tfm(struct crypto_tfm *tfm); +int alg_test(const char *driver, const char *alg, u32 type, u32 mask); + /* * Transform helpers which query the underlying algorithm. */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 98202c672fd..efba1de629a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -230,6 +230,7 @@ extern void d_delete(struct dentry *); extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_anon(struct inode *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index a90222e3297..08d783592b7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -13,7 +13,6 @@ struct dm_target; struct dm_table; -struct dm_dev; struct mapped_device; struct bio_vec; @@ -84,6 +83,12 @@ void dm_error(const char *message); */ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev); +struct dm_dev { + struct block_device *bdev; + int mode; + char name[16]; +}; + /* * Constructors should call these functions to ensure destination devices * are opened/closed correctly. 
@@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct mapped_device *md); int dm_noflush_suspending(struct dm_target *ti); +union map_info *dm_get_mapinfo(struct bio *bio); /* * Geometry functions. @@ -232,6 +238,11 @@ int dm_table_add_target(struct dm_table *t, const char *type, int dm_table_complete(struct dm_table *t); /* + * Unplug all devices in a table. + */ +void dm_table_unplug_all(struct dm_table *t); + +/* * Table reference counting. */ struct dm_table *dm_get_table(struct mapped_device *md); @@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t); */ int dm_swap_table(struct mapped_device *md, struct dm_table *t); +/* + * A wrapper around vmalloc. + */ +void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); + /*----------------------------------------------------------------- * Macros. *---------------------------------------------------------------*/ diff --git a/include/linux/device.h b/include/linux/device.h index d24a47f80f9..246937c9cbc 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -199,6 +199,11 @@ struct class { struct class_private *p; }; +struct class_dev_iter { + struct klist_iter ki; + const struct device_type *type; +}; + extern struct kobject *sysfs_dev_block_kobj; extern struct kobject *sysfs_dev_char_kobj; extern int __must_check __class_register(struct class *class, @@ -213,6 +218,13 @@ extern void class_unregister(struct class *class); __class_register(class, &__key); \ }) +extern void class_dev_iter_init(struct class_dev_iter *iter, + struct class *class, + struct device *start, + const struct device_type *type); +extern struct device *class_dev_iter_next(struct class_dev_iter *iter); +extern void class_dev_iter_exit(struct class_dev_iter *iter); + extern int class_for_each_device(struct class *class, struct device *start, void *data, int (*fn)(struct device *dev, void *data)); @@ -358,6 +370,7 @@ struct device { struct kobject kobj; char bus_id[BUS_ID_SIZE]; /* position on parent bus */ + const char *init_name; /* initial name of the device */ struct device_type *type; unsigned uevent_suppress:1; @@ -395,7 +408,7 @@ struct device { spinlock_t devres_lock; struct list_head devres_head; - struct list_head node; + struct klist_node knode_class; struct class *class; dev_t devt; /* dev_t, creates the sysfs "dev" */ struct attribute_group **groups; /* optional groups */ @@ -406,7 +419,7 @@ struct device { /* Get the wakeup routines, which depend on struct device */ #include <linux/pm_wakeup.h> -static inline const char *dev_name(struct device *dev) +static inline const char *dev_name(const struct device *dev) { /* will be changed into kobject_name(&dev->kobj) in the near future */ return dev->bus_id; @@ -518,7 +531,7 @@ extern void device_shutdown(void); extern void sysdev_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ -extern const char *dev_driver_string(struct device *dev); +extern const char *dev_driver_string(const struct device *dev); #define dev_printk(level, dev, format, arg...) \ printk(level "%s %s: " format , dev_driver_string(dev) , \ dev_name(dev) , ## arg) diff --git a/include/linux/dlm.h b/include/linux/dlm.h index 203a025e30e..b9cd38603fd 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h @@ -2,7 +2,7 @@ ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. +** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions @@ -65,9 +65,12 @@ struct dlm_lksb { char * sb_lvbptr; }; +/* dlm_new_lockspace() flags */ + #define DLM_LSFL_NODIR 0x00000001 #define DLM_LSFL_TIMEWARN 0x00000002 #define DLM_LSFL_FS 0x00000004 +#define DLM_LSFL_NEWEXCL 0x00000008 #ifdef __KERNEL__ diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h index c6034508fed..3060783c419 100644 --- a/include/linux/dlm_device.h +++ b/include/linux/dlm_device.h @@ -26,7 +26,7 @@ /* Version of the device interface */ #define DLM_DEVICE_VERSION_MAJOR 6 #define DLM_DEVICE_VERSION_MINOR 0 -#define DLM_DEVICE_VERSION_PATCH 0 +#define DLM_DEVICE_VERSION_PATCH 1 /* struct passed to the lock write */ struct dlm_lock_params { diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index fc82446b642..c30879cf93b 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -27,6 +27,7 @@ struct dm9000_plat_data { unsigned int flags; + unsigned char dev_addr[6]; /* allow replacement IO routines */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 952e0f857ac..ba9114ec5d3 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -48,6 +48,11 @@ static inline int is_device_dma_capable(struct device *dev) return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; } +static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size) +{ + return addr + size <= mask; +} + #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> #else @@ -58,6 +63,13 @@ static inline int is_device_dma_capable(struct device *dev) #define dma_sync_single dma_sync_single_for_cpu #define dma_sync_sg dma_sync_sg_for_cpu +static inline u64 dma_get_mask(struct device *dev) +{ + if (dev && dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + return DMA_32BIT_MASK; +} + extern u64 dma_get_required_mask(struct device *dev); static inline unsigned int dma_get_max_seg_size(struct device *dev) diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 56c73b84755..c360c558e59 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -25,9 +25,99 @@ #include <linux/types.h> #include <linux/msi.h> -#ifdef CONFIG_DMAR +#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) struct intel_iommu; +struct dmar_drhd_unit { + struct list_head list; /* list of drhd units */ + struct acpi_dmar_header *hdr; /* ACPI header */ + u64 reg_base_addr; /* register base address*/ + struct pci_dev **devices; /* target device array */ + int devices_cnt; /* target device count */ + u8 ignored:1; /* ignore drhd */ + u8 include_all:1; + struct intel_iommu *iommu; +}; + +extern struct list_head dmar_drhd_units; + +#define for_each_drhd_unit(drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) + +extern int dmar_table_init(void); +extern int early_dmar_detect(void); +extern int dmar_dev_scope_init(void); + +/* Intel IOMMU detection */ +extern void detect_intel_iommu(void); + + +extern int parse_ioapics_under_ir(void); +extern int alloc_iommu(struct dmar_drhd_unit *); +#else +static inline void detect_intel_iommu(void) +{ + return; +} + +static inline int dmar_table_init(void) +{ + return -ENODEV; +} +#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ + +#ifdef CONFIG_INTR_REMAP +extern int intr_remapping_enabled; +extern int 
enable_intr_remapping(int); + +struct irte { + union { + struct { + __u64 present : 1, + fpd : 1, + dst_mode : 1, + redir_hint : 1, + trigger_mode : 1, + dlvry_mode : 3, + avail : 4, + __reserved_1 : 4, + vector : 8, + __reserved_2 : 8, + dest_id : 32; + }; + __u64 low; + }; + + union { + struct { + __u64 sid : 16, + sq : 2, + svt : 2, + __reserved_3 : 44; + }; + __u64 high; + }; +}; +extern int get_irte(int irq, struct irte *entry); +extern int modify_irte(int irq, struct irte *irte_modified); +extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); +extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, + u16 sub_handle); +extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); +extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); +extern int flush_irte(int irq); +extern int free_irte(int irq); + +extern int irq_remapped(int irq); +extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); +extern struct intel_iommu *map_ioapic_to_ir(int apic); +#else +#define irq_remapped(irq) (0) +#define enable_intr_remapping(mode) (-1) +#define intr_remapping_enabled (0) +#endif + +#ifdef CONFIG_DMAR extern const char *dmar_get_fault_reason(u8 fault_reason); /* Can't use the common MSI interrupt functions @@ -40,47 +130,30 @@ extern void dmar_msi_write(int irq, struct msi_msg *msg); extern int dmar_set_interrupt(struct intel_iommu *iommu); extern int arch_setup_dmar_msi(unsigned int irq); -/* Intel IOMMU detection and initialization functions */ -extern void detect_intel_iommu(void); -extern int intel_iommu_init(void); - -extern int dmar_table_init(void); -extern int early_dmar_detect(void); - -extern struct list_head dmar_drhd_units; +extern int iommu_detected, no_iommu; extern struct list_head dmar_rmrr_units; - -struct dmar_drhd_unit { - struct list_head list; /* list of drhd units */ - u64 reg_base_addr; /* register base address*/ - struct pci_dev **devices; /* target device array */ - int devices_cnt; /* target device count */ - u8 ignored:1; /* ignore drhd */ - u8 include_all:1; - struct intel_iommu *iommu; -}; - struct dmar_rmrr_unit { struct list_head list; /* list of rmrr units */ + struct acpi_dmar_header *hdr; /* ACPI header */ u64 base_address; /* reserved base address*/ u64 end_address; /* reserved end address */ struct pci_dev **devices; /* target devices */ int devices_cnt; /* target device count */ }; -#define for_each_drhd_unit(drhd) \ - list_for_each_entry(drhd, &dmar_drhd_units, list) #define for_each_rmrr_units(rmrr) \ list_for_each_entry(rmrr, &dmar_rmrr_units, list) +/* Intel DMAR initialization functions */ +extern int intel_iommu_init(void); +extern int dmar_disabled; #else -static inline void detect_intel_iommu(void) -{ - return; -} static inline int intel_iommu_init(void) { +#ifdef CONFIG_INTR_REMAP + return dmar_dev_scope_init(); +#else return -ENODEV; +#endif } - #endif /* !CONFIG_DMAR */ #endif /* __DMAR_H__ */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 639624b55fb..92f6f634e3e 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -112,6 +112,7 @@ extern struct request *elv_latter_request(struct request_queue *, struct request extern int elv_register_queue(struct request_queue *q); extern void elv_unregister_queue(struct request_queue *q); extern int elv_may_queue(struct request_queue *, int); +extern void elv_abort_queue(struct request_queue *); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct 
request_queue *, struct request *, gfp_t); extern void elv_put_request(struct request_queue *, struct request *); @@ -173,15 +174,15 @@ enum { #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) /* - * Hack to reuse the donelist list_head as the fifo time holder while + * Hack to reuse the csd.list list_head as the fifo time holder while * the request is in the io scheduler. Saves an unsigned long in rq. */ -#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next) -#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp)) +#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next) +#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp)) #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) do { \ list_del_init(&(rq)->queuelist); \ - INIT_LIST_HEAD(&(rq)->donelist); \ + INIT_LIST_HEAD(&(rq)->csd.list); \ } while (0) /* diff --git a/include/linux/elf.h b/include/linux/elf.h index edc3dac3f02..0b61ca41a04 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -360,6 +360,7 @@ typedef struct elf64_shdr { #define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */ #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ +#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ /* Note header in a PT_NOTE section */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 8bb5e87df36..b4b038b89ee 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -27,9 +27,24 @@ struct ethtool_cmd { __u8 autoneg; /* Enable or disable autonegotiation */ __u32 maxtxpkt; /* Tx pkts before generating tx int */ __u32 maxrxpkt; /* Rx pkts before generating rx int */ - __u32 reserved[4]; + __u16 speed_hi; + __u16 reserved2; + __u32 reserved[3]; }; +static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + #define ETHTOOL_BUSINFO_LEN 32 /* these strings are set to whatever the driver author decides... */ struct ethtool_drvinfo { diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index f5abd130663..27e772cefb6 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -35,6 +35,27 @@ enum fid_type { FILEID_INO32_GEN_PARENT = 2, /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number. + */ + FILEID_BTRFS_WITHOUT_PARENT = 0x4d, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number, + * 64 bit parent object ID, 32 bit parent generation. + */ + FILEID_BTRFS_WITH_PARENT = 0x4e, + + /* + * 64 bit object ID, 64 bit root object ID, + * 32 bit generation number, + * 64 bit parent object ID, 32 bit parent generation, + * 64 bit parent root object ID. + */ + FILEID_BTRFS_WITH_PARENT_ROOT = 0x4f, + + /* * 32 bit block number, 16 bit partition reference, * 16 bit unused, 32 bit generation number. 
*/ diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 80171ee89a2..8120fa1bc23 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -837,6 +837,8 @@ extern void ext3_truncate (struct inode *); extern void ext3_set_inode_flags(struct inode *); extern void ext3_get_inode_flags(struct ext3_inode_info *); extern void ext3_set_aops(struct inode *inode); +extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 len); /* ioctl.c */ extern int ext3_ioctl (struct inode *, struct file *, unsigned int, diff --git a/include/linux/fb.h b/include/linux/fb.h index 3b8870e32af..531ccd5f596 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -976,6 +976,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, /* drivers/video/fb_defio.c */ extern void fb_deferred_io_init(struct fb_info *info); +extern void fb_deferred_io_open(struct fb_info *info, + struct inode *inode, + struct file *file); extern void fb_deferred_io_cleanup(struct fb_info *info); extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync); diff --git a/include/linux/fd.h b/include/linux/fd.h index b6bd41d2b46..f5d194af07a 100644 --- a/include/linux/fd.h +++ b/include/linux/fd.h @@ -15,10 +15,16 @@ struct floppy_struct { sect, /* sectors per track */ head, /* nr of heads */ track, /* nr of tracks */ - stretch; /* !=0 means double track steps */ + stretch; /* bit 0 !=0 means double track steps */ + /* bit 1 != 0 means swap sides */ + /* bits 2..9 give the first sector */ + /* number (the LSB is flipped) */ #define FD_STRETCH 1 #define FD_SWAPSIDES 2 #define FD_ZEROBASED 4 +#define FD_SECTBASEMASK 0x3FC +#define FD_MKSECTBASE(s) (((s) ^ 1) << 2) +#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1) unsigned char gap, /* gap1 size */ diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h new file mode 100644 index 00000000000..671decbd2ae --- /dev/null +++ b/include/linux/fiemap.h @@ -0,0 +1,64 @@ +/* + * FS_IOC_FIEMAP ioctl infrastructure. + * + * Some portions copyright (C) 2007 Cluster File Systems, Inc + * + * Authors: Mark Fasheh <mfasheh@suse.com> + * Kalpak Shah <kalpak.shah@sun.com> + * Andreas Dilger <adilger@sun.com> + */ + +#ifndef _LINUX_FIEMAP_H +#define _LINUX_FIEMAP_H + +struct fiemap_extent { + __u64 fe_logical; /* logical offset in bytes for the start of + * the extent from the beginning of the file */ + __u64 fe_physical; /* physical offset in bytes for the start + * of the extent from the beginning of the disk */ + __u64 fe_length; /* length in bytes for this extent */ + __u64 fe_reserved64[2]; + __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ + __u32 fe_reserved[3]; +}; + +struct fiemap { + __u64 fm_start; /* logical offset (inclusive) at + * which to start mapping (in) */ + __u64 fm_length; /* logical length of mapping which + * userspace wants (in) */ + __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ + __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ + __u32 fm_extent_count; /* size of fm_extents array (in) */ + __u32 fm_reserved; + struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */ +}; + +#define FIEMAP_MAX_OFFSET (~0ULL) + +#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ +#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */ + +#define FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR) + +#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. 
*/ +#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ +#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. + * Sets EXTENT_UNKNOWN. */ +#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read + * while fs is unmounted */ +#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. + * Sets EXTENT_NO_BYPASS. */ +#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be + * block aligned. */ +#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata. + * Sets EXTENT_NOT_ALIGNED.*/ +#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. + * Sets EXTENT_NOT_ALIGNED.*/ +#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but + * no data (i.e. zero). */ +#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively + * support extents. Result + * merged for efficiency. */ + +#endif /* _LINUX_FIEMAP_H */ diff --git a/include/linux/file.h b/include/linux/file.h index 27c64bdc68c..a20259e248a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -34,8 +34,9 @@ extern struct file *fget(unsigned int fd); extern struct file *fget_light(unsigned int fd, int *fput_needed); extern void set_close_on_exec(unsigned int fd, int flag); extern void put_filp(struct file *); +extern int alloc_fd(unsigned start, unsigned flags); extern int get_unused_fd(void); -extern int get_unused_fd_flags(int flags); +#define get_unused_fd_flags(flags) alloc_fd(0, (flags)) extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index acbdbcc1605..6e199c8dfac 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -24,34 +24,8 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -/** - * Adds a firmware mapping entry. This function uses kmalloc() for memory - * allocation. Use firmware_map_add_early() if you want to use the bootmem - * allocator. - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add(resource_size_t start, resource_size_t end, const char *type); - -/** - * Adds a firmware mapping entry. This function uses the bootmem allocator - * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. 
- */ int firmware_map_add_early(resource_size_t start, resource_size_t end, const char *type); diff --git a/include/linux/fs.h b/include/linux/fs.h index 49d8eb7a71b..44e3cb2f196 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -60,6 +60,8 @@ extern int dir_notify_enable; #define MAY_WRITE 2 #define MAY_READ 4 #define MAY_APPEND 8 +#define MAY_ACCESS 16 +#define MAY_OPEN 32 #define FMODE_READ 1 #define FMODE_WRITE 2 @@ -84,7 +86,9 @@ extern int dir_notify_enable; #define READ_META (READ | (1 << BIO_RW_META)) #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) #define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC)) -#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) +#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) +#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) +#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) #define SEL_IN 1 #define SEL_OUT 2 @@ -220,6 +224,7 @@ extern int dir_notify_enable; #define BLKTRACESTART _IO(0x12,116) #define BLKTRACESTOP _IO(0x12,117) #define BLKTRACETEARDOWN _IO(0x12,118) +#define BLKDISCARD _IO(0x12,119) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ #define FIBMAP _IO(0x00,1) /* bmap access */ @@ -229,6 +234,7 @@ extern int dir_notify_enable; #define FS_IOC_SETFLAGS _IOW('f', 2, long) #define FS_IOC_GETVERSION _IOR('v', 1, long) #define FS_IOC_SETVERSION _IOW('v', 2, long) +#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap) #define FS_IOC32_GETFLAGS _IOR('f', 1, int) #define FS_IOC32_SETFLAGS _IOW('f', 2, int) #define FS_IOC32_GETVERSION _IOR('v', 1, int) @@ -277,7 +283,7 @@ extern int dir_notify_enable; #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/dcache.h> -#include <linux/namei.h> +#include <linux/path.h> #include <linux/stat.h> #include <linux/cache.h> #include <linux/kobject.h> @@ -289,6 +295,7 @@ extern int dir_notify_enable; #include <linux/mutex.h> #include <linux/capability.h> #include <linux/semaphore.h> +#include <linux/fiemap.h> #include <asm/atomic.h> #include <asm/byteorder.h> @@ -318,22 +325,23 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * Attribute flags. These should be or-ed together to figure out what * has been changed! */ -#define ATTR_MODE 1 -#define ATTR_UID 2 -#define ATTR_GID 4 -#define ATTR_SIZE 8 -#define ATTR_ATIME 16 -#define ATTR_MTIME 32 -#define ATTR_CTIME 64 -#define ATTR_ATIME_SET 128 -#define ATTR_MTIME_SET 256 -#define ATTR_FORCE 512 /* Not a change, but a change it */ -#define ATTR_ATTR_FLAG 1024 -#define ATTR_KILL_SUID 2048 -#define ATTR_KILL_SGID 4096 -#define ATTR_FILE 8192 -#define ATTR_KILL_PRIV 16384 -#define ATTR_OPEN 32768 /* Truncating from open(O_TRUNC) */ +#define ATTR_MODE (1 << 0) +#define ATTR_UID (1 << 1) +#define ATTR_GID (1 << 2) +#define ATTR_SIZE (1 << 3) +#define ATTR_ATIME (1 << 4) +#define ATTR_MTIME (1 << 5) +#define ATTR_CTIME (1 << 6) +#define ATTR_ATIME_SET (1 << 7) +#define ATTR_MTIME_SET (1 << 8) +#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */ +#define ATTR_ATTR_FLAG (1 << 10) +#define ATTR_KILL_SUID (1 << 11) +#define ATTR_KILL_SGID (1 << 12) +#define ATTR_FILE (1 << 13) +#define ATTR_KILL_PRIV (1 << 14) +#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ +#define ATTR_TIMES_SET (1 << 16) /* * This is the Inode Attributes structure, used for notify_change(). It @@ -440,6 +448,27 @@ static inline size_t iov_iter_count(struct iov_iter *i) return i->count; } +/* + * "descriptor" for what we're up to with a read. 
+ * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. + */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); @@ -481,6 +510,8 @@ struct address_space_operations { int (*migratepage) (struct address_space *, struct page *, struct page *); int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, read_descriptor_t *, + unsigned long); }; /* @@ -499,7 +530,7 @@ struct backing_dev_info; struct address_space { struct inode *host; /* owner: inode, block_device */ struct radix_tree_root page_tree; /* radix tree of all pages */ - rwlock_t tree_lock; /* and rwlock protecting it */ + spinlock_t tree_lock; /* and lock protecting it */ unsigned int i_mmap_writable;/* count VM_SHARED mappings */ struct prio_tree_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ @@ -792,7 +823,7 @@ struct file { #define f_dentry f_path.dentry #define f_vfsmnt f_path.mnt const struct file_operations *f_op; - atomic_t f_count; + atomic_long_t f_count; unsigned int f_flags; mode_t f_mode; loff_t f_pos; @@ -821,8 +852,8 @@ extern spinlock_t files_lock; #define file_list_lock() spin_lock(&files_lock); #define file_list_unlock() spin_unlock(&files_lock); -#define get_file(x) atomic_inc(&(x)->f_count) -#define file_count(x) atomic_read(&(x)->f_count) +#define get_file(x) atomic_long_inc(&(x)->f_count) +#define file_count(x) atomic_long_read(&(x)->f_count) #ifdef CONFIG_DEBUG_WRITECOUNT static inline void file_take_write(struct file *f) @@ -1136,7 +1167,7 @@ extern int vfs_permission(struct nameidata *, int); extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); extern int vfs_mkdir(struct inode *, struct dentry *, int); extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); -extern int vfs_symlink(struct inode *, struct dentry *, const char *, int); +extern int vfs_symlink(struct inode *, struct dentry *, const char *); extern int vfs_link(struct dentry *, struct inode *, struct dentry *); extern int vfs_rmdir(struct inode *, struct dentry *); extern int vfs_unlink(struct inode *, struct dentry *); @@ -1153,6 +1184,20 @@ extern void dentry_unhash(struct dentry *dentry); extern int file_permission(struct file *, int); /* + * VFS FS_IOC_FIEMAP helper definitions. + */ +struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ + struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent + * array */ +}; +int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, + u64 phys, u64 len, u32 flags); +int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); + +/* * File types * * NOTE! These match bits 12..15 of stat.st_mode @@ -1195,27 +1240,6 @@ struct block_device_operations { struct module *owner; }; -/* - * "descriptor" for what we're up to with a read. - * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. 
- * - * The simplest case just copies the data to user - * mode. - */ -typedef struct { - size_t written; - size_t count; - union { - char __user * buf; - void *data; - } arg; - int error; -} read_descriptor_t; - -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); - /* These macros are for out of kernel modules to test that * the kernel supports the unlocked_ioctl and compat_ioctl * fields in struct file_operations. */ @@ -1272,7 +1296,7 @@ struct inode_operations { void * (*follow_link) (struct dentry *, struct nameidata *); void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); - int (*permission) (struct inode *, int, struct nameidata *); + int (*permission) (struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); @@ -1282,6 +1306,8 @@ struct inode_operations { void (*truncate_range)(struct inode *, loff_t, loff_t); long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, + u64 len); }; struct seq_file; @@ -1677,6 +1703,7 @@ extern void chrdev_show(struct seq_file *,off_t); /* fs/block_dev.c */ #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ #ifdef CONFIG_BLOCK #define BLKDEV_MAJOR_HASH_SIZE 255 @@ -1696,9 +1723,9 @@ extern void init_special_inode(struct inode *, umode_t, dev_t); extern void make_bad_inode(struct inode *); extern int is_bad_inode(struct inode *); -extern const struct file_operations read_fifo_fops; -extern const struct file_operations write_fifo_fops; -extern const struct file_operations rdwr_fifo_fops; +extern const struct file_operations read_pipefifo_fops; +extern const struct file_operations write_pipefifo_fops; +extern const struct file_operations rdwr_pipefifo_fops; extern int fs_may_remount_ro(struct super_block *); @@ -1713,6 +1740,9 @@ extern int fs_may_remount_ro(struct super_block *); */ #define bio_data_dir(bio) ((bio)->bi_rw & 1) +extern void check_disk_size_change(struct gendisk *disk, + struct block_device *bdev); +extern int revalidate_disk(struct gendisk *); extern int check_disk_change(struct block_device *); extern int __invalidate_device(struct block_device *); extern int invalidate_partition(struct gendisk *, int); @@ -1767,7 +1797,7 @@ extern int do_remount_sb(struct super_block *sb, int flags, extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *); -extern int permission(struct inode *, int, struct nameidata *); +extern int inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int, int (*check_acl)(struct inode *, int)); @@ -1831,7 +1861,7 @@ extern void clear_inode(struct inode *); extern void destroy_inode(struct inode *); extern struct inode *new_inode(struct super_block *); extern int should_remove_suid(struct dentry *); -extern int remove_suid(struct dentry *); +extern int file_remove_suid(struct file *); extern void __insert_inode_hash(struct inode *, unsigned long hashval); extern void remove_inode_hash(struct inode *); @@ -1975,6 +2005,9 @@ extern int vfs_fstat(unsigned int, struct kstat *); extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg); +extern int generic_block_fiemap(struct 
inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, + u64 len, get_block_t *get_block); extern void get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 282f5421912..9e5a06e78d0 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -7,7 +7,7 @@ struct fs_struct { atomic_t count; rwlock_t lock; int umask; - struct path root, pwd, altroot; + struct path root, pwd; }; #define INIT_FS { \ @@ -19,7 +19,6 @@ struct fs_struct { extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); -extern void set_fs_altroot(void); extern void set_fs_root(struct fs_struct *, struct path *); extern void set_fs_pwd(struct fs_struct *, struct path *); extern struct fs_struct *copy_fs_struct(struct fs_struct *); diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h index 809bb9ffc78..36b61ff3927 100644 --- a/include/linux/fs_uart_pd.h +++ b/include/linux/fs_uart_pd.h @@ -12,7 +12,6 @@ #ifndef FS_UART_PD_H #define FS_UART_PD_H -#include <linux/version.h> #include <asm/types.h> enum fs_uart_id { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f368d041e02..bb384068272 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -98,6 +98,27 @@ static inline void tracer_disable(void) #endif } +/* Ftrace disable/restore without lock. Some synchronization mechanism + * must be used to prevent ftrace_enabled to be changed between + * disable/restore. */ +static inline int __ftrace_enabled_save(void) +{ +#ifdef CONFIG_FTRACE + int saved_ftrace_enabled = ftrace_enabled; + ftrace_enabled = 0; + return saved_ftrace_enabled; +#else + return 0; +#endif +} + +static inline void __ftrace_enabled_restore(int enabled) +{ +#ifdef CONFIG_FTRACE + ftrace_enabled = enabled; +#endif +} + #ifdef CONFIG_FRAME_POINTER /* TODO: need to fix this for ARM */ # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 118216f1bd3..206cdf96c3a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -11,12 +11,15 @@ #include <linux/types.h> #include <linux/kdev_t.h> +#include <linux/rcupdate.h> #ifdef CONFIG_BLOCK -#define kobj_to_dev(k) container_of(k, struct device, kobj) -#define dev_to_disk(device) container_of(device, struct gendisk, dev) -#define dev_to_part(device) container_of(device, struct hd_struct, dev) +#define kobj_to_dev(k) container_of((k), struct device, kobj) +#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) +#define dev_to_part(device) container_of((device), struct hd_struct, __dev) +#define disk_to_dev(disk) (&(disk)->part0.__dev) +#define part_to_dev(part) (&((part)->__dev)) extern struct device_type part_type; extern struct kobject *block_depr; @@ -55,6 +58,9 @@ enum { UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ }; +#define DISK_MAX_PARTS 256 +#define DISK_NAME_LEN 32 + #include <linux/major.h> #include <linux/device.h> #include <linux/smp.h> @@ -87,7 +93,7 @@ struct disk_stats { struct hd_struct { sector_t start_sect; sector_t nr_sects; - struct device dev; + struct device __dev; struct kobject *holder_dir; int policy, partno; #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -100,6 +106,7 @@ struct hd_struct { #else struct disk_stats dkstats; #endif + struct rcu_head rcu_head; }; #define GENHD_FL_REMOVABLE 1 @@ -108,7 +115,7 @@ struct hd_struct { #define GENHD_FL_CD 8 #define GENHD_FL_UP 16 
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 -#define GENHD_FL_FAIL 64 +#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ #define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) @@ -119,99 +126,137 @@ struct blk_scsi_cmd_filter { struct kobject kobj; }; +struct disk_part_tbl { + struct rcu_head rcu_head; + int len; + struct hd_struct *part[]; +}; + struct gendisk { + /* major, first_minor and minors are input parameters only, + * don't use directly. Use disk_devt() and disk_max_parts(). + */ int major; /* major number of driver */ int first_minor; int minors; /* maximum number of minors, =1 for * disks that can't be partitioned. */ - char disk_name[32]; /* name of major driver */ - struct hd_struct **part; /* [indexed by minor] */ + + char disk_name[DISK_NAME_LEN]; /* name of major driver */ + + /* Array of pointers to partitions indexed by partno. + * Protected with matching bdev lock but stat and other + * non-critical accesses use RCU. Always access through + * helpers. + */ + struct disk_part_tbl *part_tbl; + struct hd_struct part0; + struct block_device_operations *fops; struct request_queue *queue; - struct blk_scsi_cmd_filter cmd_filter; void *private_data; - sector_t capacity; int flags; struct device *driverfs_dev; // FIXME: remove - struct device dev; - struct kobject *holder_dir; struct kobject *slave_dir; struct timer_rand_state *random; - int policy; atomic_t sync_io; /* RAID */ - unsigned long stamp; - int in_flight; -#ifdef CONFIG_SMP - struct disk_stats *dkstats; -#else - struct disk_stats dkstats; -#endif struct work_struct async_notify; #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity *integrity; #endif + int node_id; }; -/* - * Macros to operate on percpu disk statistics: - * - * The __ variants should only be called in critical sections. The full - * variants disable/enable preemption. 
- */ -static inline struct hd_struct *get_part(struct gendisk *gendiskp, - sector_t sector) +static inline struct gendisk *part_to_disk(struct hd_struct *part) { - struct hd_struct *part; - int i; - for (i = 0; i < gendiskp->minors - 1; i++) { - part = gendiskp->part[i]; - if (part && part->start_sect <= sector - && sector < part->start_sect + part->nr_sects) - return part; + if (likely(part)) { + if (part->partno) + return dev_to_disk(part_to_dev(part)->parent); + else + return dev_to_disk(part_to_dev(part)); } return NULL; } -#ifdef CONFIG_SMP -#define __disk_stat_add(gendiskp, field, addnd) \ - (per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd) +static inline int disk_max_parts(struct gendisk *disk) +{ + if (disk->flags & GENHD_FL_EXT_DEVT) + return DISK_MAX_PARTS; + return disk->minors; +} -#define disk_stat_read(gendiskp, field) \ -({ \ - typeof(gendiskp->dkstats->field) res = 0; \ - int i; \ - for_each_possible_cpu(i) \ - res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ - res; \ -}) +static inline bool disk_partitionable(struct gendisk *disk) +{ + return disk_max_parts(disk) > 1; +} -static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { - int i; +static inline dev_t disk_devt(struct gendisk *disk) +{ + return disk_to_dev(disk)->devt; +} - for_each_possible_cpu(i) - memset(per_cpu_ptr(gendiskp->dkstats, i), value, - sizeof(struct disk_stats)); -} +static inline dev_t part_devt(struct hd_struct *part) +{ + return part_to_dev(part)->devt; +} -#define __part_stat_add(part, field, addnd) \ - (per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd) +extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno); -#define __all_stat_add(gendiskp, part, field, addnd, sector) \ -({ \ - if (part) \ - __part_stat_add(part, field, addnd); \ - __disk_stat_add(gendiskp, field, addnd); \ -}) +static inline void disk_put_part(struct hd_struct *part) +{ + if (likely(part)) + put_device(part_to_dev(part)); +} + +/* + * Smarter partition iterator without context limits. + */ +#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ +#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ +#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ + +struct disk_part_iter { + struct gendisk *disk; + struct hd_struct *part; + int idx; + unsigned int flags; +}; + +extern void disk_part_iter_init(struct disk_part_iter *piter, + struct gendisk *disk, unsigned int flags); +extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter); +extern void disk_part_iter_exit(struct disk_part_iter *piter); + +extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, + sector_t sector); + +/* + * Macros to operate on percpu disk statistics: + * + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters + * and should be called between disk_stat_lock() and + * disk_stat_unlock(). + * + * part_stat_read() can be called at any time. + * + * part_stat_{add|set_all}() and {init|free}_part_stats are for + * internal use only. 
+ */ +#ifdef CONFIG_SMP +#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) +#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) + +#define __part_stat_add(cpu, part, field, addnd) \ + (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd)) #define part_stat_read(part, field) \ ({ \ - typeof(part->dkstats->field) res = 0; \ + typeof((part)->dkstats->field) res = 0; \ int i; \ for_each_possible_cpu(i) \ - res += per_cpu_ptr(part->dkstats, i)->field; \ + res += per_cpu_ptr((part)->dkstats, i)->field; \ res; \ }) @@ -223,171 +268,107 @@ static inline void part_stat_set_all(struct hd_struct *part, int value) memset(per_cpu_ptr(part->dkstats, i), value, sizeof(struct disk_stats)); } - -#else /* !CONFIG_SMP */ -#define __disk_stat_add(gendiskp, field, addnd) \ - (gendiskp->dkstats.field += addnd) -#define disk_stat_read(gendiskp, field) (gendiskp->dkstats.field) -static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) +static inline int init_part_stats(struct hd_struct *part) { - memset(&gendiskp->dkstats, value, sizeof (struct disk_stats)); + part->dkstats = alloc_percpu(struct disk_stats); + if (!part->dkstats) + return 0; + return 1; } -#define __part_stat_add(part, field, addnd) \ - (part->dkstats.field += addnd) - -#define __all_stat_add(gendiskp, part, field, addnd, sector) \ -({ \ - if (part) \ - part->dkstats.field += addnd; \ - __disk_stat_add(gendiskp, field, addnd); \ -}) - -#define part_stat_read(part, field) (part->dkstats.field) - -static inline void part_stat_set_all(struct hd_struct *part, int value) +static inline void free_part_stats(struct hd_struct *part) { - memset(&part->dkstats, value, sizeof(struct disk_stats)); + free_percpu(part->dkstats); } -#endif /* CONFIG_SMP */ +#else /* !CONFIG_SMP */ +#define part_stat_lock() ({ rcu_read_lock(); 0; }) +#define part_stat_unlock() rcu_read_unlock() -#define disk_stat_add(gendiskp, field, addnd) \ - do { \ - preempt_disable(); \ - __disk_stat_add(gendiskp, field, addnd); \ - preempt_enable(); \ - } while (0) - -#define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1) -#define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1) - -#define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1) -#define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1) - -#define __disk_stat_sub(gendiskp, field, subnd) \ - __disk_stat_add(gendiskp, field, -subnd) -#define disk_stat_sub(gendiskp, field, subnd) \ - disk_stat_add(gendiskp, field, -subnd) - -#define part_stat_add(gendiskp, field, addnd) \ - do { \ - preempt_disable(); \ - __part_stat_add(gendiskp, field, addnd);\ - preempt_enable(); \ - } while (0) - -#define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1) -#define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1) - -#define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1) -#define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1) - -#define __part_stat_sub(gendiskp, field, subnd) \ - __part_stat_add(gendiskp, field, -subnd) -#define part_stat_sub(gendiskp, field, subnd) \ - part_stat_add(gendiskp, field, -subnd) - -#define all_stat_add(gendiskp, part, field, addnd, sector) \ - do { \ - preempt_disable(); \ - __all_stat_add(gendiskp, part, field, addnd, sector); \ - preempt_enable(); \ - } while (0) - -#define __all_stat_dec(gendiskp, field, sector) \ - __all_stat_add(gendiskp, field, -1, sector) -#define all_stat_dec(gendiskp, field, sector) \ - 
all_stat_add(gendiskp, field, -1, sector) - -#define __all_stat_inc(gendiskp, part, field, sector) \ - __all_stat_add(gendiskp, part, field, 1, sector) -#define all_stat_inc(gendiskp, part, field, sector) \ - all_stat_add(gendiskp, part, field, 1, sector) - -#define __all_stat_sub(gendiskp, part, field, subnd, sector) \ - __all_stat_add(gendiskp, part, field, -subnd, sector) -#define all_stat_sub(gendiskp, part, field, subnd, sector) \ - all_stat_add(gendiskp, part, field, -subnd, sector) - -/* Inlines to alloc and free disk stats in struct gendisk */ -#ifdef CONFIG_SMP -static inline int init_disk_stats(struct gendisk *disk) -{ - disk->dkstats = alloc_percpu(struct disk_stats); - if (!disk->dkstats) - return 0; - return 1; -} +#define __part_stat_add(cpu, part, field, addnd) \ + ((part)->dkstats.field += addnd) + +#define part_stat_read(part, field) ((part)->dkstats.field) -static inline void free_disk_stats(struct gendisk *disk) +static inline void part_stat_set_all(struct hd_struct *part, int value) { - free_percpu(disk->dkstats); + memset(&part->dkstats, value, sizeof(struct disk_stats)); } static inline int init_part_stats(struct hd_struct *part) { - part->dkstats = alloc_percpu(struct disk_stats); - if (!part->dkstats) - return 0; return 1; } static inline void free_part_stats(struct hd_struct *part) { - free_percpu(part->dkstats); -} - -#else /* CONFIG_SMP */ -static inline int init_disk_stats(struct gendisk *disk) -{ - return 1; } -static inline void free_disk_stats(struct gendisk *disk) -{ -} +#endif /* CONFIG_SMP */ -static inline int init_part_stats(struct hd_struct *part) +#define part_stat_add(cpu, part, field, addnd) do { \ + __part_stat_add((cpu), (part), field, addnd); \ + if ((part)->partno) \ + __part_stat_add((cpu), &part_to_disk((part))->part0, \ + field, addnd); \ +} while (0) + +#define part_stat_dec(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, -1) +#define part_stat_inc(cpu, gendiskp, field) \ + part_stat_add(cpu, gendiskp, field, 1) +#define part_stat_sub(cpu, gendiskp, field, subnd) \ + part_stat_add(cpu, gendiskp, field, -subnd) + +static inline void part_inc_in_flight(struct hd_struct *part) { - return 1; + part->in_flight++; + if (part->partno) + part_to_disk(part)->part0.in_flight++; } -static inline void free_part_stats(struct hd_struct *part) +static inline void part_dec_in_flight(struct hd_struct *part) { + part->in_flight--; + if (part->partno) + part_to_disk(part)->part0.in_flight--; } -#endif /* CONFIG_SMP */ /* drivers/block/ll_rw_blk.c */ -extern void disk_round_stats(struct gendisk *disk); -extern void part_round_stats(struct hd_struct *part); +extern void part_round_stats(int cpu, struct hd_struct *part); /* drivers/block/genhd.c */ extern int get_blkdev_list(char *, int); extern void add_disk(struct gendisk *disk); extern void del_gendisk(struct gendisk *gp); extern void unlink_gendisk(struct gendisk *gp); -extern struct gendisk *get_gendisk(dev_t dev, int *part); +extern struct gendisk *get_gendisk(dev_t dev, int *partno); +extern struct block_device *bdget_disk(struct gendisk *disk, int partno); extern void set_device_ro(struct block_device *bdev, int flag); extern void set_disk_ro(struct gendisk *disk, int flag); +static inline int get_disk_ro(struct gendisk *disk) +{ + return disk->part0.policy; +} + /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk); extern void rand_initialize_disk(struct gendisk *disk); static inline sector_t get_start_sect(struct block_device *bdev) { - return bdev->bd_contains 
== bdev ? 0 : bdev->bd_part->start_sect; + return bdev->bd_part->start_sect; } static inline sector_t get_capacity(struct gendisk *disk) { - return disk->capacity; + return disk->part0.nr_sects; } static inline void set_capacity(struct gendisk *disk, sector_t size) { - disk->capacity = size; + disk->part0.nr_sects = size; } #ifdef CONFIG_SOLARIS_X86_PARTITION @@ -537,9 +518,12 @@ struct unixware_disklabel { #define ADDPART_FLAG_RAID 1 #define ADDPART_FLAG_WHOLEDISK 2 -extern dev_t blk_lookup_devt(const char *name, int part); -extern char *disk_name (struct gendisk *hd, int part, char *buf); +extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt); +extern void blk_free_devt(dev_t devt); +extern dev_t blk_lookup_devt(const char *name, int partno); +extern char *disk_name (struct gendisk *hd, int partno, char *buf); +extern int disk_expand_part_tbl(struct gendisk *disk, int target); extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); extern void delete_partition(struct gendisk *, int); @@ -556,16 +540,23 @@ extern void blk_register_region(dev_t devt, unsigned long range, void *data); extern void blk_unregister_region(dev_t devt, unsigned long range); -static inline struct block_device *bdget_disk(struct gendisk *disk, int index) -{ - return bdget(MKDEV(disk->major, disk->first_minor) + index); -} +extern ssize_t part_size_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_stat_show(struct device *dev, + struct device_attribute *attr, char *buf); +#ifdef CONFIG_FAIL_MAKE_REQUEST +extern ssize_t part_fail_show(struct device *dev, + struct device_attribute *attr, char *buf); +extern ssize_t part_fail_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* CONFIG_FAIL_MAKE_REQUEST */ #else /* CONFIG_BLOCK */ static inline void printk_all_partitions(void) { } -static inline dev_t blk_lookup_devt(const char *name, int part) +static inline dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); return devt; diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h index c3c19f926e6..14d0df0b574 100644 --- a/include/linux/gfs2_ondisk.h +++ b/include/linux/gfs2_ondisk.h @@ -118,7 +118,11 @@ struct gfs2_sb { char sb_lockproto[GFS2_LOCKNAME_LEN]; char sb_locktable[GFS2_LOCKNAME_LEN]; - /* In gfs1, quota and license dinodes followed */ + + struct gfs2_inum __pad3; /* Was quota inode in gfs1 */ + struct gfs2_inum __pad4; /* Was licence inode in gfs1 */ +#define GFS2_HAS_UUID 1 + __u8 sb_uuid[16]; /* The UUID, maybe 0 for backwards compat */ }; /* diff --git a/include/linux/harrier_defs.h b/include/linux/harrier_defs.h deleted file mode 100644 index efef11db790..00000000000 --- a/include/linux/harrier_defs.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * include/linux/harrier_defs.h - * - * Definitions for Motorola MCG Harrier North Bridge & Memory controller - * - * Author: Dale Farnsworth - * dale.farnsworth@mvista.com - * - * Extracted from asm-ppc/harrier.h by: - * Randy Vinson - * rvinson@mvista.com - * - * Copyright 2001-2002 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef __ASMPPC_HARRIER_DEFS_H -#define __ASMPPC_HARRIER_DEFS_H - -#define HARRIER_DEFAULT_XCSR_BASE 0xfeff0000 - -#define HARRIER_VEND_DEV_ID 0x1057480b - -#define HARRIER_VENI_OFF 0x00 - -#define HARRIER_REVI_OFF 0x05 -#define HARRIER_UCTL_OFF 0xd0 -#define HARRIER_XTAL64_MASK 0x02 - -#define HARRIER_MISC_CSR_OFF 0x1c -#define HARRIER_RSTOUT 0x01000000 -#define HARRIER_SYSCON 0x08000000 -#define HARRIER_EREADY 0x10000000 -#define HARRIER_ERDYS 0x20000000 - -/* Function exception registers */ -#define HARRIER_FEEN_OFF 0x40 /* enable */ -#define HARRIER_FEST_OFF 0x44 /* status */ -#define HARRIER_FEMA_OFF 0x48 /* mask */ -#define HARRIER_FECL_OFF 0x4c /* clear */ - -#define HARRIER_FE_DMA 0x80 -#define HARRIER_FE_MIDB 0x40 -#define HARRIER_FE_MIM0 0x20 -#define HARRIER_FE_MIM1 0x10 -#define HARRIER_FE_MIP 0x08 -#define HARRIER_FE_UA0 0x04 -#define HARRIER_FE_UA1 0x02 -#define HARRIER_FE_ABT 0x01 - -#define HARRIER_SERIAL_0_OFF 0xc0 - -#define HARRIER_MBAR_OFF 0xe0 -#define HARRIER_MBAR_MSK 0xfffc0000 -#define HARRIER_MPIC_CSR_OFF 0xe4 -#define HARRIER_MPIC_OPI_ENABLE 0x40 -#define HARRIER_MPIC_IFEVP_OFF 0x10200 -#define HARRIER_MPIC_IFEVP_VECT_MSK 0xff -#define HARRIER_MPIC_IFEDE_OFF 0x10210 - -/* - * Define the Memory Controller register offsets. - */ -#define HARRIER_SDBA_OFF 0x110 -#define HARRIER_SDBB_OFF 0x114 -#define HARRIER_SDBC_OFF 0x118 -#define HARRIER_SDBD_OFF 0x11c -#define HARRIER_SDBE_OFF 0x120 -#define HARRIER_SDBF_OFF 0x124 -#define HARRIER_SDBG_OFF 0x128 -#define HARRIER_SDBH_OFF 0x12c - -#define HARRIER_SDB_ENABLE 0x00000100 -#define HARRIER_SDB_SIZE_MASK 0xf -#define HARRIER_SDB_SIZE_SHIFT 16 -#define HARRIER_SDB_BASE_MASK 0xff -#define HARRIER_SDB_BASE_SHIFT 24 - -/* - * Define outbound register offsets. - */ -#define HARRIER_OTAD0_OFF 0x220 -#define HARRIER_OTOF0_OFF 0x224 -#define HARRIER_OTAD1_OFF 0x228 -#define HARRIER_OTOF1_OFF 0x22c -#define HARRIER_OTAD2_OFF 0x230 -#define HARRIER_OTOF2_OFF 0x234 -#define HARRIER_OTAD3_OFF 0x238 -#define HARRIER_OTOF3_OFF 0x23c - -#define HARRIER_OTADX_START_MSK 0xffff0000UL -#define HARRIER_OTADX_END_MSK 0x0000ffffUL - -#define HARRIER_OTOFX_OFF_MSK 0xffff0000UL -#define HARRIER_OTOFX_ENA 0x80UL -#define HARRIER_OTOFX_WPE 0x10UL -#define HARRIER_OTOFX_SGE 0x08UL -#define HARRIER_OTOFX_RAE 0x04UL -#define HARRIER_OTOFX_MEM 0x02UL -#define HARRIER_OTOFX_IOM 0x01UL - -/* - * Define generic message passing register offsets - */ -/* Mirrored registers (visible from both PowerPC and PCI space) */ -#define HARRIER_XCSR_MP_BASE_OFF 0x290 /* base offset in XCSR space */ -#define HARRIER_PMEP_MP_BASE_OFF 0x100 /* base offset in PMEM space */ -#define HARRIER_MGOM0_OFF 0x00 /* outbound msg 0 */ -#define HARRIER_MGOM1_OFF 0x04 /* outbound msg 1 */ -#define HARRIER_MGOD_OFF 0x08 /* outbound doorbells */ - -#define HARRIER_MGIM0_OFF 0x10 /* inbound msg 0 */ -#define HARRIER_MGIM1_OFF 0x14 /* inbound msg 1 */ -#define HARRIER_MGID_OFF 0x18 /* inbound doorbells */ - -/* PowerPC-only registers */ -#define HARRIER_MGIDM_OFF 0x20 /* inbound doorbell mask */ - -/* PCI-only registers */ -#define HARRIER_PMEP_MGST_OFF 0x20 /* (outbound) interrupt status */ -#define HARRIER_PMEP_MGMS_OFF 0x24 /* (outbound) interrupt mask */ -#define HARRIER_MG_OMI0 (1<<4) -#define HARRIER_MG_OMI1 (1<<5) - -#define HARRIER_PMEP_MGODM_OFF 0x28 /* outbound doorbell mask */ - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_XCSR_TO_PCFS_OFF 0x300 - -/* - * Define message passing attribute register offset - */ -#define 
HARRIER_MPAT_OFF 0x44 - -/* - * Define inbound attribute register offsets. - */ -#define HARRIER_ITSZ0_OFF 0x48 -#define HARRIER_ITAT0_OFF 0x4c - -#define HARRIER_ITSZ1_OFF 0x50 -#define HARRIER_ITAT1_OFF 0x54 - -#define HARRIER_ITSZ2_OFF 0x58 -#define HARRIER_ITAT2_OFF 0x5c - -#define HARRIER_ITSZ3_OFF 0x60 -#define HARRIER_ITAT3_OFF 0x64 - -/* inbound translation size constants */ -#define HARRIER_ITSZ_MSK 0xff -#define HARRIER_ITSZ_4KB 0x00 -#define HARRIER_ITSZ_8KB 0x01 -#define HARRIER_ITSZ_16KB 0x02 -#define HARRIER_ITSZ_32KB 0x03 -#define HARRIER_ITSZ_64KB 0x04 -#define HARRIER_ITSZ_128KB 0x05 -#define HARRIER_ITSZ_256KB 0x06 -#define HARRIER_ITSZ_512KB 0x07 -#define HARRIER_ITSZ_1MB 0x08 -#define HARRIER_ITSZ_2MB 0x09 -#define HARRIER_ITSZ_4MB 0x0A -#define HARRIER_ITSZ_8MB 0x0B -#define HARRIER_ITSZ_16MB 0x0C -#define HARRIER_ITSZ_32MB 0x0D -#define HARRIER_ITSZ_64MB 0x0E -#define HARRIER_ITSZ_128MB 0x0F -#define HARRIER_ITSZ_256MB 0x10 -#define HARRIER_ITSZ_512MB 0x11 -#define HARRIER_ITSZ_1GB 0x12 -#define HARRIER_ITSZ_2GB 0x13 - -/* inbound translation offset */ -#define HARRIER_ITOF_SHIFT 0x10 -#define HARRIER_ITOF_MSK 0xffff - -/* inbound translation atttributes */ -#define HARRIER_ITAT_PRE (1<<3) -#define HARRIER_ITAT_RAE (1<<4) -#define HARRIER_ITAT_WPE (1<<5) -#define HARRIER_ITAT_MEM (1<<6) -#define HARRIER_ITAT_ENA (1<<7) -#define HARRIER_ITAT_GBL (1<<16) - -#define HARRIER_LBA_OFF 0x80 -#define HARRIER_LBA_MSK (1<<31) - -#define HARRIER_XCSR_SIZE 1024 - -/* macros to calculate message passing register offsets */ -#define HARRIER_MP_XCSR(x) ((u32)HARRIER_XCSR_MP_BASE_OFF + (u32)x) - -#define HARRIER_MP_PMEP(x) ((u32)HARRIER_PMEP_MP_BASE_OFF + (u32)x) - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_MPBAR_OFF PCI_BASE_ADDRESS_0 -#define HARRIER_ITBAR0_OFF PCI_BASE_ADDRESS_1 -#define HARRIER_ITBAR1_OFF PCI_BASE_ADDRESS_2 -#define HARRIER_ITBAR2_OFF PCI_BASE_ADDRESS_3 -#define HARRIER_ITBAR3_OFF PCI_BASE_ADDRESS_4 - -#define HARRIER_XCSR_CONFIG(x) ((u32)HARRIER_XCSR_TO_PCFS_OFF + (u32)x) - -#endif /* __ASMPPC_HARRIER_DEFS_H */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6d93dce61cb..2f245fe63bd 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -47,14 +47,22 @@ enum hrtimer_restart { * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and * does not restart the timer - * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context - * Special mode for tick emultation + * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context + * Special mode for tick emulation and + * scheduler timer. Such timers are per + * cpu and not allowed to be migrated on + * cpu unplug. + * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context + * with timer->base lock unlocked + * used for timers which call wakeup to + * avoid lock order problems with rq->lock */ enum hrtimer_cb_mode { HRTIMER_CB_SOFTIRQ, HRTIMER_CB_IRQSAFE, HRTIMER_CB_IRQSAFE_NO_RESTART, - HRTIMER_CB_IRQSAFE_NO_SOFTIRQ, + HRTIMER_CB_IRQSAFE_PERCPU, + HRTIMER_CB_IRQSAFE_UNLOCKED, }; /* @@ -67,9 +75,10 @@ enum hrtimer_cb_mode { * 0x02 callback function running * 0x04 callback pending (high resolution mode) * - * Special case: + * Special cases: * 0x03 callback function running and enqueued * (was requeued on another CPU) + * 0x09 timer was migrated on CPU hotunplug * The "callback function running and enqueued" status is only possible on * SMP. 
It happens for example when a posix timer expired and the callback * queued a signal. Between dropping the lock which protects the posix timer @@ -87,6 +96,7 @@ enum hrtimer_cb_mode { #define HRTIMER_STATE_ENQUEUED 0x01 #define HRTIMER_STATE_CALLBACK 0x02 #define HRTIMER_STATE_PENDING 0x04 +#define HRTIMER_STATE_MIGRATE 0x08 /** * struct hrtimer - the basic hrtimer structure diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 9a71d4cc88c..32e0ef0f6e1 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -273,7 +273,10 @@ struct hstate {}; #define huge_page_mask(h) PAGE_MASK #define huge_page_order(h) 0 #define huge_page_shift(h) PAGE_SHIFT -#define pages_per_huge_page(h) 1 +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1; +} #endif #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 4862398e05b..bf34c5f4c05 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -39,7 +39,6 @@ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */ #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_PCF8583 25 /* real time clock */ #define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */ @@ -95,7 +94,6 @@ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ #define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */ #define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ -#define I2C_HW_B_G400 0x010009 /* Matrox G400 */ #define I2C_HW_B_I810 0x01000a /* Intel I810 */ #define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ #define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h index e6e9c814da6..f13255e0640 100644 --- a/include/linux/i2c-pnx.h +++ b/include/linux/i2c-pnx.h @@ -12,7 +12,9 @@ #ifndef __I2C_PNX_H__ #define __I2C_PNX_H__ -#include <asm/arch/i2c.h> +#include <linux/pm.h> + +struct platform_device; struct i2c_pnx_mif { int ret; /* Return value */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 08be0d21864..06115128047 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -97,7 +97,19 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client, /** * struct i2c_driver - represent an I2C device driver + * @id: Unique driver ID (optional) * @class: What kind of i2c device we instantiate (for detect) + * @attach_adapter: Callback for bus addition (for legacy drivers) + * @detach_adapter: Callback for bus removal (for legacy drivers) + * @detach_client: Callback for device removal (for legacy drivers) + * @probe: Callback for device binding (new-style drivers) + * @remove: Callback for device unbinding (new-style drivers) + * @shutdown: Callback for device shutdown + * @suspend: Callback for device suspend + * @resume: Callback for device resume + * @command: Callback for bus-wide signaling (optional) + * @driver: Device driver model driver + * @id_table: List of I2C devices supported by this driver * @detect: Callback for device detection * @address_data: The I2C addresses to probe, ignore or force (for detect) * @clients: List of detected clients we created (for i2c-core use only) diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 7d51cbca49a..75ae6d8aba4 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct 
i2o_controller *c, void *ptr, } dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); - if (!dma_mapping_error(dma_addr)) { + if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { *mptr++ = cpu_to_le32(0x7C020002); diff --git a/include/linux/ide.h b/include/linux/ide.h index b846bc44a27..a9d82d6e6bd 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -8,7 +8,7 @@ #include <linux/init.h> #include <linux/ioport.h> -#include <linux/hdreg.h> +#include <linux/ata.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> @@ -17,6 +17,7 @@ #include <linux/device.h> #include <linux/pci.h> #include <linux/completion.h> +#include <linux/pm.h> #ifdef CONFIG_BLK_DEV_IDEACPI #include <acpi/acpi.h> #endif @@ -87,12 +88,13 @@ struct ide_io_ports { }; #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) -#define BAD_R_STAT (BUSY_STAT | ERR_STAT) -#define BAD_W_STAT (BAD_R_STAT | WRERR_STAT) -#define BAD_STAT (BAD_R_STAT | DRQ_STAT) -#define DRIVE_READY (READY_STAT | SEEK_STAT) -#define BAD_CRC (ABRT_ERR | ICRC_ERR) +#define BAD_R_STAT (ATA_BUSY | ATA_ERR) +#define BAD_W_STAT (BAD_R_STAT | ATA_DF) +#define BAD_STAT (BAD_R_STAT | ATA_DRQ) +#define DRIVE_READY (ATA_DRDY | ATA_DSC) + +#define BAD_CRC (ATA_ABORTED | ATA_ICRC) #define SATA_NR_PORTS (3) /* 16 possible ?? */ @@ -125,24 +127,41 @@ struct ide_io_ports { #define PARTN_BITS 6 /* number of minor dev bits for partitions */ #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ #define SECTOR_SIZE 512 -#define SECTOR_WORDS (SECTOR_SIZE / 4) /* number of 32bit words per sector */ + #define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t))) /* * Timeouts for various operations: */ -#define WAIT_DRQ (HZ/10) /* 100msec - spec allows up to 20ms */ -#define WAIT_READY (5*HZ) /* 5sec - some laptops are very slow */ -#define WAIT_PIDENTIFY (10*HZ) /* 10sec - should be less than 3ms (?), if all ATAPI CD is closed at boot */ -#define WAIT_WORSTCASE (30*HZ) /* 30sec - worst case when spinning up */ -#define WAIT_CMD (10*HZ) /* 10sec - maximum wait for an IRQ to happen */ -#define WAIT_MIN_SLEEP (2*HZ/100) /* 20msec - minimum sleep time */ +enum { + /* spec allows up to 20ms */ + WAIT_DRQ = HZ / 10, /* 100ms */ + /* some laptops are very slow */ + WAIT_READY = 5 * HZ, /* 5s */ + /* should be less than 3ms (?), if all ATAPI CD is closed at boot */ + WAIT_PIDENTIFY = 10 * HZ, /* 10s */ + /* worst case when spinning up */ + WAIT_WORSTCASE = 30 * HZ, /* 30s */ + /* maximum wait for an IRQ to happen */ + WAIT_CMD = 10 * HZ, /* 10s */ + /* Some drives require a longer IRQ timeout. */ + WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */ + /* + * Some drives (for example, Seagate STT3401A Travan) require a very + * long timeout, because they don't return an interrupt or clear their + * BSY bit until after the command completes (even retension commands). + */ + WAIT_TAPE_CMD = 900 * HZ, /* 900s */ + /* minimum sleep time */ + WAIT_MIN_SLEEP = HZ / 50, /* 20ms */ +}; /* * Op codes for special requests to be handled by ide_special_rq(). * Values should be in the range of 0x20 to 0x3f. 
*/ #define REQ_DRIVE_RESET 0x20 +#define REQ_DEVSET_EXEC 0x21 /* * Check for an interrupt and acknowledge the interrupt status @@ -219,18 +238,7 @@ static inline int __ide_default_irq(unsigned long base) #include <asm-generic/ide_iops.h> #endif -#ifndef MAX_HWIFS -#if defined(CONFIG_BLACKFIN) || defined(CONFIG_H8300) || defined(CONFIG_XTENSA) -# define MAX_HWIFS 1 -#else -# define MAX_HWIFS 10 -#endif -#endif - -#if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED) -#undef MAX_HWIFS -#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS -#endif +#define MAX_HWIFS 10 /* Currently only m68k, apus and m8xx need it */ #ifndef IDE_ARCH_ACK_INTR @@ -314,8 +322,8 @@ typedef enum { ide_started, /* a drive operation was started, handler was set */ } ide_startstop_t; +struct ide_devset; struct ide_driver_s; -struct ide_settings_s; #ifdef CONFIG_BLK_DEV_IDEACPI struct ide_acpi_drive_link; @@ -326,10 +334,10 @@ struct ide_acpi_hwif_link; enum { IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), IDE_AFLAG_MEDIA_CHANGED = (1 << 1), - - /* ide-cd */ /* Drive cannot lock the door. */ IDE_AFLAG_NO_DOORLOCK = (1 << 2), + + /* ide-cd */ /* Drive cannot eject the disc. */ IDE_AFLAG_NO_EJECT = (1 << 3), /* Drive is a pre ATAPI 1.2 drive. */ @@ -365,19 +373,25 @@ enum { IDE_AFLAG_CLIK_DRIVE = (1 << 19), /* Requires BH algorithm for packets */ IDE_AFLAG_ZIP_DRIVE = (1 << 20), + /* Write protect */ + IDE_AFLAG_WP = (1 << 21), + /* Supports format progress report */ + IDE_AFLAG_SRFP = (1 << 22), /* ide-tape */ - IDE_AFLAG_IGNORE_DSC = (1 << 21), + IDE_AFLAG_IGNORE_DSC = (1 << 23), /* 0 When the tape position is unknown */ - IDE_AFLAG_ADDRESS_VALID = (1 << 22), + IDE_AFLAG_ADDRESS_VALID = (1 << 24), /* Device already opened */ - IDE_AFLAG_BUSY = (1 << 23), + IDE_AFLAG_BUSY = (1 << 25), /* Attempt to auto-detect the current user block size */ - IDE_AFLAG_DETECT_BS = (1 << 24), + IDE_AFLAG_DETECT_BS = (1 << 26), /* Currently on a filemark */ - IDE_AFLAG_FILEMARK = (1 << 25), + IDE_AFLAG_FILEMARK = (1 << 27), /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = (1 << 26) + IDE_AFLAG_MEDIUM_PRESENT = (1 << 28), + + IDE_AFLAG_NO_AUTOCLOSE = (1 << 29), }; struct ide_drive_s { @@ -389,10 +403,10 @@ struct ide_drive_s { struct request *rq; /* current request */ struct ide_drive_s *next; /* circular list of hwgroup drives */ void *driver_data; /* extra driver data */ - struct hd_driveid *id; /* drive model identification info */ + u16 *id; /* identification info */ #ifdef CONFIG_IDE_PROC_FS struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ - struct ide_settings_s *settings;/* /proc/ide/ drive settings */ + const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */ #endif struct hwif_s *hwif; /* actually (ide_hwif_t *) */ @@ -404,16 +418,16 @@ struct ide_drive_s { special_t special; /* special action flags */ select_t select; /* basic drive/head select reg value */ - u8 keep_settings; /* restore settings after drive reset */ - u8 using_dma; /* disk is using dma for read/write */ u8 retry_pio; /* retrying dma capable host in pio */ u8 state; /* retry state */ u8 waiting_for_dma; /* dma currently in progress */ - u8 unmask; /* okay to unmask other irqs */ - u8 noflush; /* don't attempt flushes */ - u8 dsc_overlap; /* DSC overlap */ - u8 nice1; /* give potential excess bandwidth */ + unsigned keep_settings : 1; /* restore settings after drive reset */ + unsigned using_dma : 1; /* disk is using dma for read/write */ + unsigned unmask : 1; /* okay to unmask other irqs */ + unsigned noflush : 1; /* 
don't attempt flushes */ + unsigned dsc_overlap : 1; /* DSC overlap */ + unsigned nice1 : 1; /* give potential excess bandwidth */ unsigned present : 1; /* drive is physically present */ unsigned dead : 1; /* device ejected hint */ unsigned id_read : 1; /* 1=id read from disk 0 = synthetic */ @@ -423,23 +437,22 @@ struct ide_drive_s { unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */ unsigned no_unmask : 1; /* disallow setting unmask bit */ unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */ - unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */ unsigned doorlocking : 1; /* for removable only: door lock/unlock works */ unsigned nodma : 1; /* disallow DMA */ - unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ unsigned post_reset : 1; unsigned udma33_warned : 1; + unsigned addressing : 2; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */ + unsigned wcache : 1; /* status of write cache */ + unsigned nowerr : 1; /* used for ignoring ATA_DF */ - u8 addressing; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */ u8 quirk_list; /* considered quirky, set for a specific host */ u8 init_speed; /* transfer rate set at boot */ u8 current_speed; /* current transfer rate set */ u8 desired_speed; /* desired transfer rate set */ u8 dn; /* now wide spread use */ - u8 wcache; /* status of write cache */ u8 acoustic; /* acoustic management */ u8 media; /* disk, cdrom, tape, floppy, ... */ u8 ready_stat; /* min status value for drive ready */ @@ -447,9 +460,7 @@ struct ide_drive_s { u8 mult_req; /* requested multiple sector setting */ u8 tune_req; /* requested drive tuning setting */ u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */ - u8 bad_wstat; /* used for ignoring WRERR_STAT */ - u8 nowerr; /* used for ignoring WRERR_STAT */ - u8 sect0; /* offset of first sector for DM6:DDO */ + u8 bad_wstat; /* used for ignoring ATA_DF */ u8 head; /* "real" number of heads */ u8 sect; /* "real" sectors per track */ u8 bios_head; /* BIOS/fdisk/LILO number of heads */ @@ -483,10 +494,6 @@ typedef struct ide_drive_s ide_drive_t; #define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) -#define IDE_CHIPSET_PCI_MASK \ - ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) -#define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) - struct ide_task_s; struct ide_port_info; @@ -509,24 +516,33 @@ struct ide_tp_ops { extern const struct ide_tp_ops default_tp_ops; +/** + * struct ide_port_ops - IDE port operations + * + * @init_dev: host specific initialization of a device + * @set_pio_mode: routine to program host for PIO mode + * @set_dma_mode: routine to program host for DMA mode + * @selectproc: tweaks hardware to select drive + * @reset_poll: chipset polling based on hba specifics + * @pre_reset: chipset specific changes to default for device-hba resets + * @resetproc: routine to reset controller after a disk reset + * @maskproc: special host masking for drive selection + * @quirkproc: check host's drive quirk list + * + * @mdma_filter: filter MDMA modes + * @udma_filter: filter UDMA modes + * + * @cable_detect: detect cable type + */ struct ide_port_ops { - /* host specific initialization of a device */ void (*init_dev)(ide_drive_t *); - /* routine to program host for PIO mode */ void (*set_pio_mode)(ide_drive_t *, const u8); - /* routine to program host for 
DMA mode */ void (*set_dma_mode)(ide_drive_t *, const u8); - /* tweaks hardware to select drive */ void (*selectproc)(ide_drive_t *); - /* chipset polling based on hba specifics */ int (*reset_poll)(ide_drive_t *); - /* chipset specific changes to default for device-hba resets */ void (*pre_reset)(ide_drive_t *); - /* routine to reset controller after a disk reset */ void (*resetproc)(ide_drive_t *); - /* special host masking for drive selection */ void (*maskproc)(ide_drive_t *, int); - /* check host's drive quirk list */ void (*quirkproc)(ide_drive_t *); u8 (*mdma_filter)(ide_drive_t *); @@ -567,7 +583,6 @@ typedef struct hwif_s { u8 major; /* our major number */ u8 index; /* 0 for ide0; 1 for ide1; ... */ u8 channel; /* for dual-port chips: 0=primary, 1=secondary */ - u8 bus_state; /* power state of the IDE bus */ u32 host_flags; @@ -645,6 +660,7 @@ struct ide_host { ide_hwif_t *ports[MAX_HWIFS]; unsigned int n_ports; struct device *dev[2]; + unsigned int (*init_chipset)(struct pci_dev *); unsigned long host_flags; void *host_priv; }; @@ -692,9 +708,61 @@ typedef struct ide_driver_s ide_driver_t; extern struct mutex ide_setting_mtx; -int set_io_32bit(ide_drive_t *, int); -int set_pio_mode(ide_drive_t *, int); -int set_using_dma(ide_drive_t *, int); +/* + * configurable drive settings + */ + +#define DS_SYNC (1 << 0) + +struct ide_devset { + int (*get)(ide_drive_t *); + int (*set)(ide_drive_t *, int); + unsigned int flags; +}; + +#define __DEVSET(_flags, _get, _set) { \ + .flags = _flags, \ + .get = _get, \ + .set = _set, \ +} + +#define ide_devset_get(name, field) \ +static int get_##name(ide_drive_t *drive) \ +{ \ + return drive->field; \ +} + +#define ide_devset_set(name, field) \ +static int set_##name(ide_drive_t *drive, int arg) \ +{ \ + drive->field = arg; \ + return 0; \ +} + +#define __IDE_DEVSET(_name, _flags, _get, _set) \ +const struct ide_devset ide_devset_##_name = \ + __DEVSET(_flags, _get, _set) + +#define IDE_DEVSET(_name, _flags, _get, _set) \ +static __IDE_DEVSET(_name, _flags, _get, _set) + +#define ide_devset_rw(_name, _func) \ +IDE_DEVSET(_name, 0, get_##_func, set_##_func) + +#define ide_devset_w(_name, _func) \ +IDE_DEVSET(_name, 0, NULL, set_##_func) + +#define ide_devset_rw_sync(_name, _func) \ +IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func) + +#define ide_decl_devset(_name) \ +extern const struct ide_devset ide_devset_##_name + +ide_decl_devset(io_32bit); +ide_decl_devset(keepsettings); +ide_decl_devset(pio_mode); +ide_decl_devset(unmaskirq); +ide_decl_devset(using_dma); /* ATAPI packet command flags */ enum { @@ -710,6 +778,12 @@ enum { PC_FLAG_TIMEDOUT = (1 << 7), }; +/* + * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes. + * This is used for several packet commands (not for READ/WRITE commands). + */ +#define IDE_PC_BUFFER_SIZE 256 + struct ide_atapi_pc { /* actual packet bytes */ u8 c[12]; @@ -739,7 +813,7 @@ struct ide_atapi_pc { * those are more or less driver-specific and some of them are subject * to change/removal later. 
*/ - u8 pc_buf[256]; + u8 pc_buf[IDE_PC_BUFFER_SIZE]; /* idetape only */ struct idetape_bh *bh; @@ -757,37 +831,34 @@ struct ide_atapi_pc { #ifdef CONFIG_IDE_PROC_FS /* - * configurable drive settings + * /proc/ide interface */ -#define TYPE_INT 0 -#define TYPE_BYTE 1 -#define TYPE_SHORT 2 +#define ide_devset_rw_field(_name, _field) \ +ide_devset_get(_name, _field); \ +ide_devset_set(_name, _field); \ +IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name) + +struct ide_proc_devset { + const char *name; + const struct ide_devset *setting; + int min, max; + int (*mulf)(ide_drive_t *); + int (*divf)(ide_drive_t *); +}; -#define SETTING_READ (1 << 0) -#define SETTING_WRITE (1 << 1) -#define SETTING_RW (SETTING_READ | SETTING_WRITE) +#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \ + .name = __stringify(_name), \ + .setting = &ide_devset_##_name, \ + .min = _min, \ + .max = _max, \ + .mulf = _mulf, \ + .divf = _divf, \ +} -typedef int (ide_procset_t)(ide_drive_t *, int); -typedef struct ide_settings_s { - char *name; - int rw; - int data_type; - int min; - int max; - int mul_factor; - int div_factor; - void *data; - ide_procset_t *set; - int auto_remove; - struct ide_settings_s *next; -} ide_settings_t; - -int ide_add_setting(ide_drive_t *, const char *, int, int, int, int, int, int, void *, ide_procset_t *set); +#define IDE_PROC_DEVSET(_name, _min, _max) \ +__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL) -/* - * /proc/ide interface - */ typedef struct { const char *name; mode_t mode; @@ -804,8 +875,6 @@ void ide_proc_unregister_port(ide_hwif_t *); void ide_proc_register_driver(ide_drive_t *, ide_driver_t *); void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *); -void ide_add_generic_settings(ide_drive_t *); - read_proc_t proc_ide_read_capacity; read_proc_t proc_ide_read_geometry; @@ -833,7 +902,6 @@ static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; } static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } -static inline void ide_add_generic_settings(ide_drive_t *drive) { ; } #define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0; #endif @@ -879,7 +947,6 @@ enum { struct ide_driver_s { const char *version; u8 media; - unsigned supports_dsc_overlap : 1; ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); int (*end_request)(ide_drive_t *, int, int); ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8); @@ -889,7 +956,8 @@ struct ide_driver_s { void (*resume)(ide_drive_t *); void (*shutdown)(ide_drive_t *); #ifdef CONFIG_IDE_PROC_FS - ide_proc_entry_t *proc; + ide_proc_entry_t *proc; + const struct ide_proc_devset *settings; #endif }; @@ -898,7 +966,17 @@ struct ide_driver_s { int ide_device_get(ide_drive_t *); void ide_device_put(ide_drive_t *); -int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long); +struct ide_ioctl_devset { + unsigned int get_ioctl; + unsigned int set_ioctl; + const struct ide_devset *setting; +}; + +int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int, + unsigned long, const struct ide_ioctl_devset *); + +int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, + unsigned, unsigned long); extern int ide_vlb_clk; extern int ide_pci_clk; @@ -920,14 +998,19 @@ ide_startstop_t __ide_error(ide_drive_t *, struct 
request *, u8, u8); ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat); -extern void ide_fix_driveid(struct hd_driveid *); +void ide_fix_driveid(u16 *); extern void ide_fixstring(u8 *, const int, const int); +int ide_busy_sleep(ide_hwif_t *, unsigned long, int); + int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); extern ide_startstop_t ide_do_reset (ide_drive_t *); +extern int ide_devset_execute(ide_drive_t *drive, + const struct ide_devset *setting, int arg); + extern void ide_do_drive_cmd(ide_drive_t *, struct request *); extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); @@ -1051,6 +1134,8 @@ void ide_tf_read(ide_drive_t *, ide_task_t *); void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int); +int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int); + extern void SELECT_DRIVE(ide_drive_t *); void SELECT_MASK(ide_drive_t *, int); @@ -1061,11 +1146,36 @@ extern int drive_is_ready(ide_drive_t *); void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); +int ide_check_atapi_device(ide_drive_t *, const char *); + +void ide_init_pc(struct ide_atapi_pc *); + +/* + * Special requests for ide-tape block device strategy routine. + * + * In order to service a character device command, we add special requests to + * the tail of our block device request queue and wait for their completion. + */ +enum { + REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */ + REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */ + REQ_IDETAPE_READ = (1 << 2), + REQ_IDETAPE_WRITE = (1 << 3), +}; + +void ide_queue_pc_head(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, + struct request *); +int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *); + +int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); +int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); +int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); + ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry, void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *), void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *), - void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int, + int (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int)); ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *, ide_handler_t *, unsigned int, ide_expiry_t *); @@ -1080,8 +1190,6 @@ int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16); int ide_no_data_taskfile(ide_drive_t *, ide_task_t *); int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); -int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long); -int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long); extern int ide_driveid_update(ide_drive_t *); extern int ide_config_drive_speed(ide_drive_t *, u8); @@ -1092,7 +1200,6 @@ extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout); extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); -extern int ide_spin_wait_hwgroup(ide_drive_t *); extern void ide_timer_expiry(unsigned long); extern irqreturn_t ide_intr(int irq, void *dev_id); extern void do_ide_request(struct request_queue *); @@ -1113,7 +1220,6 @@ void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); #ifdef CONFIG_BLK_DEV_IDEDMA_PCI int ide_pci_set_master(struct 
pci_dev *, const char *); unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); -extern const struct ide_dma_ops sff_dma_ops; int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); #else @@ -1230,6 +1336,14 @@ int ide_pci_init_two(struct pci_dev *, struct pci_dev *, const struct ide_port_info *, void *); void ide_pci_remove(struct pci_dev *); +#ifdef CONFIG_PM +int ide_pci_suspend(struct pci_dev *, pm_message_t); +int ide_pci_resume(struct pci_dev *); +#else +#define ide_pci_suspend NULL +#define ide_pci_resume NULL +#endif + void ide_map_sg(ide_drive_t *, struct request *); void ide_init_sg_cmd(ide_drive_t *, struct request *); @@ -1241,7 +1355,7 @@ struct drive_list_entry { const char *id_firmware; }; -int ide_in_drive_list(struct hd_driveid *, const struct drive_list_entry *); +int ide_in_drive_list(u16 *, const struct drive_list_entry *); #ifdef CONFIG_BLK_DEV_IDEDMA int __ide_dma_bad_drive(ide_drive_t *); @@ -1277,6 +1391,7 @@ extern int __ide_dma_end(ide_drive_t *); int ide_dma_test_irq(ide_drive_t *); extern void ide_dma_lost_irq(ide_drive_t *); extern void ide_dma_timeout(ide_drive_t *); +extern const struct ide_dma_ops sff_dma_ops; #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ #else @@ -1347,24 +1462,6 @@ const char *ide_xfer_verbose(u8 mode); extern void ide_toggle_bounce(ide_drive_t *drive, int on); extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate); -static inline int ide_dev_has_iordy(struct hd_driveid *id) -{ - return ((id->field_valid & 2) && (id->capability & 8)) ? 1 : 0; -} - -static inline int ide_dev_is_sata(struct hd_driveid *id) -{ - /* - * See if word 93 is 0 AND drive is at least ATA-5 compatible - * verifying that word 80 by casting it to a signed type -- - * this trick allows us to filter out the reserved values of - * 0x0000 and 0xffff along with the earlier ATA revisions... - */ - if (id->hw_config == 0 && (short)id->major_rev_num >= 0x0020) - return 1; - return 0; -} - u64 ide_get_lba_addr(struct ide_taskfile *, int); u8 ide_dump_status(ide_drive_t *, const char *, u8); @@ -1436,13 +1533,6 @@ extern struct mutex ide_cfg_mtx; extern struct bus_type ide_bus_type; extern struct class *ide_port_class; -/* check if CACHE FLUSH (EXT) command is supported (bits defined in ATA-6) */ -#define ide_id_has_flush_cache(id) ((id)->cfs_enable_2 & 0x3000) - -/* some Maxtor disks have bit 13 defined incorrectly so check bit 10 too */ -#define ide_id_has_flush_cache_ext(id) \ - (((id)->cfs_enable_2 & 0x2400) == 0x2400) - static inline void ide_dump_identify(u8 *id) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0); @@ -1450,14 +1540,13 @@ static inline void ide_dump_identify(u8 *id) static inline int hwif_to_node(ide_hwif_t *hwif) { - struct pci_dev *dev = to_pci_dev(hwif->dev); - return hwif->dev ? pcibus_to_node(dev->bus) : -1; + return hwif->dev ? dev_to_node(hwif->dev) : -1; } -static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) +static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive) { - ide_hwif_t *hwif = HWIF(drive); + ide_drive_t *peer = &drive->hwif->drives[(drive->dn ^ 1) & 1]; - return &hwif->drives[(drive->dn ^ 1) & 1]; + return peer->present ? 
peer : NULL; } #endif /* _IDE_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a1630ba0b87..14126bc3664 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -471,6 +471,11 @@ struct ieee80211s_hdr { u8 eaddr3[6]; } __attribute__ ((packed)); +/* Mesh flags */ +#define MESH_FLAGS_AE_A4 0x1 +#define MESH_FLAGS_AE_A5_A6 0x2 +#define MESH_FLAGS_PS_DEEP 0x4 + /** * struct ieee80211_quiet_ie * @@ -506,6 +511,19 @@ struct ieee80211_channel_sw_ie { u8 count; } __attribute__ ((packed)); +/** + * struct ieee80211_tim + * + * This structure refers to "Traffic Indication Map information element" + */ +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + /* variable size: 1 - 251 bytes */ + u8 virtual_map[0]; +} __attribute__ ((packed)); + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -630,6 +648,9 @@ struct ieee80211_mgmt { } u; } __attribute__ ((packed)); +/* mgmt header + 1 byte category code */ +#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) + /* Control frames */ struct ieee80211_rts { @@ -695,12 +716,13 @@ struct ieee80211_ht_addt_info { /* 802.11n HT capabilities masks */ #define IEEE80211_HT_CAP_SUP_WIDTH 0x0002 -#define IEEE80211_HT_CAP_MIMO_PS 0x000C +#define IEEE80211_HT_CAP_SM_PS 0x000C #define IEEE80211_HT_CAP_GRN_FLD 0x0010 #define IEEE80211_HT_CAP_SGI_20 0x0020 #define IEEE80211_HT_CAP_SGI_40 0x0040 #define IEEE80211_HT_CAP_DELAY_BA 0x0400 #define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 /* 802.11n HT capability AMPDU settings */ #define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 #define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C @@ -723,11 +745,26 @@ struct ieee80211_ht_addt_info { #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 -/* MIMO Power Save Modes */ -#define WLAN_HT_CAP_MIMO_PS_STATIC 0 -#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 -#define WLAN_HT_CAP_MIMO_PS_INVALID 2 -#define WLAN_HT_CAP_MIMO_PS_DISABLED 3 +/* block-ack parameters */ +#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 +#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 +#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 + +/* + * A-PMDU buffer sizes + * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) + */ +#define IEEE80211_MIN_AMPDU_BUF 0x8 +#define IEEE80211_MAX_AMPDU_BUF 0x40 + + +/* Spatial Multiplexing Power Save Modes */ +#define WLAN_HT_CAP_SM_PS_STATIC 0 +#define WLAN_HT_CAP_SM_PS_DYNAMIC 1 +#define WLAN_HT_CAP_SM_PS_INVALID 2 +#define WLAN_HT_CAP_SM_PS_DISABLED 3 /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 diff --git a/include/linux/if.h b/include/linux/if.h index 5c9d1fa93fe..65246846c84 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -24,6 +24,7 @@ #include <linux/compiler.h> /* for "__user" et al */ #define IFNAMSIZ 16 +#define IFALIASZ 256 #include <linux/hdlc/ioctl.h> /* Standard interface flags (netdevice->flags). 
*/ diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index e157c1399b6..bf1a53b2682 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -56,6 +56,7 @@ #define ETH_P_DIAG 0x6005 /* DEC Diagnostics */ #define ETH_P_CUST 0x6006 /* DEC Customer use */ #define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */ +#define ETH_P_TEB 0x6558 /* Trans Ether Bridging */ #define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */ #define ETH_P_ATALK 0x809B /* Appletalk DDP */ #define ETH_P_AARP 0x80F3 /* Appletalk AARP */ @@ -74,8 +75,10 @@ #define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport * over Ethernet */ +#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ /* * Non DIX types. Won't clash for 1500 types. @@ -99,6 +102,9 @@ #define ETH_P_ECONET 0x0018 /* Acorn Econet */ #define ETH_P_HDLC 0x0019 /* HDLC frames */ #define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */ +#define ETH_P_DSA 0x001B /* Distributed Switch Arch. */ +#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */ +#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ /* * This is an Ethernet frame header. diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 84c3492ae5c..f9032c88716 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -79,6 +79,7 @@ enum IFLA_LINKINFO, #define IFLA_LINKINFO IFLA_LINKINFO IFLA_NET_NS_PID, + IFLA_IFALIAS, __IFLA_MAX }; diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h new file mode 100644 index 00000000000..d70034bcec0 --- /dev/null +++ b/include/linux/if_phonet.h @@ -0,0 +1,19 @@ +/* + * File: if_phonet.h + * + * Phonet interface kernel definitions + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. 
+ */ +#ifndef LINUX_IF_PHONET_H +#define LINUX_IF_PHONET_H + +#define PHONET_MIN_MTU 6 /* pn_length = 0 */ +#define PHONET_MAX_MTU 65541 /* pn_length = 0xffff */ +#define PHONET_DEV_MTU PHONET_MAX_MTU + +#ifdef __KERNEL__ +extern struct header_ops phonet_header_ops; +#endif + +#endif diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 4c6307ad9fd..8529f57ba26 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -45,6 +45,7 @@ #define TUNGETFEATURES _IOR('T', 207, unsigned int) #define TUNSETOFFLOAD _IOW('T', 208, unsigned int) #define TUNSETTXFILTER _IOW('T', 209, unsigned int) +#define TUNGETIFF _IOR('T', 210, unsigned int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index d4efe401470..aeab2cb32a9 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -2,6 +2,7 @@ #define _IF_TUNNEL_H_ #include <linux/types.h> +#include <linux/ip.h> #define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0) #define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1) @@ -47,4 +48,22 @@ struct ip_tunnel_prl { /* PRL flags */ #define PRL_DEFAULT 0x0001 +enum +{ + IFLA_GRE_UNSPEC, + IFLA_GRE_LINK, + IFLA_GRE_IFLAGS, + IFLA_GRE_OFLAGS, + IFLA_GRE_IKEY, + IFLA_GRE_OKEY, + IFLA_GRE_LOCAL, + IFLA_GRE_REMOTE, + IFLA_GRE_TTL, + IFLA_GRE_TOS, + IFLA_GRE_PMTUDISC, + __IFLA_GRE_MAX, +}; + +#define IFLA_GRE_MAX (__IFLA_GRE_MAX - 1) + #endif /* _IF_TUNNEL_H_ */ diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 2baace2788a..31d8629e75a 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -18,7 +18,7 @@ struct ihex_binrec { __be32 addr; __be16 len; uint8_t data[0]; -} __attribute__((aligned(4))); +} __attribute__((packed)); /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * diff --git a/include/linux/in.h b/include/linux/in.h index 4065313cd7e..db458beef19 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -75,6 +75,7 @@ struct in_addr { #define IP_IPSEC_POLICY 16 #define IP_XFRM_POLICY 17 #define IP_PASSSEC 18 +#define IP_TRANSPARENT 19 /* BSD compatibility */ #define IP_RECVRETOPTS IP_RETOPTS diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index c6f51ad52d5..06fcdb45106 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -25,6 +25,7 @@ struct in_device struct in_ifaddr *ifa_list; /* IP ifaddr chain */ rwlock_t mc_list_lock; struct ip_mc_list *mc_list; /* IP multicast filter chain */ + int mc_count; /* Number of installed mcasts */ spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; unsigned long mr_v1_seen; diff --git a/include/linux/init.h b/include/linux/init.h index 42ae95411a9..93538b696e3 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; /* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; extern char *saved_command_line; extern unsigned int reset_devices; @@ -170,6 +171,13 @@ extern void (*late_time_init)(void); __attribute__((__section__(".initcall" level ".init"))) = fn /* + * Early initcalls run before initializing SMP. + * + * Only for built-in code, not modules. 
+ */ +#define early_initcall(fn) __define_initcall("early",fn,early) + +/* * A "pure" initcall has no dependencies on anything else, and purely * initializes variables that couldn't be statically initialized. * diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 62aa4f895ab..58ff4e74b2f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -223,35 +223,6 @@ static inline int disable_irq_wake(unsigned int irq) #define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif -/* - * Temporary defines for UP kernels, until all code gets fixed. - */ -#ifndef CONFIG_SMP -static inline void __deprecated cli(void) -{ - local_irq_disable(); -} -static inline void __deprecated sti(void) -{ - local_irq_enable(); -} -static inline void __deprecated save_flags(unsigned long *x) -{ - local_save_flags(*x); -} -#define save_flags(x) save_flags(&x) -static inline void __deprecated restore_flags(unsigned long x) -{ - local_irq_restore(x); -} - -static inline void __deprecated save_and_cli(unsigned long *x) -{ - local_irq_save(*x); -} -#define save_and_cli(x) save_and_cli(&x) -#endif /* CONFIG_SMP */ - /* Some architectures might implement lazy enabling/disabling of * interrupts. In some cases, such as stop_machine, we might want * to ensure that after a local_irq_disable(), interrupts have diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index c975caf7538..a6d0586e2bf 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h @@ -1,6 +1,20 @@ +#ifndef _LINUX_IOMMU_HELPER_H +#define _LINUX_IOMMU_HELPER_H + +static inline unsigned long iommu_device_max_index(unsigned long size, + unsigned long offset, + u64 dma_mask) +{ + if (size + offset > dma_mask) + return dma_mask - offset + 1; + else + return size; +} + extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, unsigned long shift, unsigned long boundary_size); +extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len); extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, unsigned long shift, @@ -8,3 +22,5 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, unsigned long align_mask); extern void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr); + +#endif diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 2cd07cc2968..ee9bcc6f32b 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -108,7 +108,11 @@ extern struct resource iomem_resource; extern int request_resource(struct resource *root, struct resource *new); extern int release_resource(struct resource *new); +extern void reserve_region_with_split(struct resource *root, + resource_size_t start, resource_size_t end, + const char *name); extern int insert_resource(struct resource *parent, struct resource *new); +extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); extern int allocate_resource(struct resource *root, struct resource *new, resource_size_t size, resource_size_t min, resource_size_t max, resource_size_t align, @@ -118,6 +122,10 @@ extern int allocate_resource(struct resource *root, struct resource *new, int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); +static inline resource_size_t resource_size(struct resource *res) +{ + return res->end - res->start + 1; +} /* Convenience shorthand with allocation */ 
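The new resource_size() helper folds the recurring end - start + 1 computation into one place. A minimal sketch of the usual caller pattern, assuming a hypothetical "foo" driver name and foo_map_regs() helper:

#include <linux/ioport.h>
#include <linux/io.h>

static void __iomem *foo_map_regs(struct resource *res)
{
	/* resource_size() == res->end - res->start + 1 */
	if (!request_mem_region(res->start, resource_size(res), "foo"))
		return NULL;
	return ioremap(res->start, resource_size(res));
}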
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) @@ -154,9 +162,9 @@ extern struct resource * __devm_request_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n, const char *name); -#define devm_release_region(start,n) \ +#define devm_release_region(dev, start, n) \ __devm_release_region(dev, &ioport_resource, (start), (n)) -#define devm_release_mem_region(start,n) \ +#define devm_release_mem_region(dev, start, n) \ __devm_release_region(dev, &iomem_resource, (start), (n)) extern void __devm_release_region(struct device *dev, struct resource *parent, diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h new file mode 100644 index 00000000000..0f434a28fb5 --- /dev/null +++ b/include/linux/ip_vs.h @@ -0,0 +1,405 @@ +/* + * IP Virtual Server + * data structure and functionality definitions + */ + +#ifndef _IP_VS_H +#define _IP_VS_H + +#include <linux/types.h> /* For __beXX types in userland */ + +#define IP_VS_VERSION_CODE 0x010201 +#define NVERSION(version) \ + (version >> 16) & 0xFF, \ + (version >> 8) & 0xFF, \ + version & 0xFF + +/* + * Virtual Service Flags + */ +#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ +#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ + +/* + * Destination Server Flags + */ +#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ +#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ + +/* + * IPVS sync daemon states + */ +#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ +#define IP_VS_STATE_MASTER 0x0001 /* started as master */ +#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ + +/* + * IPVS socket options + */ +#define IP_VS_BASE_CTL (64+1024+64) /* base */ + +#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ +#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) +#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) +#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) +#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) +#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) +#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) +#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) +#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) +#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) +#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) +#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) +#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) +#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) +#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) +#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) +#define IP_VS_SO_SET_MAX IP_VS_SO_SET_ZERO + +#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL +#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) +#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) +#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) +#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) +#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ +#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) +#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) +#define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON + + +/* + * IPVS Connection Flags + */ +#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ +#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ +#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ +#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ +#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ +#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ +#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ +#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ +#define 
IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ +#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ +#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ +#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ +#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ +#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ +#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ + +#define IP_VS_SCHEDNAME_MAXLEN 16 +#define IP_VS_IFNAME_MAXLEN 16 + + +/* + * The struct ip_vs_service_user and struct ip_vs_dest_user are + * used to set IPVS rules through setsockopt. + */ +struct ip_vs_service_user { + /* virtual service addresses */ + u_int16_t protocol; + __be32 addr; /* virtual ip address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* virtual service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout in sec */ + __be32 netmask; /* persistent netmask */ +}; + + +struct ip_vs_dest_user { + /* destination server address */ + __be32 addr; + __be16 port; + + /* real server options */ + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + /* thresholds for active connections */ + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ +}; + + +/* + * IPVS statistics object (for user space) + */ +struct ip_vs_stats_user +{ + __u32 conns; /* connections scheduled */ + __u32 inpkts; /* incoming packets */ + __u32 outpkts; /* outgoing packets */ + __u64 inbytes; /* incoming bytes */ + __u64 outbytes; /* outgoing bytes */ + + __u32 cps; /* current connection rate */ + __u32 inpps; /* current in packet rate */ + __u32 outpps; /* current out packet rate */ + __u32 inbps; /* current in byte rate */ + __u32 outbps; /* current out byte rate */ +}; + + +/* The argument to IP_VS_SO_GET_INFO */ +struct ip_vs_getinfo { + /* version number */ + unsigned int version; + + /* size of connection hash table */ + unsigned int size; + + /* number of virtual services */ + unsigned int num_services; +}; + + +/* The argument to IP_VS_SO_GET_SERVICE */ +struct ip_vs_service_entry { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout */ + __be32 netmask; /* persistent netmask */ + + /* number of real servers */ + unsigned int num_dests; + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +struct ip_vs_dest_entry { + __be32 addr; /* destination address */ + __be16 port; + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ + + u_int32_t activeconns; /* active connections */ + u_int32_t inactconns; /* inactive connections */ + u_int32_t persistconns; /* persistent connections */ + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +/* The argument to IP_VS_SO_GET_DESTS */ +struct ip_vs_get_dests { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* number of real servers */ + unsigned int num_dests; + + /* the real servers */ + struct 
ip_vs_dest_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_SERVICES */ +struct ip_vs_get_services { + /* number of virtual services */ + unsigned int num_services; + + /* service table */ + struct ip_vs_service_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_TIMEOUT */ +struct ip_vs_timeout_user { + int tcp_timeout; + int tcp_fin_timeout; + int udp_timeout; +}; + + +/* The argument to IP_VS_SO_GET_DAEMON */ +struct ip_vs_daemon_user { + /* sync daemon state (master/backup) */ + int state; + + /* multicast interface name */ + char mcast_ifn[IP_VS_IFNAME_MAXLEN]; + + /* SyncID we belong to */ + int syncid; +}; + +/* + * + * IPVS Generic Netlink interface definitions + * + */ + +/* Generic Netlink family info */ + +#define IPVS_GENL_NAME "IPVS" +#define IPVS_GENL_VERSION 0x1 + +struct ip_vs_flags { + __be32 flags; + __be32 mask; +}; + +/* Generic Netlink command attributes */ +enum { + IPVS_CMD_UNSPEC = 0, + + IPVS_CMD_NEW_SERVICE, /* add service */ + IPVS_CMD_SET_SERVICE, /* modify service */ + IPVS_CMD_DEL_SERVICE, /* delete service */ + IPVS_CMD_GET_SERVICE, /* get service info */ + + IPVS_CMD_NEW_DEST, /* add destination */ + IPVS_CMD_SET_DEST, /* modify destination */ + IPVS_CMD_DEL_DEST, /* delete destination */ + IPVS_CMD_GET_DEST, /* get destination info */ + + IPVS_CMD_NEW_DAEMON, /* start sync daemon */ + IPVS_CMD_DEL_DAEMON, /* stop sync daemon */ + IPVS_CMD_GET_DAEMON, /* get sync daemon status */ + + IPVS_CMD_SET_CONFIG, /* set config settings */ + IPVS_CMD_GET_CONFIG, /* get config settings */ + + IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */ + IPVS_CMD_GET_INFO, /* get general IPVS info */ + + IPVS_CMD_ZERO, /* zero all counters and stats */ + IPVS_CMD_FLUSH, /* flush services and dests */ + + __IPVS_CMD_MAX, +}; + +#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1) + +/* Attributes used in the first level of commands */ +enum { + IPVS_CMD_ATTR_UNSPEC = 0, + IPVS_CMD_ATTR_SERVICE, /* nested service attribute */ + IPVS_CMD_ATTR_DEST, /* nested destination attribute */ + IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */ + IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */ + IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */ + IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */ + __IPVS_CMD_ATTR_MAX, +}; + +#define IPVS_CMD_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) + +/* + * Attributes used to describe a service + * + * Used inside nested attribute IPVS_CMD_ATTR_SERVICE + */ +enum { + IPVS_SVC_ATTR_UNSPEC = 0, + IPVS_SVC_ATTR_AF, /* address family */ + IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */ + IPVS_SVC_ATTR_ADDR, /* virtual service address */ + IPVS_SVC_ATTR_PORT, /* virtual service port */ + IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */ + + IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */ + IPVS_SVC_ATTR_FLAGS, /* virtual service flags */ + IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */ + IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ + + IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ + __IPVS_SVC_ATTR_MAX, +}; + +#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1) + +/* + * Attributes used to describe a destination (real server) + * + * Used inside nested attribute IPVS_CMD_ATTR_DEST + */ +enum { + IPVS_DEST_ATTR_UNSPEC = 0, + IPVS_DEST_ATTR_ADDR, /* real server address */ + IPVS_DEST_ATTR_PORT, /* real server port */ + + IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */ + IPVS_DEST_ATTR_WEIGHT, /* destination weight */ + + IPVS_DEST_ATTR_U_THRESH, /* upper threshold */ + IPVS_DEST_ATTR_L_THRESH, /* lower 
threshold */ + + IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */ + IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */ + IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */ + + IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */ + __IPVS_DEST_ATTR_MAX, +}; + +#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1) + +/* + * Attributes describing a sync daemon + * + * Used inside nested attribute IPVS_CMD_ATTR_DAEMON + */ +enum { + IPVS_DAEMON_ATTR_UNSPEC = 0, + IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */ + IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */ + IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */ + __IPVS_DAEMON_ATTR_MAX, +}; + +#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1) + +/* + * Attributes used to describe service or destination entry statistics + * + * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS + */ +enum { + IPVS_STATS_ATTR_UNSPEC = 0, + IPVS_STATS_ATTR_CONNS, /* connections scheduled */ + IPVS_STATS_ATTR_INPKTS, /* incoming packets */ + IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */ + IPVS_STATS_ATTR_INBYTES, /* incoming bytes */ + IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */ + + IPVS_STATS_ATTR_CPS, /* current connection rate */ + IPVS_STATS_ATTR_INPPS, /* current in packet rate */ + IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */ + IPVS_STATS_ATTR_INBPS, /* current in byte rate */ + IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */ + __IPVS_STATS_ATTR_MAX, +}; + +#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1) + +/* Attributes used in response to IPVS_CMD_GET_INFO command */ +enum { + IPVS_INFO_ATTR_UNSPEC = 0, + IPVS_INFO_ATTR_VERSION, /* IPVS version number */ + IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */ + __IPVS_INFO_ATTR_MAX, +}; + +#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1) + +#endif /* _IP_VS_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 8ccb462ea42..8d9411bc60f 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -62,6 +62,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ +#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ #ifdef CONFIG_IRQ_PER_CPU # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h index 8687a7dc063..4c218ee7587 100644 --- a/include/linux/isdn_ppp.h +++ b/include/linux/isdn_ppp.h @@ -157,7 +157,7 @@ typedef struct { typedef struct { int mp_mrru; /* unused */ - struct sk_buff * frags; /* fragments sl list -- use skb->next */ + struct sk_buff_head frags; /* fragments sl list */ long frames; /* number of frames in the frame list */ unsigned int seq; /* last processed packet seq #: any packets * with smaller seq # will be dropped diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h index 794b8daa937..17ca64b5a66 100644 --- a/include/linux/ivtv.h +++ b/include/linux/ivtv.h @@ -21,11 +21,7 @@ #ifndef __LINUX_IVTV_H__ #define __LINUX_IVTV_H__ -#ifdef __KERNEL__ -#include <linux/compiler.h> /* need __user */ -#else -#define __user -#endif +#include <linux/compiler.h> #include <linux/types.h> /* ivtv knows several distinct output modes: MPEG streaming, diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h index e980ba62ddc..e20af47b59a 
100644 --- a/include/linux/ivtvfb.h +++ b/include/linux/ivtvfb.h @@ -21,11 +21,7 @@ #ifndef __LINUX_IVTVFB_H__ #define __LINUX_IVTVFB_H__ -#ifdef __KERNEL__ -#include <linux/compiler.h> /* need __user */ -#else -#define __user -#endif +#include <linux/compiler.h> #include <linux/types.h> /* Framebuffer external API */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 3dd20900709..d2e91ea998f 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -850,7 +850,8 @@ struct journal_s */ struct block_device *j_dev; int j_blocksize; - unsigned long long j_blk_offset; + unsigned long long j_blk_offset; + char j_devname[BDEVNAME_SIZE+24]; /* * Device which holds the client fs. For internal journal this will be @@ -966,6 +967,9 @@ struct journal_s #define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */ #define JBD2_LOADED 0x010 /* The journal superblock has been loaded */ #define JBD2_BARRIER 0x020 /* Use IDE barriers */ +#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file + * data write error in ordered + * mode */ /* * Function declarations for the journaling transaction and buffer @@ -1059,7 +1063,7 @@ extern void jbd2_journal_clear_features (journal_t *, unsigned long, unsigned long, unsigned long); extern int jbd2_journal_create (journal_t *); extern int jbd2_journal_load (journal_t *journal); -extern void jbd2_journal_destroy (journal_t *); +extern int jbd2_journal_destroy (journal_t *); extern int jbd2_journal_recover (journal_t *journal); extern int jbd2_journal_wipe (journal_t *, int); extern int jbd2_journal_skip_recovery (journal_t *); diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h index 6b563cae23d..da720bc3eb1 100644 --- a/include/linux/jffs2.h +++ b/include/linux/jffs2.h @@ -7,9 +7,6 @@ * * For licensing information, see the file 'LICENCE' in the * jffs2 directory. - * - * $Id: jffs2.h,v 1.38 2005/09/26 11:37:23 havasi Exp $ - * */ #ifndef __LINUX_JFFS2_H__ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 57aefa160a9..b9614488744 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -108,8 +108,7 @@ static inline void print_fn_descriptor_symbol(const char *fmt, void *addr) static inline void print_ip_sym(unsigned long ip) { - printk("[<%p>]", (void *) ip); - print_symbol(" %s\n", ip); + printk("[<%p>] %pS\n", (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index fdbbf72ca2e..75d81f157d2 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -75,6 +75,12 @@ extern const char linux_proc_banner[]; */ #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((u32)(n)) + #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ #define KERN_CRIT "<2>" /* critical conditions */ @@ -102,6 +108,13 @@ struct completion; struct pt_regs; struct user; +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + /** * might_sleep - annotation for functions that can sleep * @@ -112,13 +125,6 @@ struct user; * be bitten later when the calling function happens to sleep when it is not * supposed to. 
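lower_32_bits() complements the existing upper_32_bits() helper; together they split a 64-bit value for hardware that takes an address as two 32-bit register writes. A minimal sketch, with hypothetical register offsets and a hypothetical foo_write_dma_addr() helper:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/io.h>

static void foo_write_dma_addr(void __iomem *regs, dma_addr_t addr)
{
	writel(lower_32_bits(addr), regs + 0x00);  /* address, low 32 bits  */
	writel(upper_32_bits(addr), regs + 0x04);  /* address, high 32 bits */
}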
*/ -#ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() -#else -# define might_resched() do { } while (0) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); # define might_sleep() \ @@ -176,7 +182,7 @@ extern int vsscanf(const char *, const char *, va_list) extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); -extern unsigned long long memparse(char *ptr, char **retptr); +extern unsigned long long memparse(const char *ptr, char **retptr); extern int core_kernel_text(unsigned long addr); extern int __kernel_text_address(unsigned long addr); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 3265968cd2c..17f76fc0517 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -25,8 +25,8 @@ #error KEXEC_CONTROL_MEMORY_LIMIT not defined #endif -#ifndef KEXEC_CONTROL_CODE_SIZE -#error KEXEC_CONTROL_CODE_SIZE not defined +#ifndef KEXEC_CONTROL_PAGE_SIZE +#error KEXEC_CONTROL_PAGE_SIZE not defined #endif #ifndef KEXEC_ARCH @@ -83,6 +83,7 @@ struct kimage { unsigned long start; struct page *control_code_page; + struct page *swap_page; unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; @@ -98,18 +99,20 @@ struct kimage { unsigned int type : 1; #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 + unsigned int preserve_context : 1; }; /* kexec interface functions */ -extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET; +extern void machine_kexec(struct kimage *image); extern int machine_kexec_prepare(struct kimage *image); extern void machine_kexec_cleanup(struct kimage *image); extern asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments, unsigned long flags); +extern int kernel_kexec(void); #ifdef CONFIG_COMPAT extern asmlinkage long compat_sys_kexec_load(unsigned long entry, unsigned long nr_segments, @@ -127,8 +130,8 @@ void vmcoreinfo_append_str(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); unsigned long paddr_vmcoreinfo_note(void); -#define VMCOREINFO_OSRELEASE(name) \ - vmcoreinfo_append_str("OSRELEASE=%s\n", #name) +#define VMCOREINFO_OSRELEASE(value) \ + vmcoreinfo_append_str("OSRELEASE=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image; #define kexec_flush_icache_page(page) #endif -#define KEXEC_ON_CRASH 0x00000001 -#define KEXEC_ARCH_MASK 0xffff0000 +#define KEXEC_ON_CRASH 0x00000001 +#define KEXEC_PRESERVE_CONTEXT 0x00000002 +#define KEXEC_ARCH_MASK 0xffff0000 /* These values match the ELF architecture values. * Unless there is a good reason that should continue to be the case. 
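KEXEC_PRESERVE_CONTEXT joins KEXEC_ON_CRASH as a user-visible load flag, and the hunk that follows widens KEXEC_FLAGS (the list of legal flags) accordingly when CONFIG_KEXEC_JUMP is set. A hedged sketch of the kind of validation the load path performs against that mask; check_kexec_flags() is a hypothetical helper, not part of this patch:

#include <linux/kexec.h>
#include <linux/errno.h>

static int check_kexec_flags(unsigned long flags)
{
	/* every set bit outside the arch field must be a defined/legal flag */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;
	return 0;
}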
@@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image; #define KEXEC_ARCH_MIPS_LE (10 << 16) #define KEXEC_ARCH_MIPS ( 8 << 16) -#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */ +/* List of defined/legal kexec flags */ +#ifndef CONFIG_KEXEC_JUMP +#define KEXEC_FLAGS KEXEC_ON_CRASH +#else +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) +#endif #define VMCOREINFO_BYTES (4096) #define VMCOREINFO_NOTE_NAME "VMCOREINFO" diff --git a/include/linux/key.h b/include/linux/key.h index c45c962d1cc..1b70e35a71e 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -299,6 +299,7 @@ extern void key_init(void); #define key_validate(k) 0 #define key_serial(k) 0 #define key_get(k) ({ NULL; }) +#define key_revoke(k) do { } while(0) #define key_put(k) do { } while(0) #define key_ref_put(k) do { } while(0) #define make_key_ref(k, p) ({ NULL; }) diff --git a/include/linux/klist.h b/include/linux/klist.h index 06c338ef7f1..8ea98db223e 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -38,7 +38,7 @@ extern void klist_init(struct klist *k, void (*get)(struct klist_node *), void (*put)(struct klist_node *)); struct klist_node { - struct klist *n_klist; + void *n_klist; /* never access directly */ struct list_head n_node; struct kref n_ref; struct completion n_removed; @@ -57,7 +57,6 @@ extern int klist_node_attached(struct klist_node *n); struct klist_iter { struct klist *i_klist; - struct list_head *i_head; struct klist_node *i_cur; }; diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 0ea064cbfbc..70a30651cd1 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -320,12 +320,12 @@ struct kvm_trace_rec { struct { __u64 cycle_u64; __u32 extra_u32[KVM_TRC_EXTRA_MAX]; - } cycle; + } __attribute__((packed)) cycle; struct { __u32 extra_u32[KVM_TRC_EXTRA_MAX]; } nocycle; } u; -} __attribute__((packed)); +}; #define KVMIO 0xAE @@ -371,6 +371,7 @@ struct kvm_trace_rec { #define KVM_CAP_PV_MMU 13 #define KVM_CAP_MP_STATE 14 #define KVM_CAP_COALESCED_MMIO 15 +#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ /* * ioctls for VM fds diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 07d68a8ae8e..8525afc5310 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -121,6 +121,12 @@ struct kvm { struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; #endif + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; + long mmu_notifier_count; +#endif }; /* The guest did something we don't support. */ @@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) #define kvm_trace_cleanup() ((void)0) #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) +{ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return 1; + /* + * Both reads happen under the mmu_lock and both values are + * modified under mmu_lock, so there's no need of smb_rmb() + * here in between, otherwise mmu_notifier_count should be + * read before mmu_notifier_seq, see + * mmu_notifier_invalidate_range_end write side. 
+ */ + if (vcpu->kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} +#endif + #endif diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 173febac665..c67fecafff9 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -11,6 +11,7 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/notifier.h> +#include <linux/fb.h> /* Notes on locking: * @@ -45,6 +46,8 @@ struct lcd_ops { int (*get_contrast)(struct lcd_device *); /* Set LCD panel contrast */ int (*set_contrast)(struct lcd_device *, int contrast); + /* Set LCD panel mode (resolutions ...) */ + int (*set_mode)(struct lcd_device *, struct fb_videomode *); /* Check if given framebuffer device is the one LCD is bound to; return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */ int (*check_fb)(struct lcd_device *, struct fb_info *); diff --git a/include/linux/libata.h b/include/linux/libata.h index 5b247b8a6b3..947cf84e555 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -60,9 +60,9 @@ /* note: prints function name for you */ #ifdef ATA_DEBUG -#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #ifdef ATA_VERBOSE_DEBUG -#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #else #define VPRINTK(fmt, args...) #endif /* ATA_VERBOSE_DEBUG */ @@ -71,7 +71,7 @@ #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ -#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) /* NEW: debug levels */ #define HAVE_LIBATA_MSG 1 @@ -146,6 +146,7 @@ enum { ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ + ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ ATA_DFLAG_INIT_MASK = (1 << 24) - 1, ATA_DFLAG_DETACH = (1 << 24), @@ -163,6 +164,7 @@ enum { ATA_DEV_NONE = 9, /* no device */ /* struct ata_link flags */ + ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ @@ -243,6 +245,7 @@ enum { ATA_TMOUT_BOOT = 30000, /* heuristic */ ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ ATA_TMOUT_INTERNAL_QUICK = 5000, + ATA_TMOUT_MAX_PARK = 30000, /* FIXME: GoVault needs 2s but we can't afford that without * parallel probing. 
800ms is enough for iVDR disk @@ -318,8 +321,11 @@ enum { ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, ATA_EH_ENABLE_LINK = (1 << 3), ATA_EH_LPM = (1 << 4), /* link power management action */ + ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ - ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, + ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, + ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | + ATA_EH_ENABLE_LINK | ATA_EH_LPM, /* ata_eh_info->flags */ ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ @@ -451,6 +457,7 @@ enum link_pm { MEDIUM_POWER, }; extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_unload_heads; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; extern struct device_attribute dev_attr_sw_activity; @@ -553,8 +560,8 @@ struct ata_ering { struct ata_device { struct ata_link *link; unsigned int devno; /* 0 or 1 */ - unsigned long flags; /* ATA_DFLAG_xxx */ unsigned int horkage; /* List of broken features */ + unsigned long flags; /* ATA_DFLAG_xxx */ struct scsi_device *sdev; /* attached SCSI device */ #ifdef CONFIG_ATA_ACPI acpi_handle acpi_handle; @@ -563,6 +570,7 @@ struct ata_device { /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */ u64 n_sectors; /* size of device, if ATA */ unsigned int class; /* ATA_DEV_xxx */ + unsigned long unpark_deadline; u8 pio_mode; u8 dma_mode; @@ -620,6 +628,7 @@ struct ata_eh_context { [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; unsigned int classes[ATA_MAX_DEVICES]; unsigned int did_probe_mask; + unsigned int unloaded_mask; unsigned int saved_ncq_enabled; u8 saved_xfer_mode[ATA_MAX_DEVICES]; /* timestamp for the last reset attempt or success */ @@ -646,6 +655,7 @@ struct ata_link { unsigned int flags; /* ATA_LFLAG_xxx */ + u32 saved_scontrol; /* SControl on probe */ unsigned int hw_sata_spd_limit; unsigned int sata_spd_limit; unsigned int sata_spd; /* current SATA PHY speed */ @@ -686,7 +696,8 @@ struct ata_port { unsigned int qc_active; int nr_active_links; /* #links with active qcs */ - struct ata_link link; /* host default link */ + struct ata_link link; /* host default link */ + struct ata_link *slave_link; /* see ata_slave_link_init() */ int nr_pmp_links; /* nr of available PMP links */ struct ata_link *pmp_link; /* array of PMP links */ @@ -707,6 +718,7 @@ struct ata_port { struct list_head eh_done_q; wait_queue_head_t eh_wait_q; int eh_tries; + struct completion park_req_pending; pm_message_t pm_mesg; int *pm_result; @@ -750,6 +762,7 @@ struct ata_port_operations { void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); + unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); void (*dev_config)(struct ata_device *dev); @@ -769,8 +782,8 @@ struct ata_port_operations { /* * Optional features */ - int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val); - int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val); + int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val); + int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); void (*pmp_attach)(struct ata_port *ap); void (*pmp_detach)(struct ata_port *ap); int (*enable_pm)(struct ata_port *ap, enum link_pm policy); @@ -892,6 +905,7 @@ extern void ata_port_disable(struct ata_port *); extern struct 
ata_host *ata_host_alloc(struct device *dev, int max_ports); extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports); +extern int ata_slave_link_init(struct ata_port *ap); extern int ata_host_start(struct ata_host *host); extern int ata_host_register(struct ata_host *host, struct scsi_host_template *sht); @@ -917,8 +931,8 @@ extern int sata_scr_valid(struct ata_link *link); extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); extern int sata_scr_write(struct ata_link *link, int reg, u32 val); extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); -extern int ata_link_online(struct ata_link *link); -extern int ata_link_offline(struct ata_link *link); +extern bool ata_link_online(struct ata_link *link); +extern bool ata_link_offline(struct ata_link *link); #ifdef CONFIG_PM extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); extern void ata_host_resume(struct ata_host *host); @@ -951,6 +965,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); +extern unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, @@ -1093,6 +1109,7 @@ extern void ata_std_error_handler(struct ata_port *ap); */ extern const struct ata_port_operations ata_base_port_ops; extern const struct ata_port_operations sata_port_ops; +extern struct device_attribute *ata_common_sdev_attrs[]; #define ATA_BASE_SHT(drv_name) \ .module = THIS_MODULE, \ @@ -1107,7 +1124,8 @@ extern const struct ata_port_operations sata_port_ops; .proc_name = drv_name, \ .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ - .bios_param = ata_std_bios_param + .bios_param = ata_std_bios_param, \ + .sdev_attrs = ata_common_sdev_attrs #define ATA_NCQ_SHT(drv_name) \ ATA_BASE_SHT(drv_name), \ @@ -1129,7 +1147,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) static inline int ata_is_host_link(const struct ata_link *link) { - return link == &link->ap->link; + return link == &link->ap->link || link == link->ap->slave_link; } #else /* CONFIG_SATA_PMP */ static inline bool sata_pmp_supported(struct ata_port *ap) @@ -1162,7 +1180,7 @@ static inline int sata_srst_pmp(struct ata_link *link) printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) #define ata_link_printk(link, lv, fmt, args...) 
do { \ - if (sata_pmp_attached((link)->ap)) \ + if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \ printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ (link)->pmp , ##args); \ else \ @@ -1260,34 +1278,17 @@ static inline int ata_link_active(struct ata_link *link) return ata_tag_valid(link->active_tag) || link->sactive; } -static inline struct ata_link *ata_port_first_link(struct ata_port *ap) -{ - if (sata_pmp_attached(ap)) - return ap->pmp_link; - return &ap->link; -} - -static inline struct ata_link *ata_port_next_link(struct ata_link *link) -{ - struct ata_port *ap = link->ap; - - if (ata_is_host_link(link)) { - if (!sata_pmp_attached(ap)) - return NULL; - return ap->pmp_link; - } - - if (++link < ap->nr_pmp_links + ap->pmp_link) - return link; - return NULL; -} +extern struct ata_link *__ata_port_next_link(struct ata_port *ap, + struct ata_link *link, + bool dev_only); -#define __ata_port_for_each_link(lk, ap) \ - for ((lk) = &(ap)->link; (lk); (lk) = ata_port_next_link(lk)) +#define __ata_port_for_each_link(link, ap) \ + for ((link) = __ata_port_next_link((ap), NULL, false); (link); \ + (link) = __ata_port_next_link((ap), (link), false)) #define ata_port_for_each_link(link, ap) \ - for ((link) = ata_port_first_link(ap); (link); \ - (link) = ata_port_next_link(link)) + for ((link) = __ata_port_next_link((ap), NULL, true); (link); \ + (link) = __ata_port_next_link((ap), (link), true)) #define ata_link_for_each_dev(dev, link) \ for ((dev) = (link)->device; \ @@ -1424,6 +1425,28 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies, return from_jiffies + msecs_to_jiffies(timeout_msecs); } +/* Don't open code these in drivers as there are traps. Firstly the range may + change in future hardware and specs, secondly 0xFF means 'no DMA' but is + > UDMA_0. Dyma ddreigiau */ + +static inline int ata_using_mwdma(struct ata_device *adev) +{ + if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) + return 1; + return 0; +} + +static inline int ata_using_udma(struct ata_device *adev) +{ + if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) + return 1; + return 0; +} + +static inline int ata_dma_enabled(struct ata_device *adev) +{ + return (adev->dma_mode == 0xFF ? 0 : 1); +} /************************************************************************** * PMP - drivers/ata/libata-pmp.c diff --git a/include/linux/list.h b/include/linux/list.h index 453916bc041..969f6e92d08 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -214,22 +214,62 @@ static inline int list_is_singular(const struct list_head *head) return !list_empty(head) && (head->next == head->prev); } +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. 
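A short usage sketch for the list_cut_position() helper defined just below; the 'pending' queue and take_first_entry() are hypothetical, and list_splice_tail_init() is the matching new helper for appending a whole list back onto a queue:

#include <linux/list.h>

static LIST_HEAD(pending);	/* hypothetical producer queue */

static void take_first_entry(struct list_head *batch)
{
	INIT_LIST_HEAD(batch);
	if (!list_empty(&pending))
		/* detach everything up to and including the first entry */
		list_cut_position(batch, &pending, pending.next);
}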
+ * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + static inline void __list_splice(const struct list_head *list, - struct list_head *head) + struct list_head *prev, + struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - first->prev = head; - head->next = first; + first->prev = prev; + prev->next = first; - last->next = at; - at->prev = last; + last->next = next; + next->prev = last; } /** - * list_splice - join two lists + * list_splice - join two lists, this is designed for stacks * @list: the new list to add. * @head: the place to add it in the first list. */ @@ -237,7 +277,19 @@ static inline void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) - __list_splice(list, head); + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); } /** @@ -251,7 +303,24 @@ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { - __list_splice(list, head); + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } @@ -550,6 +619,19 @@ static inline void hlist_add_after(struct hlist_node *n, next->next->pprev = &next->next; } +/* + * Move a list from one list head to another. Fixup the pprev + * reference of the first entry if it exists. + */ +static inline void hlist_move_list(struct hlist_head *old, + struct hlist_head *new) +{ + new->first = old->first; + if (new->first) + new->first->pprev = &new->first; + old->first = NULL; +} + #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2486eb4edbf..331e5f1c2d8 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -89,6 +89,7 @@ struct lock_class { struct lockdep_subclass_key *key; unsigned int subclass; + unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: @@ -189,6 +190,14 @@ struct lock_chain { u64 chain_key; }; +#define MAX_LOCKDEP_KEYS_BITS 13 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) + struct held_lock { /* * One-way hash of the dependency chain up to this point. 
We @@ -205,14 +214,14 @@ struct held_lock { * with zero), here we store the previous hash value: */ u64 prev_chain_key; - struct lock_class *class; unsigned long acquire_ip; struct lockdep_map *instance; - + struct lockdep_map *nest_lock; #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' @@ -226,11 +235,11 @@ struct held_lock { * The following field is used to detect when we cross into an * interrupt context: */ - int irq_context; - int trylock; - int read; - int check; - int hardirqs_off; + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:2; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; }; /* @@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * 2: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip); + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, + unsigned long ip); + # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) @@ -313,8 +326,9 @@ static inline void lockdep_on(void) { } -# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) @@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else @@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) # else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) # endif # define rwlock_release(l, n, i) lock_release(l, n, i) #else @@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct 
task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) # else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define mutex_release(l, n, i) lock_release(l, n, i) #else @@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else @@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define rwsem_release(l, n, i) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) +# else +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) +# endif +# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) +#else +# define lock_map_acquire(l) do { } while (0) +# define lock_map_release(l) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h new file mode 100644 index 00000000000..6b71d2dce50 --- /dev/null +++ b/include/linux/mISDNdsp.h @@ -0,0 +1,37 @@ +#ifndef __mISDNdsp_H__ +#define __mISDNdsp_H__ + +struct mISDN_dsp_element_arg { + char *name; + char *def; + char *desc; +}; + +struct mISDN_dsp_element { + char *name; + void *(*new)(const char *arg); + void (*free)(void *p); + void (*process_tx)(void *p, unsigned char *data, int len); + void (*process_rx)(void *p, unsigned char *data, int len); + int num_args; + struct mISDN_dsp_element_arg + *args; +}; + +extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem); +extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem); + +struct dsp_features { + int hfc_id; /* unique id to identify the chip (or -1) */ + int hfc_dtmf; /* set if HFCmulti card supports dtmf */ + int hfc_loops; /* set if card supports tone loops */ + int hfc_echocanhw; /* set if card supports echocancelation*/ + int pcm_id; /* unique id to identify the pcm bus (or -1) */ + int pcm_slots; /* number of slots on the pcm bus */ + int pcm_banks; /* number of IO banks of pcm bus */ + int unclocked; /* data is not clocked (has jitter/loss) */ + int unordered; /* data is unordered (packets have index) */ +}; + +#endif + diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h new file mode 100644 index 00000000000..e794dfb8750 --- /dev/null +++ b/include/linux/mISDNhw.h @@ -0,0 +1,193 @@ +/* + * + * Author Karsten Keil <kkeil@novell.com> + * + * Basic declarations for the mISDN HW channels + * + * Copyright 2008 by Karsten Keil <kkeil@novell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the 
terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef MISDNHW_H +#define MISDNHW_H +#include <linux/mISDNif.h> +#include <linux/timer.h> + +/* + * HW DEBUG 0xHHHHGGGG + * H - hardware driver specific bits + * G - for all drivers + */ + +#define DEBUG_HW 0x00000001 +#define DEBUG_HW_OPEN 0x00000002 +#define DEBUG_HW_DCHANNEL 0x00000100 +#define DEBUG_HW_DFIFO 0x00000200 +#define DEBUG_HW_BCHANNEL 0x00001000 +#define DEBUG_HW_BFIFO 0x00002000 + +#define MAX_DFRAME_LEN_L1 300 +#define MAX_MON_FRAME 32 +#define MAX_LOG_SPACE 2048 +#define MISDN_COPY_SIZE 32 + +/* channel->Flags bit field */ +#define FLG_TX_BUSY 0 /* tx_buf in use */ +#define FLG_TX_NEXT 1 /* next_skb in use */ +#define FLG_L1_BUSY 2 /* L1 is permanent busy */ +#define FLG_L2_ACTIVATED 3 /* activated from L2 */ +#define FLG_OPEN 5 /* channel is in use */ +#define FLG_ACTIVE 6 /* channel is activated */ +#define FLG_BUSY_TIMER 7 +/* channel type */ +#define FLG_DCHANNEL 8 /* channel is D-channel */ +#define FLG_BCHANNEL 9 /* channel is B-channel */ +#define FLG_ECHANNEL 10 /* channel is E-channel */ +#define FLG_TRANSPARENT 12 /* channel use transparent data */ +#define FLG_HDLC 13 /* channel use hdlc data */ +#define FLG_L2DATA 14 /* channel use L2 DATA primitivs */ +#define FLG_ORIGIN 15 /* channel is on origin site */ +/* channel specific stuff */ +/* arcofi specific */ +#define FLG_ARCOFI_TIMER 16 +#define FLG_ARCOFI_ERROR 17 +/* isar specific */ +#define FLG_INITIALIZED 16 +#define FLG_DLEETX 17 +#define FLG_LASTDLE 18 +#define FLG_FIRST 19 +#define FLG_LASTDATA 20 +#define FLG_NMD_DATA 21 +#define FLG_FTI_RUN 22 +#define FLG_LL_OK 23 +#define FLG_LL_CONN 24 +#define FLG_DTMFSEND 25 + +/* workq events */ +#define FLG_RECVQUEUE 30 +#define FLG_PHCHANGE 31 + +#define schedule_event(s, ev) do { \ + test_and_set_bit(ev, &((s)->Flags)); \ + schedule_work(&((s)->workq)); \ + } while (0) + +struct dchannel { + struct mISDNdevice dev; + u_long Flags; + struct work_struct workq; + void (*phfunc) (struct dchannel *); + u_int state; + void *l1; + /* HW access */ + u_char (*read_reg) (void *, u_char); + void (*write_reg) (void *, u_char, u_char); + void (*read_fifo) (void *, u_char *, int); + void (*write_fifo) (void *, u_char *, int); + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + struct sk_buff *rx_skb; + int maxlen; + /* send data */ + struct sk_buff_head squeue; + struct sk_buff_head rqueue; + struct sk_buff *tx_skb; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; +}; + +typedef int (dchannel_l1callback)(struct dchannel *, u_int); +extern int create_l1(struct dchannel *, dchannel_l1callback *); + +/* private L1 commands */ +#define INFO0 0x8002 +#define INFO1 0x8102 +#define INFO2 0x8202 +#define INFO3_P8 0x8302 +#define INFO3_P10 0x8402 +#define INFO4_P8 0x8502 +#define INFO4_P10 0x8602 +#define LOSTFRAMING 0x8702 +#define ANYSIGNAL 0x8802 +#define HW_POWERDOWN 0x8902 +#define HW_RESET_REQ 0x8a02 +#define HW_POWERUP_REQ 0x8b02 +#define HW_DEACT_REQ 0x8c02 +#define HW_ACTIVATE_REQ 0x8e02 +#define HW_D_NOBLOCKED 0x8f02 +#define HW_RESET_IND 0x9002 +#define HW_POWERUP_IND 0x9102 +#define HW_DEACT_IND 0x9202 +#define 
HW_ACTIVATE_IND 0x9302 +#define HW_DEACT_CNF 0x9402 +#define HW_TESTLOOP 0x9502 +#define HW_TESTRX_RAW 0x9602 +#define HW_TESTRX_HDLC 0x9702 +#define HW_TESTRX_OFF 0x9802 + +struct layer1; +extern int l1_event(struct layer1 *, u_int); + + +struct bchannel { + struct mISDNchannel ch; + int nr; + u_long Flags; + struct work_struct workq; + u_int state; + /* HW access */ + u_char (*read_reg) (void *, u_char); + void (*write_reg) (void *, u_char, u_char); + void (*read_fifo) (void *, u_char *, int); + void (*write_fifo) (void *, u_char *, int); + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + struct sk_buff *rx_skb; + int maxlen; + /* send data */ + struct sk_buff *next_skb; + struct sk_buff *tx_skb; + struct sk_buff_head rqueue; + int rcount; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; +}; + +extern int mISDN_initdchannel(struct dchannel *, int, void *); +extern int mISDN_initbchannel(struct bchannel *, int); +extern int mISDN_freedchannel(struct dchannel *); +extern int mISDN_freebchannel(struct bchannel *); +extern void queue_ch_frame(struct mISDNchannel *, u_int, + int, struct sk_buff *); +extern int dchannel_senddata(struct dchannel *, struct sk_buff *); +extern int bchannel_senddata(struct bchannel *, struct sk_buff *); +extern void recv_Dchannel(struct dchannel *); +extern void recv_Bchannel(struct bchannel *); +extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); +extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); +extern void confirm_Bsend(struct bchannel *bch); +extern int get_next_bframe(struct bchannel *); +extern int get_next_dframe(struct dchannel *); + +#endif diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h new file mode 100644 index 00000000000..8f2d60da04e --- /dev/null +++ b/include/linux/mISDNif.h @@ -0,0 +1,509 @@ +/* + * + * Author Karsten Keil <kkeil@novell.com> + * + * Copyright 2008 by Karsten Keil <kkeil@novell.com> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE + * version 2.1 as published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU LESSER GENERAL PUBLIC LICENSE for more details. 
+ * + */ + +#ifndef mISDNIF_H +#define mISDNIF_H + +#include <stdarg.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/socket.h> + +/* + * ABI Version 32 bit + * + * <8 bit> Major version + * - changed if any interface become backwards incompatible + * + * <8 bit> Minor version + * - changed if any interface is extended but backwards compatible + * + * <16 bit> Release number + * - should be incremented on every checkin + */ +#define MISDN_MAJOR_VERSION 1 +#define MISDN_MINOR_VERSION 0 +#define MISDN_RELEASE 19 + +/* primitives for information exchange + * generell format + * <16 bit 0 > + * <8 bit command> + * BIT 8 = 1 LAYER private + * BIT 7 = 1 answer + * BIT 6 = 1 DATA + * <8 bit target layer mask> + * + * Layer = 00 is reserved for general commands + Layer = 01 L2 -> HW + Layer = 02 HW -> L2 + Layer = 04 L3 -> L2 + Layer = 08 L2 -> L3 + * Layer = FF is reserved for broadcast commands + */ + +#define MISDN_CMDMASK 0xff00 +#define MISDN_LAYERMASK 0x00ff + +/* generell commands */ +#define OPEN_CHANNEL 0x0100 +#define CLOSE_CHANNEL 0x0200 +#define CONTROL_CHANNEL 0x0300 +#define CHECK_DATA 0x0400 + +/* layer 2 -> layer 1 */ +#define PH_ACTIVATE_REQ 0x0101 +#define PH_DEACTIVATE_REQ 0x0201 +#define PH_DATA_REQ 0x2001 +#define MPH_ACTIVATE_REQ 0x0501 +#define MPH_DEACTIVATE_REQ 0x0601 +#define MPH_INFORMATION_REQ 0x0701 +#define PH_CONTROL_REQ 0x0801 + +/* layer 1 -> layer 2 */ +#define PH_ACTIVATE_IND 0x0102 +#define PH_ACTIVATE_CNF 0x4102 +#define PH_DEACTIVATE_IND 0x0202 +#define PH_DEACTIVATE_CNF 0x4202 +#define PH_DATA_IND 0x2002 +#define MPH_ACTIVATE_IND 0x0502 +#define MPH_DEACTIVATE_IND 0x0602 +#define MPH_INFORMATION_IND 0x0702 +#define PH_DATA_CNF 0x6002 +#define PH_CONTROL_IND 0x0802 +#define PH_CONTROL_CNF 0x4802 + +/* layer 3 -> layer 2 */ +#define DL_ESTABLISH_REQ 0x1004 +#define DL_RELEASE_REQ 0x1104 +#define DL_DATA_REQ 0x3004 +#define DL_UNITDATA_REQ 0x3104 +#define DL_INFORMATION_REQ 0x0004 + +/* layer 2 -> layer 3 */ +#define DL_ESTABLISH_IND 0x1008 +#define DL_ESTABLISH_CNF 0x5008 +#define DL_RELEASE_IND 0x1108 +#define DL_RELEASE_CNF 0x5108 +#define DL_DATA_IND 0x3008 +#define DL_UNITDATA_IND 0x3108 +#define DL_INFORMATION_IND 0x0008 + +/* intern layer 2 managment */ +#define MDL_ASSIGN_REQ 0x1804 +#define MDL_ASSIGN_IND 0x1904 +#define MDL_REMOVE_REQ 0x1A04 +#define MDL_REMOVE_IND 0x1B04 +#define MDL_STATUS_UP_IND 0x1C04 +#define MDL_STATUS_DOWN_IND 0x1D04 +#define MDL_STATUS_UI_IND 0x1E04 +#define MDL_ERROR_IND 0x1F04 +#define MDL_ERROR_RSP 0x5F04 + +/* DL_INFORMATION_IND types */ +#define DL_INFO_L2_CONNECT 0x0001 +#define DL_INFO_L2_REMOVED 0x0002 + +/* PH_CONTROL types */ +/* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */ +#define DTMF_TONE_VAL 0x2000 +#define DTMF_TONE_MASK 0x007F +#define DTMF_TONE_START 0x2100 +#define DTMF_TONE_STOP 0x2200 +#define DTMF_HFC_COEF 0x4000 +#define DSP_CONF_JOIN 0x2403 +#define DSP_CONF_SPLIT 0x2404 +#define DSP_RECEIVE_OFF 0x2405 +#define DSP_RECEIVE_ON 0x2406 +#define DSP_ECHO_ON 0x2407 +#define DSP_ECHO_OFF 0x2408 +#define DSP_MIX_ON 0x2409 +#define DSP_MIX_OFF 0x240a +#define DSP_DELAY 0x240b +#define DSP_JITTER 0x240c +#define DSP_TXDATA_ON 0x240d +#define DSP_TXDATA_OFF 0x240e +#define DSP_TX_DEJITTER 0x240f +#define DSP_TX_DEJ_OFF 0x2410 +#define DSP_TONE_PATT_ON 0x2411 +#define DSP_TONE_PATT_OFF 0x2412 +#define DSP_VOL_CHANGE_TX 0x2413 +#define DSP_VOL_CHANGE_RX 0x2414 +#define DSP_BF_ENABLE_KEY 0x2415 +#define DSP_BF_DISABLE 0x2416 +#define DSP_BF_ACCEPT 0x2416 +#define DSP_BF_REJECT 
0x2417 +#define DSP_PIPELINE_CFG 0x2418 +#define HFC_VOL_CHANGE_TX 0x2601 +#define HFC_VOL_CHANGE_RX 0x2602 +#define HFC_SPL_LOOP_ON 0x2603 +#define HFC_SPL_LOOP_OFF 0x2604 + +/* DSP_TONE_PATT_ON parameter */ +#define TONE_OFF 0x0000 +#define TONE_GERMAN_DIALTONE 0x0001 +#define TONE_GERMAN_OLDDIALTONE 0x0002 +#define TONE_AMERICAN_DIALTONE 0x0003 +#define TONE_GERMAN_DIALPBX 0x0004 +#define TONE_GERMAN_OLDDIALPBX 0x0005 +#define TONE_AMERICAN_DIALPBX 0x0006 +#define TONE_GERMAN_RINGING 0x0007 +#define TONE_GERMAN_OLDRINGING 0x0008 +#define TONE_AMERICAN_RINGPBX 0x000b +#define TONE_GERMAN_RINGPBX 0x000c +#define TONE_GERMAN_OLDRINGPBX 0x000d +#define TONE_AMERICAN_RINGING 0x000e +#define TONE_GERMAN_BUSY 0x000f +#define TONE_GERMAN_OLDBUSY 0x0010 +#define TONE_AMERICAN_BUSY 0x0011 +#define TONE_GERMAN_HANGUP 0x0012 +#define TONE_GERMAN_OLDHANGUP 0x0013 +#define TONE_AMERICAN_HANGUP 0x0014 +#define TONE_SPECIAL_INFO 0x0015 +#define TONE_GERMAN_GASSENBESETZT 0x0016 +#define TONE_GERMAN_AUFSCHALTTON 0x0016 + +/* MPH_INFORMATION_IND */ +#define L1_SIGNAL_LOS_OFF 0x0010 +#define L1_SIGNAL_LOS_ON 0x0011 +#define L1_SIGNAL_AIS_OFF 0x0012 +#define L1_SIGNAL_AIS_ON 0x0013 +#define L1_SIGNAL_RDI_OFF 0x0014 +#define L1_SIGNAL_RDI_ON 0x0015 +#define L1_SIGNAL_SLIP_RX 0x0020 +#define L1_SIGNAL_SLIP_TX 0x0021 + +/* + * protocol ids + * D channel 1-31 + * B channel 33 - 63 + */ + +#define ISDN_P_NONE 0 +#define ISDN_P_BASE 0 +#define ISDN_P_TE_S0 0x01 +#define ISDN_P_NT_S0 0x02 +#define ISDN_P_TE_E1 0x03 +#define ISDN_P_NT_E1 0x04 +#define ISDN_P_LAPD_TE 0x10 +#define ISDN_P_LAPD_NT 0x11 + +#define ISDN_P_B_MASK 0x1f +#define ISDN_P_B_START 0x20 + +#define ISDN_P_B_RAW 0x21 +#define ISDN_P_B_HDLC 0x22 +#define ISDN_P_B_X75SLP 0x23 +#define ISDN_P_B_L2DTMF 0x24 +#define ISDN_P_B_L2DSP 0x25 +#define ISDN_P_B_L2DSPHDLC 0x26 + +#define OPTION_L2_PMX 1 +#define OPTION_L2_PTP 2 +#define OPTION_L2_FIXEDTEI 3 +#define OPTION_L2_CLEANUP 4 + +/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */ +#define MISDN_MAX_IDLEN 20 + +struct mISDNhead { + unsigned int prim; + unsigned int id; +} __attribute__((packed)); + +#define MISDN_HEADER_LEN sizeof(struct mISDNhead) +#define MAX_DATA_SIZE 2048 +#define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN) +#define MAX_DFRAME_LEN 260 + +#define MISDN_ID_ADDR_MASK 0xFFFF +#define MISDN_ID_TEI_MASK 0xFF00 +#define MISDN_ID_SAPI_MASK 0x00FF +#define MISDN_ID_TEI_ANY 0x7F00 + +#define MISDN_ID_ANY 0xFFFF +#define MISDN_ID_NONE 0xFFFE + +#define GROUP_TEI 127 +#define TEI_SAPI 63 +#define CTRL_SAPI 0 + +#define MISDN_MAX_CHANNEL 127 +#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) + +#define SOL_MISDN 0 + +struct sockaddr_mISDN { + sa_family_t family; + unsigned char dev; + unsigned char channel; + unsigned char sapi; + unsigned char tei; +}; + +/* timer device ioctl */ +#define IMADDTIMER _IOR('I', 64, int) +#define IMDELTIMER _IOR('I', 65, int) +/* socket ioctls */ +#define IMGETVERSION _IOR('I', 66, int) +#define IMGETCOUNT _IOR('I', 67, int) +#define IMGETDEVINFO _IOR('I', 68, int) +#define IMCTRLREQ _IOR('I', 69, int) +#define IMCLEAR_L2 _IOR('I', 70, int) + +struct mISDNversion { + unsigned char major; + unsigned char minor; + unsigned short release; +}; + +struct mISDN_devinfo { + u_int id; + u_int Dprotocols; + u_int Bprotocols; + u_int protocol; + u_char channelmap[MISDN_CHMAP_SIZE]; + u_int nrbchan; + char name[MISDN_MAX_IDLEN]; +}; + +static inline int +test_channelmap(u_int nr, u_char *map) +{ + if (nr <= MISDN_MAX_CHANNEL) + return map[nr >> 3] & (1 << 
(nr & 7)); + else + return 0; +} + +static inline void +set_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] |= (1 << (nr & 7)); +} + +static inline void +clear_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] &= ~(1 << (nr & 7)); +} + +/* CONTROL_CHANNEL parameters */ +#define MISDN_CTRL_GETOP 0x0000 +#define MISDN_CTRL_LOOP 0x0001 +#define MISDN_CTRL_CONNECT 0x0002 +#define MISDN_CTRL_DISCONNECT 0x0004 +#define MISDN_CTRL_PCMCONNECT 0x0010 +#define MISDN_CTRL_PCMDISCONNECT 0x0020 +#define MISDN_CTRL_SETPEER 0x0040 +#define MISDN_CTRL_UNSETPEER 0x0080 +#define MISDN_CTRL_RX_OFF 0x0100 +#define MISDN_CTRL_HW_FEATURES_OP 0x2000 +#define MISDN_CTRL_HW_FEATURES 0x2001 +#define MISDN_CTRL_HFC_OP 0x4000 +#define MISDN_CTRL_HFC_PCM_CONN 0x4001 +#define MISDN_CTRL_HFC_PCM_DISC 0x4002 +#define MISDN_CTRL_HFC_CONF_JOIN 0x4003 +#define MISDN_CTRL_HFC_CONF_SPLIT 0x4004 +#define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005 +#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006 +#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007 +#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008 + + +/* socket options */ +#define MISDN_TIME_STAMP 0x0001 + +struct mISDN_ctrl_req { + int op; + int channel; + int p1; + int p2; +}; + +/* muxer options */ +#define MISDN_OPT_ALL 1 +#define MISDN_OPT_TEIMGR 2 + +#ifdef __KERNEL__ +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/net.h> +#include <net/sock.h> +#include <linux/completion.h> + +#define DEBUG_CORE 0x000000ff +#define DEBUG_CORE_FUNC 0x00000002 +#define DEBUG_SOCKET 0x00000004 +#define DEBUG_MANAGER 0x00000008 +#define DEBUG_SEND_ERR 0x00000010 +#define DEBUG_MSG_THREAD 0x00000020 +#define DEBUG_QUEUE_FUNC 0x00000040 +#define DEBUG_L1 0x0000ff00 +#define DEBUG_L1_FSM 0x00000200 +#define DEBUG_L2 0x00ff0000 +#define DEBUG_L2_FSM 0x00020000 +#define DEBUG_L2_CTRL 0x00040000 +#define DEBUG_L2_RECV 0x00080000 +#define DEBUG_L2_TEI 0x00100000 +#define DEBUG_L2_TEIFSM 0x00200000 +#define DEBUG_TIMER 0x01000000 + +#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) +#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) +#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id) + +/* socket states */ +#define MISDN_OPEN 1 +#define MISDN_BOUND 2 +#define MISDN_CLOSED 3 + +struct mISDNchannel; +struct mISDNdevice; +struct mISDNstack; + +struct channel_req { + u_int protocol; + struct sockaddr_mISDN adr; + struct mISDNchannel *ch; +}; + +typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *); +typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *); +typedef int (create_func_t)(struct channel_req *); + +struct Bprotocol { + struct list_head list; + char *name; + u_int Bprotocols; + create_func_t *create; +}; + +struct mISDNchannel { + struct list_head list; + u_int protocol; + u_int nr; + u_long opt; + u_int addr; + struct mISDNstack *st; + struct mISDNchannel *peer; + send_func_t *send; + send_func_t *recv; + ctrl_func_t *ctrl; +}; + +struct mISDN_sock_list { + struct hlist_head head; + rwlock_t lock; +}; + +struct mISDN_sock { + struct sock sk; + struct mISDNchannel ch; + u_int cmask; + struct mISDNdevice *dev; +}; + + + +struct mISDNdevice { + struct mISDNchannel D; + u_int id; + char name[MISDN_MAX_IDLEN]; + u_int Dprotocols; + u_int Bprotocols; + u_int nrbchan; + u_char channelmap[MISDN_CHMAP_SIZE]; + struct list_head bchannels; + struct mISDNchannel *teimgr; + struct device dev; +}; + +struct mISDNstack { + u_long status; + struct mISDNdevice *dev; + struct task_struct *thread; + struct completion *notify; + wait_queue_head_t workq; + struct 
sk_buff_head msgq; + struct list_head layer2; + struct mISDNchannel *layer1; + struct mISDNchannel own; + struct mutex lmutex; /* protect lists */ + struct mISDN_sock_list l1sock; +#ifdef MISDN_MSG_STATS + u_int msg_cnt; + u_int sleep_cnt; + u_int stopped_cnt; +#endif +}; + +/* global alloc/queue functions */ + +static inline struct sk_buff * +mI_alloc_skb(unsigned int len, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); + if (likely(skb)) + skb_reserve(skb, MISDN_HEADER_LEN); + return skb; +} + +static inline struct sk_buff * +_alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); + struct mISDNhead *hh; + + if (!skb) + return NULL; + if (len) + memcpy(skb_put(skb, len), dp, len); + hh = mISDN_HEAD_P(skb); + hh->prim = prim; + hh->id = id; + return skb; +} + +static inline void +_queue_data(struct mISDNchannel *ch, u_int prim, + u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + if (!ch->peer) + return; + skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); + if (!skb) + return; + if (ch->recv(ch->peer, skb)) + dev_kfree_skb(skb); +} + +/* global register/unregister functions */ + +extern int mISDN_register_device(struct mISDNdevice *, char *name); +extern void mISDN_unregister_device(struct mISDNdevice *); +extern int mISDN_register_Bprotocol(struct Bprotocol *); +extern void mISDN_unregister_Bprotocol(struct Bprotocol *); + +extern void set_channel_address(struct mISDNchannel *, u_int, u_int); + +#endif /* __KERNEL__ */ +#endif /* mISDNIF_H */ diff --git a/include/linux/major.h b/include/linux/major.h index 53d5fafd85c..88249452b93 100644 --- a/include/linux/major.h +++ b/include/linux/major.h @@ -170,4 +170,6 @@ #define VIOTAPE_MAJOR 230 +#define BLOCK_EXT_MAJOR 259 + #endif diff --git a/include/linux/maple.h b/include/linux/maple.h index d31e36ebb43..c23d3f51ba4 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -2,6 +2,7 @@ #define __LINUX_MAPLE_H #include <linux/device.h> +#include <mach/maple.h> extern struct bus_type maple_bus_type; @@ -33,6 +34,7 @@ struct mapleq { void *sendbuf, *recvbuf, *recvbufdcsp; unsigned char length; enum maple_code command; + struct mutex mutex; }; struct maple_devinfo { @@ -49,7 +51,6 @@ struct maple_devinfo { struct maple_device { struct maple_driver *driver; struct mapleq *mq; - void *private_data; void (*callback) (struct mapleq * mq); unsigned long when, interval, function; struct maple_devinfo devinfo; @@ -61,8 +62,6 @@ struct maple_device { struct maple_driver { unsigned long function; - int (*connect) (struct maple_device * dev); - void (*disconnect) (struct maple_device * dev); struct device_driver drv; }; @@ -70,10 +69,17 @@ void maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq * mq), unsigned long interval, unsigned long function); -int maple_driver_register(struct device_driver *drv); -void maple_add_packet(struct mapleq *mq); +int maple_driver_register(struct maple_driver *); +void maple_driver_unregister(struct maple_driver *); + +int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, + u32 command, u32 length, void *data); +void maple_clear_dev(struct maple_device *mdev); #define to_maple_dev(n) container_of(n, struct maple_device, dev) #define to_maple_driver(n) container_of(n, struct maple_driver, drv) +#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + 
#endif /* __LINUX_MAPLE_H */ diff --git a/include/linux/memstick.h b/include/linux/memstick.h index 37a5cdb0391..d0c37e68223 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -21,30 +21,30 @@ struct ms_status_register { unsigned char reserved; unsigned char interrupt; -#define MEMSTICK_INT_CMDNAK 0x0001 -#define MEMSTICK_INT_IOREQ 0x0008 -#define MEMSTICK_INT_IOBREQ 0x0010 -#define MEMSTICK_INT_BREQ 0x0020 -#define MEMSTICK_INT_ERR 0x0040 -#define MEMSTICK_INT_CED 0x0080 +#define MEMSTICK_INT_CMDNAK 0x01 +#define MEMSTICK_INT_IOREQ 0x08 +#define MEMSTICK_INT_IOBREQ 0x10 +#define MEMSTICK_INT_BREQ 0x20 +#define MEMSTICK_INT_ERR 0x40 +#define MEMSTICK_INT_CED 0x80 unsigned char status0; -#define MEMSTICK_STATUS0_WP 0x0001 -#define MEMSTICK_STATUS0_SL 0x0002 -#define MEMSTICK_STATUS0_BF 0x0010 -#define MEMSTICK_STATUS0_BE 0x0020 -#define MEMSTICK_STATUS0_FB0 0x0040 -#define MEMSTICK_STATUS0_MB 0x0080 +#define MEMSTICK_STATUS0_WP 0x01 +#define MEMSTICK_STATUS0_SL 0x02 +#define MEMSTICK_STATUS0_BF 0x10 +#define MEMSTICK_STATUS0_BE 0x20 +#define MEMSTICK_STATUS0_FB0 0x40 +#define MEMSTICK_STATUS0_MB 0x80 unsigned char status1; -#define MEMSTICK_STATUS1_UCFG 0x0001 -#define MEMSTICK_STATUS1_FGER 0x0002 -#define MEMSTICK_STATUS1_UCEX 0x0004 -#define MEMSTICK_STATUS1_EXER 0x0008 -#define MEMSTICK_STATUS1_UCDT 0x0010 -#define MEMSTICK_STATUS1_DTER 0x0020 -#define MEMSTICK_STATUS1_FBI 0x0040 -#define MEMSTICK_STATUS1_MB 0x0080 +#define MEMSTICK_STATUS1_UCFG 0x01 +#define MEMSTICK_STATUS1_FGER 0x02 +#define MEMSTICK_STATUS1_UCEX 0x04 +#define MEMSTICK_STATUS1_EXER 0x08 +#define MEMSTICK_STATUS1_UCDT 0x10 +#define MEMSTICK_STATUS1_DTER 0x20 +#define MEMSTICK_STATUS1_FB1 0x40 +#define MEMSTICK_STATUS1_MB 0x80 } __attribute__((packed)); struct ms_id_register { @@ -56,32 +56,32 @@ struct ms_id_register { struct ms_param_register { unsigned char system; -#define MEMSTICK_SYS_ATEN 0xc0 -#define MEMSTICK_SYS_BAMD 0x80 #define MEMSTICK_SYS_PAM 0x08 +#define MEMSTICK_SYS_BAMD 0x80 unsigned char block_address_msb; unsigned short block_address; unsigned char cp; -#define MEMSTICK_CP_BLOCK 0x0000 -#define MEMSTICK_CP_PAGE 0x0020 -#define MEMSTICK_CP_EXTRA 0x0040 -#define MEMSTICK_CP_OVERWRITE 0x0080 +#define MEMSTICK_CP_BLOCK 0x00 +#define MEMSTICK_CP_PAGE 0x20 +#define MEMSTICK_CP_EXTRA 0x40 +#define MEMSTICK_CP_OVERWRITE 0x80 unsigned char page_address; } __attribute__((packed)); struct ms_extra_data_register { unsigned char overwrite_flag; -#define MEMSTICK_OVERWRITE_UPDATA 0x0010 -#define MEMSTICK_OVERWRITE_PAGE 0x0060 -#define MEMSTICK_OVERWRITE_BLOCK 0x0080 +#define MEMSTICK_OVERWRITE_UDST 0x10 +#define MEMSTICK_OVERWRITE_PGST1 0x20 +#define MEMSTICK_OVERWRITE_PGST0 0x40 +#define MEMSTICK_OVERWRITE_BKST 0x80 unsigned char management_flag; -#define MEMSTICK_MANAGEMENT_SYSTEM 0x0004 -#define MEMSTICK_MANAGEMENT_TRANS_TABLE 0x0008 -#define MEMSTICK_MANAGEMENT_COPY 0x0010 -#define MEMSTICK_MANAGEMENT_ACCESS 0x0020 +#define MEMSTICK_MANAGEMENT_SYSFLG 0x04 +#define MEMSTICK_MANAGEMENT_ATFLG 0x08 +#define MEMSTICK_MANAGEMENT_SCMS1 0x10 +#define MEMSTICK_MANAGEMENT_SCMS0 0x20 unsigned short logical_address; } __attribute__((packed)); @@ -96,9 +96,9 @@ struct ms_register { struct mspro_param_register { unsigned char system; -#define MEMSTICK_SYS_SERIAL 0x80 #define MEMSTICK_SYS_PAR4 0x00 #define MEMSTICK_SYS_PAR8 0x40 +#define MEMSTICK_SYS_SERIAL 0x80 unsigned short data_count; unsigned int data_address; @@ -147,7 +147,7 @@ struct ms_register_addr { unsigned char w_length; } 
__attribute__((packed)); -enum { +enum memstick_tpc { MS_TPC_READ_MG_STATUS = 0x01, MS_TPC_READ_LONG_DATA = 0x02, MS_TPC_READ_SHORT_DATA = 0x03, @@ -167,7 +167,7 @@ enum { MS_TPC_SET_CMD = 0x0e }; -enum { +enum memstick_command { MS_CMD_BLOCK_END = 0x33, MS_CMD_RESET = 0x3c, MS_CMD_BLOCK_WRITE = 0x55, @@ -201,8 +201,6 @@ enum { /*** Driver structures and functions ***/ -#define MEMSTICK_PART_SHIFT 3 - enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE }; #define MEMSTICK_POWER_OFF 0 @@ -215,24 +213,27 @@ enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE }; struct memstick_host; struct memstick_driver; +struct memstick_device_id { + unsigned char match_flags; #define MEMSTICK_MATCH_ALL 0x01 + unsigned char type; #define MEMSTICK_TYPE_LEGACY 0xff #define MEMSTICK_TYPE_DUO 0x00 #define MEMSTICK_TYPE_PRO 0x01 + unsigned char category; #define MEMSTICK_CATEGORY_STORAGE 0xff #define MEMSTICK_CATEGORY_STORAGE_DUO 0x00 +#define MEMSTICK_CATEGORY_IO 0x01 +#define MEMSTICK_CATEGORY_IO_PRO 0x10 -#define MEMSTICK_CLASS_GENERIC 0xff -#define MEMSTICK_CLASS_GENERIC_DUO 0x00 - - -struct memstick_device_id { - unsigned char match_flags; - unsigned char type; - unsigned char category; unsigned char class; +#define MEMSTICK_CLASS_FLASH 0xff +#define MEMSTICK_CLASS_DUO 0x00 +#define MEMSTICK_CLASS_ROM 0x01 +#define MEMSTICK_CLASS_RO 0x02 +#define MEMSTICK_CLASS_WP 0x03 }; struct memstick_request { @@ -263,6 +264,10 @@ struct memstick_dev { /* Get next request from the media driver. */ int (*next_request)(struct memstick_dev *card, struct memstick_request **mrq); + /* Tell the media driver to stop doing things */ + void (*stop)(struct memstick_dev *card); + /* Allow the media driver to continue */ + void (*start)(struct memstick_dev *card); struct device dev; }; @@ -284,7 +289,7 @@ struct memstick_host { /* Notify the host that some requests are pending. */ void (*request)(struct memstick_host *host); /* Set host IO parameters (power, clock, etc). */ - void (*set_param)(struct memstick_host *host, + int (*set_param)(struct memstick_host *host, enum memstick_param param, int value); unsigned long private[0] ____cacheline_aligned; @@ -315,9 +320,9 @@ void memstick_suspend_host(struct memstick_host *host); void memstick_resume_host(struct memstick_host *host); void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc, - struct scatterlist *sg); + const struct scatterlist *sg); void memstick_init_req(struct memstick_request *mrq, unsigned char tpc, - void *buf, size_t length); + const void *buf, size_t length); int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq); void memstick_new_req(struct memstick_host *host); diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index bb3dd054592..49ef857cdb2 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -1,5 +1,3 @@ -#ifndef MFD_CORE_H -#define MFD_CORE_H /* * drivers/mfd/mfd-core.h * @@ -13,6 +11,9 @@ * */ +#ifndef MFD_CORE_H +#define MFD_CORE_H + #include <linux/platform_device.h> /* @@ -28,7 +29,13 @@ struct mfd_cell { int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); - void *driver_data; /* driver-specific data */ + /* driver-specific data for MFD-aware "cell" drivers */ + void *driver_data; + + /* platform_data can be used to either pass data to "generic" + driver or as a hook to mfd_cell for the "cell" drivers */ + void *platform_data; + size_t data_size; /* * This resources can be specified relatievly to the parent device. 
@@ -38,18 +45,11 @@ struct mfd_cell { const struct resource *resources; }; -static inline struct mfd_cell * -mfd_get_cell(struct platform_device *pdev) -{ - return (struct mfd_cell *)pdev->dev.platform_data; -} - -extern int mfd_add_devices( - struct platform_device *parent, - const struct mfd_cell *cells, int n_devs, - struct resource *mem_base, - int irq_base); +extern int mfd_add_devices(struct device *parent, int id, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); -extern void mfd_remove_devices(struct platform_device *parent); +extern void mfd_remove_devices(struct device *parent); #endif diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h new file mode 100644 index 00000000000..e83c7f2036f --- /dev/null +++ b/include/linux/mfd/t7l66xb.h @@ -0,0 +1,36 @@ +/* + * This file contains the definitions for the T7L66XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef MFD_T7L66XB_H +#define MFD_T7L66XB_H + +#include <linux/mfd/core.h> +#include <linux/mfd/tmio.h> + +struct t7l66xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* The base for subdevice irqs */ + + struct tmio_nand_data *nand_data; +}; + + +#define IRQ_T7L66XB_MMC (1) +#define IRQ_T7L66XB_NAND (3) + +#define T7L66XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h new file mode 100644 index 00000000000..fa06e0610b8 --- /dev/null +++ b/include/linux/mfd/tc6387xb.h @@ -0,0 +1,23 @@ +/* + * This file contains the definitions for the TC6387XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + */ +#ifndef MFD_TC6387XB_H +#define MFD_TC6387XB_H + +struct tc6387xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); +}; + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h index 7cc824a58f7..fec7b3f7a81 100644 --- a/include/linux/mfd/tc6393xb.h +++ b/include/linux/mfd/tc6393xb.h @@ -14,8 +14,8 @@ * published by the Free Software Foundation. 
*/ -#ifndef TC6393XB_H -#define TC6393XB_H +#ifndef MFD_TC6393XB_H +#define MFD_TC6393XB_H /* Also one should provide the CK3P6MI clock */ struct tc6393xb_platform_data { @@ -29,7 +29,7 @@ struct tc6393xb_platform_data { int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); - int irq_base; /* a base for cascaded irq */ + int irq_base; /* base for subdevice irqs */ int gpio_base; struct tmio_nand_data *nand_data; @@ -40,9 +40,6 @@ struct tc6393xb_platform_data { */ #define IRQ_TC6393_NAND 0 #define IRQ_TC6393_MMC 1 -#define IRQ_TC6393_OHCI 2 -#define IRQ_TC6393_SERIAL 3 -#define IRQ_TC6393_FB 4 #define TC6393XB_NR_IRQS 8 diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 9438d8c9ac1..ec612e66391 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -1,6 +1,21 @@ #ifndef MFD_TMIO_H #define MFD_TMIO_H +#define tmio_ioread8(addr) readb(addr) +#define tmio_ioread16(addr) readw(addr) +#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) +#define tmio_ioread32(addr) \ + (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) + +#define tmio_iowrite8(val, addr) writeb((val), (addr)) +#define tmio_iowrite16(val, addr) writew((val), (addr)) +#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) +#define tmio_iowrite32(val, addr) \ + do { \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ + } while (0) + /* * data for the NAND controller */ @@ -10,8 +25,4 @@ struct tmio_nand_data { unsigned int num_partitions; }; -#define TMIO_NAND_CONFIG "tmio-nand-config" -#define TMIO_NAND_CONTROL "tmio-nand-control" -#define TMIO_NAND_IRQ "tmio-nand" - #endif diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 071cf96cf01..6f65b2c8bb8 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h @@ -39,17 +39,18 @@ #include <linux/mlx4/doorbell.h> struct mlx4_cqe { - __be32 my_qpn; + __be32 vlan_my_qpn; __be32 immed_rss_invalid; __be32 g_mlpath_rqpn; - u8 sl; - u8 reserved1; + __be16 sl_vid; __be16 rlid; - __be32 ipoib_status; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; __be32 byte_cnt; __be16 wqe_index; __be16 checksum; - u8 reserved2[3]; + u8 reserved[3]; u8 owner_sr_opcode; }; @@ -64,6 +65,11 @@ struct mlx4_err_cqe { }; enum { + MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, + MLX4_CQE_QPN_MASK = 0xffffff, +}; + +enum { MLX4_CQE_OWNER_MASK = 0x80, MLX4_CQE_IS_SEND_MASK = 0x40, MLX4_CQE_OPCODE_MASK = 0x1f @@ -86,13 +92,19 @@ enum { }; enum { - MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22, - MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23, - MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24, - MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25, - MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26, - MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27, - MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28, + MLX4_CQE_STATUS_IPV4 = 1 << 6, + MLX4_CQE_STATUS_IPV4F = 1 << 7, + MLX4_CQE_STATUS_IPV6 = 1 << 8, + MLX4_CQE_STATUS_IPV4OPT = 1 << 9, + MLX4_CQE_STATUS_TCP = 1 << 10, + MLX4_CQE_STATUS_UDP = 1 << 11, + MLX4_CQE_STATUS_IPOK = 1 << 12, +}; + +enum { + MLX4_CQE_LLC = 1, + MLX4_CQE_SNAP = 1 << 1, + MLX4_CQE_BAD_FCS = 1 << 4, }; static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 655ea0d1ee1..b2f94446831 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -141,6 +141,10 @@ enum { MLX4_STAT_RATE_OFFSET = 5 }; +enum { + MLX4_MTT_FLAG_PRESENT = 1 +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; diff --git 
a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index e27082cd650..bf8f11982da 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -164,11 +164,13 @@ enum { MLX4_WQE_CTRL_SOLICITED = 1 << 1, MLX4_WQE_CTRL_IP_CSUM = 1 << 4, MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, + MLX4_WQE_CTRL_INS_VLAN = 1 << 6, }; struct mlx4_wqe_ctrl_seg { __be32 owner_opcode; - u8 reserved2[3]; + __be16 vlan_tag; + u8 ins_vlan; u8 fence_size; /* * High 24 bits are SRC remote buffer; low 8 bits are flags: diff --git a/include/linux/mm.h b/include/linux/mm.h index d87a5a5fe87..c61ba10768e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -7,6 +7,7 @@ #include <linux/gfp.h> #include <linux/list.h> +#include <linux/mmdebug.h> #include <linux/mmzone.h> #include <linux/rbtree.h> #include <linux/prio_tree.h> @@ -73,7 +74,7 @@ extern unsigned int kobjsize(const void *objp); #endif /* - * vm_flags.. + * vm_flags in vm_area_struct, see mm_types.h. */ #define VM_READ 0x00000001 /* currently active flags */ #define VM_WRITE 0x00000002 @@ -219,12 +220,6 @@ struct inode; */ #include <linux/page-flags.h> -#ifdef CONFIG_DEBUG_VM -#define VM_BUG_ON(cond) BUG_ON(cond) -#else -#define VM_BUG_ON(condition) do { } while(0) -#endif - /* * Methods to modify the page usage count. * @@ -744,6 +739,8 @@ struct zap_details { struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, @@ -810,7 +807,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void * int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); -void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); @@ -834,6 +830,19 @@ extern int mprotect_fixup(struct vm_area_struct *vma, unsigned long end, unsigned long newflags); /* + * get_user_pages_fast provides equivalent functionality to get_user_pages, + * operating on current and current->mm (force=0 and doesn't return any vmas). + * + * get_user_pages_fast may take mmap_sem and page tables, so no assumptions + * can be made about locking. get_user_pages_fast is to be implemented in a + * way that is advantageous (vs get_user_pages()) when the user memory area is + * already faulted in and present in ptes. However if the pages have to be + * faulted in, it may turn out to be slightly slower). + */ +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); + +/* * A callback you can register to apply pressure to ageable caches. * * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. 
It should @@ -905,7 +914,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a } #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ -#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS +#if USE_SPLIT_PTLOCKS /* * We tuck a spinlock to guard each pagetable page into its struct page, * at page->private, with BUILD_BUG_ON to make sure that this will not @@ -918,14 +927,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a } while (0) #define pte_lock_deinit(page) ((page)->mapping = NULL) #define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) -#else +#else /* !USE_SPLIT_PTLOCKS */ /* * We use mm->page_table_lock to guard all pagetable pages of the mm. */ #define pte_lock_init(page) do {} while (0) #define pte_lock_deinit(page) do {} while (0) #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) -#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ +#endif /* USE_SPLIT_PTLOCKS */ static inline void pgtable_page_ctor(struct page *page) { @@ -1009,7 +1018,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -extern unsigned long find_max_pfn_with_active_regions(void); extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); typedef int (*work_fn_t)(unsigned long, unsigned long, void *); @@ -1072,6 +1080,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff); extern void exit_mmap(struct mm_struct *); +extern int mm_take_all_locks(struct mm_struct *mm); +extern void mm_drop_all_locks(struct mm_struct *mm); + #ifdef CONFIG_PROC_FS /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ extern void added_exe_file_vma(struct mm_struct *mm); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 746f975b58e..9d49fa36bbe 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -10,6 +10,7 @@ #include <linux/rbtree.h> #include <linux/rwsem.h> #include <linux/completion.h> +#include <linux/cpumask.h> #include <asm/page.h> #include <asm/mmu.h> @@ -20,11 +21,13 @@ struct address_space; -#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS +#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) + +#if USE_SPLIT_PTLOCKS typedef atomic_long_t mm_counter_t; -#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ +#else /* !USE_SPLIT_PTLOCKS */ typedef unsigned long mm_counter_t; -#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ +#endif /* !USE_SPLIT_PTLOCKS */ /* * Each physical page in the system has a struct page associated with @@ -64,7 +67,7 @@ struct page { * see PAGE_MAPPING_ANON below. */ }; -#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS +#if USE_SPLIT_PTLOCKS spinlock_t ptl; #endif struct kmem_cache *slab; /* SLUB: Pointer to slab */ @@ -112,7 +115,7 @@ struct vm_area_struct { struct vm_area_struct *vm_next; pgprot_t vm_page_prot; /* Access permissions of this VMA. */ - unsigned long vm_flags; /* Flags, listed below. */ + unsigned long vm_flags; /* Flags, see mm.h. 
*/ struct rb_node vm_rb; @@ -253,6 +256,9 @@ struct mm_struct { struct file *exe_file; unsigned long num_exe_file_vmas; #endif +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif }; #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 0d508ac17d6..ee6e822d599 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -111,6 +111,8 @@ struct mmc_card { unsigned num_info; /* number of info strings */ const char **info; /* info strings */ struct sdio_func_tuple *tuples; /* unknown common tuples */ + + struct dentry *debugfs_root; }; #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 10a2080086c..bde891f6459 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -65,7 +65,7 @@ struct mmc_host_ops { * -ENOSYS when not supported (equal to NULL callback) * or a negative errno value when something bad happened * - * Return values for the get_ro callback should be: + * Return values for the get_cd callback should be: * 0 for a absent card * 1 for a present card * -ENOSYS when not supported (equal to NULL callback) @@ -157,6 +157,8 @@ struct mmc_host { struct led_trigger *led; /* activity led */ #endif + struct dentry *debugfs_root; + unsigned long private[0] ____cacheline_aligned; }; diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h new file mode 100644 index 00000000000..8a550987719 --- /dev/null +++ b/include/linux/mmdebug.h @@ -0,0 +1,18 @@ +#ifndef LINUX_MM_DEBUG_H +#define LINUX_MM_DEBUG_H 1 + +#include <linux/autoconf.h> + +#ifdef CONFIG_DEBUG_VM +#define VM_BUG_ON(cond) BUG_ON(cond) +#else +#define VM_BUG_ON(cond) do { } while (0) +#endif + +#ifdef CONFIG_DEBUG_VIRTUAL +#define VIRTUAL_BUG_ON(cond) BUG_ON(cond) +#else +#define VIRTUAL_BUG_ON(cond) do { } while (0) +#endif + +#endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h new file mode 100644 index 00000000000..b77486d152c --- /dev/null +++ b/include/linux/mmu_notifier.h @@ -0,0 +1,279 @@ +#ifndef _LINUX_MMU_NOTIFIER_H +#define _LINUX_MMU_NOTIFIER_H + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/mm_types.h> + +struct mmu_notifier; +struct mmu_notifier_ops; + +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + +struct mmu_notifier_ops { + /* + * Called either by mmu_notifier_unregister or when the mm is + * being destroyed by exit_mmap, always before all pages are + * freed. This can run concurrently with other mmu notifier + * methods (the ones invoked outside the mm context) and it + * should tear down all secondary mmu mappings and freeze the + * secondary mmu. If this method isn't implemented you've to + * be sure that nothing could possibly write to the pages + * through the secondary mmu by the time the last thread with + * tsk->mm == mm exits. 
+ * + * As side note: the pages freed after ->release returns could + * be immediately reallocated by the gart at an alias physical + * address with a different cache model, so if ->release isn't + * implemented because all _software_ driven memory accesses + * through the secondary mmu are terminated by the time the + * last thread of this mm quits, you've also to be sure that + * speculative _hardware_ operations can't allocate dirty + * cachelines in the cpu that could not be snooped and made + * coherent with the other read and write operations happening + * through the gart alias address, so leading to memory + * corruption. + */ + void (*release)(struct mmu_notifier *mn, + struct mm_struct *mm); + + /* + * clear_flush_young is called after the VM is + * test-and-clearing the young/accessed bitflag in the + * pte. This way the VM will provide proper aging to the + * accesses to the page through the secondary MMUs and not + * only to the ones through the Linux pte. + */ + int (*clear_flush_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * Before this is invoked any secondary MMU is still ok to + * read/write to the page previously pointed to by the Linux + * pte because the page hasn't been freed yet and it won't be + * freed until this returns. If required set_page_dirty has to + * be called internally to this method. + */ + void (*invalidate_page)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * invalidate_range_start() and invalidate_range_end() must be + * paired and are called only when the mmap_sem and/or the + * locks protecting the reverse maps are held. The subsystem + * must guarantee that no additional references are taken to + * the pages in the range established between the call to + * invalidate_range_start() and the matching call to + * invalidate_range_end(). + * + * Invalidation of multiple concurrent ranges may be + * optionally permitted by the driver. Either way the + * establishment of sptes is forbidden in the range passed to + * invalidate_range_begin/end for the whole duration of the + * invalidate_range_begin/end critical section. + * + * invalidate_range_start() is called when all pages in the + * range are still mapped and have at least a refcount of one. + * + * invalidate_range_end() is called when all pages in the + * range have been unmapped and the pages have been freed by + * the VM. + * + * The VM will remove the page table entries and potentially + * the page between invalidate_range_start() and + * invalidate_range_end(). If the page must not be freed + * because of pending I/O or other circumstances then the + * invalidate_range_start() callback (or the initial mapping + * by the driver) must make sure that the refcount is kept + * elevated. + * + * If the driver increases the refcount when the pages are + * initially mapped into an address space then either + * invalidate_range_start() or invalidate_range_end() may + * decrease the refcount. If the refcount is decreased on + * invalidate_range_start() then the VM can free pages as page + * table entries are removed. If the refcount is only + * droppped on invalidate_range_end() then the driver itself + * will drop the last refcount but it must take care to flush + * any secondary tlb before doing the final free on the + * page. Pages will no longer be referenced by the linux + * address space but may still be referenced by sptes until + * the last refcount is dropped. 
+ */ + void (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); +}; + +/* + * The notifier chains are protected by mmap_sem and/or the reverse map + * semaphores. Notifier chains are only changed when all reverse maps and + * the mmap_sem locks are taken. + * + * Therefore notifier chains can only be traversed when either + * + * 1. mmap_sem is held. + * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock). + * 3. No other concurrent thread can access the list (release) + */ +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; +}; + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return unlikely(mm->mmu_notifier_mm); +} + +extern int mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern int __mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); +extern void __mmu_notifier_release(struct mm_struct *mm); +extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end); + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_release(mm); +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_flush_young(mm, address); + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_page(mm, address); +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end); +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end); +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ + mm->mmu_notifier_mm = NULL; +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_mm_destroy(mm); +} + +/* + * These two macros will sometime replace ptep_clear_flush. + * ptep_clear_flush is impleemnted as macro itself, so this also is + * implemented as a macro until ptep_clear_flush will converted to an + * inline function, to diminish the risk of compilation failure. The + * invalidate_page method over time can be moved outside the PT lock + * and these two macros can be later removed. 
+ */ +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + pte_t __pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ + __pte; \ +}) + +#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + +#else /* CONFIG_MMU_NOTIFIER */ + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ +} + +#define ptep_clear_flush_young_notify ptep_clear_flush_young +#define ptep_clear_flush_notify ptep_clear_flush + +#endif /* CONFIG_MMU_NOTIFIER */ + +#endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 443bc7cd8c6..428328a05fa 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -751,8 +751,9 @@ static inline int zonelist_node_idx(struct zoneref *zoneref) * * This function returns the next zone at or below a given zone index that is * within the allowed nodemask using a cursor as the starting point for the - * search. The zoneref returned is a cursor that is used as the next starting - * point for future calls to next_zones_zonelist(). + * search. The zoneref returned is a cursor that represents the current zone + * being examined. It should be advanced by one before calling + * next_zones_zonelist again. */ struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, @@ -768,9 +769,8 @@ struct zoneref *next_zones_zonelist(struct zoneref *z, * * This function returns the first zone at or below a given zone index that is * within the allowed nodemask. The zoneref returned is a cursor that can be - * used to iterate the zonelist with next_zones_zonelist. The cursor should - * not be used by the caller as it does not match the value of the zone - * returned. + * used to iterate the zonelist with next_zones_zonelist by advancing it by + * one before calling. 
*/ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, @@ -795,7 +795,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ zone; \ - z = next_zones_zonelist(z, highidx, nodemask, &zone)) \ + z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \ /** * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index diff --git a/include/linux/mount.h b/include/linux/mount.h index 4374d1adeb4..30a1d63b6fb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -47,7 +47,7 @@ struct vfsmount { struct list_head mnt_child; /* and going through their mnt_child */ int mnt_flags; /* 4 bytes hole on 64bits arches */ - char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ + const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ struct list_head mnt_list; struct list_head mnt_expire; /* link in fs-specific expiry list */ struct list_head mnt_share; /* circular list of shared mounts */ @@ -105,7 +105,8 @@ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, struct nameidata; -extern int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, +struct path; +extern int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags, struct list_head *fslist); extern void mark_mounts_for_expiry(struct list_head *mounts); diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 07112ee9293..8a455694d68 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -6,7 +6,6 @@ #ifdef __KERNEL__ #include <linux/in.h> #endif -#include <linux/pim.h> /* * Based on the MROUTING 3.5 defines primarily to keep @@ -130,6 +129,7 @@ struct igmpmsg */ #ifdef __KERNEL__ +#include <linux/pim.h> #include <net/sock.h> #ifdef CONFIG_IP_MROUTE diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 5cf50473a10..6f4c180179e 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -115,6 +115,7 @@ struct sioc_mif_req6 #ifdef __KERNEL__ +#include <linux/pim.h> #include <linux/skbuff.h> /* for struct sk_buff_head */ #ifdef CONFIG_IPV6_MROUTE diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index 9a6e2f953cb..8b4aa0523db 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h @@ -1,6 +1,4 @@ /* - * $Id: blktrans.h,v 1.6 2005/11/07 11:14:54 gleixner Exp $ - * * (C) 2003 David Woodhouse <dwmw2@infradead.org> * * Interface to Linux block layer for MTD 'translation layers'. 
@@ -43,6 +41,8 @@ struct mtd_blktrans_ops { unsigned long block, char *buffer); int (*writesect)(struct mtd_blktrans_dev *dev, unsigned long block, char *buffer); + int (*discard)(struct mtd_blktrans_dev *dev, + unsigned long block, unsigned nr_blocks); /* Block layer ioctls */ int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo); diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index b0ddf4b2586..d6fb115f5a0 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -1,7 +1,6 @@ /* Common Flash Interface structures * See http://support.intel.com/design/flash/technote/index.htm - * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $ */ #ifndef __MTD_CFI_H__ diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h index 25724f7d386..d802f7736be 100644 --- a/include/linux/mtd/cfi_endian.h +++ b/include/linux/mtd/cfi_endian.h @@ -1,8 +1,3 @@ -/* - * $Id: cfi_endian.h,v 1.11 2002/01/30 23:20:48 awozniak Exp $ - * - */ - #include <asm/byteorder.h> #ifndef CONFIG_MTD_CFI_ADV_OPTIONS diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h index ed8dc675521..c02f3d264ec 100644 --- a/include/linux/mtd/concat.h +++ b/include/linux/mtd/concat.h @@ -4,8 +4,6 @@ * (C) 2002 Robert Kaiser <rkaiser@sysgo.de> * * This code is GPL - * - * $Id: concat.h,v 1.1 2002/03/08 16:34:36 rkaiser Exp $ */ #ifndef MTD_CONCAT_H diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h index 9addd073bf1..0a6d516ab71 100644 --- a/include/linux/mtd/doc2000.h +++ b/include/linux/mtd/doc2000.h @@ -6,8 +6,6 @@ * Copyright (C) 2002-2003 Greg Ungerer <gerg@snapgear.com> * Copyright (C) 2002-2003 SnapGear Inc * - * $Id: doc2000.h,v 1.25 2005/11/07 11:14:54 gleixner Exp $ - * * Released under GPL */ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index 39e7d2a1be9..08dd131301c 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -5,9 +5,6 @@ * Contains information about the location and state of a given flash device * * (C) 2000 Red Hat. GPLd. - * - * $Id: flashchip.h,v 1.18 2005/11/07 11:14:54 gleixner Exp $ - * */ #ifndef __MTD_FLASHCHIP_H__ diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h index d9960911330..0be442f881d 100644 --- a/include/linux/mtd/ftl.h +++ b/include/linux/mtd/ftl.h @@ -1,6 +1,4 @@ /* - * $Id: ftl.h,v 1.7 2005/11/07 11:14:54 gleixner Exp $ - * * Derived from (and probably identical to): * ftl.h 1.7 1999/10/25 20:23:17 * diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h index 256e7342ed1..df362ddf294 100644 --- a/include/linux/mtd/gen_probe.h +++ b/include/linux/mtd/gen_probe.h @@ -1,7 +1,6 @@ /* * (C) 2001, 2001 Red Hat, Inc. 
* GPL'd - * $Id: gen_probe.h,v 1.4 2005/11/07 11:14:54 gleixner Exp $ */ #ifndef __LINUX_MTD_GEN_PROBE_H__ diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h index 85fd041d44a..64ee53ce95a 100644 --- a/include/linux/mtd/inftl.h +++ b/include/linux/mtd/inftl.h @@ -2,8 +2,6 @@ * inftl.h -- defines to support the Inverse NAND Flash Translation Layer * * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) - * - * $Id: inftl.h,v 1.7 2005/06/13 13:08:45 sean Exp $ */ #ifndef __MTD_INFTL_H__ @@ -52,8 +50,6 @@ struct INFTLrecord { int INFTL_mount(struct INFTLrecord *s); int INFTL_formatblock(struct INFTLrecord *s, int block); -extern char inftlmountrev[]; - void INFTL_dumptables(struct INFTLrecord *s); void INFTL_dumpVUchains(struct INFTLrecord *s); diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 9c1d95491f8..aa30244492c 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -1,6 +1,5 @@ /* Overhauled routines for dealing with different mmap regions of flash */ -/* $Id: map.h,v 1.54 2005/11/07 11:14:54 gleixner Exp $ */ #ifndef __LINUX_MTD_MAP_H__ #define __LINUX_MTD_MAP_H__ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 8b5d49133ec..92263654855 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -1,6 +1,4 @@ /* - * $Id: mtd.h,v 1.61 2005/11/07 11:14:54 gleixner Exp $ - * * Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al. * * Released under GPL @@ -274,7 +272,11 @@ static inline void mtd_erase_callback(struct erase_info *instr) printk(KERN_INFO args); \ } while(0) #else /* CONFIG_MTD_DEBUG */ -#define DEBUG(n, args...) do { } while(0) +#define DEBUG(n, args...) \ + do { \ + if (0) \ + printk(KERN_INFO args); \ + } while(0) #endif /* CONFIG_MTD_DEBUG */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 53ea3dc8b0e..81774e5facf 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -5,8 +5,6 @@ * Steven J. Hill <sjhill@realitydiluted.com> * Thomas Gleixner <tglx@linutronix.de> * - * $Id: nand.h,v 1.74 2005/09/15 13:58:50 vwool Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -179,6 +177,9 @@ typedef enum { #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) +/* Large page NAND with SOFT_ECC should support subpage reads */ +#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \ + && (chip->page_shift > 9)) /* Mask to zero out the chip options, which come from the id table */ #define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) @@ -276,6 +277,10 @@ struct nand_ecc_ctrl { int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf); + int (*read_subpage)(struct mtd_info *mtd, + struct nand_chip *chip, + uint32_t offs, uint32_t len, + uint8_t *buf); void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf); diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 12c5bc342ea..090da505425 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -3,8 +3,6 @@ * * Copyright (C) 2000 Steven J. 
Hill (sjhill@realitydiluted.com) * - * $Id: nand_ecc.h,v 1.4 2004/06/17 02:35:02 dbrown Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h index 001eec50cac..dcaf611ed74 100644 --- a/include/linux/mtd/nftl.h +++ b/include/linux/mtd/nftl.h @@ -1,6 +1,4 @@ /* - * $Id: nftl.h,v 1.16 2004/06/30 14:49:00 dbrown Exp $ - * * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */ diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 7c37d7e55ab..5014f7a9f5d 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -4,8 +4,6 @@ * (C) 2000 Nicolas Pitre <nico@cam.org> * * This code is GPL - * - * $Id: partitions.h,v 1.17 2005/11/07 11:14:55 gleixner Exp $ */ #ifndef MTD_PARTITIONS_H diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h index 0dc07d5f335..c8e63a5ee72 100644 --- a/include/linux/mtd/physmap.h +++ b/include/linux/mtd/physmap.h @@ -2,8 +2,6 @@ * For boards with physically mapped flash and using * drivers/mtd/maps/physmap.c mapping driver. * - * $Id: physmap.h,v 1.4 2005/11/07 11:14:55 gleixner Exp $ - * * Copyright (C) 2003 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h index 0e37ad07bce..e07890aff1c 100644 --- a/include/linux/mtd/plat-ram.h +++ b/include/linux/mtd/plat-ram.h @@ -6,8 +6,6 @@ * * Generic platform device based RAM map * - * $Id: plat-ram.h,v 1.2 2005/01/24 00:37:40 bjd Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. diff --git a/include/linux/mtd/pmc551.h b/include/linux/mtd/pmc551.h index 5cc070c24d8..27ad40aed19 100644 --- a/include/linux/mtd/pmc551.h +++ b/include/linux/mtd/pmc551.h @@ -1,6 +1,4 @@ /* - * $Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $ - * * PMC551 PCI Mezzanine Ram Device * * Author: @@ -17,7 +15,7 @@ #include <linux/mtd/mtd.h> -#define PMC551_VERSION "$Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $\n"\ +#define PMC551_VERSION \ "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n" /* diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h index e9d40bdde48..36efcba15ec 100644 --- a/include/linux/mtd/xip.h +++ b/include/linux/mtd/xip.h @@ -11,8 +11,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * $Id: xip.h,v 1.5 2005/11/07 11:14:55 gleixner Exp $ */ #ifndef __LINUX_MTD_XIP_H__ diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 12078577aef..cbbbe9bfeca 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -17,9 +17,14 @@ struct mv643xx_eth_shared_platform_data { struct mbus_dram_target_info *dram; + struct platform_device *shared_smi; unsigned int t_clk; }; +#define MV643XX_ETH_PHY_ADDR_DEFAULT 0 +#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x)) +#define MV643XX_ETH_PHY_NONE 0xff + struct mv643xx_eth_platform_data { /* * Pointer back to our parent instance, and our port number. @@ -30,8 +35,6 @@ struct mv643xx_eth_platform_data { /* * Whether a PHY is present, and if yes, at which address. 
*/ - struct platform_device *shared_smi; - int force_phy_addr; int phy_addr; /* @@ -49,10 +52,10 @@ struct mv643xx_eth_platform_data { int duplex; /* - * Which RX/TX queues to use. + * How many RX/TX queues to use. */ - int rx_queue_mask; - int tx_queue_mask; + int rx_queue_count; + int tx_queue_count; /* * Override default RX/TX queue sizes if nonzero. diff --git a/include/linux/namei.h b/include/linux/namei.h index 24d88e98a62..68f8c3203c8 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -47,27 +47,24 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_DIRECTORY 2 #define LOOKUP_CONTINUE 4 #define LOOKUP_PARENT 16 -#define LOOKUP_NOALT 32 #define LOOKUP_REVAL 64 /* * Intent data */ #define LOOKUP_OPEN (0x0100) #define LOOKUP_CREATE (0x0200) -#define LOOKUP_ACCESS (0x0400) -#define LOOKUP_CHDIR (0x0800) - -extern int __user_walk(const char __user *, unsigned, struct nameidata *); -extern int __user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *); -#define user_path_walk(name,nd) \ - __user_walk_fd(AT_FDCWD, name, LOOKUP_FOLLOW, nd) -#define user_path_walk_link(name,nd) \ - __user_walk_fd(AT_FDCWD, name, 0, nd) + +extern int user_path_at(int, const char __user *, unsigned, struct path *); + +#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path) +#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path) +#define user_path_dir(name, path) \ + user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path) + extern int path_lookup(const char *, unsigned, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct nameidata *); -extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags); extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags); extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, int (*open)(struct inode *, struct file *)); diff --git a/include/linux/net.h b/include/linux/net.h index 4a9a30f2d68..6dc14a24004 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -18,16 +18,9 @@ #ifndef _LINUX_NET_H #define _LINUX_NET_H -#include <linux/wait.h> #include <linux/socket.h> -#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ #include <asm/socket.h> -struct poll_table_struct; -struct pipe_inode_info; -struct inode; -struct net; - #define NPROTO AF_MAX #define SYS_SOCKET 1 /* sys_socket(2) */ @@ -62,6 +55,13 @@ typedef enum { #ifdef __KERNEL__ #include <linux/stringify.h> #include <linux/random.h> +#include <linux/wait.h> +#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ + +struct poll_table_struct; +struct pipe_inode_info; +struct inode; +struct net; #define SOCK_ASYNC_NOSPACE 0 #define SOCK_ASYNC_WAITDATA 1 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b4d056ceab9..d3ea3de70a8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -42,6 +42,7 @@ #include <linux/workqueue.h> #include <net/net_namespace.h> +#include <net/dsa.h> struct vlan_group; struct ethtool_ops; @@ -61,9 +62,7 @@ struct wireless_dev; #define NET_XMIT_DROP 1 /* skb dropped */ #define NET_XMIT_CN 2 /* congestion notification */ #define NET_XMIT_POLICED 3 /* skb is shot by police */ -#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; - (TC use only - dev_queue_xmit - returns this as NET_XMIT_SUCCESS) */ +#define 
NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */ /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ @@ -440,6 +439,7 @@ static inline void napi_synchronize(const struct napi_struct *n) enum netdev_queue_state_t { __QUEUE_STATE_XOFF, + __QUEUE_STATE_FROZEN, }; struct netdev_queue { @@ -472,6 +472,8 @@ struct net_device char name[IFNAMSIZ]; /* device name hash chain */ struct hlist_node name_hlist; + /* snmp alias */ + char *ifalias; /* * I/O specific fields @@ -606,6 +608,9 @@ struct net_device /* Protocol specific pointers */ +#ifdef CONFIG_NET_DSA + void *dsa_ptr; /* dsa specific data */ +#endif void *atalk_ptr; /* AppleTalk link */ void *ip_ptr; /* IPv4 specific data */ void *dn_ptr; /* DECnet specific data */ @@ -636,7 +641,7 @@ struct net_device unsigned int real_num_tx_queues; unsigned long tx_queue_len; /* Max frames per queue allowed */ - + spinlock_t tx_global_lock; /* * One part is mostly used on xmit path (device) */ @@ -797,6 +802,26 @@ void dev_net_set(struct net_device *dev, struct net *net) #endif } +static inline bool netdev_uses_dsa_tags(struct net_device *dev) +{ +#ifdef CONFIG_NET_DSA_TAG_DSA + if (dev->dsa_ptr != NULL) + return dsa_uses_dsa_tags(dev->dsa_ptr); +#endif + + return 0; +} + +static inline bool netdev_uses_trailer_tags(struct net_device *dev) +{ +#ifdef CONFIG_NET_DSA_TAG_TRAILER + if (dev->dsa_ptr != NULL) + return dsa_uses_trailer_tags(dev->dsa_ptr); +#endif + + return 0; +} + /** * netdev_priv - access network device private data * @dev: network device @@ -1099,6 +1124,11 @@ static inline int netif_queue_stopped(const struct net_device *dev) return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } +static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) +{ + return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); +} + /** * netif_running - test if up * @dev: network device @@ -1219,7 +1249,8 @@ extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); extern int dev_ethtool(struct net *net, struct ifreq *); extern unsigned dev_get_flags(const struct net_device *); extern int dev_change_flags(struct net_device *, unsigned); -extern int dev_change_name(struct net_device *, char *); +extern int dev_change_name(struct net_device *, const char *); +extern int dev_set_alias(struct net_device *, const char *, size_t); extern int dev_change_net_namespace(struct net_device *, struct net *, const char *); extern int dev_set_mtu(struct net_device *, int); @@ -1475,6 +1506,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) txq->xmit_lock_owner = smp_processor_id(); } +static inline int __netif_tx_trylock(struct netdev_queue *txq) +{ + int ok = spin_trylock(&txq->_xmit_lock); + if (likely(ok)) + txq->xmit_lock_owner = smp_processor_id(); + return ok; +} + +static inline void __netif_tx_unlock(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock(&txq->_xmit_lock); +} + +static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock_bh(&txq->_xmit_lock); +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -1484,12 +1535,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) */ static inline void netif_tx_lock(struct net_device *dev) { - int cpu = smp_processor_id(); unsigned int i; + int cpu; + spin_lock(&dev->tx_global_lock); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 
i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. + */ __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); } } @@ -1499,40 +1561,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev) netif_tx_lock(dev); } -static inline int __netif_tx_trylock(struct netdev_queue *txq) -{ - int ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); - return ok; -} - -static inline int netif_tx_trylock(struct net_device *dev) -{ - return __netif_tx_trylock(netdev_get_tx_queue(dev, 0)); -} - -static inline void __netif_tx_unlock(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock(&txq->_xmit_lock); -} - -static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock_bh(&txq->_xmit_lock); -} - static inline void netif_tx_unlock(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - __netif_tx_unlock(txq); - } + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. + */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) + __netif_schedule(txq->qdisc); + } + spin_unlock(&dev->tx_global_lock); } static inline void netif_tx_unlock_bh(struct net_device *dev) @@ -1556,13 +1600,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) static inline void netif_tx_disable(struct net_device *dev) { unsigned int i; + int cpu; - netif_tx_lock_bh(dev); + local_bh_disable(); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + __netif_tx_lock(txq, cpu); netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); } - netif_tx_unlock_bh(dev); + local_bh_enable(); } static inline void netif_addr_lock(struct net_device *dev) @@ -1645,7 +1694,7 @@ extern void dev_seq_stop(struct seq_file *seq, void *v); extern int netdev_class_create_file(struct class_attribute *class_attr); extern void netdev_class_remove_file(struct class_attribute *class_attr); -extern char *netdev_drivername(struct net_device *dev, char *buffer, int len); +extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); extern void linkwatch_run_queue(void); diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0c5eb7ed8b3..48cfe51bfdd 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -5,13 +5,11 @@ #include <linux/init.h> #include <linux/skbuff.h> #include <linux/net.h> -#include <linux/netdevice.h> #include <linux/if.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/wait.h> #include <linux/list.h> -#include <net/net_namespace.h> #endif #include <linux/types.h> #include <linux/compiler.h> @@ -52,6 +50,16 @@ enum nf_inet_hooks { NF_INET_NUMHOOKS }; +enum { + NFPROTO_UNSPEC = 0, + NFPROTO_IPV4 = 2, + NFPROTO_ARP = 3, + NFPROTO_BRIDGE = 7, + NFPROTO_IPV6 = 10, + NFPROTO_DECNET = 12, + NFPROTO_NUMPROTO, +}; + union nf_inet_addr { __u32 all[4]; __be32 ip; @@ -92,8 +100,8 @@ struct nf_hook_ops /* User fills in from here down. 
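The netdevice.h hunks above introduce a per-queue __QUEUE_STATE_FROZEN bit: netif_tx_lock() now takes dev->tx_global_lock and freezes every TX queue instead of holding all _xmit_locks, and the per-queue __netif_tx_trylock()/__netif_tx_unlock() helpers plus netif_tx_queue_frozen() are exported for drivers. A hedged sketch of the completion-path pattern this enables (the function is invented; it only illustrates the intended use of the new helpers):

/* Illustrative TX-completion path: restart the queue only if we can
 * take the per-queue lock and no netif_tx_lock() freeze is in flight. */
static void example_tx_complete(struct net_device *dev)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        if (!__netif_tx_trylock(txq))
                return;                 /* xmit path owns the lock */

        if (netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
                netif_tx_wake_queue(txq);

        __netif_tx_unlock(txq);
}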
*/ nf_hookfn *hook; struct module *owner; - int pf; - int hooknum; + u_int8_t pf; + unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; }; @@ -102,7 +110,7 @@ struct nf_sockopt_ops { struct list_head list; - int pf; + u_int8_t pf; /* Non-inclusive ranges: use 0/0/NULL to never get called. */ int set_optmin; @@ -138,9 +146,9 @@ extern struct ctl_path nf_net_netfilter_sysctl_path[]; extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[]; #endif /* CONFIG_SYSCTL */ -extern struct list_head nf_hooks[NPROTO][NF_MAX_HOOKS]; +extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; -int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb, +int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *), int thresh); @@ -151,7 +159,7 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff *skb, * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. */ -static inline int nf_hook_thresh(int pf, unsigned int hook, +static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, @@ -167,7 +175,7 @@ static inline int nf_hook_thresh(int pf, unsigned int hook, return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); } -static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb, +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *)) { @@ -212,14 +220,14 @@ __ret;}) NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, INT_MIN) /* Call setsockopt() */ -int nf_setsockopt(struct sock *sk, int pf, int optval, char __user *opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int len); -int nf_getsockopt(struct sock *sk, int pf, int optval, char __user *opt, +int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); -int compat_nf_setsockopt(struct sock *sk, int pf, int optval, +int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int len); -int compat_nf_getsockopt(struct sock *sk, int pf, int optval, +int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); /* Call this before modifying an existing packet: ensures it is @@ -247,7 +255,7 @@ struct nf_afinfo { int route_key_size; }; -extern const struct nf_afinfo *nf_afinfo[NPROTO]; +extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO]; static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) { return rcu_dereference(nf_afinfo[family]); @@ -292,7 +300,7 @@ extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo); extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *); static inline void -nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { #ifdef CONFIG_NF_NAT_NEEDED void (*decodefn)(struct sk_buff *, struct flowi *); @@ -315,7 +323,7 @@ extern struct proc_dir_entry *proc_net_netfilter; #else /* !CONFIG_NETFILTER */ #define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) #define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) -static inline int nf_hook_thresh(int pf, unsigned int hook, +static inline int nf_hook_thresh(u_int8_t pf, unsigned 
int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, @@ -324,7 +332,7 @@ static inline int nf_hook_thresh(int pf, unsigned int hook, { return okfn(skb); } -static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb, +static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct sk_buff *)) { @@ -332,7 +340,9 @@ static inline int nf_hook(int pf, unsigned int hook, struct sk_buff *skb, } struct flowi; static inline void -nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) {} +nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) +{ +} #endif /*CONFIG_NETFILTER*/ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -343,56 +353,5 @@ extern void (*nf_ct_destroy)(struct nf_conntrack *); static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} #endif -static inline struct net *nf_pre_routing_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return in->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_local_in_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return in->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_forward_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - BUG_ON(in->nd_net != out->nd_net); - return in->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_local_out_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return out->nd_net; -#else - return &init_net; -#endif -} - -static inline struct net *nf_post_routing_net(const struct net_device *in, - const struct net_device *out) -{ -#ifdef CONFIG_NET_NS - return out->nd_net; -#else - return &init_net; -#endif -} - #endif /*__KERNEL__*/ #endif /*__LINUX_NETFILTER_H*/ diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index 3aff513d12c..5a8af875bce 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -32,6 +32,7 @@ header-y += xt_owner.h header-y += xt_pkttype.h header-y += xt_rateest.h header-y += xt_realm.h +header-y += xt_recent.h header-y += xt_sctp.h header-y += xt_state.h header-y += xt_statistic.h diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 535e4219d2b..2a10efda17f 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -87,7 +87,7 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); -extern void nf_ct_gre_keymap_flush(void); +extern void nf_ct_gre_keymap_flush(struct net *net); extern void nf_nat_need_gre(void); #endif /* __KERNEL__ */ diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index 22ce29995f1..a049df4f223 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -30,6 +30,9 @@ enum tcp_conntrack { /* Be liberal in window checking */ #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 +/* Has unacknowledged data */ +#define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 + struct nf_ct_tcp_flags { u_int8_t flags; u_int8_t mask; diff --git a/include/linux/netfilter/x_tables.h 
b/include/linux/netfilter/x_tables.h index 2326296b6f2..be41b609c88 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -173,6 +173,98 @@ struct xt_counters_info #include <linux/netdevice.h> +/** + * struct xt_match_param - parameters for match extensions' match functions + * + * @in: input netdevice + * @out: output netdevice + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @fragoff: packet is a fragment, this is the data offset + * @thoff: position of transport header relative to skb->data + * @hotdrop: drop packet if we had inspection problems + * @family: Actual NFPROTO_* through which the function is invoked + * (helpful when match->family == NFPROTO_UNSPEC) + */ +struct xt_match_param { + const struct net_device *in, *out; + const struct xt_match *match; + const void *matchinfo; + int fragoff; + unsigned int thoff; + bool *hotdrop; + u_int8_t family; +}; + +/** + * struct xt_mtchk_param - parameters for match extensions' + * checkentry functions + * + * @table: table the rule is tried to be inserted into + * @entryinfo: the family-specific rule data + * (struct ipt_ip, ip6t_ip, ebt_entry) + * @match: struct xt_match through which this function was invoked + * @matchinfo: per-match data + * @hook_mask: via which hooks the new rule is reachable + */ +struct xt_mtchk_param { + const char *table; + const void *entryinfo; + const struct xt_match *match; + void *matchinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/* Match destructor parameters */ +struct xt_mtdtor_param { + const struct xt_match *match; + void *matchinfo; + u_int8_t family; +}; + +/** + * struct xt_target_param - parameters for target extensions' target functions + * + * @hooknum: hook through which this target was invoked + * @target: struct xt_target through which this function was invoked + * @targinfo: per-target data + * + * Other fields see above. + */ +struct xt_target_param { + const struct net_device *in, *out; + unsigned int hooknum; + const struct xt_target *target; + const void *targinfo; + u_int8_t family; +}; + +/** + * struct xt_tgchk_param - parameters for target extensions' + * checkentry functions + * + * @entryinfo: the family-specific rule data + * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry) + * + * Other fields see above. + */ +struct xt_tgchk_param { + const char *table; + void *entryinfo; + const struct xt_target *target; + void *targinfo; + unsigned int hook_mask; + u_int8_t family; +}; + +/* Target destructor parameters */ +struct xt_tgdtor_param { + const struct xt_target *target; + void *targinfo; + u_int8_t family; +}; + struct xt_match { struct list_head list; @@ -185,24 +277,13 @@ struct xt_match non-linear skb, using skb_header_pointer and skb_ip_make_writable. */ bool (*match)(const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const struct xt_match *match, - const void *matchinfo, - int offset, - unsigned int protoff, - bool *hotdrop); + const struct xt_match_param *); /* Called when user tries to insert an entry of this type. */ - /* Should return true or false. */ - bool (*checkentry)(const char *tablename, - const void *ip, - const struct xt_match *match, - void *matchinfo, - unsigned int hook_mask); + bool (*checkentry)(const struct xt_mtchk_param *); /* Called when entry of this type deleted. 
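The x_tables.h rework above bundles the old long argument lists into parameter structures. A hedged sketch of what a match extension looks like against the new xt_match_param/xt_mtchk_param interface; the extension and its private struct are invented:

/* Invented "example" match, shown only to illustrate the new calling
 * convention; real extensions live under net/netfilter/. */
struct xt_example_mtinfo {
        __u8 invert;
};

static bool
example_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
        const struct xt_example_mtinfo *info = par->matchinfo;

        if (par->fragoff != 0)          /* the old "offset" argument */
                return false;

        /* par->family reports the real NFPROTO_* even when the match
         * was registered with NFPROTO_UNSPEC. */
        return !info->invert;
}

static bool example_mt_check(const struct xt_mtchk_param *par)
{
        /* par->table, par->entryinfo and par->hook_mask replace the
         * old positional checkentry() arguments. */
        return true;
}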
*/ - void (*destroy)(const struct xt_match *match, void *matchinfo); + void (*destroy)(const struct xt_mtdtor_param *); /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, void *src); @@ -235,24 +316,16 @@ struct xt_target must now handle non-linear skbs, using skb_copy_bits and skb_ip_make_writable. */ unsigned int (*target)(struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - unsigned int hooknum, - const struct xt_target *target, - const void *targinfo); + const struct xt_target_param *); /* Called when user tries to insert an entry of this type: hook_mask is a bitmask of hooks from which it can be called. */ /* Should return true or false. */ - bool (*checkentry)(const char *tablename, - const void *entry, - const struct xt_target *target, - void *targinfo, - unsigned int hook_mask); + bool (*checkentry)(const struct xt_tgchk_param *); /* Called when entry of this type deleted. */ - void (*destroy)(const struct xt_target *target, void *targinfo); + void (*destroy)(const struct xt_tgdtor_param *); /* Called when userspace align differs from kernel space one */ void (*compat_from_user)(void *dst, void *src); @@ -292,7 +365,7 @@ struct xt_table /* Set this to THIS_MODULE if you are a module, otherwise NULL */ struct module *me; - int af; /* address/protocol family */ + u_int8_t af; /* address/protocol family */ }; #include <linux/netfilter_ipv4.h> @@ -328,12 +401,10 @@ extern void xt_unregister_match(struct xt_match *target); extern int xt_register_matches(struct xt_match *match, unsigned int n); extern void xt_unregister_matches(struct xt_match *match, unsigned int n); -extern int xt_check_match(const struct xt_match *match, unsigned short family, - unsigned int size, const char *table, unsigned int hook, - unsigned short proto, int inv_proto); -extern int xt_check_target(const struct xt_target *target, unsigned short family, - unsigned int size, const char *table, unsigned int hook, - unsigned short proto, int inv_proto); +extern int xt_check_match(struct xt_mtchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); +extern int xt_check_target(struct xt_tgchk_param *, + unsigned int size, u_int8_t proto, bool inv_proto); extern struct xt_table *xt_register_table(struct net *net, struct xt_table *table, @@ -346,19 +417,19 @@ extern struct xt_table_info *xt_replace_table(struct xt_table *table, struct xt_table_info *newinfo, int *error); -extern struct xt_match *xt_find_match(int af, const char *name, u8 revision); -extern struct xt_target *xt_find_target(int af, const char *name, u8 revision); -extern struct xt_target *xt_request_find_target(int af, const char *name, +extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision); +extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision); +extern struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision); -extern int xt_find_revision(int af, const char *name, u8 revision, int target, - int *err); +extern int xt_find_revision(u8 af, const char *name, u8 revision, + int target, int *err); -extern struct xt_table *xt_find_table_lock(struct net *net, int af, +extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name); extern void xt_table_unlock(struct xt_table *t); -extern int xt_proto_init(struct net *net, int af); -extern void xt_proto_fini(struct net *net, int af); +extern int xt_proto_init(struct net *net, u_int8_t af); +extern void xt_proto_fini(struct net *net, 
u_int8_t af); extern struct xt_table_info *xt_alloc_table_info(unsigned int size); extern void xt_free_table_info(struct xt_table_info *info); @@ -423,12 +494,12 @@ struct compat_xt_counters_info #define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \ & ~(__alignof__(struct compat_xt_counters)-1)) -extern void xt_compat_lock(int af); -extern void xt_compat_unlock(int af); +extern void xt_compat_lock(u_int8_t af); +extern void xt_compat_unlock(u_int8_t af); -extern int xt_compat_add_offset(int af, unsigned int offset, short delta); -extern void xt_compat_flush_offsets(int af); -extern short xt_compat_calc_jump(int af, unsigned int offset); +extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta); +extern void xt_compat_flush_offsets(u_int8_t af); +extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset); extern int xt_compat_match_offset(const struct xt_match *match); extern int xt_compat_match_from_user(struct xt_entry_match *m, diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h new file mode 100644 index 00000000000..152e8f97132 --- /dev/null +++ b/include/linux/netfilter/xt_TPROXY.h @@ -0,0 +1,14 @@ +#ifndef _XT_TPROXY_H_target +#define _XT_TPROXY_H_target + +/* TPROXY target is capable of marking the packet to perform + * redirection. We can get rid of that whenever we get support for + * mutliple targets in the same rule. */ +struct xt_tproxy_target_info { + u_int32_t mark_mask; + u_int32_t mark_value; + __be32 laddr; + __be16 lport; +}; + +#endif /* _XT_TPROXY_H_target */ diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h new file mode 100644 index 00000000000..5cfeb81c679 --- /dev/null +++ b/include/linux/netfilter/xt_recent.h @@ -0,0 +1,26 @@ +#ifndef _LINUX_NETFILTER_XT_RECENT_H +#define _LINUX_NETFILTER_XT_RECENT_H 1 + +enum { + XT_RECENT_CHECK = 1 << 0, + XT_RECENT_SET = 1 << 1, + XT_RECENT_UPDATE = 1 << 2, + XT_RECENT_REMOVE = 1 << 3, + XT_RECENT_TTL = 1 << 4, + + XT_RECENT_SOURCE = 0, + XT_RECENT_DEST = 1, + + XT_RECENT_NAME_LEN = 200, +}; + +struct xt_recent_mtinfo { + u_int32_t seconds; + u_int32_t hit_count; + u_int8_t check_set; + u_int8_t invert; + char name[XT_RECENT_NAME_LEN]; + u_int8_t side; +}; + +#endif /* _LINUX_NETFILTER_XT_RECENT_H */ diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 892f5b7771c..d45e29cd1cf 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -31,6 +31,9 @@ * The 4 lsb are more than enough to store the verdict. 
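Targets get the same treatment as the match sketch above: the target and checkentry hooks take xt_target_param and xt_tgchk_param, and registration still goes through xt_register_target(). A hedged sketch with an invented target:

/* Invented "EXAMPLE" target illustrating the new prototypes only. */
static unsigned int
example_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
        /* par->hooknum, par->in, par->out and par->targinfo replace
         * the old argument list. */
        return XT_CONTINUE;
}

static bool example_tg_check(const struct xt_tgchk_param *par)
{
        return true;
}

static struct xt_target example_tg_reg __read_mostly = {
        .name           = "EXAMPLE",
        .family         = NFPROTO_UNSPEC,
        .target         = example_tg,
        .checkentry     = example_tg_check,
        .me             = THIS_MODULE,
};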
*/ #define EBT_VERDICT_BITS 0x0000000F +struct xt_match; +struct xt_target; + struct ebt_counter { uint64_t pcnt; @@ -121,7 +124,7 @@ struct ebt_entry_match { union { char name[EBT_FUNCTION_MAXNAMELEN]; - struct ebt_match *match; + struct xt_match *match; } u; /* size of data */ unsigned int match_size; @@ -132,7 +135,7 @@ struct ebt_entry_watcher { union { char name[EBT_FUNCTION_MAXNAMELEN]; - struct ebt_watcher *watcher; + struct xt_target *watcher; } u; /* size of data */ unsigned int watcher_size; @@ -143,7 +146,7 @@ struct ebt_entry_target { union { char name[EBT_FUNCTION_MAXNAMELEN]; - struct ebt_target *target; + struct xt_target *target; } u; /* size of data */ unsigned int target_size; @@ -207,14 +210,17 @@ struct ebt_match { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; - /* 0 == it matches */ - int (*match)(const struct sk_buff *skb, const struct net_device *in, - const struct net_device *out, const void *matchdata, - unsigned int datalen); - /* 0 == let it in */ - int (*check)(const char *tablename, unsigned int hookmask, - const struct ebt_entry *e, void *matchdata, unsigned int datalen); - void (*destroy)(void *matchdata, unsigned int datalen); + bool (*match)(const struct sk_buff *skb, const struct net_device *in, + const struct net_device *out, const struct xt_match *match, + const void *matchinfo, int offset, unsigned int protoff, + bool *hotdrop); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_match *match, void *matchinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_match *match, void *matchinfo); + unsigned int matchsize; + u_int8_t revision; + u_int8_t family; struct module *me; }; @@ -222,13 +228,17 @@ struct ebt_watcher { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; - void (*watcher)(const struct sk_buff *skb, unsigned int hooknr, - const struct net_device *in, const struct net_device *out, - const void *watcherdata, unsigned int datalen); - /* 0 == let it in */ - int (*check)(const char *tablename, unsigned int hookmask, - const struct ebt_entry *e, void *watcherdata, unsigned int datalen); - void (*destroy)(void *watcherdata, unsigned int datalen); + unsigned int (*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void *targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; struct module *me; }; @@ -236,14 +246,18 @@ struct ebt_target { struct list_head list; const char name[EBT_FUNCTION_MAXNAMELEN]; - /* returns one of the standard verdicts */ - int (*target)(struct sk_buff *skb, unsigned int hooknr, - const struct net_device *in, const struct net_device *out, - const void *targetdata, unsigned int datalen); - /* 0 == let it in */ - int (*check)(const char *tablename, unsigned int hookmask, - const struct ebt_entry *e, void *targetdata, unsigned int datalen); - void (*destroy)(void *targetdata, unsigned int datalen); + /* returns one of the standard EBT_* verdicts */ + unsigned int (*target)(struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int hook_num, const struct xt_target *target, + const void *targinfo); + bool (*checkentry)(const char *table, const void *entry, + const struct xt_target *target, void 
*targinfo, + unsigned int hook_mask); + void (*destroy)(const struct xt_target *target, void *targinfo); + unsigned int targetsize; + u_int8_t revision; + u_int8_t family; struct module *me; }; @@ -288,12 +302,6 @@ struct ebt_table ~(__alignof__(struct ebt_replace)-1)) extern int ebt_register_table(struct ebt_table *table); extern void ebt_unregister_table(struct ebt_table *table); -extern int ebt_register_match(struct ebt_match *match); -extern void ebt_unregister_match(struct ebt_match *match); -extern int ebt_register_watcher(struct ebt_watcher *watcher); -extern void ebt_unregister_watcher(struct ebt_watcher *watcher); -extern int ebt_register_target(struct ebt_target *target); -extern void ebt_unregister_target(struct ebt_target *target); extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, struct ebt_table *table); @@ -302,9 +310,9 @@ extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, #define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg)) /* True if the hook mask denotes that the rule is in a base chain, * used in the check() functions */ -#define BASE_CHAIN (hookmask & (1 << NF_BR_NUMHOOKS)) +#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS)) /* Clear the bit in the hook mask that tells if the rule is on a base chain */ -#define CLEAR_BASE_CHAIN_BIT (hookmask &= ~(1 << NF_BR_NUMHOOKS)) +#define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS)) /* True if the target is not a standard target */ #define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0) diff --git a/include/linux/netfilter_ipv4/ipt_recent.h b/include/linux/netfilter_ipv4/ipt_recent.h index 6508a459265..d636cca133c 100644 --- a/include/linux/netfilter_ipv4/ipt_recent.h +++ b/include/linux/netfilter_ipv4/ipt_recent.h @@ -1,27 +1,21 @@ #ifndef _IPT_RECENT_H #define _IPT_RECENT_H -#define RECENT_NAME "ipt_recent" -#define RECENT_VER "v0.3.1" +#include <linux/netfilter/xt_recent.h> -#define IPT_RECENT_CHECK 1 -#define IPT_RECENT_SET 2 -#define IPT_RECENT_UPDATE 4 -#define IPT_RECENT_REMOVE 8 -#define IPT_RECENT_TTL 16 +#define ipt_recent_info xt_recent_mtinfo -#define IPT_RECENT_SOURCE 0 -#define IPT_RECENT_DEST 1 +enum { + IPT_RECENT_CHECK = XT_RECENT_CHECK, + IPT_RECENT_SET = XT_RECENT_SET, + IPT_RECENT_UPDATE = XT_RECENT_UPDATE, + IPT_RECENT_REMOVE = XT_RECENT_REMOVE, + IPT_RECENT_TTL = XT_RECENT_TTL, -#define IPT_RECENT_NAME_LEN 200 + IPT_RECENT_SOURCE = XT_RECENT_SOURCE, + IPT_RECENT_DEST = XT_RECENT_DEST, -struct ipt_recent_info { - u_int32_t seconds; - u_int32_t hit_count; - u_int8_t check_set; - u_int8_t invert; - char name[IPT_RECENT_NAME_LEN]; - u_int8_t side; + IPT_RECENT_NAME_LEN = XT_RECENT_NAME_LEN, }; #endif /*_IPT_RECENT_H*/ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 29d26191873..78a5922a2f1 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -42,7 +42,6 @@ #include <linux/in.h> #include <linux/kref.h> #include <linux/mm.h> -#include <linux/namei.h> #include <linux/pagemap.h> #include <linux/rbtree.h> #include <linux/rwsem.h> @@ -332,7 +331,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); -extern int nfs_permission(struct inode *, int, struct 
nameidata *); +extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_release(struct inode *, struct file *); extern int nfs_attribute_timeout(struct inode *inode); diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 2be7c63bc0f..9bad65400fb 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -89,6 +89,22 @@ * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC * or, if no MAC address given, all mesh paths, on the interface identified * by %NL80211_ATTR_IFINDEX. + * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by + * %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command + * after being queried by the kernel. CRDA replies by sending a regulatory + * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our + * current alpha2 if it found a match. It also provides + * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each + * regulatory rule is a nested set of attributes given by + * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and + * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by + * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and + * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP. + * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain + * to the the specified ISO/IEC 3166-1 alpha2 country code. The core will + * store this as a valid request and then query userspace for it. * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use @@ -127,13 +143,23 @@ enum nl80211_commands { NL80211_CMD_NEW_MPATH, NL80211_CMD_DEL_MPATH, - /* add commands here */ + NL80211_CMD_SET_BSS, + + NL80211_CMD_SET_REG, + NL80211_CMD_REQ_SET_REG, + + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ __NL80211_CMD_AFTER_LAST, NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 }; +/* + * Allow user space programs to use #ifdef on new commands by defining them + * here + */ +#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS /** * enum nl80211_attrs - nl80211 netlink attributes @@ -188,10 +214,34 @@ enum nl80211_commands { * info given for %NL80211_CMD_GET_MPATH, nested attribute described at * &enum nl80211_mpath_info. * - * * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of * &enum nl80211_mntr_flags. * + * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the + * current regulatory domain should be set to or is already set to. + * For example, 'CR', for Costa Rica. This attribute is used by the kernel + * to query the CRDA to retrieve one regulatory domain. This attribute can + * also be used by userspace to query the kernel for the currently set + * regulatory domain. We chose an alpha2 as that is also used by the + * IEEE-802.11d country information element to identify a country. + * Users can also simply ask the wireless core to set regulatory domain + * to a specific alpha2. + * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory + * rules. 
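The nfs_fs.h hunk above drops the struct nameidata * argument from nfs_permission(), tracking the VFS change that ->permission() is now called with just the inode and mask. A minimal sketch of the updated hook shape, assuming a hypothetical filesystem:

/* Illustrative only: the two-argument ->permission() shape. */
static int examplefs_permission(struct inode *inode, int mask)
{
        /* Intent data that used to be fished out of the nameidata is
         * no longer available here. */
        return generic_permission(inode, mask, NULL);
}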
+ * + * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled + * (u8, 0 or 1) + * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled + * (u8, 0 or 1) + * + * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION) + * + * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all + * supported interface types, each a flag attribute with the number + * of the interface mode. + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -235,16 +285,35 @@ enum nl80211_attrs { NL80211_ATTR_MPATH_NEXT_HOP, NL80211_ATTR_MPATH_INFO, + NL80211_ATTR_BSS_CTS_PROT, + NL80211_ATTR_BSS_SHORT_PREAMBLE, + NL80211_ATTR_BSS_SHORT_SLOT_TIME, + + NL80211_ATTR_HT_CAPABILITY, + + NL80211_ATTR_SUPPORTED_IFTYPES, + + NL80211_ATTR_REG_ALPHA2, + NL80211_ATTR_REG_RULES, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; +/* + * Allow user space programs to use #ifdef on new attributes by defining them + * here + */ +#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY + #define NL80211_MAX_SUPP_RATES 32 +#define NL80211_MAX_SUPP_REG_RULES 32 #define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 #define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 +#define NL80211_HT_CAPABILITY_LEN 26 /** * enum nl80211_iftype - (virtual) interface types @@ -436,6 +505,66 @@ enum nl80211_bitrate_attr { }; /** + * enum nl80211_reg_rule_attr - regulatory rule attributes + * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional + * considerations for a given frequency range. These are the + * &enum nl80211_reg_rule_flags. + * @NL80211_ATTR_FREQ_RANGE_START: starting frequencry for the regulatory + * rule in KHz. This is not a center of frequency but an actual regulatory + * band edge. + * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule + * in KHz. This is not a center a frequency but an actual regulatory + * band edge. + * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this + * frequency range, in KHz. + * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain + * for a given frequency range. The value is in mBi (100 * dBi). + * If you don't have one then don't send this. + * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for + * a given frequency range. The value is in mBm (100 * dBm). 
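NL80211_ATTR_REG_RULES carries a nested array whose members use the nl80211_reg_rule_attr attributes defined just below. A hedged sketch of how the kernel side might walk such a nested set with the standard netlink helpers; policy checking and error handling are omitted and the function is invented:

/* Sketch only: iterate the nested NL80211_ATTR_REG_RULES array. */
static void example_dump_reg_rules(struct nlattr *reg_rules)
{
        struct nlattr *rule;
        int rem;

        nla_for_each_nested(rule, reg_rules, rem) {
                struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];

                if (nla_parse_nested(tb, NL80211_REG_RULE_ATTR_MAX,
                                     rule, NULL))
                        continue;
                if (tb[NL80211_ATTR_FREQ_RANGE_START])
                        pr_debug("rule starts at %u kHz\n",
                                 nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]));
        }
}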
+ */ +enum nl80211_reg_rule_attr { + __NL80211_REG_RULE_ATTR_INVALID, + NL80211_ATTR_REG_RULE_FLAGS, + + NL80211_ATTR_FREQ_RANGE_START, + NL80211_ATTR_FREQ_RANGE_END, + NL80211_ATTR_FREQ_RANGE_MAX_BW, + + NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, + NL80211_ATTR_POWER_RULE_MAX_EIRP, + + /* keep last */ + __NL80211_REG_RULE_ATTR_AFTER_LAST, + NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1 +}; + +/** + * enum nl80211_reg_rule_flags - regulatory rule flags + * + * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed + * @NL80211_RRF_NO_CCK: CCK modulation not allowed + * @NL80211_RRF_NO_INDOOR: indoor operation not allowed + * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed + * @NL80211_RRF_DFS: DFS support is required to be used + * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links + * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links + * @NL80211_RRF_PASSIVE_SCAN: passive scan is required + * @NL80211_RRF_NO_IBSS: no IBSS is allowed + */ +enum nl80211_reg_rule_flags { + NL80211_RRF_NO_OFDM = 1<<0, + NL80211_RRF_NO_CCK = 1<<1, + NL80211_RRF_NO_INDOOR = 1<<2, + NL80211_RRF_NO_OUTDOOR = 1<<3, + NL80211_RRF_DFS = 1<<4, + NL80211_RRF_PTP_ONLY = 1<<5, + NL80211_RRF_PTMP_ONLY = 1<<6, + NL80211_RRF_PASSIVE_SCAN = 1<<7, + NL80211_RRF_NO_IBSS = 1<<8, +}; + +/** * enum nl80211_mntr_flags - monitor configuration flags * * Monitor configuration flags. diff --git a/include/linux/notifier.h b/include/linux/notifier.h index da2698b0fdd..b86fa2ffca0 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret) #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, - * not handling interrupts, soon dead */ + * not handling interrupts, soon dead. + * Called on the dying cpu, interrupts + * are already disabled. Must not + * sleep, must not fail */ #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug * lock is dropped */ +#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running. + * Called on the new cpu, just before + * enabling interrupts. Must not sleep, + * must not fail */ /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend * operation in progress @@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret) #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) +#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN) /* Hibernation and suspend events */ #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ diff --git a/include/linux/of.h b/include/linux/of.h index 59a61bdc98b..79886ade070 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -70,5 +70,6 @@ extern int of_n_addr_cells(struct device_node *np); extern int of_n_size_cells(struct device_node *np); extern const struct of_device_id *of_match_node( const struct of_device_id *matches, const struct device_node *node); +extern int of_modalias_node(struct device_node *node, char *modalias, int len); #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h new file mode 100644 index 00000000000..5f71ee8c086 --- /dev/null +++ b/include/linux/of_spi.h @@ -0,0 +1,18 @@ +/* + * OpenFirmware SPI support routines + * Copyright (C) 2008 Secret Lab Technologies Ltd. 
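CPU_STARTING (added to notifier.h above) is delivered on the incoming CPU itself, just before interrupts are enabled, so the handler must not sleep and must not fail; CPU_ONLINE still arrives later in ordinary process context. A hedged sketch of a notifier distinguishing the two (all names are invented):

/* Illustrative hotplug callback: per-cpu hardware setup goes in the
 * CPU_STARTING leg, anything that may sleep waits for CPU_ONLINE. */
static int __cpuinit example_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                /* runs on the new CPU with interrupts off: no sleeping */
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                /* normal context, sleeping is fine */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb __cpuinitdata = {
        .notifier_call = example_cpu_notify,
};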
+ * + * Support routines for deriving SPI device attachments from the device + * tree. + */ + +#ifndef __LINUX_OF_SPI_H +#define __LINUX_OF_SPI_H + +#include <linux/of.h> +#include <linux/spi/spi.h> + +extern void of_register_spi_devices(struct spi_master *master, + struct device_node *np); + +#endif /* __LINUX_OF_SPI */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 54590a9a103..c74d3e87531 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \ struct page; /* forward declaration */ -PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked) +TESTPAGEFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) @@ -239,9 +239,6 @@ static inline void __SetPageUptodate(struct page *page) { smp_wmb(); __set_bit(PG_uptodate, &(page)->flags); -#ifdef CONFIG_S390 - page_clear_dirty(page); -#endif } static inline void SetPageUptodate(struct page *page) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ee1ec2c7723..5da31c12101 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -12,6 +12,7 @@ #include <asm/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> +#include <linux/hardirq.h> /* for in_interrupt() */ /* * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page @@ -19,6 +20,7 @@ */ #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ +#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ static inline void mapping_set_error(struct address_space *mapping, int error) { @@ -62,6 +64,121 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); +/* + * speculatively take a reference to a page. + * If the page is free (_count == 0), then _count is untouched, and 0 + * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * + * This function must be called inside the same rcu_read_lock() section as has + * been used to lookup the page in the pagecache radix-tree (or page table): + * this allows allocators to use a synchronize_rcu() to stabilize _count. + * + * Unless an RCU grace period has passed, the count of all pages coming out + * of the allocator must be considered unstable. page_count may return higher + * than expected, and put_page must be able to do the right thing when the + * page has been finished with, no matter what it is subsequently allocated + * for (because put_page is what is used here to drop an invalid speculative + * reference). + * + * This is the interesting part of the lockless pagecache (and lockless + * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) + * has the following pattern: + * 1. find page in radix tree + * 2. conditionally increment refcount + * 3. check the page is still in pagecache (if no, goto 1) + * + * Remove-side that cares about stability of _count (eg. reclaim) has the + * following (with tree_lock held for write): + * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) + * B. remove page from pagecache + * C. 
free the page + * + * There are 2 critical interleavings that matter: + * - 2 runs before A: in this case, A sees elevated refcount and bails out + * - A runs before 2: in this case, 2 sees zero refcount and retries; + * subsequently, B will complete and 1 will find no page, causing the + * lookup to return NULL. + * + * It is possible that between 1 and 2, the page is removed then the exact same + * page is inserted into the same position in pagecache. That's OK: the + * old find_get_page using tree_lock could equally have run before or after + * such a re-insertion, depending on order that locks are granted. + * + * Lookups racing against pagecache insertion isn't a big problem: either 1 + * will find the page or it will not. Likewise, the old find_get_page could run + * either before the insertion or afterwards, depending on timing. + */ +static inline int page_cache_get_speculative(struct page *page) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing + * this for us. + * + * Pagecache won't be truncated from interrupt context, so if we have + * found a page in the radix tree here, we have pinned its refcount by + * disabling preempt, and hence no need for the "speculative get" that + * SMP requires. + */ + VM_BUG_ON(page_count(page) == 0); + atomic_inc(&page->_count); + +#else + if (unlikely(!get_page_unless_zero(page))) { + /* + * Either the page has been freed, or will be freed. + * In either case, retry here and the caller should + * do the right thing (see comments above). + */ + return 0; + } +#endif + VM_BUG_ON(PageTail(page)); + + return 1; +} + +/* + * Same as above, but add instead of inc (could just be merged) + */ +static inline int page_cache_add_speculative(struct page *page, int count) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + VM_BUG_ON(page_count(page) == 0); + atomic_add(count, &page->_count); + +#else + if (unlikely(!atomic_add_unless(&page->_count, count, 0))) + return 0; +#endif + VM_BUG_ON(PageCompound(page) && page != compound_head(page)); + + return 1; +} + +static inline int page_freeze_refs(struct page *page, int count) +{ + return likely(atomic_cmpxchg(&page->_count, count, 0) == count); +} + +static inline void page_unfreeze_refs(struct page *page, int count) +{ + VM_BUG_ON(page_count(page) != 0); + VM_BUG_ON(count == 0); + + atomic_set(&page->_count, count); +} + #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else @@ -133,13 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping, return read_cache_page(mapping, index, filler, data); } -int add_to_page_cache(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -int add_to_page_cache_lru(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -extern void remove_from_page_cache(struct page *page); -extern void __remove_from_page_cache(struct page *page); - /* * Return byte-offset into filesystem object for page. 
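To make the three-step protocol documented above concrete, here is a condensed, hedged sketch of the lookup side; the real code is find_get_page() in mm/filemap.c and re-checks the radix tree slot rather than doing a second full lookup.

/* Condensed sketch of the lockless lookup protocol described above. */
static struct page *example_lockless_lookup(struct address_space *mapping,
                                            pgoff_t offset)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = radix_tree_lookup(&mapping->page_tree, offset);  /* step 1 */
        if (page) {
                if (!page_cache_get_speculative(page))          /* step 2 */
                        goto repeat;
                /* step 3: is it still the page at that index? */
                if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
                                                       offset))) {
                        page_cache_release(page);
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}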
*/ @@ -161,13 +271,28 @@ extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); extern void unlock_page(struct page *page); +static inline void set_page_locked(struct page *page) +{ + set_bit(PG_locked, &page->flags); +} + +static inline void clear_page_locked(struct page *page) +{ + clear_bit(PG_locked, &page->flags); +} + +static inline int trylock_page(struct page *page) +{ + return !test_and_set_bit(PG_locked, &page->flags); +} + /* * lock_page may only be called if we have the page's inode pinned. */ static inline void lock_page(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page(page); } @@ -179,7 +304,7 @@ static inline void lock_page(struct page *page) static inline int lock_page_killable(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) return __lock_page_killable(page); return 0; } @@ -191,7 +316,7 @@ static inline int lock_page_killable(struct page *page) static inline void lock_page_nosync(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page_nosync(page); } @@ -276,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) return ret; } +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void remove_from_page_cache(struct page *page); +extern void __remove_from_page_cache(struct page *page); + +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run set_page_locked() against it. 
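trylock_page() replaces the old TestSetPageLocked() idiom in callers; note the inverted sense, it returns true when PG_locked was actually acquired, exactly as the updated lock_page()/lock_page_killable() above show. A small hedged caller sketch:

/* Illustrative only: bail out instead of sleeping when the page is
 * already locked (old code: if (TestSetPageLocked(page)) ...). */
static int example_try_touch(struct page *page)
{
        if (!trylock_page(page))
                return -EBUSY;

        /* ... work on the locked page ... */

        unlock_page(page);
        return 0;
}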
+ */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + set_page_locked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + clear_page_locked(page); + return error; +} + #endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/parport.h b/include/linux/parport.h index dcb9e01a69c..6a0d7cdb577 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -560,5 +560,8 @@ extern int parport_device_proc_unregister(struct pardevice *device); #endif /* !CONFIG_PARPORT_NOT_PC */ +extern unsigned long parport_default_timeslice; +extern int parport_default_spintime; + #endif /* __KERNEL__ */ #endif /* _PARPORT_H_ */ diff --git a/include/linux/parser.h b/include/linux/parser.h index cc554ca8bc7..7dcd0507575 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -14,7 +14,7 @@ struct match_token { const char *pattern; }; -typedef const struct match_token match_table_t[]; +typedef struct match_token match_table_t[]; /* Maximum number of arguments that match_token will find in a pattern */ enum {MAX_OPT_ARGS = 3}; diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 3ba25065fa9..8837928fbf3 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -57,6 +57,15 @@ static inline acpi_status pcie_osc_support_set(u32 flags) { return __pci_osc_support_set(flags, PCI_EXPRESS_ROOT_HID_STRING); } +static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) +{ + /* Find root host bridge */ + while (pdev->bus->self) + pdev = pdev->bus->self; + + return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), + pdev->bus->number); +} #else #if !defined(AE_ERROR) typedef u32 acpi_status; @@ -66,6 +75,8 @@ static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) {return AE_ERROR;} static inline acpi_status pci_osc_support_set(u32 flags) {return AE_ERROR;} static inline acpi_status pcie_osc_support_set(u32 flags) {return AE_ERROR;} +static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) +{ return NULL; } #endif #endif /* _PCI_ACPI_H_ */ diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index a1a1e618e99..91ba0b338b4 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h @@ -27,6 +27,7 @@ extern void pcie_aspm_init_link_state(struct pci_dev *pdev); extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); extern void pci_disable_link_state(struct pci_dev *pdev, int state); +extern void pcie_no_aspm(void); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { @@ -40,6 +41,10 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } + +static inline void pcie_no_aspm(void) +{ +} #endif #ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 1d296d31abe..98dc6243a70 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -124,6 +124,8 @@ enum pci_dev_flags { * generation too. 
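The parser.h hunk above drops the const from the match_table_t typedef, presumably so callers can declare or build tables as writable data; declaring and using a table is otherwise unchanged. A hedged sketch with invented option names:

/* Typical option parsing with match_table_t/match_token(). */
enum { Opt_ro, Opt_uid, Opt_err };

static match_table_t example_tokens = {
        { Opt_ro,  "ro"     },
        { Opt_uid, "uid=%u" },
        { Opt_err, NULL     }
};

static int example_parse_opt(char *p, unsigned int *uid)
{
        substring_t args[MAX_OPT_ARGS];
        int option;

        switch (match_token(p, example_tokens, args)) {
        case Opt_ro:
                return 0;
        case Opt_uid:
                if (match_int(&args[0], &option))
                        return -EINVAL;
                *uid = option;
                return 0;
        }
        return -EINVAL;
}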
*/ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, + /* Device configuration is irrevocably lost if disabled into D3 */ + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, }; typedef unsigned short __bitwise pci_bus_flags_t; @@ -532,7 +534,7 @@ extern void pci_sort_breadthfirst(void); #ifdef CONFIG_PCI_LEGACY struct pci_dev __deprecated *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from); + struct pci_dev *from); struct pci_dev __deprecated *pci_find_slot(unsigned int bus, unsigned int devfn); #endif /* CONFIG_PCI_LEGACY */ @@ -548,7 +550,7 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from); + struct pci_dev *from); struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); @@ -639,6 +641,7 @@ int pci_restore_state(struct pci_dev *dev); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); +void pci_pme_active(struct pci_dev *dev, bool enable); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); pci_power_t pci_target_state(struct pci_dev *dev); int pci_prepare_to_sleep(struct pci_dev *dev); @@ -678,10 +681,12 @@ void pci_enable_bridges(struct pci_bus *bus); /* Proper probing supporting hot-pluggable devices */ int __must_check __pci_register_driver(struct pci_driver *, struct module *, const char *mod_name); -static inline int __must_check pci_register_driver(struct pci_driver *driver) -{ - return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME); -} + +/* + * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define pci_register_driver(driver) \ + __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) void pci_unregister_driver(struct pci_driver *dev); void pci_remove_behind_bridge(struct pci_dev *dev); @@ -811,7 +816,7 @@ _PCI_NOP_ALL(write,) static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from) + struct pci_dev *from) { return NULL; } @@ -833,7 +838,7 @@ static inline struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from) + struct pci_dev *from) { return NULL; } diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 917d48e5ea0..f63b5455801 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -502,6 +502,11 @@ #define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202 #define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203 #define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204 +#define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300 +#define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301 +#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 +#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 +#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 #define PCI_DEVICE_ID_AMD_SCSI 0x2020 @@ -753,6 +758,7 @@ #define PCI_VENDOR_ID_TI 0x104c #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 0x8011 +#define PCI_DEVICE_ID_TI_TSB43AB22 0x8023 #define 
PCI_DEVICE_ID_TI_XX21_XX11 0x8031 #define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 #define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 @@ -1415,6 +1421,8 @@ #define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 #define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 +#define PCI_VENDOR_ID_CISCO 0x1137 + #define PCI_VENDOR_ID_ZIATECH 0x1138 #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 @@ -1837,7 +1845,13 @@ #define PCI_DEVICE_ID_MOXA_C320 0x3200 #define PCI_VENDOR_ID_CCD 0x1397 +#define PCI_DEVICE_ID_CCD_HFC4S 0x08B4 +#define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234 +#define PCI_DEVICE_ID_CCD_HFC8S 0x16B8 #define PCI_DEVICE_ID_CCD_2BD0 0x2bd0 +#define PCI_DEVICE_ID_CCD_HFCE1 0x30B1 +#define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136 +#define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137 #define PCI_DEVICE_ID_CCD_B000 0xb000 #define PCI_DEVICE_ID_CCD_B006 0xb006 #define PCI_DEVICE_ID_CCD_B007 0xb007 @@ -1847,8 +1861,32 @@ #define PCI_DEVICE_ID_CCD_B00B 0xb00b #define PCI_DEVICE_ID_CCD_B00C 0xb00c #define PCI_DEVICE_ID_CCD_B100 0xb100 +#define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520 +#define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522 +#define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523 +#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 +#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 +#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 +#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 +#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 +#define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564 +#define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565 +#define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566 +#define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567 +#define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568 +#define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569 +#define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A +#define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B +#define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620 +#define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622 #define PCI_DEVICE_ID_CCD_B700 0xb700 #define PCI_DEVICE_ID_CCD_B701 0xb701 +#define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523 +#define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884 +#define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888 +#define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998 #define PCI_VENDOR_ID_EXAR 0x13a8 #define PCI_DEVICE_ID_EXAR_XR17C152 0x0152 @@ -2151,8 +2189,6 @@ #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 -#define PCI_VENDOR_ID_RDC 0x17f3 - #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 @@ -2189,6 +2225,7 @@ #define PCI_VENDOR_ID_ATTANSIC 0x1969 #define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 +#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048 #define PCI_VENDOR_ID_JMICRON 0x197B #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 @@ -2220,6 +2257,16 @@ #define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 +#define PCI_VENDOR_ID_NETXEN 0x4040 +#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 +#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 +#define PCI_DEVICE_ID_NX2031_4GCU 0x0003 +#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 +#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 +#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 +#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 +#define PCI_DEVICE_ID_NX3031 0x0100 + #define PCI_VENDOR_ID_AKS 0x416c #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 @@ -2404,6 +2451,9 @@ #define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 +#define PCI_DEVICE_ID_INTEL_PCH_0 0x3b10 +#define PCI_DEVICE_ID_INTEL_PCH_1 0x3b11 +#define PCI_DEVICE_ID_INTEL_PCH_2 0x3b30 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f #define PCI_DEVICE_ID_INTEL_5100_16 
0x65f0 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 @@ -2528,6 +2578,9 @@ #define PCI_VENDOR_ID_3COM_2 0xa727 +#define PCI_VENDOR_ID_DIGIUM 0xd161 +#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 + #define PCI_SUBVENDOR_ID_EXSYS 0xd84d #define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 #define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 19958b92990..450684f7eaa 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -374,6 +374,7 @@ #define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ #define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ #define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ +#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ #define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ #define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ #define PCI_EXP_DEVCTL 8 /* Device Control */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4cdd393e71e..9f2a3751873 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -23,12 +23,19 @@ __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ ____cacheline_aligned_in_smp + +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ + __attribute__((__section__(".data.percpu.page_aligned"))) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name #else #define DEFINE_PER_CPU(type, name) \ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ DEFINE_PER_CPU(type, name) + +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ + DEFINE_PER_CPU(type, name) #endif #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) @@ -74,11 +81,6 @@ struct percpu_data { (__typeof__(ptr))__p->ptrs[(cpu)]; \ }) -extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu); -extern void percpu_depopulate(void *__pdata, int cpu); -extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, - cpumask_t *mask); -extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask); extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); extern void percpu_free(void *__pdata); @@ -86,26 +88,6 @@ extern void percpu_free(void *__pdata); #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) -static inline void percpu_depopulate(void *__pdata, int cpu) -{ -} - -static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) -{ -} - -static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, - int cpu) -{ - return percpu_ptr(__pdata, cpu); -} - -static inline int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, - cpumask_t *mask) -{ - return 0; -} - static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) { return kzalloc(size, gfp); @@ -118,10 +100,6 @@ static inline void percpu_free(void *__pdata) #endif /* CONFIG_SMP */ -#define percpu_populate_mask(__pdata, size, gfp, mask) \ - __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) -#define percpu_depopulate_mask(__pdata, mask) \ - __percpu_depopulate_mask((__pdata), &(mask)) #define percpu_alloc_mask(size, gfp, mask) \ __percpu_alloc_mask((size), (gfp), &(mask)) diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 20838883535..9007ccdfc11 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct 
percpu_counter *fbc, s64 amount); void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); -s64 __percpu_counter_sum(struct percpu_counter *fbc, int set); +s64 __percpu_counter_sum(struct percpu_counter *fbc); static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { @@ -44,19 +44,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { - s64 ret = __percpu_counter_sum(fbc, 0); + s64 ret = __percpu_counter_sum(fbc); return ret < 0 ? 0 : ret; } -static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc) -{ - return __percpu_counter_sum(fbc, 1); -} - - static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { - return __percpu_counter_sum(fbc, 0); + return __percpu_counter_sum(fbc); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h index 700725ddcaa..01b262959f2 100644 --- a/include/linux/pfkeyv2.h +++ b/include/linux/pfkeyv2.h @@ -226,6 +226,15 @@ struct sadb_x_sec_ctx { } __attribute__((packed)); /* sizeof(struct sadb_sec_ctx) = 8 */ +/* Used by MIGRATE to pass addresses IKE will use to perform + * negotiation with the peer */ +struct sadb_x_kmaddress { + uint16_t sadb_x_kmaddress_len; + uint16_t sadb_x_kmaddress_exttype; + uint32_t sadb_x_kmaddress_reserved; +} __attribute__((packed)); +/* sizeof(struct sadb_x_kmaddress) == 8 */ + /* Message types */ #define SADB_RESERVED 0 #define SADB_GETSPI 1 @@ -346,7 +355,9 @@ struct sadb_x_sec_ctx { #define SADB_X_EXT_NAT_T_DPORT 22 #define SADB_X_EXT_NAT_T_OA 23 #define SADB_X_EXT_SEC_CTX 24 -#define SADB_EXT_MAX 24 +/* Used with MIGRATE to pass @ to IKE for negotiation */ +#define SADB_X_EXT_KMADDRESS 25 +#define SADB_EXT_MAX 25 /* Identity Extension values */ #define SADB_IDENTTYPE_RESERVED 0 diff --git a/include/linux/phonet.h b/include/linux/phonet.h new file mode 100644 index 00000000000..c9609f9aeda --- /dev/null +++ b/include/linux/phonet.h @@ -0,0 +1,170 @@ +/** + * file phonet.h + * + * Phonet sockets kernel interface + * + * Copyright (C) 2008 Nokia Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef LINUX_PHONET_H +#define LINUX_PHONET_H + +/* Automatic protocol selection */ +#define PN_PROTO_TRANSPORT 0 +/* Phonet datagram socket */ +#define PN_PROTO_PHONET 1 +/* Phonet pipe */ +#define PN_PROTO_PIPE 2 +#define PHONET_NPROTO 3 + +/* Socket options for SOL_PNPIPE level */ +#define PNPIPE_ENCAP 1 +#define PNPIPE_IFINDEX 2 + +#define PNADDR_ANY 0 +#define PNPORT_RESOURCE_ROUTING 0 + +/* Values for PNPIPE_ENCAP option */ +#define PNPIPE_ENCAP_NONE 0 +#define PNPIPE_ENCAP_IP 1 + +/* ioctls */ +#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) + +/* Phonet protocol header */ +struct phonethdr { + __u8 pn_rdev; + __u8 pn_sdev; + __u8 pn_res; + __be16 pn_length; + __u8 pn_robj; + __u8 pn_sobj; +} __attribute__((packed)); + +/* Common Phonet payload header */ +struct phonetmsg { + __u8 pn_trans_id; /* transaction ID */ + __u8 pn_msg_id; /* message type */ + union { + struct { + __u8 pn_submsg_id; /* message subtype */ + __u8 pn_data[5]; + } base; + struct { + __u16 pn_e_res_id; /* extended resource ID */ + __u8 pn_e_submsg_id; /* message subtype */ + __u8 pn_e_data[3]; + } ext; + } pn_msg_u; +}; +#define PN_COMMON_MESSAGE 0xF0 +#define PN_PREFIX 0xE0 /* resource for extended messages */ +#define pn_submsg_id pn_msg_u.base.pn_submsg_id +#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id +#define pn_e_res_id pn_msg_u.ext.pn_e_res_id +#define pn_data pn_msg_u.base.pn_data +#define pn_e_data pn_msg_u.ext.pn_e_data + +/* data for unreachable errors */ +#define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP 0x01 +#define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP 0x14 +#define pn_orig_msg_id pn_data[0] +#define pn_status pn_data[1] +#define pn_e_orig_msg_id pn_e_data[0] +#define pn_e_status pn_e_data[1] + +/* Phonet socket address structure */ +struct sockaddr_pn { + sa_family_t spn_family; + __u8 spn_obj; + __u8 spn_dev; + __u8 spn_resource; + __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; +} __attribute__ ((packed)); + +static inline __u16 pn_object(__u8 addr, __u16 port) +{ + return (addr << 8) | (port & 0x3ff); +} + +static inline __u8 pn_obj(__u16 handle) +{ + return handle & 0xff; +} + +static inline __u8 pn_dev(__u16 handle) +{ + return handle >> 8; +} + +static inline __u16 pn_port(__u16 handle) +{ + return handle & 0x3ff; +} + +static inline __u8 pn_addr(__u16 handle) +{ + return (handle >> 8) & 0xfc; +} + +static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr) +{ + spn->spn_dev &= 0x03; + spn->spn_dev |= addr & 0xfc; +} + +static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port) +{ + spn->spn_dev &= 0xfc; + spn->spn_dev |= (port >> 8) & 0x03; + spn->spn_obj = port & 0xff; +} + +static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn, + __u16 handle) +{ + spn->spn_dev = pn_dev(handle); + spn->spn_obj = pn_obj(handle); +} + +static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn, + __u8 resource) +{ + spn->spn_resource = resource; +} + +static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn) +{ + return spn->spn_dev & 0xfc; +} + +static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn) +{ + return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj; +} + +static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn) +{ + return pn_object(spn->spn_dev, 
spn->spn_obj); +} + +static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn) +{ + return spn->spn_resource; +} + +#endif diff --git a/include/linux/phy.h b/include/linux/phy.h index 7224c4099a2..77c4ed60b98 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -99,7 +99,14 @@ struct mii_bus { */ struct mutex mdio_lock; - struct device *dev; + struct device *parent; + enum { + MDIOBUS_ALLOCATED = 1, + MDIOBUS_REGISTERED, + MDIOBUS_UNREGISTERED, + MDIOBUS_RELEASED, + } state; + struct device dev; /* list of all PHYs on bus */ struct phy_device *phy_map[PHY_MAX_ADDR]; @@ -113,6 +120,16 @@ struct mii_bus { */ int *irq; }; +#define to_mii_bus(d) container_of(d, struct mii_bus, dev) + +struct mii_bus *mdiobus_alloc(void); +int mdiobus_register(struct mii_bus *bus); +void mdiobus_unregister(struct mii_bus *bus); +void mdiobus_free(struct mii_bus *bus); +struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); +int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum); +int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val); + #define PHY_INTERRUPT_DISABLED 0x0 #define PHY_INTERRUPT_ENABLED 0x80000000 @@ -391,8 +408,35 @@ struct phy_fixup { int (*run)(struct phy_device *phydev); }; -int phy_read(struct phy_device *phydev, u16 regnum); -int phy_write(struct phy_device *phydev, u16 regnum, u16 val); +/** + * phy_read - Convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +static inline int phy_read(struct phy_device *phydev, u16 regnum) +{ + return mdiobus_read(phydev->bus, phydev->addr, regnum); +} + +/** + * phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
+ */ +static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val) +{ + return mdiobus_write(phydev->bus, phydev->addr, regnum, val); +} + int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); struct phy_device* get_phy_device(struct mii_bus *bus, int addr); int phy_clear_interrupt(struct phy_device *phydev); @@ -408,8 +452,6 @@ void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); -int mdiobus_register(struct mii_bus *bus); -void mdiobus_unregister(struct mii_bus *bus); void phy_sanitize_settings(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_enable_interrupts(struct phy_device *phydev); diff --git a/include/linux/pid.h b/include/linux/pid.h index 22921ac4cfd..d7e98ff8021 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -161,4 +161,13 @@ pid_t pid_vnr(struct pid *pid); } \ } while (0) +#define do_each_pid_thread(pid, type, task) \ + do_each_pid_task(pid, type, task) { \ + struct task_struct *tg___ = task; \ + do { + +#define while_each_pid_thread(pid, type, task) \ + } while_each_thread(tg___, task); \ + task = tg___; \ + } while_each_pid_task(pid, type, task) #endif /* _LINUX_PID_H */ diff --git a/include/linux/pim.h b/include/linux/pim.h index 236ffd31739..1ba0661561a 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h @@ -3,22 +3,6 @@ #include <asm/byteorder.h> -#ifndef __KERNEL__ -struct pim { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 pim_type:4, /* PIM message type */ - pim_ver:4; /* PIM version */ -#elif defined(__BIG_ENDIAN_BITFIELD) - __u8 pim_ver:4; /* PIM version */ - pim_type:4; /* PIM message type */ -#endif - __u8 pim_rsv; /* Reserved */ - __be16 pim_cksum; /* Checksum */ -}; - -#define PIM_MINLEN 8 -#endif - /* Message types - V1 */ #define PIM_V1_VERSION __constant_htonl(0x10000000) #define PIM_V1_REGISTER 1 @@ -27,7 +11,6 @@ struct pim { #define PIM_VERSION 2 #define PIM_REGISTER 1 -#if defined(__KERNEL__) #define PIM_NULL_REGISTER __constant_htonl(0x40000000) /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ @@ -42,4 +25,3 @@ struct pimreghdr struct sk_buff; extern int pim_rcv_v1(struct sk_buff *); #endif -#endif diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index e5de421ac7b..5d921fa91a5 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -123,6 +123,13 @@ struct tc_prio_qopt __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ }; +/* MULTIQ section */ + +struct tc_multiq_qopt { + __u16 bands; /* Number of bands */ + __u16 max_bands; /* Maximum number of queues */ +}; + /* TBF section */ struct tc_tbf_qopt diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 2e4e97bd19f..d74f75ed1e4 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h @@ -1,6 +1,6 @@ /* interface for the pm_qos_power infrastructure of the linux kernel. 
* - * Mark Gross + * Mark Gross <mgross@linux.intel.com> */ #include <linux/list.h> #include <linux/notifier.h> diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 1ce54b63085..be764e514e3 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -21,7 +21,14 @@ struct pnp_dev; /* * Resource Management */ +#ifdef CONFIG_PNP struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int); +#else +static inline struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned int type, unsigned int num) +{ + return NULL; +} +#endif static inline int pnp_resource_valid(struct resource *res) { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 68ed19ccf1f..ea96ead1d39 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -78,6 +78,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CHARGE_EMPTY, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_AVG, + POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index f560d1705af..fb61850d1cf 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -282,11 +282,16 @@ union proc_op { struct task_struct *task); }; +struct ctl_table_header; +struct ctl_table; + struct proc_inode { struct pid *pid; int fd; union proc_op op; struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + struct ctl_table *sysctl_entry; struct inode vfs_inode; }; diff --git a/include/linux/proportions.h b/include/linux/proportions.h index 5afc1b23346..cf793bbbd05 100644 --- a/include/linux/proportions.h +++ b/include/linux/proportions.h @@ -104,8 +104,8 @@ struct prop_local_single { * snapshot of the last seen global state * and a lock protecting this state */ - int shift; unsigned long period; + int shift; spinlock_t lock; /* protect the snapshot state */ }; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index c6f5f9dd0ce..ea7416c901d 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -121,6 +121,74 @@ static inline void ptrace_unlink(struct task_struct *child) int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data); int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); +/** + * task_ptrace - return %PT_* flags that apply to a task + * @task: pointer to &task_struct in question + * + * Returns the %PT_* flags that apply to @task. + */ +static inline int task_ptrace(struct task_struct *task) +{ + return task->ptrace; +} + +/** + * ptrace_event - possibly stop for a ptrace event notification + * @mask: %PT_* bit to check in @current->ptrace + * @event: %PTRACE_EVENT_* value to report if @mask is set + * @message: value for %PTRACE_GETEVENTMSG to return + * + * This checks the @mask bit to see if ptrace wants stops for this event. + * If so we stop, reporting @event and @message to the ptrace parent. + * + * Returns nonzero if we did a ptrace notification, zero if not. + * + * Called without locks. 
+ */ +static inline int ptrace_event(int mask, int event, unsigned long message) +{ + if (mask && likely(!(current->ptrace & mask))) + return 0; + current->ptrace_message = message; + ptrace_notify((event << 8) | SIGTRAP); + return 1; +} + +/** + * ptrace_init_task - initialize ptrace state for a new child + * @child: new child task + * @ptrace: true if child should be ptrace'd by parent's tracer + * + * This is called immediately after adding @child to its parent's children + * list. @ptrace is false in the normal case, and true to ptrace @child. + * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. + */ +static inline void ptrace_init_task(struct task_struct *child, bool ptrace) +{ + INIT_LIST_HEAD(&child->ptrace_entry); + INIT_LIST_HEAD(&child->ptraced); + child->parent = child->real_parent; + child->ptrace = 0; + if (unlikely(ptrace)) { + child->ptrace = current->ptrace; + ptrace_link(child, current->parent); + } +} + +/** + * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped + * @task: task in %EXIT_DEAD state + * + * Called with write_lock(&tasklist_lock) held. + */ +static inline void ptrace_release_task(struct task_struct *task) +{ + BUG_ON(!list_empty(&task->ptraced)); + ptrace_unlink(task); + BUG_ON(!list_empty(&task->ptrace_entry)); +} + #ifndef force_successful_syscall_return /* * System call handlers that, upon successful completion, need to return a @@ -246,6 +314,10 @@ static inline void user_enable_block_step(struct task_struct *task) #define arch_ptrace_stop(code, info) do { } while (0) #endif +extern int task_current_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc); + #endif #endif diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h index 39b66713a0b..bd466439c58 100644 --- a/include/linux/quicklist.h +++ b/include/linux/quicklist.h @@ -80,6 +80,13 @@ void quicklist_trim(int nr, void (*dtor)(void *), unsigned long quicklist_total_size(void); +#else + +static inline unsigned long quicklist_total_size(void) +{ + return 0; +} + #endif #endif /* LINUX_QUICKLIST_H */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 742187f7a05..ca6b9b5c8d5 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -43,6 +43,8 @@ int dquot_mark_dquot_dirty(struct dquot *dquot); int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path, int remount); +int vfs_quota_on_path(struct super_block *sb, int type, int format_id, + struct path *path); int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int vfs_quota_off(struct super_block *sb, int type, int remount); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index b8ce2b444bb..a916c6660df 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -99,12 +99,15 @@ do { \ * * The notable exceptions to this rule are the following functions: * radix_tree_lookup + * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup + * radix_tree_gang_lookup_slot * radix_tree_gang_lookup_tag + * radix_tree_gang_lookup_tag_slot * radix_tree_tagged * - * The first 4 functions are able to be called locklessly, using RCU. The + * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. 
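To make the locking rule above concrete, here is a minimal sketch of the lockless read side as that comment describes it; the tree, item type and update lock are invented for illustration, and only the radix_tree_*() calls and the rcu_read_lock() discipline come from this header.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_item {
	unsigned long key;
	int value;
};

static RADIX_TREE(my_tree, GFP_ATOMIC);		/* hypothetical tree */
static DEFINE_SPINLOCK(my_tree_lock);		/* serializes updates only */

/* Reader: no tree lock, but the lookup must sit inside rcu_read_lock(). */
static int my_lookup_value(unsigned long index, int *value)
{
	struct my_item *item;
	int ret = -ENOENT;

	rcu_read_lock();
	item = radix_tree_lookup(&my_tree, index);
	if (item) {
		*value = item->value;	/* consume the item inside the RCU section */
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}

/* Writer: updates still need their own serialization, and deletions must
 * defer freeing (call_rcu()/synchronize_rcu()) so lockless readers stay safe. */
static int my_insert(struct my_item *item)
{
	int err;

	spin_lock(&my_tree_lock);
	err = radix_tree_insert(&my_tree, item->key, item);
	spin_unlock(&my_tree_lock);

	return err;
}
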
@@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); +unsigned int +radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items); unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); int radix_tree_preload(gfp_t gfp_mask); @@ -173,6 +179,10 @@ unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); static inline void radix_tree_preload_end(void) diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 9f2549ac0e2..c200b9a34af 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -128,6 +128,7 @@ struct mddev_s #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* superblock update in progress */ +#define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */ int ro; diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index b160fb18e8d..37aaf2b3986 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt); #ifndef CONFIG_MMU +extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 8c774905dcf..5f89b62e698 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -40,12 +40,21 @@ #include <linux/cpumask.h> #include <linux/seqlock.h> +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR +#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ +#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ /* Global control variables for rcupdate callback mechanism. */ struct rcu_ctrlblk { long cur; /* Current batch number. */ long completed; /* Number of the last completed batch */ - int next_pending; /* Is the next batch already waiting? */ + long pending; /* Number of the last pending batch */ +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR + unsigned long gp_start; /* Time at which GP started in jiffies. */ + unsigned long jiffies_stall; + /* Time at which to check for CPU stalls. */ +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ int signaled; @@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b) return (a - b) > 0; } -/* - * Per-CPU data for Read-Copy UPdate. - * nxtlist - new callbacks are added here - * curlist - current batch for which quiescent cycle started if any - */ +/* Per-CPU data for Read-Copy UPdate. 
*/ struct rcu_data { /* 1) quiescent state handling : */ long quiescbatch; /* Batch # for grace period */ @@ -78,12 +83,24 @@ struct rcu_data { int qs_pending; /* core waits for quiesc state */ /* 2) batch handling */ - long batch; /* Batch # for current RCU batch */ + /* + * if nxtlist is not NULL, then: + * batch: + * The batch # for the last entry of nxtlist + * [*nxttail[1], NULL = *nxttail[2]): + * Entries that batch # <= batch + * [*nxttail[0], *nxttail[1]): + * Entries that batch # <= batch - 1 + * [nxtlist, *nxttail[0]): + * Entries that batch # <= batch - 2 + * The grace period for these entries has completed, and + * the other grace-period-completed entries may be moved + * here temporarily in rcu_process_callbacks(). + */ + long batch; struct rcu_head *nxtlist; - struct rcu_head **nxttail; + struct rcu_head **nxttail[3]; long qlen; /* # of queued callbacks */ - struct rcu_head *curlist; - struct rcu_head **curtail; struct rcu_head *donelist; struct rcu_head **donetail; long blimit; /* Upper limit on a processed batch */ @@ -117,7 +134,7 @@ extern int rcu_needs_cpu(int cpu); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern struct lockdep_map rcu_lock_map; # define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) #else # define rcu_read_acquire() do { } while (0) diff --git a/include/linux/rculist.h b/include/linux/rculist.h index b0f39be08b6..e649bd3f2c9 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -98,6 +98,34 @@ static inline void list_del_rcu(struct list_head *entry) } /** + * hlist_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_add_head_rcu() or + * hlist_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_for_each_entry_rcu(). + */ +static inline void hlist_del_init_rcu(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + n->pprev = NULL; + } +} + +/** * list_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert @@ -170,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list, at->prev = last; } -/** - * list_for_each_rcu - iterate over an rcu-protected list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - * - * This list-traversal primitive may safely run concurrently with - * the _rcu list-mutation primitives such as list_add_rcu() - * as long as the traversal is guarded by rcu_read_lock(). 
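On the writer side, the new hlist_del_init_rcu() documented earlier in this file pairs naturally with an RCU-deferred free. A rough sketch under invented names follows (the object, hash lock and free path are made up; only hlist_del_init_rcu(), hlist_unhashed() and call_rcu() are real primitives):

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical hashed object; assumed to have been added with
 * hlist_add_head_rcu() elsewhere. */
struct my_obj {
	struct hlist_node node;
	struct rcu_head rcu;
	int key;
};

static DEFINE_SPINLOCK(my_hash_lock);	/* serializes writers */

static void my_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

/*
 * Writer side: unhash under the writer lock.  Concurrent RCU readers may
 * still be walking the chain, so the forward pointer is left intact and
 * the object is only freed after a grace period.
 */
static void my_obj_unhash(struct my_obj *obj)
{
	spin_lock(&my_hash_lock);
	hlist_del_init_rcu(&obj->node);
	spin_unlock(&my_hash_lock);

	/* hlist_unhashed(&obj->node) is now true, which other code may test. */
	call_rcu(&obj->rcu, my_obj_free_rcu);
}
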
- */ -#define list_for_each_rcu(pos, head) \ - for (pos = rcu_dereference((head)->next); \ - prefetch(pos->next), pos != (head); \ - pos = rcu_dereference(pos->next)) - #define __list_for_each_rcu(pos, head) \ for (pos = rcu_dereference((head)->next); \ pos != (head); \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index e8b4039cfb2..86f1f5e43e3 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -133,6 +133,26 @@ struct rcu_head { #define rcu_read_unlock_bh() __rcu_read_unlock_bh() /** + * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section + * + * Should be used with either + * - synchronize_sched() + * or + * - call_rcu_sched() and rcu_barrier_sched() + * on the write-side to insure proper synchronization. + */ +#define rcu_read_lock_sched() preempt_disable() + +/* + * rcu_read_unlock_sched - marks the end of a RCU-classic critical section + * + * See rcu_read_lock_sched for more information. + */ +#define rcu_read_unlock_sched() preempt_enable() + + + +/** * rcu_dereference - fetch an RCU-protected pointer in an * RCU read-side critical section. This pointer may later * be safely dereferenced. diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 0967f03b070..3e05c09b54a 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h @@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu) rdssp->sched_qs++; } #define rcu_bh_qsctr_inc(cpu) -#define call_rcu_bh(head, rcu) call_rcu(head, rcu) + +/* + * Someone might want to pass call_rcu_bh as a function pointer. + * So this needs to just be a rename and not a macro function. + * (no parentheses) + */ +#define call_rcu_bh call_rcu /** * call_rcu_sched - Queue RCU callback for invocation after sched grace period. @@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); struct softirq_action; #ifdef CONFIG_NO_HZ -DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); static inline void rcu_enter_nohz(void) { @@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void) { static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); - smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ __get_cpu_var(rcu_dyntick_sched).dynticks++; + smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), &rs); } diff --git a/include/linux/reboot.h b/include/linux/reboot.h index b93b541cf11..988e55fe649 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -59,6 +59,7 @@ extern void machine_crash_shutdown(struct pt_regs *); * Architecture independent implemenations of sys_reboot commands. */ +extern void kernel_restart_prepare(char *cmd); extern void kernel_restart(char *cmd); extern void kernel_halt(void); extern void kernel_power_off(void); diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h new file mode 100644 index 00000000000..e84b0a9feda --- /dev/null +++ b/include/linux/regulator/bq24022.h @@ -0,0 +1,21 @@ +/* + * Support for TI bq24022 (bqTINY-II) Dual Input (USB/AC Adpater) + * 1-Cell Li-Ion Charger connected via GPIOs. + * + * Copyright (c) 2008 Philipp Zabel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +/** + * bq24022_mach_info - platform data for bq24022 + * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging + * @gpio_iset2: GPIO line connected to the ISET2 pin, used to limit charging current to 100 mA / 500 mA + */ +struct bq24022_mach_info { + int gpio_nce; + int gpio_iset2; +}; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h new file mode 100644 index 00000000000..afdc4558bb9 --- /dev/null +++ b/include/linux/regulator/consumer.h @@ -0,0 +1,284 @@ +/* + * consumer.h -- SoC Regulator consumer support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Consumer Interface. + * + * A Power Management Regulator framework for SoC based devices. + * Features:- + * o Voltage and current level control. + * o Operating mode control. + * o Regulator status. + * o sysfs entries for showing client devices and status + * + * EXPERIMENTAL FEATURES: + * Dynamic Regulator operating Mode Switching (DRMS) - allows regulators + * to use most efficient operating mode depending upon voltage and load and + * is transparent to client drivers. + * + * e.g. Devices x,y,z share regulator r. Device x and y draw 20mA each during + * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when + * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA + * but this drops rapidly to 60% when below 100mA. Regulator r has > 90% + * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate + * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. + * + */ + +#ifndef __LINUX_REGULATOR_CONSUMER_H_ +#define __LINUX_REGULATOR_CONSUMER_H_ + +/* + * Regulator operating modes. + * + * Regulators can run in a variety of different operating modes depending on + * output load. This allows further system power savings by selecting the + * best (and most efficient) regulator mode for a desired load. + * + * Most drivers will only care about NORMAL. The modes below are generic and + * will probably not match the naming convention of your regulator data sheet + * but should match the use cases in the datasheet. + * + * In order of power efficiency (least efficient at top). + * + * Mode Description + * FAST Regulator can handle fast changes in it's load. + * e.g. useful in CPU voltage & frequency scaling where + * load can quickly increase with CPU frequency increases. + * + * NORMAL Normal regulator power supply mode. Most drivers will + * use this mode. + * + * IDLE Regulator runs in a more efficient mode for light + * loads. Can be used for devices that have a low power + * requirement during periods of inactivity. This mode + * may be more noisy than NORMAL and may not be able + * to handle fast load switching. + * + * STANDBY Regulator runs in the most efficient mode for very + * light loads. Can be used by devices when they are + * in a sleep/standby state. This mode is likely to be + * the most noisy and may not be able to handle fast load + * switching. + * + * NOTE: Most regulators will only support a subset of these modes. Some + * will only just support NORMAL. + * + * These modes can be OR'ed together to make up a mask of valid register modes. 
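As a consumer-side illustration of the interface this comment describes, here is a rough sketch that leans on the regulator_get()/regulator_enable()/regulator_set_optimum_mode() declarations further down in this header; the device structure, supply name and load figures are invented:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

struct my_codec {
	struct device *dev;
	struct regulator *vdd;	/* our supply, held while powered */
};

static int my_codec_power_on(struct my_codec *codec)
{
	int ret;

	codec->vdd = regulator_get(codec->dev, "VDD_CODEC"); /* name made up */
	if (IS_ERR(codec->vdd))
		return PTR_ERR(codec->vdd);

	ret = regulator_enable(codec->vdd);
	if (ret) {
		regulator_put(codec->vdd);
		return ret;
	}

	/* roughly 20 mA while active: lets DRMS pick NORMAL vs IDLE for us */
	regulator_set_optimum_mode(codec->vdd, 20000);
	return 0;
}

static void my_codec_power_off(struct my_codec *codec)
{
	regulator_disable(codec->vdd);
	regulator_put(codec->vdd);
}

The load hint only has an effect when the board constraints permit DRMS for this regulator (see REGULATOR_CHANGE_DRMS in the machine interface later in this series of headers).
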
+ */ + +#define REGULATOR_MODE_FAST 0x1 +#define REGULATOR_MODE_NORMAL 0x2 +#define REGULATOR_MODE_IDLE 0x4 +#define REGULATOR_MODE_STANDBY 0x8 + +/* + * Regulator notifier events. + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * FORCE_DISABLE Regulator shut down by software. + * + * NOTE: These events can be OR'ed together when passed into handler. + */ + +#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 +#define REGULATOR_EVENT_OVER_CURRENT 0x02 +#define REGULATOR_EVENT_REGULATION_OUT 0x04 +#define REGULATOR_EVENT_FAIL 0x08 +#define REGULATOR_EVENT_OVER_TEMP 0x10 +#define REGULATOR_EVENT_FORCE_DISABLE 0x20 + +struct regulator; + +/** + * struct regulator_bulk_data - Data used for bulk regulator operations. + * + * @supply The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @consumer The regulator consumer for the supply. This will be managed + * by the bulk API. + * + * The regulator APIs provide a series of regulator_bulk_() API calls as + * a convenience to consumers which require multiple supplies. This + * structure is used to manage data for these calls. + */ +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; +}; + +#if defined(CONFIG_REGULATOR) + +/* regulator get and put */ +struct regulator *__must_check regulator_get(struct device *dev, + const char *id); +void regulator_put(struct regulator *regulator); + +/* regulator output control and status */ +int regulator_enable(struct regulator *regulator); +int regulator_disable(struct regulator *regulator); +int regulator_force_disable(struct regulator *regulator); +int regulator_is_enabled(struct regulator *regulator); + +int regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers); +void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers); + +int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); +int regulator_get_voltage(struct regulator *regulator); +int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA); +int regulator_get_current_limit(struct regulator *regulator); + +int regulator_set_mode(struct regulator *regulator, unsigned int mode); +unsigned int regulator_get_mode(struct regulator *regulator); +int regulator_set_optimum_mode(struct regulator *regulator, int load_uA); + +/* regulator notifier block */ +int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb); +int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb); + +/* driver data - core doesn't touch */ +void *regulator_get_drvdata(struct regulator *regulator); +void regulator_set_drvdata(struct regulator *regulator, void *data); + +#else + +/* + * Make sure client drivers will still build on systems with no software + * controllable voltage or current regulators. + */ +static inline struct regulator *__must_check regulator_get(struct device *dev, + const char *id) +{ + /* Nothing except the stubbed out regulator API should be + * looking at the value except to check if it is an error + * value so the actual return value doesn't matter. 
+ */ + return (struct regulator *)id; +} +static inline void regulator_put(struct regulator *regulator) +{ +} + +static inline int regulator_enable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_is_enabled(struct regulator *regulator) +{ + return 1; +} + +static inline int regulator_bulk_get(struct device *dev, + int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers) +{ +} + +static inline int regulator_set_voltage(struct regulator *regulator, + int min_uV, int max_uV) +{ + return 0; +} + +static inline int regulator_get_voltage(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA) +{ + return 0; +} + +static inline int regulator_get_current_limit(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_mode(struct regulator *regulator, + unsigned int mode) +{ + return 0; +} + +static inline unsigned int regulator_get_mode(struct regulator *regulator) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_set_optimum_mode(struct regulator *regulator, + int load_uA) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline void *regulator_get_drvdata(struct regulator *regulator) +{ + return NULL; +} + +static inline void regulator_set_drvdata(struct regulator *regulator, + void *data) +{ +} + +#endif + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h new file mode 100644 index 00000000000..1d712c7172a --- /dev/null +++ b/include/linux/regulator/driver.h @@ -0,0 +1,99 @@ +/* + * driver.h -- SoC Regulator driver support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Driver Interface. + */ + +#ifndef __LINUX_REGULATOR_DRIVER_H_ +#define __LINUX_REGULATOR_DRIVER_H_ + +#include <linux/device.h> +#include <linux/regulator/consumer.h> + +struct regulator_constraints; +struct regulator_dev; + +/** + * struct regulator_ops - regulator operations. + * + * This struct describes regulator operations. 
+ */ +struct regulator_ops { + + /* get/set regulator voltage */ + int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); + int (*get_voltage) (struct regulator_dev *); + + /* get/set regulator current */ + int (*set_current_limit) (struct regulator_dev *, + int min_uA, int max_uA); + int (*get_current_limit) (struct regulator_dev *); + + /* enable/disable regulator */ + int (*enable) (struct regulator_dev *); + int (*disable) (struct regulator_dev *); + int (*is_enabled) (struct regulator_dev *); + + /* get/set regulator operating mode (defined in regulator.h) */ + int (*set_mode) (struct regulator_dev *, unsigned int mode); + unsigned int (*get_mode) (struct regulator_dev *); + + /* get most efficient regulator operating mode for load */ + unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, + int output_uV, int load_uA); + + /* the operations below are for configuration of regulator state when + * it's parent PMIC enters a global STANBY/HIBERNATE state */ + + /* set regulator suspend voltage */ + int (*set_suspend_voltage) (struct regulator_dev *, int uV); + + /* enable/disable regulator in suspend state */ + int (*set_suspend_enable) (struct regulator_dev *); + int (*set_suspend_disable) (struct regulator_dev *); + + /* set regulator suspend operating mode (defined in regulator.h) */ + int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); +}; + +/* + * Regulators can either control voltage or current. + */ +enum regulator_type { + REGULATOR_VOLTAGE, + REGULATOR_CURRENT, +}; + +/** + * struct regulator_desc - Regulator descriptor + * + */ +struct regulator_desc { + const char *name; + int id; + struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; +}; + + +struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, + void *reg_data); +void regulator_unregister(struct regulator_dev *rdev); + +int regulator_notifier_call_chain(struct regulator_dev *rdev, + unsigned long event, void *data); + +void *rdev_get_drvdata(struct regulator_dev *rdev); +int rdev_get_id(struct regulator_dev *rdev); + +#endif diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h new file mode 100644 index 00000000000..1387a5d2190 --- /dev/null +++ b/include/linux/regulator/fixed.h @@ -0,0 +1,22 @@ +/* + * fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#ifndef __REGULATOR_FIXED_H +#define __REGULATOR_FIXED_H + +struct fixed_voltage_config { + const char *supply_name; + int microvolts; +}; + +#endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h new file mode 100644 index 00000000000..11e737dbfcf --- /dev/null +++ b/include/linux/regulator/machine.h @@ -0,0 +1,104 @@ +/* + * machine.h -- SoC Regulator support, machine/board driver API. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Machine/Board Interface. 
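Before the board-level constraints below, a rough sketch of the driver-side registration declared in driver.h above; the chip callbacks, names and fixed 3.3 V output are invented, and only struct regulator_ops, struct regulator_desc and regulator_register() come from that header:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

static int my_pmic_enable(struct regulator_dev *rdev)
{
	/* set the chip's enable bit here */
	return 0;
}

static int my_pmic_disable(struct regulator_dev *rdev)
{
	/* clear the chip's enable bit here */
	return 0;
}

static int my_pmic_get_voltage(struct regulator_dev *rdev)
{
	return 3300000;	/* fixed 3.3 V output, reported in microvolts */
}

static struct regulator_ops my_pmic_ops = {
	.enable		= my_pmic_enable,
	.disable	= my_pmic_disable,
	.get_voltage	= my_pmic_get_voltage,
};

static struct regulator_desc my_pmic_desc = {
	.name	= "my-pmic-ldo1",
	.id	= 0,
	.ops	= &my_pmic_ops,
	.type	= REGULATOR_VOLTAGE,
	.owner	= THIS_MODULE,
};

static struct regulator_dev *my_rdev;

static int my_pmic_setup(void *chip_data)
{
	my_rdev = regulator_register(&my_pmic_desc, chip_data);
	return IS_ERR(my_rdev) ? PTR_ERR(my_rdev) : 0;
}
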
+ */ + +#ifndef __LINUX_REGULATOR_MACHINE_H_ +#define __LINUX_REGULATOR_MACHINE_H_ + +#include <linux/regulator/consumer.h> +#include <linux/suspend.h> + +struct regulator; + +/* + * Regulator operation constraint flags. These flags are used to enable + * certain regulator operations and can be OR'ed together. + * + * VOLTAGE: Regulator output voltage can be changed by software on this + * board/machine. + * CURRENT: Regulator output current can be changed by software on this + * board/machine. + * MODE: Regulator operating mode can be changed by software on this + * board/machine. + * STATUS: Regulator can be enabled and disabled. + * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator. + */ + +#define REGULATOR_CHANGE_VOLTAGE 0x1 +#define REGULATOR_CHANGE_CURRENT 0x2 +#define REGULATOR_CHANGE_MODE 0x4 +#define REGULATOR_CHANGE_STATUS 0x8 +#define REGULATOR_CHANGE_DRMS 0x10 + +/** + * struct regulator_state - regulator state during low power syatem states + * + * This describes a regulators state during a system wide low power state. + */ +struct regulator_state { + int uV; /* suspend voltage */ + unsigned int mode; /* suspend regulator operating mode */ + int enabled; /* is regulator enabled in this suspend state */ +}; + +/** + * struct regulation_constraints - regulator operating constraints. + * + * This struct describes regulator and board/machine specific constraints. + */ +struct regulation_constraints { + + char *name; + + /* voltage output range (inclusive) - for voltage control */ + int min_uV; + int max_uV; + + /* current output range (inclusive) - for current control */ + int min_uA; + int max_uA; + + /* valid regulator operating modes for this machine */ + unsigned int valid_modes_mask; + + /* valid operations for regulator on this machine */ + unsigned int valid_ops_mask; + + /* regulator input voltage - only if supply is another regulator */ + int input_uV; + + /* regulator suspend states for global PMIC STANDBY/HIBERNATE */ + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; /* suspend state to set at init */ + + /* constriant flags */ + unsigned always_on:1; /* regulator never off when system is on */ + unsigned boot_on:1; /* bootloader/firmware enabled regulator */ + unsigned apply_uV:1; /* apply uV constraint iff min == max */ +}; + +int regulator_set_supply(const char *regulator, const char *regulator_supply); + +const char *regulator_get_supply(const char *regulator); + +int regulator_set_machine_constraints(const char *regulator, + struct regulation_constraints *constraints); + +int regulator_set_device_supply(const char *regulator, struct device *dev, + const char *supply); + +int regulator_suspend_prepare(suspend_state_t state); + +#endif diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 66a96814d61..af135ae895d 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -55,7 +55,7 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name); int reiserfs_delete_xattrs(struct inode *inode); int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); int reiserfs_xattr_init(struct super_block *sb, int mount_flags); -int reiserfs_permission(struct inode *inode, int mask, struct nameidata *nd); +int reiserfs_permission(struct inode *inode, int mask); int reiserfs_xattr_del(struct inode *, const char *); int reiserfs_xattr_get(const struct inode *, const char *, void *, size_t); diff 
--git a/include/linux/relay.h b/include/linux/relay.h index 6cd8c4425fc..953fc055e87 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -48,6 +48,7 @@ struct rchan_buf size_t *padding; /* padding counts per sub-buffer */ size_t prev_padding; /* temporary variable */ size_t bytes_consumed; /* bytes consumed in cur read subbuf */ + size_t early_bytes; /* bytes consumed before VFS inited */ unsigned int cpu; /* this buf's cpu */ } ____cacheline_aligned; @@ -68,6 +69,7 @@ struct rchan int is_global; /* One global buffer ? */ struct list_head list; /* for channel list */ struct dentry *parent; /* parent dentry passed to open */ + int has_base_filename; /* has a filename associated? */ char base_filename[NAME_MAX]; /* saved base filename */ }; @@ -169,6 +171,9 @@ struct rchan *relay_open(const char *base_filename, size_t n_subbufs, struct rchan_callbacks *cb, void *private_data); +extern int relay_late_setup_files(struct rchan *chan, + const char *base_filename, + struct dentry *parent); extern void relay_close(struct rchan *chan); extern void relay_flush(struct rchan *chan); extern void relay_subbufs_consumed(struct rchan *chan, diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index fdeadd9740d..271c1c2c9f6 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -166,7 +166,7 @@ static inline int res_counter_set_limit(struct res_counter *cnt, int ret = -EBUSY; spin_lock_irqsave(&cnt->lock, flags); - if (cnt->usage < limit) { + if (cnt->usage <= limit) { cnt->limit = limit; ret = 0; } diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index c5f6e54ec6a..4cd64b0d982 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -49,6 +49,7 @@ enum rfkill_state { RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */ RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */ RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */ + RFKILL_STATE_MAX, /* marker for last valid state */ }; /* @@ -68,7 +69,8 @@ enum rfkill_state { * @user_claim_unsupported: Whether the hardware supports exclusive * RF-kill control by userspace. Set this before registering. * @user_claim: Set when the switch is controlled exlusively by userspace. - * @mutex: Guards switch state transitions + * @mutex: Guards switch state transitions. It serializes callbacks + * and also protects the state. * @data: Pointer to the RF button drivers private data which will be * passed along when toggling radio state. * @toggle_radio(): Mandatory handler to control state of the radio. 
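A short sketch of how a wireless driver might hook into this, using the rfkill_allocate()/rfkill_register() prototypes in the next hunk; the driver structure and callback body are invented, and RFKILL_TYPE_WLAN is the usual type for a Wi-Fi switch. Both calls are now __must_check, so the error paths matter:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/rfkill.h>

struct my_wifi {
	struct device *dev;
	struct rfkill *rfkill;
};

/* Mandatory handler: actually switch the radio on or off. */
static int my_wifi_toggle_radio(void *data, enum rfkill_state state)
{
	struct my_wifi *wifi = data;

	/* program the hardware behind wifi->dev to match 'state' */
	dev_dbg(wifi->dev, "radio %s\n",
		state == RFKILL_STATE_UNBLOCKED ? "on" : "off");
	return 0;
}

static int my_wifi_rfkill_init(struct my_wifi *wifi)
{
	int err;

	wifi->rfkill = rfkill_allocate(wifi->dev, RFKILL_TYPE_WLAN);
	if (!wifi->rfkill)
		return -ENOMEM;

	wifi->rfkill->name = "my-wifi";
	wifi->rfkill->data = wifi;
	wifi->rfkill->toggle_radio = my_wifi_toggle_radio;
	wifi->rfkill->state = RFKILL_STATE_UNBLOCKED;

	err = rfkill_register(wifi->rfkill);
	if (err) {
		rfkill_free(wifi->rfkill);
		wifi->rfkill = NULL;
	}
	return err;
}
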
@@ -89,12 +91,13 @@ struct rfkill { const char *name; enum rfkill_type type; - enum rfkill_state state; bool user_claim_unsupported; bool user_claim; + /* the mutex serializes callbacks and also protects + * the state */ struct mutex mutex; - + enum rfkill_state state; void *data; int (*toggle_radio)(void *data, enum rfkill_state state); int (*get_state)(void *data, enum rfkill_state *state); @@ -108,12 +111,14 @@ struct rfkill { }; #define to_rfkill(d) container_of(d, struct rfkill, dev) -struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type); +struct rfkill * __must_check rfkill_allocate(struct device *parent, + enum rfkill_type type); void rfkill_free(struct rfkill *rfkill); -int rfkill_register(struct rfkill *rfkill); +int __must_check rfkill_register(struct rfkill *rfkill); void rfkill_unregister(struct rfkill *rfkill); int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state); +int rfkill_set_default(enum rfkill_type type, enum rfkill_state state); /** * rfkill_state_complement - return complementar state diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1383692ac5b..fed6f5e0b41 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -26,6 +26,14 @@ */ struct anon_vma { spinlock_t lock; /* Serialize access to vma list */ + /* + * NOTE: the LSB of the head.next is set by + * mm_take_all_locks() _after_ taking the above lock. So the + * head must only be read/written after taking the above lock + * to be sure to see a valid next pointer. The LSB bit itself + * is serialized by a system wide lock only visible to + * mm_take_all_locks() (mm_all_locks_mutex). + */ struct list_head head; /* List of private "related" vmas */ }; @@ -94,7 +102,7 @@ int try_to_unmap(struct page *, int ignore_refs); * Called from mm/filemap_xip.c to unmap empty zero page */ pte_t *page_check_address(struct page *, struct mm_struct *, - unsigned long, spinlock_t **); + unsigned long, spinlock_t **, int); /* * Used by swapoff to help locate where page is expected in vma. 
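To illustrate the locking note added to struct anon_vma earlier in this hunk, a walk of the shared vma list is expected to look roughly like the sketch below. The walk body is invented, and the anon_vma_node member is assumed to be the vm_area_struct list field of this kernel era (worth checking against include/linux/mm_types.h):

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Walk every vma sharing an anon_vma.  Per the note above, head.next may
 * have its low bit set by mm_take_all_locks(), so the list must only be
 * read or written with anon_vma->lock held.
 */
static void my_anon_vma_walk(struct anon_vma *anon_vma)
{
	struct vm_area_struct *vma;

	spin_lock(&anon_vma->lock);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/* inspect vma here, e.g. vma->vm_start and vma->vm_end */
	}
	spin_unlock(&anon_vma->lock);
}
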
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index b01fe004cb5..91f597ad6ac 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -225,8 +225,6 @@ typedef struct rtc_task { int rtc_register(rtc_task_t *task); int rtc_unregister(rtc_task_t *task); int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); -void rtc_get_rtc_time(struct rtc_time *rtc_tm); -irqreturn_t rtc_interrupt(int irq, void *dev_id); #endif /* __KERNEL__ */ diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h index e8c7c21ceb1..6fc961459b4 100644 --- a/include/linux/rtc/m48t59.h +++ b/include/linux/rtc/m48t59.h @@ -18,40 +18,47 @@ /* * M48T59 Register Offset */ -#define M48T59_YEAR 0x1fff -#define M48T59_MONTH 0x1ffe -#define M48T59_MDAY 0x1ffd /* Day of Month */ -#define M48T59_WDAY 0x1ffc /* Day of Week */ +#define M48T59_YEAR 0xf +#define M48T59_MONTH 0xe +#define M48T59_MDAY 0xd /* Day of Month */ +#define M48T59_WDAY 0xc /* Day of Week */ #define M48T59_WDAY_CB 0x20 /* Century Bit */ #define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */ -#define M48T59_HOUR 0x1ffb -#define M48T59_MIN 0x1ffa -#define M48T59_SEC 0x1ff9 -#define M48T59_CNTL 0x1ff8 +#define M48T59_HOUR 0xb +#define M48T59_MIN 0xa +#define M48T59_SEC 0x9 +#define M48T59_CNTL 0x8 #define M48T59_CNTL_READ 0x40 #define M48T59_CNTL_WRITE 0x80 -#define M48T59_WATCHDOG 0x1ff7 -#define M48T59_INTR 0x1ff6 +#define M48T59_WATCHDOG 0x7 +#define M48T59_INTR 0x6 #define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */ #define M48T59_INTR_ABE 0x20 -#define M48T59_ALARM_DATE 0x1ff5 -#define M48T59_ALARM_HOUR 0x1ff4 -#define M48T59_ALARM_MIN 0x1ff3 -#define M48T59_ALARM_SEC 0x1ff2 -#define M48T59_UNUSED 0x1ff1 -#define M48T59_FLAGS 0x1ff0 +#define M48T59_ALARM_DATE 0x5 +#define M48T59_ALARM_HOUR 0x4 +#define M48T59_ALARM_MIN 0x3 +#define M48T59_ALARM_SEC 0x2 +#define M48T59_UNUSED 0x1 +#define M48T59_FLAGS 0x0 #define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */ #define M48T59_FLAGS_AF 0x40 /* alarm */ #define M48T59_FLAGS_BF 0x10 /* low battery */ -#define M48T59_NVRAM_SIZE 0x1ff0 +#define M48T59RTC_TYPE_M48T59 0 /* to keep compatibility */ +#define M48T59RTC_TYPE_M48T02 1 +#define M48T59RTC_TYPE_M48T08 2 struct m48t59_plat_data { - /* The method to access M48T59 registers, - * NOTE: The 'ofs' should be 0x00~0x1fff - */ + /* The method to access M48T59 registers */ void (*write_byte)(struct device *dev, u32 ofs, u8 val); unsigned char (*read_byte)(struct device *dev, u32 ofs); + + int type; /* RTC model */ + + /* ioaddr mapped externally */ + void __iomem *ioaddr; + /* offset to RTC registers, automatically set according to the type */ + unsigned int offset; }; #endif /* _LINUX_RTC_M48T59_H_ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index f4d386c191f..2b3d51c6ec9 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -582,6 +582,10 @@ enum rtnetlink_groups { #define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE RTNLGRP_ND_USEROPT, #define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT + RTNLGRP_PHONET_IFADDR, +#define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR + RTNLGRP_PHONET_ROUTE, +#define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) @@ -755,13 +759,6 @@ extern void __rtnl_unlock(void); } \ } while(0) -#define BUG_TRAP(x) do { \ - if (unlikely(!(x))) { \ - printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \ - #x, __FILE__ , __LINE__); \ - } \ -} while(0) - static inline u32 rtm_get_table(struct rtattr **rta, u8 table) { 
return RTA_GET_U32(rta[RTA_TABLE-1]); diff --git a/include/linux/sched.h b/include/linux/sched.h index 42036ffe6b0..c226c7b8294 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -87,6 +87,7 @@ struct sched_param { #include <linux/task_io_accounting.h> #include <linux/kobject.h> #include <linux/latencytop.h> +#include <linux/cred.h> #include <asm/processor.h> @@ -292,7 +293,6 @@ extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); -extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); extern unsigned int softlockup_panic; @@ -352,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, extern void arch_unmap_area(struct mm_struct *, unsigned long); extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); -#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS +#if USE_SPLIT_PTLOCKS /* * The mm counters are not protected by its page_table_lock, * so must be incremented atomically. @@ -363,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member) #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member) -#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ +#else /* !USE_SPLIT_PTLOCKS */ /* * The mm counters are protected by its page_table_lock, * so can be incremented directly. @@ -374,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); #define inc_mm_counter(mm, member) (mm)->_##member++ #define dec_mm_counter(mm, member) (mm)->_##member-- -#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ +#endif /* !USE_SPLIT_PTLOCKS */ #define get_mm_rss(mm) \ (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss)) @@ -451,8 +451,8 @@ struct signal_struct { * - everyone except group_exit_task is stopped during signal delivery * of fatal signals, group_exit_task processes the signal. */ - struct task_struct *group_exit_task; int notify_count; + struct task_struct *group_exit_task; /* thread group stop support, overloads group_exit_code too */ int group_stop_count; @@ -506,9 +506,6 @@ struct signal_struct { unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; -#ifdef CONFIG_TASK_XACCT - u64 rchar, wchar, syscr, syscw; -#endif struct task_io_accounting ioac; /* @@ -827,6 +824,9 @@ struct sched_domain { unsigned int ttwu_move_affine; unsigned int ttwu_move_balance; #endif +#ifdef CONFIG_SCHED_DEBUG + char *name; +#endif }; extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, @@ -900,7 +900,7 @@ struct sched_class { void (*yield_task) (struct rq *rq); int (*select_task_rq)(struct task_struct *p, int sync); - void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); + void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); struct task_struct * (*pick_next_task) (struct rq *rq); void (*put_prev_task) (struct rq *rq, struct task_struct *p); @@ -1013,8 +1013,8 @@ struct sched_entity { struct sched_rt_entity { struct list_head run_list; - unsigned int time_slice; unsigned long timeout; + unsigned int time_slice; int nr_cpus_allowed; struct sched_rt_entity *back; @@ -1257,10 +1257,6 @@ struct task_struct { unsigned long ptrace_message; siginfo_t *last_siginfo; /* For ptrace use. 
*/ -#ifdef CONFIG_TASK_XACCT -/* i/o counters(bytes read/written, #syscalls */ - u64 rchar, wchar, syscr, syscw; -#endif struct task_io_accounting ioac; #if defined(CONFIG_TASK_XACCT) u64 acct_rss_mem1; /* accumulated rss usage */ @@ -1482,6 +1478,10 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } +extern cputime_t task_utime(struct task_struct *p); +extern cputime_t task_stime(struct task_struct *p); +extern cputime_t task_gtime(struct task_struct *p); + /* * Per process flags */ @@ -1559,16 +1559,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init(void) -{ -} - -static inline u64 sched_clock_cpu(int cpu) -{ - return sched_clock(); -} +extern void sched_clock_init(void); +extern u64 sched_clock_cpu(int cpu); +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { } @@ -1580,28 +1574,11 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } - -#ifdef CONFIG_NO_HZ -static inline void sched_clock_tick_stop(int cpu) -{ -} - -static inline void sched_clock_tick_start(int cpu) -{ -} -#endif - -#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); +#else extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); -#ifdef CONFIG_NO_HZ -extern void sched_clock_tick_stop(int cpu); -extern void sched_clock_tick_start(int cpu); #endif -#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu @@ -1797,7 +1774,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_ extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern void do_notify_parent(struct task_struct *, int); +extern int do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); extern int send_sig(int, struct task_struct *, int); @@ -1883,9 +1860,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from); extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -extern void wait_task_inactive(struct task_struct * p); +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else -#define wait_task_inactive(p) do { } while (0) +static inline unsigned long wait_task_inactive(struct task_struct *p, + long match_state) +{ + return 1; +} #endif #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) @@ -2139,16 +2120,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ -#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT extern void arch_pick_mmap_layout(struct mm_struct *mm); -#else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) -{ - mm->mmap_base = TASK_UNMAPPED_BASE; - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; -} -#endif #ifdef CONFIG_TRACING extern void @@ -2196,22 +2168,22 @@ extern long sched_group_rt_period(struct task_group *tg); #ifdef CONFIG_TASK_XACCT static inline void add_rchar(struct task_struct *tsk, ssize_t amt) 
{ - tsk->rchar += amt; + tsk->ioac.rchar += amt; } static inline void add_wchar(struct task_struct *tsk, ssize_t amt) { - tsk->wchar += amt; + tsk->ioac.wchar += amt; } static inline void inc_syscr(struct task_struct *tsk) { - tsk->syscr++; + tsk->ioac.syscr++; } static inline void inc_syscw(struct task_struct *tsk) { - tsk->syscw++; + tsk->ioac.syscw++; } #else static inline void add_rchar(struct task_struct *tsk, ssize_t amt) @@ -2231,14 +2203,6 @@ static inline void inc_syscw(struct task_struct *tsk) } #endif -#ifdef CONFIG_SMP -void migration_init(void); -#else -static inline void migration_init(void) -{ -} -#endif - #ifndef TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif diff --git a/include/linux/security.h b/include/linux/security.h index f0e9adb22ac..f5c4a51eb42 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -46,8 +46,8 @@ struct audit_krule; */ extern int cap_capable(struct task_struct *tsk, int cap); extern int cap_settime(struct timespec *ts, struct timezone *tz); -extern int cap_ptrace(struct task_struct *parent, struct task_struct *child, - unsigned int mode); +extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); +extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -1157,17 +1157,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @alter contains the flag indicating whether changes are to be made. * Return 0 if permission is granted. * - * @ptrace: - * Check permission before allowing the @parent process to trace the + * @ptrace_may_access: + * Check permission before allowing the current process to trace the * @child process. * Security modules may also want to perform a process tracing check * during an execve in the set_security or apply_creds hooks of * binprm_security_ops if the process is being traced and its security * attributes would be changed by the execve. - * @parent contains the task_struct structure for parent process. - * @child contains the task_struct structure for child process. + * @child contains the task_struct structure for the target process. * @mode contains the PTRACE_MODE flags indicating the form of access. * Return 0 if permission is granted. + * @ptrace_traceme: + * Check that the @parent process has sufficient permission to trace the + * current process before allowing the current process to present itself + * to the @parent process for tracing. + * The parent process will still have to undergo the ptrace_may_access + * checks before it is allowed to trace this one. + * @parent contains the task_struct structure for debugger process. + * Return 0 if permission is granted. * @capget: * Get the @effective, @inheritable, and @permitted capability sets for * the @target process. 
The hook may also perform permission checking to @@ -1287,8 +1294,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) struct security_operations { char name[SECURITY_NAME_MAX + 1]; - int (*ptrace) (struct task_struct *parent, struct task_struct *child, - unsigned int mode); + int (*ptrace_may_access) (struct task_struct *child, unsigned int mode); + int (*ptrace_traceme) (struct task_struct *parent); int (*capget) (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -1362,7 +1369,7 @@ struct security_operations { struct inode *new_dir, struct dentry *new_dentry); int (*inode_readlink) (struct dentry *dentry); int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); - int (*inode_permission) (struct inode *inode, int mask, struct nameidata *nd); + int (*inode_permission) (struct inode *inode, int mask); int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); void (*inode_delete) (struct inode *inode); @@ -1553,15 +1560,10 @@ struct security_operations { extern int security_init(void); extern int security_module_enable(struct security_operations *ops); extern int register_security(struct security_operations *ops); -extern struct dentry *securityfs_create_file(const char *name, mode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops); -extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); -extern void securityfs_remove(struct dentry *dentry); /* Security operations */ -int security_ptrace(struct task_struct *parent, struct task_struct *child, - unsigned int mode); +int security_ptrace_may_access(struct task_struct *child, unsigned int mode); +int security_ptrace_traceme(struct task_struct *parent); int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, @@ -1628,7 +1630,7 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); -int security_inode_permission(struct inode *inode, int mask, struct nameidata *nd); +int security_inode_permission(struct inode *inode, int mask); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); void security_inode_delete(struct inode *inode); @@ -1742,11 +1744,15 @@ static inline int security_init(void) return 0; } -static inline int security_ptrace(struct task_struct *parent, - struct task_struct *child, - unsigned int mode) +static inline int security_ptrace_may_access(struct task_struct *child, + unsigned int mode) { - return cap_ptrace(parent, child, mode); + return cap_ptrace_may_access(child, mode); +} + +static inline int security_ptrace_traceme(struct task_struct *parent) +{ + return cap_ptrace_traceme(parent); } static inline int security_capget(struct task_struct *target, @@ -2021,8 +2027,7 @@ static inline int security_inode_follow_link(struct dentry *dentry, return 0; } -static inline int security_inode_permission(struct inode *inode, int mask, - struct nameidata *nd) +static inline int security_inode_permission(struct inode *inode, int mask) { return 0; } @@ -2414,25 +2419,6 @@ static inline int security_netlink_recv(struct sk_buff *skb, int cap) return cap_netlink_recv(skb, cap); } 
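The security.h changes split the old ptrace hook into ptrace_may_access and ptrace_traceme. A sketch of how an LSM might wire up the two new entries, simply deferring to the capability defaults; the "example" module name and function names are made up:

#include <linux/security.h>

static int example_ptrace_may_access(struct task_struct *child,
				     unsigned int mode)
{
	/* module-specific checks would go here */
	return cap_ptrace_may_access(child, mode);
}

static int example_ptrace_traceme(struct task_struct *parent)
{
	return cap_ptrace_traceme(parent);
}

static struct security_operations example_security_ops = {
	.name              = "example",
	.ptrace_may_access = example_ptrace_may_access,
	.ptrace_traceme    = example_ptrace_traceme,
};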
-static inline struct dentry *securityfs_create_dir(const char *name, - struct dentry *parent) -{ - return ERR_PTR(-ENODEV); -} - -static inline struct dentry *securityfs_create_file(const char *name, - mode_t mode, - struct dentry *parent, - void *data, - const struct file_operations *fops) -{ - return ERR_PTR(-ENODEV); -} - -static inline void securityfs_remove(struct dentry *dentry) -{ -} - static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { return -EOPNOTSUPP; @@ -2796,5 +2782,35 @@ static inline void security_audit_rule_free(void *lsmrule) #endif /* CONFIG_SECURITY */ #endif /* CONFIG_AUDIT */ +#ifdef CONFIG_SECURITYFS + +extern struct dentry *securityfs_create_file(const char *name, mode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); +extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); +extern void securityfs_remove(struct dentry *dentry); + +#else /* CONFIG_SECURITYFS */ + +static inline struct dentry *securityfs_create_dir(const char *name, + struct dentry *parent) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct dentry *securityfs_create_file(const char *name, + mode_t mode, + struct dentry *parent, + void *data, + const struct file_operations *fops) +{ + return ERR_PTR(-ENODEV); +} + +static inline void securityfs_remove(struct dentry *dentry) +{} + +#endif + #endif /* ! __LINUX_SECURITY_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a66304a0995..a1783b229ef 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,6 +4,8 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/mutex.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> struct seq_operations; struct file; @@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); +int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +{ + return seq_bitmap(m, mask->bits, NR_CPUS); +} + +static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap(m, mask->bits, MAX_NUMNODES); +} int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); diff --git a/include/linux/serio.h b/include/linux/serio.h index e72716cca57..25641d9e0ea 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -87,11 +87,10 @@ void serio_unregister_port(struct serio *serio); void serio_unregister_child_port(struct serio *serio); int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name); -static inline int serio_register_driver(struct serio_driver *drv) +static inline int __must_check serio_register_driver(struct serio_driver *drv) { return __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME); } -int serio_register_driver(struct serio_driver *drv); void serio_unregister_driver(struct serio_driver *drv); static inline int serio_write(struct serio *serio, unsigned char data) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f2d12d5a21b..fd83f2584b1 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -43,7 +43,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) } #ifdef CONFIG_TMPFS_POSIX_ACL -int shmem_permission(struct inode 
*, int, struct nameidata *); +int shmem_permission(struct inode *, int); int shmem_acl_init(struct inode *, struct inode *); void shmem_acl_destroy_inode(struct inode *); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7ea44f6621f..2725f4e5a9b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -146,8 +146,14 @@ struct skb_shared_info { unsigned short gso_segs; unsigned short gso_type; __be32 ip6_frag_id; +#ifdef CONFIG_HAS_DMA + unsigned int num_dma_maps; +#endif struct sk_buff *frag_list; skb_frag_t frags[MAX_SKB_FRAGS]; +#ifdef CONFIG_HAS_DMA + dma_addr_t dma_maps[MAX_SKB_FRAGS + 1]; +#endif }; /* We divide dataref into two halves. The higher 16 bits hold references @@ -243,6 +249,7 @@ typedef unsigned char *sk_buff_data_t; * @tc_index: Traffic control index * @tc_verd: traffic control verdict * @ndisc_nodetype: router type (from link layer) + * @do_not_encrypt: set to prevent encryption of this frame * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@ -316,7 +323,10 @@ struct sk_buff { #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif - /* 14 bit hole */ +#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) + __u8 do_not_encrypt:1; +#endif + /* 0/13/14 bit hole */ #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; @@ -349,6 +359,14 @@ struct sk_buff { #include <asm/system.h> +#ifdef CONFIG_HAS_DMA +#include <linux/dma-mapping.h> +extern int skb_dma_map(struct device *dev, struct sk_buff *skb, + enum dma_data_direction dir); +extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb, + enum dma_data_direction dir); +#endif + extern void kfree_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, @@ -365,6 +383,8 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, return __alloc_skb(size, priority, 1, -1); } +extern int skb_recycle_check(struct sk_buff *skb, int skb_size); + extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); extern struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); @@ -455,6 +475,37 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) } /** + * skb_queue_is_last - check if skb is the last entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the last buffer on the list. + */ +static inline bool skb_queue_is_last(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return (skb->next == (struct sk_buff *) list); +} + +/** + * skb_queue_next - return the next packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the next packet in @list after @skb. It is only valid to + * call this if skb_queue_is_last() evaluates to false. + */ +static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_last(list, skb)); + return skb->next; +} + +/** * skb_get - reference buffer * @skb: buffer to reference * @@ -642,6 +693,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +/** + * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head + * @list: queue to initialize + * + * This initializes only the list and queue length aspects of + * an sk_buff_head object. 
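skb_queue_is_last() and skb_queue_next() from the skbuff.h hunk above let a walk resume from an arbitrary buffer. A small sketch, assuming the caller already holds whatever lock protects the queue; example_walk_rest() is a made-up helper:

#include <linux/skbuff.h>

static void example_walk_rest(struct sk_buff_head *list, struct sk_buff *skb)
{
	while (!skb_queue_is_last(list, skb)) {
		skb = skb_queue_next(list, skb);
		/* ... inspect skb ... */
	}
}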
This allows to initialize the list + * aspects of an sk_buff_head without reinitializing things like + * the spinlock. It can also be used for on-stack sk_buff_head + * objects where the spinlock is known to not be used. + */ +static inline void __skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} + /* * This function creates a split out lock class for each invocation; * this is needed for now since a whole lot of users of the skb-queue @@ -653,8 +720,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) static inline void skb_queue_head_init(struct sk_buff_head *list) { spin_lock_init(&list->lock); - list->prev = list->next = (struct sk_buff *)list; - list->qlen = 0; + __skb_queue_head_init(list); } static inline void skb_queue_head_init_class(struct sk_buff_head *list, @@ -681,6 +747,83 @@ static inline void __skb_insert(struct sk_buff *newsk, list->qlen++; } +static inline void __skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff *prev, + struct sk_buff *next) +{ + struct sk_buff *first = list->next; + struct sk_buff *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * skb_queue_splice - join two skb lists, this is designed for stacks + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * The list at @list is reinitialised + */ +static inline void skb_queue_splice_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, (struct sk_buff *) head, head->next); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + +/** + * skb_queue_splice_tail - join two skb lists, each list being a queue + * @list: the new list to add + * @head: the place to add it in the first list + */ +static inline void skb_queue_splice_tail(const struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + } +} + +/** + * skb_queue_splice_tail - join two skb lists and reinitialise the emptied list + * @list: the new list to add + * @head: the place to add it in the first list + * + * Each of the lists is a queue. 
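The new splice helpers are meant for exactly the pattern the comments hint at: move a whole queue onto an on-stack sk_buff_head under the lock, then process it lock-free. A sketch, with 'rxq' standing in for some driver-owned receive queue:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_drain(struct sk_buff_head *rxq)
{
	struct sk_buff_head tmp;
	struct sk_buff *skb;

	__skb_queue_head_init(&tmp);		/* on-stack list: spinlock unused */

	spin_lock_bh(&rxq->lock);
	skb_queue_splice_init(rxq, &tmp);	/* rxq is left empty */
	spin_unlock_bh(&rxq->lock);

	while ((skb = __skb_dequeue(&tmp)) != NULL)
		netif_rx(skb);
}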
+ * The list at @list is reinitialised + */ +static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, + struct sk_buff_head *head) +{ + if (!skb_queue_empty(list)) { + __skb_queue_splice(list, head->prev, (struct sk_buff *) head); + head->qlen += list->qlen; + __skb_queue_head_init(list); + } +} + /** * __skb_queue_after - queue a buffer at the list head * @list: list to use @@ -825,6 +968,9 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i, skb_shinfo(skb)->nr_frags = i + 1; } +extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size); + #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list) #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) @@ -897,7 +1043,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && - !__pskb_pull_tail(skb, len-skb_headlen(skb))) + !__pskb_pull_tail(skb, len - skb_headlen(skb))) return NULL; skb->len -= len; return skb->data += len; @@ -914,7 +1060,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return 1; if (unlikely(len > skb->len)) return 0; - return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } /** @@ -1239,6 +1385,26 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, return __netdev_alloc_skb(dev, length, GFP_ATOMIC); } +extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask); + +/** + * netdev_alloc_page - allocate a page for ps-rx on a specific device + * @dev: network device to receive on + * + * Allocate a new page node local to the specified device. + * + * %NULL is returned if there is no free memory. 
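netdev_alloc_page() and skb_add_rx_frag() pair naturally in a paged-receive path. A hedged sketch; 'dev', 'skb' and 'len' come from a hypothetical driver rx routine, and example_add_rx_page() is not part of the patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_add_rx_page(struct net_device *dev,
			       struct sk_buff *skb, int len)
{
	struct page *page = netdev_alloc_page(dev);

	if (!page)
		return -ENOMEM;		/* NULL means no free memory */

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len);
	return 0;
}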
+ */ +static inline struct page *netdev_alloc_page(struct net_device *dev) +{ + return __netdev_alloc_page(dev, GFP_ATOMIC); +} + +static inline void netdev_free_page(struct net_device *dev, struct page *page) +{ + __free_page(page); +} + /** * skb_clone_writable - is the header of a clone writable * @skb: buffer to check @@ -1317,7 +1483,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) unsigned int size = skb->len; if (likely(size >= len)) return 0; - return skb_pad(skb, len-size); + return skb_pad(skb, len - size); } static inline int skb_add_data(struct sk_buff *skb, @@ -1430,6 +1596,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) skb != (struct sk_buff *)(queue); \ skb = tmp, tmp = skb->next) +#define skb_queue_walk_from(queue, skb) \ + for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \ + skb = skb->next) + +#define skb_queue_walk_from_safe(queue, skb, tmp) \ + for (tmp = skb->next; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->next) + #define skb_queue_reverse_walk(queue, skb) \ for (skb = (queue)->prev; \ prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \ @@ -1448,6 +1623,10 @@ extern int skb_copy_datagram_iovec(const struct sk_buff *from, extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, struct iovec *iov); +extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, + int offset, + struct iovec *from, + int len); extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); diff --git a/include/linux/slab.h b/include/linux/slab.h index 41103910f8a..5ff9676c1e2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -58,7 +58,7 @@ int slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, - void (*)(struct kmem_cache *, void *)); + void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); @@ -96,6 +96,7 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); /* * Common kmalloc functions provided by all allocators */ +void * __must_check __krealloc(const void *, size_t, gfp_t); void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); size_t ksize(const void *); diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d117ea2825a..2f5c16b1aac 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -46,6 +46,7 @@ struct kmem_cache_cpu { struct kmem_cache_node { spinlock_t list_lock; /* Protect partial list and nr_partial */ unsigned long nr_partial; + unsigned long min_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG atomic_long_t nr_slabs; @@ -85,7 +86,7 @@ struct kmem_cache { struct kmem_cache_order_objects min; gfp_t allocflags; /* gfp flags to use on each alloc */ int refcount; /* Refcount for slab cache destroy */ - void (*ctor)(struct kmem_cache *, void *); + void (*ctor)(void *); int inuse; /* Offset to metadata */ int align; /* Alignment */ const char *name; /* Name (only for display!) 
*/ diff --git a/include/linux/smb.h b/include/linux/smb.h index caa43b2370c..82fefddc598 100644 --- a/include/linux/smb.h +++ b/include/linux/smb.h @@ -11,7 +11,9 @@ #include <linux/types.h> #include <linux/magic.h> +#ifdef __KERNEL__ #include <linux/time.h> +#endif enum smb_protocol { SMB_PROTOCOL_NONE, diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 3827b922ba1..bc21db598c0 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -16,8 +16,19 @@ #define SMC91X_USE_DMA (1 << 6) +#define RPC_LED_100_10 (0x00) /* LED = 100Mbps OR's with 10Mbps link detect */ +#define RPC_LED_RES (0x01) /* LED = Reserved */ +#define RPC_LED_10 (0x02) /* LED = 10Mbps link detect */ +#define RPC_LED_FD (0x03) /* LED = Full Duplex Mode */ +#define RPC_LED_TX_RX (0x04) /* LED = TX or RX packet occurred */ +#define RPC_LED_100 (0x05) /* LED = 100Mbps link dectect */ +#define RPC_LED_TX (0x06) /* LED = TX packet occurred */ +#define RPC_LED_RX (0x07) /* LED = RX packet occurred */ + struct smc91x_platdata { unsigned long flags; + unsigned char leda; + unsigned char ledb; }; #endif /* __SMC91X_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 48262f86c96..66484d4a845 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data); #ifdef CONFIG_USE_GENERIC_SMP_HELPERS void generic_smp_call_function_single_interrupt(void); void generic_smp_call_function_interrupt(void); -void init_call_single_data(void); void ipi_call_lock(void); void ipi_call_unlock(void); void ipi_call_lock_irq(void); void ipi_call_unlock_irq(void); -#else -static inline void init_call_single_data(void) -{ -} #endif /* diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 5df62ef1280..7a6e6bba4a7 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -214,6 +214,8 @@ enum LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */ LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */ LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ + LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ + LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ __LINUX_MIB_MAX }; diff --git a/include/linux/socket.h b/include/linux/socket.h index 950af631e7f..20fc4bbfca4 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -189,7 +189,9 @@ struct ucred { #define AF_BLUETOOTH 31 /* Bluetooth sockets */ #define AF_IUCV 32 /* IUCV sockets */ #define AF_RXRPC 33 /* RxRPC sockets */ -#define AF_MAX 34 /* For now.. */ +#define AF_ISDN 34 /* mISDN sockets */ +#define AF_PHONET 35 /* Phonet sockets */ +#define AF_MAX 36 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -225,6 +227,8 @@ struct ucred { #define PF_BLUETOOTH AF_BLUETOOTH #define PF_IUCV AF_IUCV #define PF_RXRPC AF_RXRPC +#define PF_ISDN AF_ISDN +#define PF_PHONET AF_PHONET #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
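The smc91x platform data gains leda/ledb so board code can pick the LED functions. An illustrative board-file entry, assuming the pre-existing SMC91X_USE_16BIT and SMC91X_NOWAIT flags from the same header; the structure name and chosen values are made up:

#include <linux/smc91x.h>

static struct smc91x_platdata example_smc91x_info = {
	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
	.leda  = RPC_LED_100_10,	/* LED A: link indicator */
	.ledb  = RPC_LED_TX_RX,		/* LED B: TX/RX activity */
};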
*/ @@ -293,6 +297,7 @@ struct ucred { #define SOL_RXRPC 272 #define SOL_PPPOL2TP 273 #define SOL_BLUETOOTH 274 +#define SOL_PNPIPE 275 /* IPX options */ #define IPX_TYPE 1 diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index daf744017a3..05eab2f11e6 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -43,6 +43,9 @@ struct ads7846_platform_data { u16 debounce_tol; /* tolerance used for filtering */ u16 debounce_rep; /* additional consecutive good readings * required after the first two */ + int gpio_pendown; /* the GPIO used to decide the pendown + * state if get_pendown_state == NULL + */ int (*get_pendown_state)(void); int (*filter_init) (struct ads7846_platform_data *pdata, void **filter_data); diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h new file mode 100644 index 00000000000..6692b3418cc --- /dev/null +++ b/include/linux/spi/corgi_lcd.h @@ -0,0 +1,20 @@ +#ifndef __LINUX_SPI_CORGI_LCD_H +#define __LINUX_SPI_CORGI_LCD_H + +#define CORGI_LCD_MODE_QVGA 1 +#define CORGI_LCD_MODE_VGA 2 + +struct corgi_lcd_platform_data { + int init_mode; + int max_intensity; + int default_intensity; + int limit_mask; + + int gpio_backlight_on; /* -1 if n/a */ + int gpio_backlight_cont; /* -1 if n/a */ + + void (*notify)(int intensity); + void (*kick_battery)(void); +}; + +#endif /* __LINUX_SPI_CORGI_LCD_H */ diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h new file mode 100644 index 00000000000..b4d9fa6f797 --- /dev/null +++ b/include/linux/spi/orion_spi.h @@ -0,0 +1,17 @@ +/* + * orion_spi.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_SPI_ORION_SPI_H +#define __LINUX_SPI_ORION_SPI_H + +struct orion_spi_info { + u32 tclk; /* no <linux/clk.h> support yet */ +}; + + +#endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a9cc29d4665..4be01bb4437 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -778,8 +778,20 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but * normally that would be handled by spi_unregister_master(). + * + * You can also use spi_alloc_device() and spi_add_device() to use a two + * stage registration sequence for each spi_device. This gives the caller + * some more control over the spi_device structure before it is registered, + * but requires that caller to initialize fields that would otherwise + * be defined using the board info. 
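The spi.h comment above describes the new two-stage registration. A sketch of that sequence, assuming an already-acquired 'master'; every value filled in here is illustrative:

#include <linux/spi/spi.h>

static int example_add_spi_device(struct spi_master *master)
{
	struct spi_device *spi;

	spi = spi_alloc_device(master);
	if (!spi)
		return -ENOMEM;

	/* tweak the device before it becomes visible to drivers */
	spi->chip_select  = 1;
	spi->max_speed_hz = 1000000;
	spi->mode         = SPI_MODE_0;
	/* ... fill in modalias, irq, controller_data as the board requires ... */

	if (spi_add_device(spi)) {
		spi_dev_put(spi);	/* drop the reference taken at allocation */
		return -ENODEV;
	}
	return 0;
}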
*/ extern struct spi_device * +spi_alloc_device(struct spi_master *master); + +extern int +spi_add_device(struct spi_device *spi); + +extern struct spi_device * spi_new_device(struct spi_master *, struct spi_board_info *); static inline void diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 61e5610ad16..e0c0fccced4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -183,8 +183,14 @@ do { \ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) #else # define spin_lock_nested(lock, subclass) _spin_lock(lock) +# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #define write_lock(lock) _write_lock(lock) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8a2307ce729..d79845d034b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 4bf8cade9db..e530026eedf 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) { switch (dev->bus->bustype) { case SSB_BUSTYPE_PCI: - return pci_dma_mapping_error(addr); + return pci_dma_mapping_error(dev->bus->host_pci, addr); case SSB_BUSTYPE_SSB: - return dma_mapping_error(addr); + return dma_mapping_error(dev->dev, addr); default: __ssb_dma_not_implemented(dev); } diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index ebad0bac980..99a0f991e85 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -245,8 +245,6 @@ /* SPROM Revision 3 (inherits most data from rev 2) */ #define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */ -#define SSB_SPROM3_ET0MAC 0x1050 /* 6 bytes MAC address for Ethernet ?? */ -#define SSB_SPROM3_ET1MAC 0x1050 /* 6 bytes MAC address for 802.11a ?? */ #define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ #define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ @@ -267,8 +265,6 @@ /* SPROM Revision 4 */ #define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */ -#define SSB_SPROM4_ET0MAC 0x1018 /* 6 bytes MAC address for Ethernet ?? */ -#define SSB_SPROM4_ET1MAC 0x1018 /* 6 bytes MAC address for 802.11a ?? */ #define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? 
*/ #define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ #define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ @@ -316,6 +312,21 @@ #define SSB_SPROM4_PA1B1 0x1090 #define SSB_SPROM4_PA1B2 0x1092 +/* SPROM Revision 5 (inherits most data from rev 4) */ +#define SSB_SPROM5_BFLLO 0x104A /* Boardflags (low 16 bits) */ +#define SSB_SPROM5_BFLHI 0x104C /* Board Flags Hi */ +#define SSB_SPROM5_IL0MAC 0x1052 /* 6 byte MAC address for a/b/g/n */ +#define SSB_SPROM5_CCODE 0x1044 /* Country Code (2 bytes) */ +#define SSB_SPROM5_GPIOA 0x1076 /* Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM5_GPIOA_P1_SHIFT 8 +#define SSB_SPROM5_GPIOB 0x1078 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM5_GPIOB_P3_SHIFT 8 + + /* Values for SSB_SPROM1_BINF_CCODE */ enum { SSB_SPROM1CCODE_WORLD = 0, diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 5da9794b2d7..b106fd8e0d5 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -1,6 +1,8 @@ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H +struct task_struct; + #ifdef CONFIG_STACKTRACE struct stack_trace { unsigned int nr_entries, max_entries; diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 5bfc553bdb2..faf1519b5ad 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -3,43 +3,42 @@ /* "Bogolock": stop the entire machine, disable interrupts. This is a very heavy lock, which is equivalent to grabbing every spinlock (and more). So the "read" side to such a lock is anything which - diables preeempt. */ + disables preeempt. */ #include <linux/cpu.h> +#include <linux/cpumask.h> #include <asm/system.h> #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) + /** - * stop_machine_run: freeze the machine on all CPUs and run this function + * stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn() - * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * - * Description: This causes a thread to be scheduled on every other cpu, - * each of which disables interrupts, and finally interrupts are disabled - * on the current CPU. The result is that noone is holding a spinlock - * or inside any other preempt-disabled region when @fn() runs. + * Description: This causes a thread to be scheduled on every cpu, + * each of which disables interrupts. The result is that noone is + * holding a spinlock or inside any other preempt-disabled region when + * @fn() runs. * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. */ -int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); +int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); /** - * __stop_machine_run: freeze the machine on all CPUs and run this function + * __stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn - * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * - * Description: This is a special version of the above, which returns the - * thread which has run @fn(): kthread_stop will return the return value - * of @fn(). 
Used by hotplug cpu. + * Description: This is a special version of the above, which assumes cpus + * won't come or go while it's being called. Used by hotplug cpu. */ -struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu); - +int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); #else -static inline int stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu) +static inline int stop_machine(int (*fn)(void *), void *data, + const cpumask_t *cpus) { int ret; local_irq_disable(); diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h new file mode 100644 index 00000000000..a3eb2f65b65 --- /dev/null +++ b/include/linux/string_helpers.h @@ -0,0 +1,16 @@ +#ifndef _LINUX_STRING_HELPERS_H_ +#define _LINUX_STRING_HELPERS_H_ + +#include <linux/types.h> + +/* Descriptions of the types of units to + * print in */ +enum string_size_units { + STRING_UNITS_10, /* use powers of 10^3 (standard SI) */ + STRING_UNITS_2, /* use binary powers of 2^10 */ +}; + +int string_get_size(u64 size, enum string_size_units units, + char *buf, int len); + +#endif diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index ef2e3a20bf3..dc05b54bd3a 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -143,7 +143,6 @@ struct svcxprt_rdma { unsigned long sc_flags; struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ struct list_head sc_read_complete_q; - spinlock_t sc_read_complete_lock; struct work_struct sc_work; }; /* sc_flags */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index e8e69159af7..2ce8207686e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -217,11 +217,11 @@ struct platform_hibernation_ops { #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ extern void __register_nosave_region(unsigned long b, unsigned long e, int km); -static inline void register_nosave_region(unsigned long b, unsigned long e) +static inline void __init register_nosave_region(unsigned long b, unsigned long e) { __register_nosave_region(b, e, 0); } -static inline void register_nosave_region_late(unsigned long b, unsigned long e) +static inline void __init register_nosave_region_late(unsigned long b, unsigned long e) { __register_nosave_region(b, e, 1); } @@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e) } #endif +extern struct mutex pm_mutex; + #endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 00000000000..270d5c208a8 --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,309 @@ +#ifndef _LINUX_SWAB_H +#define _LINUX_SWAB_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <asm/byteorder.h> + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
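The reworked stop_machine() above takes a cpumask instead of a cpu number; passing NULL lets the core pick any online cpu. A hedged usage sketch, with example_patch_step() standing in for whatever must run while all CPUs are quiesced:

#include <linux/stop_machine.h>

static int example_patch_step(void *data)
{
	/* runs while every online CPU spins with interrupts disabled */
	*(int *)data = 1;
	return 0;
}

static int example_apply(void)
{
	int applied = 0;

	return stop_machine(example_patch_step, &applied, NULL);
}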
+ */ +#define __const_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define __const_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define __const_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define __const_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define __const_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 ___swab16(__u16 val) +{ +#ifdef __arch_swab16 + return __arch_swab16(val); +#elif defined(__arch_swab16p) + return __arch_swab16p(&val); +#else + return __const_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 ___swab32(__u32 val) +{ +#ifdef __arch_swab32 + return __arch_swab32(val); +#elif defined(__arch_swab32p) + return __arch_swab32p(&val); +#else + return __const_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 ___swab64(__u64 val) +{ +#ifdef __arch_swab64 + return __arch_swab64(val); +#elif defined(__arch_swab64p) + return __arch_swab64p(&val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); +#else + return __const_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahw32(__u32 val) +{ +#ifdef __arch_swahw32 + return __arch_swahw32(val); +#elif defined(__arch_swahw32p) + return __arch_swahw32p(&val); +#else + return __const_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#elif defined(__arch_swahb32p) + return __arch_swahb32p(&val); +#else + return __const_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#define __swab16(x) \ + (__builtin_constant_p((__u16)(x)) ? \ + __const_swab16((x)) : \ + ___swab16((x))) + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#define __swab32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swab32((x)) : \ + ___swab32((x))) + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#define __swab64(x) \ + (__builtin_constant_p((__u64)(x)) ? \ + __const_swab64((x)) : \ + ___swab64((x))) + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? 
\ + __const_swahw32((x)) : \ + ___swahw32((x))) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swahb32((x)) : \ + ___swahb32((x))) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. + */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + +#ifdef __KERNEL__ +# define swab16 __swab16 +# define swab32 __swab32 +# define swab64 __swab64 +# define swahw32 __swahw32 +# define swahb32 __swahb32 +# define swab16p __swab16p +# define swab32p __swab32p +# define swab64p __swab64p +# define swahw32p __swahw32p +# define swahb32p __swahb32p +# define swab16s __swab16s +# define swab32s __swab32s +# define swab64s __swab64s +# define swahw32s __swahw32s +# define 
swahb32s __swahb32s +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SWAB_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 0b3377650c8..de40f169a4e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -237,7 +237,6 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, /* linux/mm/swapfile.c */ extern long total_swap_pages; -extern unsigned int nr_swapfiles; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); @@ -254,8 +253,6 @@ extern int can_share_swap_page(struct page *); extern int remove_exclusive_swap_page(struct page *); struct backing_dev_info; -extern spinlock_t swap_lock; - /* linux/mm/thrash.c */ extern struct mm_struct * swap_token_mm; extern void grab_swap_token(void); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 24141b4d1a1..d0437f36921 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -947,6 +947,22 @@ struct ctl_table; struct nsproxy; struct ctl_table_root; +struct ctl_table_set { + struct list_head list; + struct ctl_table_set *parent; + int (*is_seen)(struct ctl_table_set *); +}; + +extern void setup_sysctl_set(struct ctl_table_set *p, + struct ctl_table_set *parent, + int (*is_seen)(struct ctl_table_set *)); + +struct ctl_table_header; + +extern void sysctl_head_get(struct ctl_table_header *); +extern void sysctl_head_put(struct ctl_table_header *); +extern int sysctl_is_seen(struct ctl_table_header *); +extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *); extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev); extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, struct ctl_table_header *prev); @@ -1049,8 +1065,8 @@ struct ctl_table struct ctl_table_root { struct list_head root_list; - struct list_head header_list; - struct list_head *(*lookup)(struct ctl_table_root *root, + struct ctl_table_set default_set; + struct ctl_table_set *(*lookup)(struct ctl_table_root *root, struct nsproxy *namespaces); int (*permissions)(struct ctl_table_root *root, struct nsproxy *namespaces, struct ctl_table *table); @@ -1063,9 +1079,14 @@ struct ctl_table_header struct ctl_table *ctl_table; struct list_head ctl_entry; int used; + int count; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_table *attached_by; + struct ctl_table *attached_to; + struct ctl_table_header *parent; }; /* struct ctl_path describes where in the hierarchy a table is added */ diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h index 44d00e9ccee..5e88afc9a2f 100644 --- a/include/linux/task_io_accounting.h +++ b/include/linux/task_io_accounting.h @@ -8,8 +8,19 @@ * Blame akpm@osdl.org for all this. */ -#ifdef CONFIG_TASK_IO_ACCOUNTING struct task_io_accounting { +#ifdef CONFIG_TASK_XACCT + /* bytes read */ + u64 rchar; + /* bytes written */ + u64 wchar; + /* # of read syscalls */ + u64 syscr; + /* # of write syscalls */ + u64 syscw; +#endif /* CONFIG_TASK_XACCT */ + +#ifdef CONFIG_TASK_IO_ACCOUNTING /* * The number of bytes which this task has caused to be read from * storage. @@ -30,8 +41,5 @@ struct task_io_accounting { * information loss in doing that. 
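For reference, the helpers from the new <linux/swab.h> are used as below; the locals are made up and the comments show the expected results:

#include <linux/swab.h>

static void example_swab(void)
{
	__u32 word = 0x12345678;
	__u16 half = 0x1234;

	word = swab32(word);	/* 0x78563412; constant-folds when possible */
	swab16s(&half);		/* in place: half becomes 0x3412 */
}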
*/ u64 cancelled_write_bytes; +#endif /* CONFIG_TASK_IO_ACCOUNTING */ }; -#else -struct task_io_accounting { -}; -#endif diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h index ff46c6fad79..4d090f9ee60 100644 --- a/include/linux/task_io_accounting_ops.h +++ b/include/linux/task_io_accounting_ops.h @@ -40,9 +40,17 @@ static inline void task_io_account_cancelled_write(size_t bytes) current->ioac.cancelled_write_bytes += bytes; } -static inline void task_io_accounting_init(struct task_struct *tsk) +static inline void task_io_accounting_init(struct task_io_accounting *ioac) { - memset(&tsk->ioac, 0, sizeof(tsk->ioac)); + memset(ioac, 0, sizeof(*ioac)); +} + +static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + dst->read_bytes += src->read_bytes; + dst->write_bytes += src->write_bytes; + dst->cancelled_write_bytes += src->cancelled_write_bytes; } #else @@ -69,9 +77,37 @@ static inline void task_io_account_cancelled_write(size_t bytes) { } -static inline void task_io_accounting_init(struct task_struct *tsk) +static inline void task_io_accounting_init(struct task_io_accounting *ioac) +{ +} + +static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) { } -#endif /* CONFIG_TASK_IO_ACCOUNTING */ -#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */ +#endif /* CONFIG_TASK_IO_ACCOUNTING */ + +#ifdef CONFIG_TASK_XACCT +static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + dst->rchar += src->rchar; + dst->wchar += src->wchar; + dst->syscr += src->syscr; + dst->syscw += src->syscw; +} +#else +static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ +} +#endif /* CONFIG_TASK_XACCT */ + +static inline void task_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + task_chr_io_accounting_add(dst, src); + task_blk_io_accounting_add(dst, src); +} +#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */ diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild index 6dac0d7365c..76990937f4c 100644 --- a/include/linux/tc_act/Kbuild +++ b/include/linux/tc_act/Kbuild @@ -3,3 +3,4 @@ header-y += tc_ipt.h header-y += tc_mirred.h header-y += tc_pedit.h header-y += tc_nat.h +header-y += tc_skbedit.h diff --git a/include/linux/tc_act/tc_skbedit.h b/include/linux/tc_act/tc_skbedit.h new file mode 100644 index 00000000000..a14e461a7af --- /dev/null +++ b/include/linux/tc_act/tc_skbedit.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2008, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
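task_io_accounting_add() consolidates the per-field copying that callers used to open-code. A sketch of the intended use when a child is reaped, merging its counters into the parent's signal-wide totals; example_account_child() itself is not part of the patch:

#include <linux/sched.h>
#include <linux/task_io_accounting_ops.h>

static void example_account_child(struct signal_struct *sig,
				  struct task_struct *child)
{
	task_io_accounting_add(&sig->ioac, &child->ioac);
}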
+ * + * Author: Alexander Duyck <alexander.h.duyck@intel.com> + */ + +#ifndef __LINUX_TC_SKBEDIT_H +#define __LINUX_TC_SKBEDIT_H + +#include <linux/pkt_cls.h> + +#define TCA_ACT_SKBEDIT 11 + +#define SKBEDIT_F_PRIORITY 0x1 +#define SKBEDIT_F_QUEUE_MAPPING 0x2 + +struct tc_skbedit { + tc_gen; +}; + +enum { + TCA_SKBEDIT_UNSPEC, + TCA_SKBEDIT_TM, + TCA_SKBEDIT_PARMS, + TCA_SKBEDIT_PRIORITY, + TCA_SKBEDIT_QUEUE_MAPPING, + __TCA_SKBEDIT_MAX +}; +#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1) + +#endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 2e2557388e3..fe77e1499ab 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -312,8 +312,11 @@ struct tcp_sock { u32 retrans_out; /* Retransmitted packets out */ u16 urg_data; /* Saved octet of OOB data and control flags */ - u8 urg_mode; /* In urgent mode */ u8 ecn_flags; /* ECN status bits. */ + u8 reordering; /* Packet reordering metric. */ + u32 snd_up; /* Urgent pointer */ + + u8 keepalive_probes; /* num of allowed keep alive probes */ /* * Options received (usually on last packet, some only on SYN packets). */ @@ -342,7 +345,6 @@ struct tcp_sock { struct sk_buff* lost_skb_hint; struct sk_buff *scoreboard_skb_hint; struct sk_buff *retransmit_skb_hint; - struct sk_buff *forward_skb_hint; struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ @@ -358,12 +360,10 @@ struct tcp_sock { */ int lost_cnt_hint; - int retransmit_cnt_hint; + u32 retransmit_high; /* L-bits may be on up to this seqno */ u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ - u8 reordering; /* Packet reordering metric. */ - u8 keepalive_probes; /* num of allowed keep alive probes */ u32 prior_ssthresh; /* ssthresh saved at recovery start */ u32 high_seq; /* snd_nxt at onset of congestion */ @@ -375,8 +375,6 @@ struct tcp_sock { u32 total_retrans; /* Total retransmits for entire connection */ u32 urg_seq; /* Seq of received urgent pointer */ - u32 snd_up; /* Urgent pointer */ - unsigned int keepalive_time; /* time before keep alive takes place */ unsigned int keepalive_intvl; /* time interval between keep alive probes */ diff --git a/include/linux/tick.h b/include/linux/tick.h index d3c02695dc5..98921a3e1aa 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -74,10 +74,13 @@ extern struct tick_device *tick_get_device(int cpu); extern int tick_init_highres(void); extern int tick_program_event(ktime_t expires, int force); extern void tick_setup_sched_timer(void); +# endif + +# if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS extern void tick_cancel_sched_timer(int cpu); # else static inline void tick_cancel_sched_timer(int cpu) { } -# endif /* HIGHRES */ +# endif # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST extern struct tick_device *tick_get_broadcast_device(void); @@ -123,7 +126,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void) return len; } static inline void tick_nohz_stop_idle(int cpu) { } -static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; } +static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } # endif /* !NO_HZ */ #endif diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h new file mode 100644 index 00000000000..6186a789d6c --- /dev/null +++ b/include/linux/tracehook.h @@ -0,0 +1,582 @@ +/* + * Tracing hooks + * + * Copyright (C) 2008 Red Hat, Inc. All rights reserved. 
+ * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * This file defines hook entry points called by core code where + * user tracing/debugging support might need to do something. These + * entry points are called tracehook_*(). Each hook declared below + * has a detailed kerneldoc comment giving the context (locking et + * al) from which it is called, and the meaning of its return value. + * + * Each function here typically has only one call site, so it is ok + * to have some nontrivial tracehook_*() inlines. In all cases, the + * fast path when no tracing is enabled should be very short. + * + * The purpose of this file and the tracehook_* layer is to consolidate + * the interface that the kernel core and arch code uses to enable any + * user debugging or tracing facility (such as ptrace). The interfaces + * here are carefully documented so that maintainers of core and arch + * code do not need to think about the implementation details of the + * tracing facilities. Likewise, maintainers of the tracing code do not + * need to understand all the calling core or arch code in detail, just + * documented circumstances of each call, such as locking conditions. + * + * If the calling core code changes so that locking is different, then + * it is ok to change the interface documented here. The maintainer of + * core code changing should notify the maintainers of the tracing code + * that they need to work out the change. + * + * Some tracehook_*() inlines take arguments that the current tracing + * implementations might not necessarily use. These function signatures + * are chosen to pass in all the information that is on hand in the + * caller and might conceivably be relevant to a tracer, so that the + * core code won't have to be updated when tracing adds more features. + * If a call site changes so that some of those parameters are no longer + * already on hand without extra work, then the tracehook_* interface + * can change so there is no make-work burden on the core code. The + * maintainer of core code changing should notify the maintainers of the + * tracing code that they need to work out the change. + */ + +#ifndef _LINUX_TRACEHOOK_H +#define _LINUX_TRACEHOOK_H 1 + +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <linux/security.h> +struct linux_binprm; + +/** + * tracehook_expect_breakpoints - guess if task memory might be touched + * @task: current task, making a new mapping + * + * Return nonzero if @task is expected to want breakpoint insertion in + * its memory at some point. A zero return is no guarantee it won't + * be done, but this is a hint that it's known to be likely. + * + * May be called with @task->mm->mmap_sem held for writing. + */ +static inline int tracehook_expect_breakpoints(struct task_struct *task) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/* + * ptrace report for syscall entry and exit looks identical. + */ +static inline void ptrace_report_syscall(struct pt_regs *regs) +{ + int ptrace = task_ptrace(current); + + if (!(ptrace & PT_PTRACED)) + return; + + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. 
-brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} + +/** + * tracehook_report_syscall_entry - task is about to attempt a system call + * @regs: user register state of current task + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just entered the kernel for a system call. + * Full user register state is available here. Changing the values + * in @regs can affect the system call number and arguments to be tried. + * It is safe to block here, preventing the system call from beginning. + * + * Returns zero normally, or nonzero if the calling arch code should abort + * the system call. That must prevent normal entry so no system call is + * made. If @task ever returns to user mode after this, its register state + * is unspecified, but should be something harmless like an %ENOSYS error + * return. It should preserve enough information so that syscall_rollback() + * can work (see asm-generic/syscall.h). + * + * Called without locks, just after entering kernel mode. + */ +static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) +{ + ptrace_report_syscall(regs); + return 0; +} + +/** + * tracehook_report_syscall_exit - task has just finished a system call + * @regs: user register state of current task + * @step: nonzero if simulating single-step or block-step + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just finished an attempted system call. Full + * user register state is available here. It is safe to block here, + * preventing signals from being processed. + * + * If @step is nonzero, this report is also in lieu of the normal + * trap that would follow the system call instruction because + * user_enable_block_step() or user_enable_single_step() was used. + * In this case, %TIF_SYSCALL_TRACE might not be set. + * + * Called without locks, just before checking for pending signals. + */ +static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) +{ + ptrace_report_syscall(regs); +} + +/** + * tracehook_unsafe_exec - check for exec declared unsafe due to tracing + * @task: current task doing exec + * + * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. + * + * Called with task_lock() held on @task. + */ +static inline int tracehook_unsafe_exec(struct task_struct *task) +{ + int unsafe = 0; + int ptrace = task_ptrace(task); + if (ptrace & PT_PTRACED) { + if (ptrace & PT_PTRACE_CAP) + unsafe |= LSM_UNSAFE_PTRACE_CAP; + else + unsafe |= LSM_UNSAFE_PTRACE; + } + return unsafe; +} + +/** + * tracehook_tracer_task - return the task that is tracing the given task + * @tsk: task to consider + * + * Returns NULL if noone is tracing @task, or the &struct task_struct + * pointer to its tracer. + * + * Must called under rcu_read_lock(). The pointer returned might be kept + * live only by RCU. During exec, this may be called with task_lock() + * held on @task, still held from when tracehook_unsafe_exec() was called. + */ +static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) +{ + if (task_ptrace(tsk) & PT_PTRACED) + return rcu_dereference(tsk->parent); + return NULL; +} + +/** + * tracehook_report_exec - a successful exec was completed + * @fmt: &struct linux_binfmt that performed the exec + * @bprm: &struct linux_binprm containing exec details + * @regs: user-mode register state + * + * An exec just completed, we are shortly going to return to user mode. 
+ * The freshly initialized register state can be seen and changed in @regs. + * The name, file and other pointers in @bprm are still on hand to be + * inspected, but will be freed as soon as this returns. + * + * Called with no locks, but with some kernel resources held live + * and a reference on @fmt->module. + */ +static inline void tracehook_report_exec(struct linux_binfmt *fmt, + struct linux_binprm *bprm, + struct pt_regs *regs) +{ + if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && + unlikely(task_ptrace(current) & PT_PTRACED)) + send_sig(SIGTRAP, current, 0); +} + +/** + * tracehook_report_exit - task has begun to exit + * @exit_code: pointer to value destined for @current->exit_code + * + * @exit_code points to the value passed to do_exit(), which tracing + * might change here. This is almost the first thing in do_exit(), + * before freeing any resources or setting the %PF_EXITING flag. + * + * Called with no locks held. + */ +static inline void tracehook_report_exit(long *exit_code) +{ + ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); +} + +/** + * tracehook_prepare_clone - prepare for new child to be cloned + * @clone_flags: %CLONE_* flags from clone/fork/vfork system call + * + * This is called before a new user task is to be cloned. + * Its return value will be passed to tracehook_finish_clone(). + * + * Called with no locks held. + */ +static inline int tracehook_prepare_clone(unsigned clone_flags) +{ + if (clone_flags & CLONE_UNTRACED) + return 0; + + if (clone_flags & CLONE_VFORK) { + if (current->ptrace & PT_TRACE_VFORK) + return PTRACE_EVENT_VFORK; + } else if ((clone_flags & CSIGNAL) != SIGCHLD) { + if (current->ptrace & PT_TRACE_CLONE) + return PTRACE_EVENT_CLONE; + } else if (current->ptrace & PT_TRACE_FORK) + return PTRACE_EVENT_FORK; + + return 0; +} + +/** + * tracehook_finish_clone - new child created and being attached + * @child: new child task + * @clone_flags: %CLONE_* flags from clone/fork/vfork system call + * @trace: return value from tracehook_prepare_clone() + * + * This is called immediately after adding @child to its parent's children list. + * The @trace value is that returned by tracehook_prepare_clone(). + * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. + */ +static inline void tracehook_finish_clone(struct task_struct *child, + unsigned long clone_flags, int trace) +{ + ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); +} + +/** + * tracehook_report_clone - in parent, new child is about to start running + * @trace: return value from tracehook_prepare_clone() + * @regs: parent's user register state + * @clone_flags: flags from parent's system call + * @pid: new child's PID in the parent's namespace + * @child: new child task + * + * Called after a child is set up, but before it has been started + * running. @trace is the value returned by tracehook_prepare_clone(). + * This is not a good place to block, because the child has not started + * yet. Suspend the child here if desired, and then block in + * tracehook_report_clone_complete(). This must prevent the child from + * self-reaping if tracehook_report_clone_complete() uses the @child + * pointer; otherwise it might have died and been released by the time + * tracehook_report_clone_complete() is called. + * + * Called with no locks held, but the child cannot run until this returns. 
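The clone-related hooks in this header are meant to be called in a fixed order from the fork path, as the kerneldoc describes. The sketch below lays that order out; create_child() is a hypothetical stand-in for copy_process(), error handling is omitted, and this is not the actual kernel/fork.c code.

extern struct task_struct *create_child(unsigned long clone_flags,
					struct pt_regs *regs, int trace);

static long do_fork_sketch(unsigned long clone_flags, struct pt_regs *regs)
{
	int trace = tracehook_prepare_clone(clone_flags);
	struct task_struct *p;
	pid_t nr;

	/*
	 * The real copy_process() calls
	 * tracehook_finish_clone(p, clone_flags, trace) right after
	 * linking the child into its parent's children list, with the
	 * siglock and tasklist_lock held.
	 */
	p = create_child(clone_flags, regs, trace);
	nr = task_pid_vnr(p);

	/* Child fully set up but not yet running. */
	tracehook_report_clone(trace, regs, clone_flags, nr, p);

	/* ... start the child running ... */

	/*
	 * Child may already be running, or even reaped, unless
	 * tracehook_report_clone() prevented self-reaping (e.g. by
	 * queueing the SIGSTOP shown in its body below).
	 */
	tracehook_report_clone_complete(trace, regs, clone_flags, nr, p);

	return nr;
}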
+ */ +static inline void tracehook_report_clone(int trace, struct pt_regs *regs, + unsigned long clone_flags, + pid_t pid, struct task_struct *child) +{ + if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { + /* + * The child starts up with an immediate SIGSTOP. + */ + sigaddset(&child->pending.signal, SIGSTOP); + set_tsk_thread_flag(child, TIF_SIGPENDING); + } +} + +/** + * tracehook_report_clone_complete - new child is running + * @trace: return value from tracehook_prepare_clone() + * @regs: parent's user register state + * @clone_flags: flags from parent's system call + * @pid: new child's PID in the parent's namespace + * @child: child task, already running + * + * This is called just after the child has started running. This is + * just before the clone/fork syscall returns, or blocks for vfork + * child completion if @clone_flags has the %CLONE_VFORK bit set. + * The @child pointer may be invalid if a self-reaping child died and + * tracehook_report_clone() took no action to prevent it from self-reaping. + * + * Called with no locks held. + */ +static inline void tracehook_report_clone_complete(int trace, + struct pt_regs *regs, + unsigned long clone_flags, + pid_t pid, + struct task_struct *child) +{ + if (unlikely(trace)) + ptrace_event(0, trace, pid); +} + +/** + * tracehook_report_vfork_done - vfork parent's child has exited or exec'd + * @child: child task, already running + * @pid: new child's PID in the parent's namespace + * + * Called after a %CLONE_VFORK parent has waited for the child to complete. + * The clone/vfork system call will return immediately after this. + * The @child pointer may be invalid if a self-reaping child died and + * tracehook_report_clone() took no action to prevent it from self-reaping. + * + * Called with no locks held. + */ +static inline void tracehook_report_vfork_done(struct task_struct *child, + pid_t pid) +{ + ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid); +} + +/** + * tracehook_prepare_release_task - task is being reaped, clean up tracing + * @task: task in %EXIT_DEAD state + * + * This is called in release_task() just before @task gets finally reaped + * and freed. This would be the ideal place to remove and clean up any + * tracing-related state for @task. + * + * Called with no locks held. + */ +static inline void tracehook_prepare_release_task(struct task_struct *task) +{ +} + +/** + * tracehook_finish_release_task - final tracing clean-up + * @task: task in %EXIT_DEAD state + * + * This is called in release_task() when @task is being in the middle of + * being reaped. After this, there must be no tracing entanglements. + * + * Called with write_lock_irq(&tasklist_lock) held. + */ +static inline void tracehook_finish_release_task(struct task_struct *task) +{ + ptrace_release_task(task); +} + +/** + * tracehook_signal_handler - signal handler setup is complete + * @sig: number of signal being delivered + * @info: siginfo_t of signal being delivered + * @ka: sigaction setting that chose the handler + * @regs: user register state + * @stepping: nonzero if debugger single-step or block-step in use + * + * Called by the arch code after a signal handler has been set up. + * Register and stack state reflects the user handler about to run. + * Signal mask changes have already been made. + * + * Called without locks, shortly before returning to user mode + * (or handling more signals). 
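Per the kerneldoc just above, tracehook_signal_handler() (defined immediately below) is called from the arch signal-delivery path once the handler frame is in place. A rough sketch follows; setup_user_frame() is a hypothetical stand-in for the arch's setup_frame()/setup_rt_frame(), and %TIF_SINGLESTEP stands for whatever step flag the arch tracks.

extern int setup_user_frame(int sig, struct k_sigaction *ka,
			    siginfo_t *info, struct pt_regs *regs);

/* Illustrative arch signal delivery, heavily simplified. */
static void handle_signal_sketch(int sig, siginfo_t *info,
				 struct k_sigaction *ka, struct pt_regs *regs)
{
	int stepping = test_thread_flag(TIF_SINGLESTEP);

	if (setup_user_frame(sig, ka, info, regs) < 0)
		return;

	/* Register and stack state now reflect the user handler; let the
	 * tracing layer report it (ptrace sends SIGTRAP if stepping). */
	tracehook_signal_handler(sig, info, ka, regs, stepping);
}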
+ */ +static inline void tracehook_signal_handler(int sig, siginfo_t *info, + const struct k_sigaction *ka, + struct pt_regs *regs, int stepping) +{ + if (stepping) + ptrace_notify(SIGTRAP); +} + +/** + * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal + * @task: task receiving the signal + * @sig: signal number being sent + * @handler: %SIG_IGN or %SIG_DFL + * + * Return zero iff tracing doesn't care to examine this ignored signal, + * so it can short-circuit normal delivery and never even get queued. + * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN. + * + * Called with @task->sighand->siglock held. + */ +static inline int tracehook_consider_ignored_signal(struct task_struct *task, + int sig, + void __user *handler) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/** + * tracehook_consider_fatal_signal - suppress special handling of fatal signal + * @task: task receiving the signal + * @sig: signal number being sent + * @handler: %SIG_DFL or %SIG_IGN + * + * Return nonzero to prevent special handling of this termination signal. + * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, + * in which case force_sig() is about to reset it to %SIG_DFL. + * When this returns zero, this signal might cause a quick termination + * that does not give the debugger a chance to intercept the signal. + * + * Called with or without @task->sighand->siglock held. + */ +static inline int tracehook_consider_fatal_signal(struct task_struct *task, + int sig, + void __user *handler) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/** + * tracehook_force_sigpending - let tracing force signal_pending(current) on + * + * Called when recomputing our signal_pending() flag. Return nonzero + * to force the signal_pending() flag on, so that tracehook_get_signal() + * will be called before the next return to user mode. + * + * Called with @current->sighand->siglock held. + */ +static inline int tracehook_force_sigpending(void) +{ + return 0; +} + +/** + * tracehook_get_signal - deliver synthetic signal to traced task + * @task: @current + * @regs: task_pt_regs(@current) + * @info: details of synthetic signal + * @return_ka: sigaction for synthetic signal + * + * Return zero to check for a real pending signal normally. + * Return -1 after releasing the siglock to repeat the check. + * Return a signal number to induce an artifical signal delivery, + * setting *@info and *@return_ka to specify its details and behavior. + * + * The @return_ka->sa_handler value controls the disposition of the + * signal, no matter the signal number. For %SIG_DFL, the return value + * is a representative signal to indicate the behavior (e.g. %SIGTERM + * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop, + * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number + * reported will be @info->si_signo instead. + * + * Called with @task->sighand->siglock held, before dequeuing pending signals. + */ +static inline int tracehook_get_signal(struct task_struct *task, + struct pt_regs *regs, + siginfo_t *info, + struct k_sigaction *return_ka) +{ + return 0; +} + +/** + * tracehook_notify_jctl - report about job control stop/continue + * @notify: nonzero if this is the last thread in the group to stop + * @why: %CLD_STOPPED or %CLD_CONTINUED + * + * This is called when we might call do_notify_parent_cldstop(). + * It's called when about to stop for job control; we are already in + * %TASK_STOPPED state, about to call schedule(). 
It's also called when + * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made. + * + * Return nonzero to generate a %SIGCHLD with @why, which is + * normal if @notify is nonzero. + * + * Called with no locks held. + */ +static inline int tracehook_notify_jctl(int notify, int why) +{ + return notify || (current->ptrace & PT_PTRACED); +} + +#define DEATH_REAP -1 +#define DEATH_DELAYED_GROUP_LEADER -2 + +/** + * tracehook_notify_death - task is dead, ready to notify parent + * @task: @current task now exiting + * @death_cookie: value to pass to tracehook_report_death() + * @group_dead: nonzero if this was the last thread in the group to die + * + * A return value >= 0 means call do_notify_parent() with that signal + * number. Negative return value can be %DEATH_REAP to self-reap right + * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our + * parent. Note that a return value of 0 means a do_notify_parent() call + * that sends no signal, but still wakes up a parent blocked in wait*(). + * + * Called with write_lock_irq(&tasklist_lock) held. + */ +static inline int tracehook_notify_death(struct task_struct *task, + void **death_cookie, int group_dead) +{ + if (task->exit_signal == -1) + return task->ptrace ? SIGCHLD : DEATH_REAP; + + /* + * If something other than our normal parent is ptracing us, then + * send it a SIGCHLD instead of honoring exit_signal. exit_signal + * only has special meaning to our real parent. + */ + if (thread_group_empty(task) && !ptrace_reparented(task)) + return task->exit_signal; + + return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER; +} + +/** + * tracehook_report_death - task is dead and ready to be reaped + * @task: @current task now exiting + * @signal: return value from tracheook_notify_death() + * @death_cookie: value passed back from tracehook_notify_death() + * @group_dead: nonzero if this was the last thread in the group to die + * + * Thread has just become a zombie or is about to self-reap. If positive, + * @signal is the signal number just sent to the parent (usually %SIGCHLD). + * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is + * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. + * The @death_cookie was passed back by tracehook_notify_death(). + * + * If normal reaping is not inhibited, @task->exit_state might be changing + * in parallel. + * + * Called without locks. + */ +static inline void tracehook_report_death(struct task_struct *task, + int signal, void *death_cookie, + int group_dead) +{ +} + +#ifdef TIF_NOTIFY_RESUME +/** + * set_notify_resume - cause tracehook_notify_resume() to be called + * @task: task that will call tracehook_notify_resume() + * + * Calling this arranges that @task will call tracehook_notify_resume() + * before returning to user mode. If it's already running in user mode, + * it will enter the kernel and call tracehook_notify_resume() soon. + * If it's blocked, it will not be woken. + */ +static inline void set_notify_resume(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) + kick_process(task); +} + +/** + * tracehook_notify_resume - report when about to return to user mode + * @regs: user-mode registers of @current task + * + * This is called when %TIF_NOTIFY_RESUME has been set. Now we are + * about to return to user mode, and the user state in @regs can be + * inspected or adjusted. The caller in arch code has cleared + * %TIF_NOTIFY_RESUME before the call. 
If the flag gets set again + * asynchronously, this will be called again before we return to + * user mode. + * + * Called without locks. + */ +static inline void tracehook_notify_resume(struct pt_regs *regs) +{ +} +#endif /* TIF_NOTIFY_RESUME */ + +#endif /* <linux/tracehook.h> */ diff --git a/include/linux/tty.h b/include/linux/tty.h index e3579cb086e..0cbec74ec08 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -331,6 +331,8 @@ extern int tty_write_room(struct tty_struct *tty); extern void tty_driver_flush_buffer(struct tty_struct *tty); extern void tty_throttle(struct tty_struct *tty); extern void tty_unthrottle(struct tty_struct *tty); +extern int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, + struct winsize *ws); extern int is_current_pgrp_orphaned(void); extern struct pid *tty_get_pgrp(struct tty_struct *tty); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index e1065ac0d92..16d27944c32 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -168,6 +168,18 @@ * * Optional: If not provided then the write method is called under * the atomic write lock to keep it serialized with the ldisc. + * + * int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, + * unsigned int rows, unsigned int cols); + * + * Called when a termios request is issued which changes the + * requested terminal geometry. + * + * Optional: the default action is to update the termios structure + * without error. This is usually the correct behaviour. Drivers should + * not force errors here if they are not resizable objects (eg a serial + * line). See tty_do_resize() if you need to wrap the standard method + * in your own logic - the usual case. */ #include <linux/fs.h> @@ -206,6 +218,8 @@ struct tty_operations { int (*tiocmget)(struct tty_struct *tty, struct file *file); int (*tiocmset)(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); + int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, + struct winsize *ws); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); int (*poll_get_char)(struct tty_driver *driver, int line); diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h new file mode 100644 index 00000000000..970473bf8d5 --- /dev/null +++ b/include/linux/ucb1400.h @@ -0,0 +1,161 @@ +/* + * Register definitions and functions for: + * Philips UCB1400 driver + * + * Based on ucb1400_ts: + * Author: Nicolas Pitre + * Created: September 25, 2006 + * Copyright: MontaVista Software, Inc. + * + * Spliting done by: Marek Vasut <marek.vasut@gmail.com> + * If something doesnt work and it worked before spliting, e-mail me, + * dont bother Nicolas please ;-) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This code is heavily based on ucb1x00-*.c copyrighted by Russell King + * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has + * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request. 
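The new tty resize operation documented above lets a driver veto or adapt terminal geometry changes; with no method, the core just updates the termios winsize. Below is a hedged sketch of a driver method that rejects oversized geometries and otherwise falls back to the tty_do_resize() helper declared in tty.h above; the example_ names and the 512x512 limit are illustrative.

/* Illustrative driver resize method; the size limit is arbitrary. */
static int example_resize(struct tty_struct *tty, struct tty_struct *real_tty,
			  struct winsize *ws)
{
	if (ws->ws_col > 512 || ws->ws_row > 512)
		return -EINVAL;

	/* Reprogram hardware here if needed, then let the core helper
	 * do the standard winsize bookkeeping and SIGWINCH delivery. */
	return tty_do_resize(tty, real_tty, ws);
}

static const struct tty_operations example_ops = {
	/* ... other methods ... */
	.resize = example_resize,
};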
+ */ + +#ifndef _LINUX__UCB1400_H +#define _LINUX__UCB1400_H + +#include <sound/ac97_codec.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> + +/* + * UCB1400 AC-link registers + */ + +#define UCB_IO_DATA 0x5a +#define UCB_IO_DIR 0x5c +#define UCB_IE_RIS 0x5e +#define UCB_IE_FAL 0x60 +#define UCB_IE_STATUS 0x62 +#define UCB_IE_CLEAR 0x62 +#define UCB_IE_ADC (1 << 11) +#define UCB_IE_TSPX (1 << 12) + +#define UCB_TS_CR 0x64 +#define UCB_TS_CR_TSMX_POW (1 << 0) +#define UCB_TS_CR_TSPX_POW (1 << 1) +#define UCB_TS_CR_TSMY_POW (1 << 2) +#define UCB_TS_CR_TSPY_POW (1 << 3) +#define UCB_TS_CR_TSMX_GND (1 << 4) +#define UCB_TS_CR_TSPX_GND (1 << 5) +#define UCB_TS_CR_TSMY_GND (1 << 6) +#define UCB_TS_CR_TSPY_GND (1 << 7) +#define UCB_TS_CR_MODE_INT (0 << 8) +#define UCB_TS_CR_MODE_PRES (1 << 8) +#define UCB_TS_CR_MODE_POS (2 << 8) +#define UCB_TS_CR_BIAS_ENA (1 << 11) +#define UCB_TS_CR_TSPX_LOW (1 << 12) +#define UCB_TS_CR_TSMX_LOW (1 << 13) + +#define UCB_ADC_CR 0x66 +#define UCB_ADC_SYNC_ENA (1 << 0) +#define UCB_ADC_VREFBYP_CON (1 << 1) +#define UCB_ADC_INP_TSPX (0 << 2) +#define UCB_ADC_INP_TSMX (1 << 2) +#define UCB_ADC_INP_TSPY (2 << 2) +#define UCB_ADC_INP_TSMY (3 << 2) +#define UCB_ADC_INP_AD0 (4 << 2) +#define UCB_ADC_INP_AD1 (5 << 2) +#define UCB_ADC_INP_AD2 (6 << 2) +#define UCB_ADC_INP_AD3 (7 << 2) +#define UCB_ADC_EXT_REF (1 << 5) +#define UCB_ADC_START (1 << 7) +#define UCB_ADC_ENA (1 << 15) + +#define UCB_ADC_DATA 0x68 +#define UCB_ADC_DAT_VALID (1 << 15) +#define UCB_ADC_DAT_MASK 0x3ff + +#define UCB_ID 0x7e +#define UCB_ID_1400 0x4304 + +struct ucb1400_ts { + struct input_dev *ts_idev; + struct task_struct *ts_task; + int id; + wait_queue_head_t ts_wait; + unsigned int ts_restart:1; + int irq; + unsigned int irq_pending; /* not bit field shared */ + struct snd_ac97 *ac97; +}; + +struct ucb1400 { + struct platform_device *ucb1400_ts; +}; + +static inline u16 ucb1400_reg_read(struct snd_ac97 *ac97, u16 reg) +{ + return ac97->bus->ops->read(ac97, reg); +} + +static inline void ucb1400_reg_write(struct snd_ac97 *ac97, u16 reg, u16 val) +{ + ac97->bus->ops->write(ac97, reg, val); +} + +static inline u16 ucb1400_gpio_get_value(struct snd_ac97 *ac97, u16 gpio) +{ + return ucb1400_reg_read(ac97, UCB_IO_DATA) & (1 << gpio); +} + +static inline void ucb1400_gpio_set_value(struct snd_ac97 *ac97, u16 gpio, + u16 val) +{ + ucb1400_reg_write(ac97, UCB_IO_DATA, val ? + ucb1400_reg_read(ac97, UCB_IO_DATA) | (1 << gpio) : + ucb1400_reg_read(ac97, UCB_IO_DATA) & ~(1 << gpio)); +} + +static inline u16 ucb1400_gpio_get_direction(struct snd_ac97 *ac97, u16 gpio) +{ + return ucb1400_reg_read(ac97, UCB_IO_DIR) & (1 << gpio); +} + +static inline void ucb1400_gpio_set_direction(struct snd_ac97 *ac97, u16 gpio, + u16 dir) +{ + ucb1400_reg_write(ac97, UCB_IO_DIR, dir ? 
+ ucb1400_reg_read(ac97, UCB_IO_DIR) | (1 << gpio) : + ucb1400_reg_read(ac97, UCB_IO_DIR) & ~(1 << gpio)); +} + +static inline void ucb1400_adc_enable(struct snd_ac97 *ac97) +{ + ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA); +} + +static unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, + int adcsync) +{ + unsigned int val; + + if (adcsync) + adc_channel |= UCB_ADC_SYNC_ENA; + + ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel); + ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel | + UCB_ADC_START); + + while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA)) + & UCB_ADC_DAT_VALID)) + schedule_timeout_uninterruptible(1); + + return val & UCB_ADC_DAT_MASK; +} + +static inline void ucb1400_adc_disable(struct snd_ac97 *ac97) +{ + ucb1400_reg_write(ac97, UCB_ADC_CR, 0); +} + +#endif diff --git a/include/linux/usb.h b/include/linux/usb.h index 5811c5da69f..94ac74aba6b 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -110,6 +110,10 @@ enum usb_interface_condition { * @sysfs_files_created: sysfs attributes exist * @needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend. + * @needs_altsetting0: flag set when a set-interface request for altsetting 0 + * has been deferred. + * @needs_binding: flag set when the driver should be re-probed or unbound + * following a reset or suspend operation it doesn't support. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. @@ -160,6 +164,7 @@ struct usb_interface { unsigned is_active:1; /* the interface is not suspended */ unsigned sysfs_files_created:1; /* the sysfs attributes exist */ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ + unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ unsigned needs_binding:1; /* needs delayed unbind/rebind */ struct device dev; /* interface specific device info */ diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h new file mode 100644 index 00000000000..5b88e36c910 --- /dev/null +++ b/include/linux/usb/ehci_def.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_USB_EHCI_DEF_H +#define __LINUX_USB_EHCI_DEF_H + +/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ + +/* Section 2.2 Host Controller Capability Registers */ +struct ehci_caps { + /* these fields are specified as 8 and 16 bit registers, + * but some hosts can't perform 8 or 16 bit PCI accesses. 
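The UCB1400 helpers above route everything through the AC97 codec's register ops, and an ADC sample is simply enable, read one channel, disable. A minimal sketch, assuming an already-probed struct snd_ac97 pointer; the function name and the choice of the AD0 input are illustrative.

/* Illustrative one-shot reading of the AD0 input, without ADCSYNC. */
static unsigned int example_read_ad0(struct snd_ac97 *ac97)
{
	unsigned int val;

	ucb1400_adc_enable(ac97);
	val = ucb1400_adc_read(ac97, UCB_ADC_INP_AD0, 0);
	ucb1400_adc_disable(ac97);

	return val;	/* 10-bit result, 0..UCB_ADC_DAT_MASK */
}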
+ */ + u32 hc_capbase; +#define HC_LENGTH(p) (((p)>>00)&0x00ff) /* bits 7:0 */ +#define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */ + u32 hcs_params; /* HCSPARAMS - offset 0x4 */ +#define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? */ +#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */ +#define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */ +#define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */ +#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */ +#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */ +#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ + + u32 hcc_params; /* HCCPARAMS - offset 0x8 */ +#define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */ +#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */ +#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */ +#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */ +#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/ +#define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */ + u8 portroute [8]; /* nibbles for routing - offset 0xC */ +} __attribute__ ((packed)); + + +/* Section 2.3 Host Controller Operational Registers */ +struct ehci_regs { + + /* USBCMD: offset 0x00 */ + u32 command; +/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ +#define CMD_PARK (1<<11) /* enable "park" on async qh */ +#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ +#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */ +#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */ +#define CMD_ASE (1<<5) /* async schedule enable */ +#define CMD_PSE (1<<4) /* periodic schedule enable */ +/* 3:2 is periodic frame list size */ +#define CMD_RESET (1<<1) /* reset HC not bus */ +#define CMD_RUN (1<<0) /* start/stop HC */ + + /* USBSTS: offset 0x04 */ + u32 status; +#define STS_ASS (1<<15) /* Async Schedule Status */ +#define STS_PSS (1<<14) /* Periodic Schedule Status */ +#define STS_RECL (1<<13) /* Reclamation */ +#define STS_HALT (1<<12) /* Not running (any reason) */ +/* some bits reserved */ + /* these STS_* flags are also intr_enable bits (USBINTR) */ +#define STS_IAA (1<<5) /* Interrupted on async advance */ +#define STS_FATAL (1<<4) /* such as some PCI access errors */ +#define STS_FLR (1<<3) /* frame list rolled over */ +#define STS_PCD (1<<2) /* port change detect */ +#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */ +#define STS_INT (1<<0) /* "normal" completion (short, ...) 
*/ + + /* USBINTR: offset 0x08 */ + u32 intr_enable; + + /* FRINDEX: offset 0x0C */ + u32 frame_index; /* current microframe number */ + /* CTRLDSSEGMENT: offset 0x10 */ + u32 segment; /* address bits 63:32 if needed */ + /* PERIODICLISTBASE: offset 0x14 */ + u32 frame_list; /* points to periodic list */ + /* ASYNCLISTADDR: offset 0x18 */ + u32 async_next; /* address of next async queue head */ + + u32 reserved [9]; + + /* CONFIGFLAG: offset 0x40 */ + u32 configured_flag; +#define FLAG_CF (1<<0) /* true: we'll support "high speed" */ + + /* PORTSC: offset 0x44 */ + u32 port_status [0]; /* up to N_PORTS */ +/* 31:23 reserved */ +#define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */ +#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */ +#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */ +/* 19:16 for port testing */ +#define PORT_LED_OFF (0<<14) +#define PORT_LED_AMBER (1<<14) +#define PORT_LED_GREEN (2<<14) +#define PORT_LED_MASK (3<<14) +#define PORT_OWNER (1<<13) /* true: companion hc owns this port */ +#define PORT_POWER (1<<12) /* true: has power (see PPC) */ +#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ +/* 11:10 for detecting lowspeed devices (reset vs release ownership) */ +/* 9 reserved */ +#define PORT_RESET (1<<8) /* reset port */ +#define PORT_SUSPEND (1<<7) /* suspend port */ +#define PORT_RESUME (1<<6) /* resume it */ +#define PORT_OCC (1<<5) /* over current change */ +#define PORT_OC (1<<4) /* over current active */ +#define PORT_PEC (1<<3) /* port enable change */ +#define PORT_PE (1<<2) /* port enable */ +#define PORT_CSC (1<<1) /* connect status change */ +#define PORT_CONNECT (1<<0) /* device connected */ +#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) +} __attribute__ ((packed)); + +#define USBMODE 0x68 /* USB Device mode */ +#define USBMODE_SDIS (1<<3) /* Stream disable */ +#define USBMODE_BE (1<<2) /* BE/LE endianness select */ +#define USBMODE_CM_HC (3<<0) /* host controller mode */ +#define USBMODE_CM_IDLE (0<<0) /* idle state */ + +/* Appendix C, Debug port ... intended for use with special "debug devices" + * that can help if there's no serial console. (nonstandard enumeration.) + */ +struct ehci_dbg_port { + u32 control; +#define DBGP_OWNER (1<<30) +#define DBGP_ENABLED (1<<28) +#define DBGP_DONE (1<<16) +#define DBGP_INUSE (1<<10) +#define DBGP_ERRCODE(x) (((x)>>7)&0x07) +# define DBGP_ERR_BAD 1 +# define DBGP_ERR_SIGNAL 2 +#define DBGP_ERROR (1<<6) +#define DBGP_GO (1<<5) +#define DBGP_OUT (1<<4) +#define DBGP_LEN(x) (((x)>>0)&0x0f) + u32 pids; +#define DBGP_PID_GET(x) (((x)>>16)&0xff) +#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) + u32 data03; + u32 data47; + u32 address; +#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) +} __attribute__ ((packed)); + +#endif /* __LINUX_USB_EHCI_DEF_H */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 00000000000..630962c04ca --- /dev/null +++ b/include/linux/usb/musb.h @@ -0,0 +1,98 @@ +/* + * This is used to for host and peripheral modes of the driver for + * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. + * + * Board initialization should put one of these into dev->platform_data, + * probably on some platform_device named "musb_hdrc". It encapsulates + * key configuration differences between boards. + */ + +/* The USB role is defined by the connector used on the board, so long as + * standards are being followed. (Developer boards sometimes won't.) 
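As the comment above says, board code is expected to hand the controller a struct musb_hdrc_platform_data (defined just below) via a platform_device named "musb_hdrc". A rough board-file sketch follows; the example_ names, the clock name, the power budget and the config values are made up for illustration and do not describe any particular board.

/* Illustrative board wiring; all values are placeholders. */
static struct musb_hdrc_config example_musb_config = {
	.multipoint	= 1,
	.dyn_fifo	= 1,
	.num_eps	= 16,
	.ram_bits	= 12,
};

static struct musb_hdrc_platform_data example_musb_plat = {
	.mode		= MUSB_OTG,
	.clock		= "usb_clk",	/* name passed to clk_get() */
	.power		= 50,		/* VBUS budget in 2 mA units */
	.config		= &example_musb_config,
};

static struct platform_device example_musb_device = {
	.name	= "musb_hdrc",
	.id	= -1,
	.dev	= {
		.platform_data = &example_musb_plat,
	},
};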
+ */ +enum musb_mode { + MUSB_UNDEFINED = 0, + MUSB_HOST, /* A or Mini-A connector */ + MUSB_PERIPHERAL, /* B or Mini-B connector */ + MUSB_OTG /* Mini-AB connector */ +}; + +struct clk; + +struct musb_hdrc_eps_bits { + const char name[16]; + u8 bits; +}; + +struct musb_hdrc_config { + /* MUSB configuration-specific details */ + unsigned multipoint:1; /* multipoint device */ + unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ + unsigned soft_con:1; /* soft connect required */ + unsigned utm_16:1; /* utm data witdh is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HD iso */ + unsigned dma:1; /* supports DMA */ + unsigned vendor_req:1; /* vendor registers required */ + + u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl; /* vendor control reg width */ + u8 vendor_stat; /* vendor status reg witdh */ + u8 dma_req_chan; /* bitmask for required dma channels */ + u8 ram_bits; /* ram address size */ + + struct musb_hdrc_eps_bits *eps_bits; +}; + +struct musb_hdrc_platform_data { + /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ + u8 mode; + + /* for clk_get() */ + const char *clock; + + /* (HOST or OTG) switch VBUS on/off */ + int (*set_vbus)(struct device *dev, int is_on); + + /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ + u8 power; + + /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ + u8 min_power; + + /* (HOST or OTG) msec/2 after VBUS on till power good */ + u8 potpgt; + + /* Power the device on or off */ + int (*set_power)(int state); + + /* Turn device clock on or off */ + int (*set_clock)(struct clk *clock, int is_on); + + /* MUSB configuration-specific details */ + struct musb_hdrc_config *config; +}; + + +/* TUSB 6010 support */ + +#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ +#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ +#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ + +#ifdef CONFIG_ARCH_OMAP2 + +extern int __init tusb6010_setup_interface( + struct musb_hdrc_platform_data *data, + unsigned ps_refclk, unsigned waitpin, + unsigned async_cs, unsigned sync_cs, + unsigned irq, unsigned dmachan); + +extern int tusb6010_platform_retime(unsigned is_refclk); + +#endif /* OMAP2 */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 09a3e6a7518..655341d0f53 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -17,7 +17,8 @@ #include <linux/mutex.h> #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ -#define SERIAL_TTY_MINORS 255 /* loads of devices :) */ +#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ +#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ /* The maximum number of ports one device can grab at once */ #define MAX_NUM_PORTS 8 diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 9385a566aed..15a653d4113 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h @@ -17,6 +17,21 @@ #if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) +#define VID_TYPE_CAPTURE 1 /* Can capture */ +#define VID_TYPE_TUNER 2 /* Can tune */ +#define VID_TYPE_TELETEXT 4 /* Does teletext */ +#define VID_TYPE_OVERLAY 8 /* Overlay onto frame buffer */ +#define 
VID_TYPE_CHROMAKEY 16 /* Overlay by chromakey */ +#define VID_TYPE_CLIPPING 32 /* Can clip */ +#define VID_TYPE_FRAMERAM 64 /* Uses the frame buffer memory */ +#define VID_TYPE_SCALES 128 /* Scalable */ +#define VID_TYPE_MONOCHROME 256 /* Monochrome only */ +#define VID_TYPE_SUBCAPTURE 512 /* Can capture subareas of the image */ +#define VID_TYPE_MPEG_DECODER 1024 /* Can decode MPEG streams */ +#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ +#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ +#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ + struct video_capability { char name[32]; diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 2e66a95e8d3..303d93ffd6b 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -55,13 +55,13 @@ */ #ifndef __LINUX_VIDEODEV2_H #define __LINUX_VIDEODEV2_H + #ifdef __KERNEL__ #include <linux/time.h> /* need struct timeval */ -#include <linux/compiler.h> /* need __user */ #else -#define __user #include <sys/time.h> #endif +#include <linux/compiler.h> #include <linux/ioctl.h> #include <linux/types.h> @@ -71,6 +71,11 @@ */ #define VIDEO_MAX_FRAME 32 +#ifndef __KERNEL__ + +/* These defines are V4L1 specific and should not be used with the V4L2 API! + They will be removed from this header in the future. */ + #define VID_TYPE_CAPTURE 1 /* Can capture */ #define VID_TYPE_TUNER 2 /* Can tune */ #define VID_TYPE_TELETEXT 4 /* Does teletext */ @@ -85,14 +90,15 @@ #define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ #define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ #define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ +#endif /* * M I S C E L L A N E O U S */ /* Four-character-code (FOURCC) */ -#define v4l2_fourcc(a,b,c,d)\ - (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24)) +#define v4l2_fourcc(a, b, c, d)\ + ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) /* * E N U M S @@ -226,8 +232,7 @@ struct v4l2_fract { /* * D R I V E R C A P A B I L I T I E S */ -struct v4l2_capability -{ +struct v4l2_capability { __u8 driver[16]; /* i.e. "bttv" */ __u8 card[32]; /* i.e. 
"Hauppauge WinTV" */ __u8 bus_info[32]; /* "PCI:" + pci_name(pci_dev) */ @@ -259,8 +264,7 @@ struct v4l2_capability /* * V I D E O I M A G E F O R M A T */ -struct v4l2_pix_format -{ +struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; @@ -272,68 +276,71 @@ struct v4l2_pix_format }; /* Pixel format FOURCC depth Description */ -#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R','G','B','1') /* 8 RGB-3-3-2 */ -#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R','4','4','4') /* 16 xxxxrrrr ggggbbbb */ -#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R','G','B','O') /* 16 RGB-5-5-5 */ -#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R','G','B','P') /* 16 RGB-5-6-5 */ -#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R','G','B','Q') /* 16 RGB-5-5-5 BE */ -#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R','G','B','R') /* 16 RGB-5-6-5 BE */ -#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B','G','R','3') /* 24 BGR-8-8-8 */ -#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R','G','B','3') /* 24 RGB-8-8-8 */ -#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B','G','R','4') /* 32 BGR-8-8-8-8 */ -#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R','G','B','4') /* 32 RGB-8-8-8-8 */ -#define V4L2_PIX_FMT_GREY v4l2_fourcc('G','R','E','Y') /* 8 Greyscale */ -#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y','1','6',' ') /* 16 Greyscale */ -#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P','A','L','8') /* 8 8-bit palette */ -#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y','V','U','9') /* 9 YVU 4:1:0 */ -#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y','V','1','2') /* 12 YVU 4:2:0 */ -#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y','U','Y','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U','Y','V','Y') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4','2','2','P') /* 16 YVU422 planar */ -#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4','1','1','P') /* 16 YVU411 planar */ -#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y','4','1','P') /* 12 YUV 4:1:1 */ -#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y','4','4','4') /* 16 xxxxyyyy uuuuvvvv */ -#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y','U','V','O') /* 16 YUV-5-5-5 */ -#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y','U','V','P') /* 16 YUV-5-6-5 */ -#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y','U','V','4') /* 32 YUV-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */ +#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */ +#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ +#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ +#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ +#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ +#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ +#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ +#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */ +#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ +#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ +#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */ +#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ +#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 
'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ +#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ +#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */ +#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */ +#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */ +#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */ /* two planes -- one Y, one Cr + Cb interleaved */ -#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N','V','1','2') /* 12 Y/CbCr 4:2:0 */ -#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N','V','2','1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ /* The following formats are not defined in the V4L2 specification */ -#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y','U','V','9') /* 9 YUV 4:1:0 */ -#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */ -#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */ -#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:2:0 16x16 macroblocks */ +#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ +#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */ +#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */ +#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */ /* see http://www.siliconimaging.com/RGB%20Bayer.htm */ -#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */ -#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G','B','R','G') /* 8 GBGB.. RGRG.. */ -#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B','Y','R','2') /* 16 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. 
*/ /* compressed formats */ -#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M','J','P','G') /* Motion-JPEG */ -#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J','P','E','G') /* JFIF JPEG */ -#define V4L2_PIX_FMT_DV v4l2_fourcc('d','v','s','d') /* 1394 */ -#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M','P','E','G') /* MPEG-1/2/4 */ +#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */ +#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */ +#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */ +#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */ /* Vendor-specific formats */ -#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W','N','V','A') /* Winnov hw compress */ -#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S','9','1','0') /* SN9C10x compression */ -#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1') /* pwc older webcam */ -#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2') /* pwc newer webcam */ -#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E','6','2','5') /* ET61X251 compression */ -#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S','5','0','1') /* YUYV per line */ -#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S','5','6','1') /* compressed GBRG bayer */ -#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P','2','0','7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ +#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ +#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ +#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ +#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ +#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */ +#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ +#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ +#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ /* * F O R M A T E N U M E R A T I O N */ -struct v4l2_fmtdesc -{ +struct v4l2_fmtdesc { __u32 index; /* Format number */ enum v4l2_buf_type type; /* buffer type */ __u32 flags; @@ -349,21 +356,18 @@ struct v4l2_fmtdesc /* * F R A M E S I Z E E N U M E R A T I O N */ -enum v4l2_frmsizetypes -{ +enum v4l2_frmsizetypes { V4L2_FRMSIZE_TYPE_DISCRETE = 1, V4L2_FRMSIZE_TYPE_CONTINUOUS = 2, V4L2_FRMSIZE_TYPE_STEPWISE = 3, }; -struct v4l2_frmsize_discrete -{ +struct v4l2_frmsize_discrete { __u32 width; /* Frame width [pixel] */ __u32 height; /* Frame height [pixel] */ }; -struct v4l2_frmsize_stepwise -{ +struct v4l2_frmsize_stepwise { __u32 min_width; /* Minimum frame width [pixel] */ __u32 max_width; /* Maximum frame width [pixel] */ __u32 step_width; /* Frame width step size [pixel] */ @@ -372,8 +376,7 @@ struct v4l2_frmsize_stepwise __u32 step_height; /* Frame height step size [pixel] */ }; -struct v4l2_frmsizeenum -{ +struct v4l2_frmsizeenum { __u32 index; /* Frame size number */ __u32 pixel_format; /* Pixel format */ __u32 type; /* Frame size type the device supports. 
*/ @@ -389,22 +392,19 @@ struct v4l2_frmsizeenum /* * F R A M E R A T E E N U M E R A T I O N */ -enum v4l2_frmivaltypes -{ +enum v4l2_frmivaltypes { V4L2_FRMIVAL_TYPE_DISCRETE = 1, V4L2_FRMIVAL_TYPE_CONTINUOUS = 2, V4L2_FRMIVAL_TYPE_STEPWISE = 3, }; -struct v4l2_frmival_stepwise -{ +struct v4l2_frmival_stepwise { struct v4l2_fract min; /* Minimum frame interval [s] */ struct v4l2_fract max; /* Maximum frame interval [s] */ struct v4l2_fract step; /* Frame interval step size [s] */ }; -struct v4l2_frmivalenum -{ +struct v4l2_frmivalenum { __u32 index; /* Frame format index */ __u32 pixel_format; /* Pixel format */ __u32 width; /* Frame width */ @@ -423,8 +423,7 @@ struct v4l2_frmivalenum /* * T I M E C O D E */ -struct v4l2_timecode -{ +struct v4l2_timecode { __u32 type; __u32 flags; __u8 frames; @@ -449,8 +448,7 @@ struct v4l2_timecode #define V4L2_TC_USERBITS_8BITCHARS 0x0008 /* The above is based on SMPTE timecodes */ -struct v4l2_jpegcompression -{ +struct v4l2_jpegcompression { int quality; int APPn; /* Number of APP segment to be written, @@ -482,16 +480,14 @@ struct v4l2_jpegcompression /* * M E M O R Y - M A P P I N G B U F F E R S */ -struct v4l2_requestbuffers -{ +struct v4l2_requestbuffers { __u32 count; enum v4l2_buf_type type; enum v4l2_memory memory; __u32 reserved[2]; }; -struct v4l2_buffer -{ +struct v4l2_buffer { __u32 index; enum v4l2_buf_type type; __u32 bytesused; @@ -525,13 +521,12 @@ struct v4l2_buffer /* * O V E R L A Y P R E V I E W */ -struct v4l2_framebuffer -{ +struct v4l2_framebuffer { __u32 capability; __u32 flags; /* FIXME: in theory we should pass something like PCI device + memory * region + offset instead of some physical address */ - void* base; + void *base; struct v4l2_pix_format fmt; }; /* Flags for the 'capability' field. 
Read only */ @@ -550,14 +545,12 @@ struct v4l2_framebuffer #define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010 #define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020 -struct v4l2_clip -{ +struct v4l2_clip { struct v4l2_rect c; struct v4l2_clip __user *next; }; -struct v4l2_window -{ +struct v4l2_window { struct v4l2_rect w; enum v4l2_field field; __u32 chromakey; @@ -570,8 +563,7 @@ struct v4l2_window /* * C A P T U R E P A R A M E T E R S */ -struct v4l2_captureparm -{ +struct v4l2_captureparm { __u32 capability; /* Supported modes */ __u32 capturemode; /* Current mode */ struct v4l2_fract timeperframe; /* Time per frame in .1us units */ @@ -584,8 +576,7 @@ struct v4l2_captureparm #define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */ #define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */ -struct v4l2_outputparm -{ +struct v4l2_outputparm { __u32 capability; /* Supported modes */ __u32 outputmode; /* Current mode */ struct v4l2_fract timeperframe; /* Time per frame in seconds */ @@ -702,8 +693,7 @@ typedef __u64 v4l2_std_id; #define V4L2_STD_ALL (V4L2_STD_525_60 |\ V4L2_STD_625_50) -struct v4l2_standard -{ +struct v4l2_standard { __u32 index; v4l2_std_id id; __u8 name[24]; @@ -715,8 +705,7 @@ struct v4l2_standard /* * V I D E O I N P U T S */ -struct v4l2_input -{ +struct v4l2_input { __u32 index; /* Which input */ __u8 name[32]; /* Label */ __u32 type; /* Type of input */ @@ -753,8 +742,7 @@ struct v4l2_input /* * V I D E O O U T P U T S */ -struct v4l2_output -{ +struct v4l2_output { __u32 index; /* Which output */ __u8 name[32]; /* Label */ __u32 type; /* Type of output */ @@ -771,14 +759,12 @@ struct v4l2_output /* * C O N T R O L S */ -struct v4l2_control -{ +struct v4l2_control { __u32 id; __s32 value; }; -struct v4l2_ext_control -{ +struct v4l2_ext_control { __u32 id; __u32 reserved2[2]; union { @@ -788,8 +774,7 @@ struct v4l2_ext_control }; } __attribute__ ((packed)); -struct v4l2_ext_controls -{ +struct v4l2_ext_controls { __u32 ctrl_class; __u32 count; __u32 error_idx; @@ -807,8 +792,7 @@ struct v4l2_ext_controls #define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000) /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ -struct v4l2_queryctrl -{ +struct v4l2_queryctrl { __u32 id; enum v4l2_ctrl_type type; __u8 name[32]; /* Whatever */ @@ -821,8 +805,7 @@ struct v4l2_queryctrl }; /* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */ -struct v4l2_querymenu -{ +struct v4l2_querymenu { __u32 id; __u32 index; __u8 name[32]; /* Whatever */ @@ -1104,8 +1087,7 @@ enum v4l2_exposure_auto_type { /* * T U N I N G */ -struct v4l2_tuner -{ +struct v4l2_tuner { __u32 index; __u8 name[32]; enum v4l2_tuner_type type; @@ -1119,8 +1101,7 @@ struct v4l2_tuner __u32 reserved[4]; }; -struct v4l2_modulator -{ +struct v4l2_modulator { __u32 index; __u8 name[32]; __u32 capability; @@ -1153,8 +1134,7 @@ struct v4l2_modulator #define V4L2_TUNER_MODE_LANG1 0x0003 #define V4L2_TUNER_MODE_LANG1_LANG2 0x0004 -struct v4l2_frequency -{ +struct v4l2_frequency { __u32 tuner; enum v4l2_tuner_type type; __u32 frequency; @@ -1172,8 +1152,7 @@ struct v4l2_hw_freq_seek { /* * A U D I O */ -struct v4l2_audio -{ +struct v4l2_audio { __u32 index; __u8 name[32]; __u32 capability; @@ -1188,8 +1167,7 @@ struct v4l2_audio /* Flags for the 'mode' field */ #define V4L2_AUDMODE_AVL 0x00001 -struct v4l2_audioout -{ +struct v4l2_audioout { __u32 index; __u8 name[32]; __u32 capability; @@ -1253,8 +1231,7 @@ struct v4l2_encoder_cmd { */ /* Raw VBI */ -struct v4l2_vbi_format -{ +struct 
v4l2_vbi_format { __u32 sampling_rate; /* in 1 Hz */ __u32 offset; __u32 samples_per_line; @@ -1266,8 +1243,8 @@ struct v4l2_vbi_format }; /* VBI flags */ -#define V4L2_VBI_UNSYNC (1<< 0) -#define V4L2_VBI_INTERLACED (1<< 1) +#define V4L2_VBI_UNSYNC (1 << 0) +#define V4L2_VBI_INTERLACED (1 << 1) /* Sliced VBI * @@ -1276,8 +1253,7 @@ struct v4l2_vbi_format * notice in the definitive implementation. */ -struct v4l2_sliced_vbi_format -{ +struct v4l2_sliced_vbi_format { __u16 service_set; /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field @@ -1301,8 +1277,7 @@ struct v4l2_sliced_vbi_format #define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) #define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) -struct v4l2_sliced_vbi_cap -{ +struct v4l2_sliced_vbi_cap { __u16 service_set; /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field @@ -1313,8 +1288,7 @@ struct v4l2_sliced_vbi_cap __u32 reserved[3]; /* must be 0 */ }; -struct v4l2_sliced_vbi_data -{ +struct v4l2_sliced_vbi_data { __u32 id; __u32 field; /* 0: first field, 1: second field */ __u32 line; /* 1-23 */ @@ -1328,27 +1302,23 @@ struct v4l2_sliced_vbi_data /* Stream data format */ -struct v4l2_format -{ +struct v4l2_format { enum v4l2_buf_type type; - union - { - struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE - struct v4l2_window win; // V4L2_BUF_TYPE_VIDEO_OVERLAY - struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE - struct v4l2_sliced_vbi_format sliced; // V4L2_BUF_TYPE_SLICED_VBI_CAPTURE - __u8 raw_data[200]; // user-defined + union { + struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */ + struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */ + struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */ + struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */ + __u8 raw_data[200]; /* user-defined */ } fmt; }; /* Stream type-dependent parameters */ -struct v4l2_streamparm -{ +struct v4l2_streamparm { enum v4l2_buf_type type; - union - { + union { struct v4l2_captureparm capture; struct v4l2_outputparm output; __u8 raw_data[200]; /* user-defined */ @@ -1386,92 +1356,86 @@ struct v4l2_chip_ident { * I O C T L C O D E S F O R V I D E O D E V I C E S * */ -#define VIDIOC_QUERYCAP _IOR ('V', 0, struct v4l2_capability) -#define VIDIOC_RESERVED _IO ('V', 1) -#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc) -#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format) -#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format) -#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers) -#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer) -#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer) -#define VIDIOC_S_FBUF _IOW ('V', 11, struct v4l2_framebuffer) -#define VIDIOC_OVERLAY _IOW ('V', 14, int) -#define VIDIOC_QBUF _IOWR ('V', 15, struct v4l2_buffer) -#define VIDIOC_DQBUF _IOWR ('V', 17, struct v4l2_buffer) -#define VIDIOC_STREAMON _IOW ('V', 18, int) -#define VIDIOC_STREAMOFF _IOW ('V', 19, int) -#define VIDIOC_G_PARM _IOWR ('V', 21, struct v4l2_streamparm) -#define VIDIOC_S_PARM _IOWR ('V', 22, struct v4l2_streamparm) -#define VIDIOC_G_STD _IOR ('V', 23, v4l2_std_id) -#define VIDIOC_S_STD _IOW ('V', 24, v4l2_std_id) -#define VIDIOC_ENUMSTD _IOWR ('V', 25, struct v4l2_standard) -#define VIDIOC_ENUMINPUT _IOWR ('V', 26, struct 
v4l2_input) -#define VIDIOC_G_CTRL _IOWR ('V', 27, struct v4l2_control) -#define VIDIOC_S_CTRL _IOWR ('V', 28, struct v4l2_control) -#define VIDIOC_G_TUNER _IOWR ('V', 29, struct v4l2_tuner) -#define VIDIOC_S_TUNER _IOW ('V', 30, struct v4l2_tuner) -#define VIDIOC_G_AUDIO _IOR ('V', 33, struct v4l2_audio) -#define VIDIOC_S_AUDIO _IOW ('V', 34, struct v4l2_audio) -#define VIDIOC_QUERYCTRL _IOWR ('V', 36, struct v4l2_queryctrl) -#define VIDIOC_QUERYMENU _IOWR ('V', 37, struct v4l2_querymenu) -#define VIDIOC_G_INPUT _IOR ('V', 38, int) -#define VIDIOC_S_INPUT _IOWR ('V', 39, int) -#define VIDIOC_G_OUTPUT _IOR ('V', 46, int) -#define VIDIOC_S_OUTPUT _IOWR ('V', 47, int) -#define VIDIOC_ENUMOUTPUT _IOWR ('V', 48, struct v4l2_output) -#define VIDIOC_G_AUDOUT _IOR ('V', 49, struct v4l2_audioout) -#define VIDIOC_S_AUDOUT _IOW ('V', 50, struct v4l2_audioout) -#define VIDIOC_G_MODULATOR _IOWR ('V', 54, struct v4l2_modulator) -#define VIDIOC_S_MODULATOR _IOW ('V', 55, struct v4l2_modulator) -#define VIDIOC_G_FREQUENCY _IOWR ('V', 56, struct v4l2_frequency) -#define VIDIOC_S_FREQUENCY _IOW ('V', 57, struct v4l2_frequency) -#define VIDIOC_CROPCAP _IOWR ('V', 58, struct v4l2_cropcap) -#define VIDIOC_G_CROP _IOWR ('V', 59, struct v4l2_crop) -#define VIDIOC_S_CROP _IOW ('V', 60, struct v4l2_crop) -#define VIDIOC_G_JPEGCOMP _IOR ('V', 61, struct v4l2_jpegcompression) -#define VIDIOC_S_JPEGCOMP _IOW ('V', 62, struct v4l2_jpegcompression) -#define VIDIOC_QUERYSTD _IOR ('V', 63, v4l2_std_id) -#define VIDIOC_TRY_FMT _IOWR ('V', 64, struct v4l2_format) -#define VIDIOC_ENUMAUDIO _IOWR ('V', 65, struct v4l2_audio) -#define VIDIOC_ENUMAUDOUT _IOWR ('V', 66, struct v4l2_audioout) -#define VIDIOC_G_PRIORITY _IOR ('V', 67, enum v4l2_priority) -#define VIDIOC_S_PRIORITY _IOW ('V', 68, enum v4l2_priority) -#define VIDIOC_G_SLICED_VBI_CAP _IOWR ('V', 69, struct v4l2_sliced_vbi_cap) -#define VIDIOC_LOG_STATUS _IO ('V', 70) -#define VIDIOC_G_EXT_CTRLS _IOWR ('V', 71, struct v4l2_ext_controls) -#define VIDIOC_S_EXT_CTRLS _IOWR ('V', 72, struct v4l2_ext_controls) -#define VIDIOC_TRY_EXT_CTRLS _IOWR ('V', 73, struct v4l2_ext_controls) +#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability) +#define VIDIOC_RESERVED _IO('V', 1) +#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc) +#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format) +#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format) +#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers) +#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer) +#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer) +#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer) +#define VIDIOC_OVERLAY _IOW('V', 14, int) +#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer) +#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer) +#define VIDIOC_STREAMON _IOW('V', 18, int) +#define VIDIOC_STREAMOFF _IOW('V', 19, int) +#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm) +#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm) +#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id) +#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id) +#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard) +#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct v4l2_input) +#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control) +#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control) +#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner) +#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner) +#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio) +#define 
VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio) +#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl) +#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu) +#define VIDIOC_G_INPUT _IOR('V', 38, int) +#define VIDIOC_S_INPUT _IOWR('V', 39, int) +#define VIDIOC_G_OUTPUT _IOR('V', 46, int) +#define VIDIOC_S_OUTPUT _IOWR('V', 47, int) +#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output) +#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout) +#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout) +#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator) +#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator) +#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency) +#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency) +#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap) +#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop) +#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop) +#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression) +#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression) +#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) +#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) +#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio) +#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout) +#define VIDIOC_G_PRIORITY _IOR('V', 67, enum v4l2_priority) +#define VIDIOC_S_PRIORITY _IOW('V', 68, enum v4l2_priority) +#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap) +#define VIDIOC_LOG_STATUS _IO('V', 70) +#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls) +#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls) +#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls) #if 1 -#define VIDIOC_ENUM_FRAMESIZES _IOWR ('V', 74, struct v4l2_frmsizeenum) -#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR ('V', 75, struct v4l2_frmivalenum) -#define VIDIOC_G_ENC_INDEX _IOR ('V', 76, struct v4l2_enc_idx) -#define VIDIOC_ENCODER_CMD _IOWR ('V', 77, struct v4l2_encoder_cmd) -#define VIDIOC_TRY_ENCODER_CMD _IOWR ('V', 78, struct v4l2_encoder_cmd) +#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum) +#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum) +#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx) +#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd) +#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd) /* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ -#define VIDIOC_DBG_S_REGISTER _IOW ('V', 79, struct v4l2_register) -#define VIDIOC_DBG_G_REGISTER _IOWR ('V', 80, struct v4l2_register) +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_register) -#define VIDIOC_G_CHIP_IDENT _IOWR ('V', 81, struct v4l2_chip_ident) +#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) #endif -#define VIDIOC_S_HW_FREQ_SEEK _IOW ('V', 82, struct v4l2_hw_freq_seek) +#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) #ifdef __OLD_VIDIOC_ /* for compatibility, will go away some day */ -#define VIDIOC_OVERLAY_OLD _IOWR ('V', 14, int) -#define VIDIOC_S_PARM_OLD _IOW ('V', 22, struct v4l2_streamparm) -#define VIDIOC_S_CTRL_OLD _IOW ('V', 28, struct v4l2_control) -#define VIDIOC_G_AUDIO_OLD _IOWR ('V', 33, struct v4l2_audio) -#define VIDIOC_G_AUDOUT_OLD _IOWR ('V', 49, struct v4l2_audioout) -#define VIDIOC_CROPCAP_OLD _IOR ('V', 
58, struct v4l2_cropcap) +#define VIDIOC_OVERLAY_OLD _IOWR('V', 14, int) +#define VIDIOC_S_PARM_OLD _IOW('V', 22, struct v4l2_streamparm) +#define VIDIOC_S_CTRL_OLD _IOW('V', 28, struct v4l2_control) +#define VIDIOC_G_AUDIO_OLD _IOWR('V', 33, struct v4l2_audio) +#define VIDIOC_G_AUDOUT_OLD _IOWR('V', 49, struct v4l2_audioout) +#define VIDIOC_CROPCAP_OLD _IOR('V', 58, struct v4l2_cropcap) #endif #define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */ #endif /* __LINUX_VIDEODEV2_H */ - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/include/linux/videotext.h b/include/linux/videotext.h index 018f92047ff..3e68c8d1c7f 100644 --- a/include/linux/videotext.h +++ b/include/linux/videotext.h @@ -45,10 +45,10 @@ #define VTXIOCCLRCACHE_OLD 0x710b /* clear cache on VTX-interface (if avail.) */ #define VTXIOCSETVIRT_OLD 0x710c /* turn on virtual mode (this disables TV-display) */ -/* +/* * Definitions for VTXIOCGETINFO */ - + #define SAA5243 0 #define SAA5246 1 #define SAA5249 2 @@ -57,10 +57,10 @@ typedef struct { int version_major, version_minor; /* version of driver; if version_major changes, driver */ - /* is not backward compatible!!! CHECK THIS!!! */ + /* is not backward compatible!!! CHECK THIS!!! */ int numpages; /* number of page-buffers of vtx-chipset */ int cct_type; /* type of vtx-chipset (SAA5243, SAA5246, SAA5248 or - * SAA5249) */ + * SAA5249) */ } vtx_info_t; @@ -81,7 +81,7 @@ vtx_info_t; #define PGMASK_HOUR (HR_TEN | HR_UNIT) #define PGMASK_MINUTE (MIN_TEN | MIN_UNIT) -typedef struct +typedef struct { int page; /* number of requested page (hexadecimal) */ int hour; /* requested hour (hexadecimal) */ @@ -98,11 +98,11 @@ vtx_pagereq_t; /* * Definitions for VTXIOC{GETSTAT,PUTSTAT} */ - + #define VTX_PAGESIZE (40 * 24) #define VTX_VIRTUALSIZE (40 * 49) -typedef struct +typedef struct { int pagenum; /* number of page (hexadecimal) */ int hour; /* hour (hexadecimal) */ @@ -121,5 +121,5 @@ typedef struct unsigned hamming : 1; /* hamming-error occurred */ } vtx_pageinfo_t; - + #endif /* _VTX_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 364789aae9f..328eb402272 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -4,9 +4,9 @@ #include <linux/spinlock.h> #include <asm/page.h> /* pgprot_t */ -struct vm_area_struct; +struct vm_area_struct; /* vma defining user mapping in mm_types.h */ -/* bits in vm_struct->flags */ +/* bits in flags of vmalloc's vm_struct below */ #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 14c0e91be9b..1cbd0a7db4e 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -35,7 +35,6 @@ extern int fg_console, last_console, want_console; int vc_allocate(unsigned int console); int vc_cons_allocated(unsigned int console); int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); -int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); void vc_deallocate(unsigned int console); void reset_palette(struct vc_data *vc); void do_blank_screen(int entering_gfx); @@ -74,7 +73,7 @@ void con_protect_unimap(struct vc_data *vc, int rdonly); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ - (vc)->vc_toggle_meta ? 0x80 : 0]) + ((vc)->vc_toggle_meta ? 
0x80 : 0)]) #else #define con_set_trans_old(arg) (0) #define con_get_trans_old(arg) (-EINVAL) @@ -86,6 +85,7 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); #define con_copy_unimap(d, s) (0) #define con_get_unimap(vc, ct, uct, list) (-EINVAL) #define con_free_unimap(vc) do { ; } while (0) +#define con_protect_unimap(vc, rdonly) do { ; } while (0) #define vc_translate(vc, c) (c) #endif diff --git a/include/linux/wm97xx_batt.h b/include/linux/wm97xx_batt.h new file mode 100644 index 00000000000..9681d1ab0e4 --- /dev/null +++ b/include/linux/wm97xx_batt.h @@ -0,0 +1,26 @@ +#ifndef _LINUX_WM97XX_BAT_H +#define _LINUX_WM97XX_BAT_H + +#include <linux/wm97xx.h> + +struct wm97xx_batt_info { + int batt_aux; + int temp_aux; + int charge_gpio; + int min_voltage; + int max_voltage; + int batt_div; + int batt_mult; + int temp_div; + int temp_mult; + int batt_tech; + char *batt_name; +}; + +#ifdef CONFIG_BATTERY_WM97XX +void __init wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); +#else +static inline void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) {} +#endif + +#endif diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index fb0c215a305..4bc1e6b86cb 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -279,6 +279,7 @@ enum xfrm_attr_type_t { XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */ XFRMA_MIGRATE, XFRMA_ALG_AEAD, /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS, /* struct xfrm_user_kmaddress */ __XFRMA_MAX #define XFRMA_MAX (__XFRMA_MAX - 1) @@ -415,6 +416,15 @@ struct xfrm_user_report { struct xfrm_selector sel; }; +/* Used by MIGRATE to pass addresses IKE should use to perform + * SA negotiation with the peer */ +struct xfrm_user_kmaddress { + xfrm_address_t local; + xfrm_address_t remote; + __u32 reserved; + __u16 family; +}; + struct xfrm_user_migrate { xfrm_address_t old_daddr; xfrm_address_t old_saddr; |
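
The videodev2.h hunks above are brace-placement and whitespace cleanups only, but they pass over the frame size / frame interval enumeration API (struct v4l2_frmsizeenum, struct v4l2_frmivalenum, VIDIOC_ENUM_FRAMESIZES, VIDIOC_ENUM_FRAMEINTERVALS). A minimal userspace sketch of how those two ioctls are normally driven follows; the device node /dev/video0 and the YUYV pixel format are assumptions for illustration, not part of the patch.

/*
 * Sketch: list discrete frame sizes for one pixel format and the
 * frame intervals offered for each size.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frmsizeenum fsize;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&fsize, 0, sizeof(fsize));
	fsize.pixel_format = V4L2_PIX_FMT_YUYV;	/* assumed format */

	while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0) {
		if (fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
			struct v4l2_frmivalenum fival;

			printf("%ux%u:", fsize.discrete.width,
			       fsize.discrete.height);

			memset(&fival, 0, sizeof(fival));
			fival.pixel_format = fsize.pixel_format;
			fival.width  = fsize.discrete.width;
			fival.height = fsize.discrete.height;
			while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &fival) == 0) {
				if (fival.type == V4L2_FRMIVAL_TYPE_DISCRETE)
					printf(" %u/%u s",
					       fival.discrete.numerator,
					       fival.discrete.denominator);
				fival.index++;
			}
			printf("\n");
		}
		fsize.index++;
	}
	close(fd);
	return 0;
}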
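
The vt_kern.h change to vc_translate() is more than whitespace: '|' binds more tightly than '?:', so the old expansion indexed vc_translate[] with 0x80 or 0 instead of with the character; the added parentheses restore the intended grouping. A standalone illustration of the two groupings (not kernel code, just the operator precedence):

#include <assert.h>

int main(void)
{
	int c = 'A', meta = 0;

	/* old grouping: (c | meta) ? 0x80 : 0  ->  0x80 for any non-zero c */
	assert((c | meta ? 0x80 : 0) == 0x80);

	/* fixed grouping: c | (meta ? 0x80 : 0)  ->  the character itself */
	assert((c | (meta ? 0x80 : 0)) == 'A');

	return 0;
}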
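
The new <linux/wm97xx_batt.h> header only declares the platform-data structure and wm97xx_bat_set_pdata(); a sketch of how board-support code might populate it is below. Every value (AUX channel macros, GPIO number, voltage limits, scaling factors, battery name) is a placeholder chosen for illustration, not taken from any real board file.

#include <linux/init.h>
#include <linux/power_supply.h>
#include <linux/wm97xx.h>
#include <linux/wm97xx_batt.h>

static struct wm97xx_batt_info example_batt_pdata = {
	.batt_aux	= WM97XX_AUX_ID3,	/* AUX ADC line wired to battery voltage */
	.temp_aux	= WM97XX_AUX_ID2,	/* AUX ADC line wired to the thermistor */
	.charge_gpio	= 23,			/* placeholder "charging" GPIO */
	.min_voltage	= 3300,
	.max_voltage	= 4200,
	.batt_div	= 1,
	.batt_mult	= 1,
	.temp_div	= 1,
	.temp_mult	= 1,
	.batt_tech	= POWER_SUPPLY_TECHNOLOGY_LION,
	.batt_name	= "main-battery",
};

static int __init example_board_battery_init(void)
{
	/* hand the driver its calibration data before it probes */
	wm97xx_bat_set_pdata(&example_batt_pdata);
	return 0;
}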
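
The xfrm.h hunk adds XFRMA_KMADDRESS and struct xfrm_user_kmaddress so that a MIGRATE message can tell the key manager which local/remote addresses IKE should negotiate from after the move. As a rough illustration only (the IPv4 addresses are documentation placeholders and the netlink framing that would carry the attribute is omitted), filling the structure might look like:

#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/xfrm.h>

static void fill_kmaddress(struct xfrm_user_kmaddress *km)
{
	memset(km, 0, sizeof(*km));
	km->family = AF_INET;
	/* new local endpoint the key manager should use for SA negotiation */
	inet_pton(AF_INET, "192.0.2.1", &km->local.a4);
	/* address of the remote IKE peer */
	inet_pton(AF_INET, "198.51.100.1", &km->remote.a4);
}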