Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/Kbuild               1
-rw-r--r--   include/linux/agp_backend.h        5
-rw-r--r--   include/linux/bitmap.h             1
-rw-r--r--   include/linux/bootmem.h            4
-rw-r--r--   include/linux/byteorder.h        372
-rw-r--r--   include/linux/firmware-map.h      26
-rw-r--r--   include/linux/harrier_defs.h     212
-rw-r--r--   include/linux/i2c-id.h             2
-rw-r--r--   include/linux/init.h               1
-rw-r--r--   include/linux/kernel.h            14
-rw-r--r--   include/linux/mfd/t7l66xb.h       36
-rw-r--r--   include/linux/mfd/tc6387xb.h      23
-rw-r--r--   include/linux/mfd/tc6393xb.h       9
-rw-r--r--   include/linux/mfd/tmio.h          19
-rw-r--r--   include/linux/mm.h                20
-rw-r--r--   include/linux/pci.h               11
-rw-r--r--   include/linux/pci_ids.h            2
-rw-r--r--   include/linux/sched.h             31
-rw-r--r--   include/linux/seq_file.h          12
-rw-r--r--   include/linux/slub_def.h           1
-rw-r--r--   include/linux/swab.h             309
21 files changed, 801 insertions, 310 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a26f565e818..327f60658d9 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -356,6 +356,7 @@ unifdef-y += virtio_balloon.h
unifdef-y += virtio_console.h
unifdef-y += virtio_pci.h
unifdef-y += virtio_ring.h
+unifdef-y += virtio_rng.h
unifdef-y += vt.h
unifdef-y += wait.h
unifdef-y += wanrouter.h
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 972b12bcfb3..2b8df8b420f 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -30,6 +30,8 @@
#ifndef _AGP_BACKEND_H
#define _AGP_BACKEND_H 1
+#include <linux/list.h>
+
enum chipset_type {
NOT_SUPPORTED,
SUPPORTED,
@@ -78,6 +80,8 @@ struct agp_memory {
bool is_bound;
bool is_flushed;
bool vmalloc_flag;
+ /* list of agp_memory mapped to the aperture */
+ struct list_head mapped_list;
};
#define AGP_NORMAL_MEMORY 0
@@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t,
extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
extern int agp_bind_memory(struct agp_memory *, off_t);
extern int agp_unbind_memory(struct agp_memory *);
+extern int agp_rebind_memory(void);
extern void agp_enable(struct agp_bridge_data *, u32);
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
extern void agp_backend_release(struct agp_bridge_data *);
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1abfe664c44..89781fd4885 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
+extern int bitmap_scnprintf_len(unsigned int nr_bits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 652470b687c..95837bfb525 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -97,10 +97,14 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_nopanic(x) \
+ __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_nopanic(x) \
+ __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
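The new *_nopanic variants wrap __alloc_bootmem_nopanic(), which returns NULL on failure instead of panicking like the plain alloc_bootmem() forms. A minimal sketch of a caller that can tolerate failure (function name and size are hypothetical):

    #include <linux/bootmem.h>
    #include <linux/kernel.h>
    #include <linux/init.h>

    static void __init demo_reserve_optional(unsigned long size)
    {
            void *buf = alloc_bootmem_nopanic(size);   /* may return NULL */

            if (!buf) {
                    printk(KERN_WARNING "demo: optional buffer skipped\n");
                    return;
            }
            /* ... use buf ... */
    }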
diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h
new file mode 100644
index 00000000000..29f002d73d9
--- /dev/null
+++ b/include/linux/byteorder.h
@@ -0,0 +1,372 @@
+#ifndef _LINUX_BYTEORDER_H
+#define _LINUX_BYTEORDER_H
+
+#include <linux/types.h>
+#include <linux/swab.h>
+
+#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)
+# error Fix asm/byteorder.h to define one endianness
+#endif
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+# error Fix asm/byteorder.h to define arch endianness
+#endif
+
+#ifdef __LITTLE_ENDIAN
+# undef __LITTLE_ENDIAN
+# define __LITTLE_ENDIAN 1234
+#endif
+
+#ifdef __BIG_ENDIAN
+# undef __BIG_ENDIAN
+# define __BIG_ENDIAN 4321
+#endif
+
+#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD)
+# define __LITTLE_ENDIAN_BITFIELD
+#endif
+
+#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD)
+# define __BIG_ENDIAN_BITFIELD
+#endif
+
+#ifdef __LITTLE_ENDIAN
+# define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
+# define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
+# define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
+# define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
+# define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
+# define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
+
+# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
+# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
+# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
+# define __cpu_to_be16(x) ((__force __be16)__swab16(x))
+# define __cpu_to_be32(x) ((__force __be32)__swab32(x))
+# define __cpu_to_be64(x) ((__force __be64)__swab64(x))
+#endif
+
+#ifdef __BIG_ENDIAN
+# define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
+# define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
+# define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
+# define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
+# define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
+# define __cpu_to_be64(x) ((__force __be64)(__u64)(x))
+
+# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
+# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
+# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
+# define __cpu_to_le16(x) ((__force __le16)__swab16(x))
+# define __cpu_to_le32(x) ((__force __le32)__swab32(x))
+# define __cpu_to_le64(x) ((__force __le64)__swab64(x))
+#endif
+
+/*
+ * These helpers could be phased out over time as the base version
+ * handles constant folding.
+ */
+#define __constant_htonl(x) __cpu_to_be32(x)
+#define __constant_ntohl(x) __be32_to_cpu(x)
+#define __constant_htons(x) __cpu_to_be16(x)
+#define __constant_ntohs(x) __be16_to_cpu(x)
+
+#define __constant_le16_to_cpu(x) __le16_to_cpu(x)
+#define __constant_le32_to_cpu(x) __le32_to_cpu(x)
+#define __constant_le64_to_cpu(x) __le64_to_cpu(x)
+#define __constant_be16_to_cpu(x) __be16_to_cpu(x)
+#define __constant_be32_to_cpu(x) __be32_to_cpu(x)
+#define __constant_be64_to_cpu(x) __be64_to_cpu(x)
+
+#define __constant_cpu_to_le16(x) __cpu_to_le16(x)
+#define __constant_cpu_to_le32(x) __cpu_to_le32(x)
+#define __constant_cpu_to_le64(x) __cpu_to_le64(x)
+#define __constant_cpu_to_be16(x) __cpu_to_be16(x)
+#define __constant_cpu_to_be32(x) __cpu_to_be32(x)
+#define __constant_cpu_to_be64(x) __cpu_to_be64(x)
+
+static inline void __le16_to_cpus(__u16 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __cpu_to_le16s(__u16 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __le32_to_cpus(__u32 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __cpu_to_le32s(__u32 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __le64_to_cpus(__u64 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline void __cpu_to_le64s(__u64 *p)
+{
+#ifdef __BIG_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline void __be16_to_cpus(__u16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __cpu_to_be16s(__u16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab16s(p);
+#endif
+}
+
+static inline void __be32_to_cpus(__u32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __cpu_to_be32s(__u32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab32s(p);
+#endif
+}
+
+static inline void __be64_to_cpus(__u64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline void __cpu_to_be64s(__u64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ __swab64s(p);
+#endif
+}
+
+static inline __u16 __le16_to_cpup(const __le16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __u16)*p;
+#else
+ return __swab16p((__force __u16 *)p);
+#endif
+}
+
+static inline __u32 __le32_to_cpup(const __le32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __u32)*p;
+#else
+ return __swab32p((__force __u32 *)p);
+#endif
+}
+
+static inline __u64 __le64_to_cpup(const __le64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __u64)*p;
+#else
+ return __swab64p((__force __u64 *)p);
+#endif
+}
+
+static inline __le16 __cpu_to_le16p(const __u16 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __le16)*p;
+#else
+ return (__force __le16)__swab16p(p);
+#endif
+}
+
+static inline __le32 __cpu_to_le32p(const __u32 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __le32)*p;
+#else
+ return (__force __le32)__swab32p(p);
+#endif
+}
+
+static inline __le64 __cpu_to_le64p(const __u64 *p)
+{
+#ifdef __LITTLE_ENDIAN
+ return (__force __le64)*p;
+#else
+ return (__force __le64)__swab64p(p);
+#endif
+}
+
+static inline __u16 __be16_to_cpup(const __be16 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __u16)*p;
+#else
+ return __swab16p((__force __u16 *)p);
+#endif
+}
+
+static inline __u32 __be32_to_cpup(const __be32 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __u32)*p;
+#else
+ return __swab32p((__force __u32 *)p);
+#endif
+}
+
+static inline __u64 __be64_to_cpup(const __be64 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __u64)*p;
+#else
+ return __swab64p((__force __u64 *)p);
+#endif
+}
+
+static inline __be16 __cpu_to_be16p(const __u16 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be16)*p;
+#else
+ return (__force __be16)__swab16p(p);
+#endif
+}
+
+static inline __be32 __cpu_to_be32p(const __u32 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)*p;
+#else
+ return (__force __be32)__swab32p(p);
+#endif
+}
+
+static inline __be64 __cpu_to_be64p(const __u64 *p)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)*p;
+#else
+ return (__force __be64)__swab64p(p);
+#endif
+}
+
+#ifdef __KERNEL__
+
+# define le16_to_cpu __le16_to_cpu
+# define le32_to_cpu __le32_to_cpu
+# define le64_to_cpu __le64_to_cpu
+# define be16_to_cpu __be16_to_cpu
+# define be32_to_cpu __be32_to_cpu
+# define be64_to_cpu __be64_to_cpu
+# define cpu_to_le16 __cpu_to_le16
+# define cpu_to_le32 __cpu_to_le32
+# define cpu_to_le64 __cpu_to_le64
+# define cpu_to_be16 __cpu_to_be16
+# define cpu_to_be32 __cpu_to_be32
+# define cpu_to_be64 __cpu_to_be64
+
+# define le16_to_cpup __le16_to_cpup
+# define le32_to_cpup __le32_to_cpup
+# define le64_to_cpup __le64_to_cpup
+# define be16_to_cpup __be16_to_cpup
+# define be32_to_cpup __be32_to_cpup
+# define be64_to_cpup __be64_to_cpup
+# define cpu_to_le16p __cpu_to_le16p
+# define cpu_to_le32p __cpu_to_le32p
+# define cpu_to_le64p __cpu_to_le64p
+# define cpu_to_be16p __cpu_to_be16p
+# define cpu_to_be32p __cpu_to_be32p
+# define cpu_to_be64p __cpu_to_be64p
+
+# define le16_to_cpus __le16_to_cpus
+# define le32_to_cpus __le32_to_cpus
+# define le64_to_cpus __le64_to_cpus
+# define be16_to_cpus __be16_to_cpus
+# define be32_to_cpus __be32_to_cpus
+# define be64_to_cpus __be64_to_cpus
+# define cpu_to_le16s __cpu_to_le16s
+# define cpu_to_le32s __cpu_to_le32s
+# define cpu_to_le64s __cpu_to_le64s
+# define cpu_to_be16s __cpu_to_be16s
+# define cpu_to_be32s __cpu_to_be32s
+# define cpu_to_be64s __cpu_to_be64s
+
+/*
+ * They have to be macros in order to do the constant folding
+ * correctly - if the argument is passed into an inline function
+ * it is no longer constant according to gcc.
+ */
+# undef ntohl
+# undef ntohs
+# undef htonl
+# undef htons
+
+# define ___htonl(x) __cpu_to_be32(x)
+# define ___htons(x) __cpu_to_be16(x)
+# define ___ntohl(x) __be32_to_cpu(x)
+# define ___ntohs(x) __be16_to_cpu(x)
+
+# define htonl(x) ___htonl(x)
+# define ntohl(x) ___ntohl(x)
+# define htons(x) ___htons(x)
+# define ntohs(x) ___ntohs(x)
+
+static inline void le16_add_cpu(__le16 *var, u16 val)
+{
+ *var = cpu_to_le16(le16_to_cpup(var) + val);
+}
+
+static inline void le32_add_cpu(__le32 *var, u32 val)
+{
+ *var = cpu_to_le32(le32_to_cpup(var) + val);
+}
+
+static inline void le64_add_cpu(__le64 *var, u64 val)
+{
+ *var = cpu_to_le64(le64_to_cpup(var) + val);
+}
+
+static inline void be16_add_cpu(__be16 *var, u16 val)
+{
+ *var = cpu_to_be16(be16_to_cpup(var) + val);
+}
+
+static inline void be32_add_cpu(__be32 *var, u32 val)
+{
+ *var = cpu_to_be32(be32_to_cpup(var) + val);
+}
+
+static inline void be64_add_cpu(__be64 *var, u64 val)
+{
+ *var = cpu_to_be64(be64_to_cpup(var) + val);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_BYTEORDER_H */
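Taken together, the helpers above form the usual fixed-endian conversion API; consumers normally reach them through <asm/byteorder.h>. A hedged sketch of typical driver or filesystem use, with a hypothetical little-endian on-disk record for illustration:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    /* hypothetical little-endian on-disk record, illustration only */
    struct demo_record {
            __le32  magic;
            __le32  count;
    };

    static void demo_bump_count(struct demo_record *rec)
    {
            /* convert the fixed on-disk order to CPU order for comparison */
            if (le32_to_cpu(rec->magic) != 0xabcd1234)
                    return;

            /* read-modify-write in one step, staying in __le32 */
            le32_add_cpu(&rec->count, 1);
    }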
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index acbdbcc1605..6e199c8dfac 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -24,34 +24,8 @@
*/
#ifdef CONFIG_FIRMWARE_MEMMAP
-/**
- * Adds a firmware mapping entry. This function uses kmalloc() for memory
- * allocation. Use firmware_map_add_early() if you want to use the bootmem
- * allocator.
- *
- * That function must be called before late_initcall.
- *
- * @start: Start of the memory range.
- * @end: End of the memory range (inclusive).
- * @type: Type of the memory range.
- *
- * Returns 0 on success, or -ENOMEM if no memory could be allocated.
- */
int firmware_map_add(resource_size_t start, resource_size_t end,
const char *type);
-
-/**
- * Adds a firmware mapping entry. This function uses the bootmem allocator
- * for memory allocation. Use firmware_map_add() if you want to use kmalloc().
- *
- * That function must be called before late_initcall.
- *
- * @start: Start of the memory range.
- * @end: End of the memory range (inclusive).
- * @type: Type of the memory range.
- *
- * Returns 0 on success, or -ENOMEM if no memory could be allocated.
- */
int firmware_map_add_early(resource_size_t start, resource_size_t end,
const char *type);
diff --git a/include/linux/harrier_defs.h b/include/linux/harrier_defs.h
deleted file mode 100644
index efef11db790..00000000000
--- a/include/linux/harrier_defs.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
- * include/linux/harrier_defs.h
- *
- * Definitions for Motorola MCG Harrier North Bridge & Memory controller
- *
- * Author: Dale Farnsworth
- * dale.farnsworth@mvista.com
- *
- * Extracted from asm-ppc/harrier.h by:
- * Randy Vinson
- * rvinson@mvista.com
- *
- * Copyright 2001-2002 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __ASMPPC_HARRIER_DEFS_H
-#define __ASMPPC_HARRIER_DEFS_H
-
-#define HARRIER_DEFAULT_XCSR_BASE 0xfeff0000
-
-#define HARRIER_VEND_DEV_ID 0x1057480b
-
-#define HARRIER_VENI_OFF 0x00
-
-#define HARRIER_REVI_OFF 0x05
-#define HARRIER_UCTL_OFF 0xd0
-#define HARRIER_XTAL64_MASK 0x02
-
-#define HARRIER_MISC_CSR_OFF 0x1c
-#define HARRIER_RSTOUT 0x01000000
-#define HARRIER_SYSCON 0x08000000
-#define HARRIER_EREADY 0x10000000
-#define HARRIER_ERDYS 0x20000000
-
-/* Function exception registers */
-#define HARRIER_FEEN_OFF 0x40 /* enable */
-#define HARRIER_FEST_OFF 0x44 /* status */
-#define HARRIER_FEMA_OFF 0x48 /* mask */
-#define HARRIER_FECL_OFF 0x4c /* clear */
-
-#define HARRIER_FE_DMA 0x80
-#define HARRIER_FE_MIDB 0x40
-#define HARRIER_FE_MIM0 0x20
-#define HARRIER_FE_MIM1 0x10
-#define HARRIER_FE_MIP 0x08
-#define HARRIER_FE_UA0 0x04
-#define HARRIER_FE_UA1 0x02
-#define HARRIER_FE_ABT 0x01
-
-#define HARRIER_SERIAL_0_OFF 0xc0
-
-#define HARRIER_MBAR_OFF 0xe0
-#define HARRIER_MBAR_MSK 0xfffc0000
-#define HARRIER_MPIC_CSR_OFF 0xe4
-#define HARRIER_MPIC_OPI_ENABLE 0x40
-#define HARRIER_MPIC_IFEVP_OFF 0x10200
-#define HARRIER_MPIC_IFEVP_VECT_MSK 0xff
-#define HARRIER_MPIC_IFEDE_OFF 0x10210
-
-/*
- * Define the Memory Controller register offsets.
- */
-#define HARRIER_SDBA_OFF 0x110
-#define HARRIER_SDBB_OFF 0x114
-#define HARRIER_SDBC_OFF 0x118
-#define HARRIER_SDBD_OFF 0x11c
-#define HARRIER_SDBE_OFF 0x120
-#define HARRIER_SDBF_OFF 0x124
-#define HARRIER_SDBG_OFF 0x128
-#define HARRIER_SDBH_OFF 0x12c
-
-#define HARRIER_SDB_ENABLE 0x00000100
-#define HARRIER_SDB_SIZE_MASK 0xf
-#define HARRIER_SDB_SIZE_SHIFT 16
-#define HARRIER_SDB_BASE_MASK 0xff
-#define HARRIER_SDB_BASE_SHIFT 24
-
-/*
- * Define outbound register offsets.
- */
-#define HARRIER_OTAD0_OFF 0x220
-#define HARRIER_OTOF0_OFF 0x224
-#define HARRIER_OTAD1_OFF 0x228
-#define HARRIER_OTOF1_OFF 0x22c
-#define HARRIER_OTAD2_OFF 0x230
-#define HARRIER_OTOF2_OFF 0x234
-#define HARRIER_OTAD3_OFF 0x238
-#define HARRIER_OTOF3_OFF 0x23c
-
-#define HARRIER_OTADX_START_MSK 0xffff0000UL
-#define HARRIER_OTADX_END_MSK 0x0000ffffUL
-
-#define HARRIER_OTOFX_OFF_MSK 0xffff0000UL
-#define HARRIER_OTOFX_ENA 0x80UL
-#define HARRIER_OTOFX_WPE 0x10UL
-#define HARRIER_OTOFX_SGE 0x08UL
-#define HARRIER_OTOFX_RAE 0x04UL
-#define HARRIER_OTOFX_MEM 0x02UL
-#define HARRIER_OTOFX_IOM 0x01UL
-
-/*
- * Define generic message passing register offsets
- */
-/* Mirrored registers (visible from both PowerPC and PCI space) */
-#define HARRIER_XCSR_MP_BASE_OFF 0x290 /* base offset in XCSR space */
-#define HARRIER_PMEP_MP_BASE_OFF 0x100 /* base offset in PMEM space */
-#define HARRIER_MGOM0_OFF 0x00 /* outbound msg 0 */
-#define HARRIER_MGOM1_OFF 0x04 /* outbound msg 1 */
-#define HARRIER_MGOD_OFF 0x08 /* outbound doorbells */
-
-#define HARRIER_MGIM0_OFF 0x10 /* inbound msg 0 */
-#define HARRIER_MGIM1_OFF 0x14 /* inbound msg 1 */
-#define HARRIER_MGID_OFF 0x18 /* inbound doorbells */
-
-/* PowerPC-only registers */
-#define HARRIER_MGIDM_OFF 0x20 /* inbound doorbell mask */
-
-/* PCI-only registers */
-#define HARRIER_PMEP_MGST_OFF 0x20 /* (outbound) interrupt status */
-#define HARRIER_PMEP_MGMS_OFF 0x24 /* (outbound) interrupt mask */
-#define HARRIER_MG_OMI0 (1<<4)
-#define HARRIER_MG_OMI1 (1<<5)
-
-#define HARRIER_PMEP_MGODM_OFF 0x28 /* outbound doorbell mask */
-
-/*
- * Define PCI configuration space register offsets
- */
-#define HARRIER_XCSR_TO_PCFS_OFF 0x300
-
-/*
- * Define message passing attribute register offset
- */
-#define HARRIER_MPAT_OFF 0x44
-
-/*
- * Define inbound attribute register offsets.
- */
-#define HARRIER_ITSZ0_OFF 0x48
-#define HARRIER_ITAT0_OFF 0x4c
-
-#define HARRIER_ITSZ1_OFF 0x50
-#define HARRIER_ITAT1_OFF 0x54
-
-#define HARRIER_ITSZ2_OFF 0x58
-#define HARRIER_ITAT2_OFF 0x5c
-
-#define HARRIER_ITSZ3_OFF 0x60
-#define HARRIER_ITAT3_OFF 0x64
-
-/* inbound translation size constants */
-#define HARRIER_ITSZ_MSK 0xff
-#define HARRIER_ITSZ_4KB 0x00
-#define HARRIER_ITSZ_8KB 0x01
-#define HARRIER_ITSZ_16KB 0x02
-#define HARRIER_ITSZ_32KB 0x03
-#define HARRIER_ITSZ_64KB 0x04
-#define HARRIER_ITSZ_128KB 0x05
-#define HARRIER_ITSZ_256KB 0x06
-#define HARRIER_ITSZ_512KB 0x07
-#define HARRIER_ITSZ_1MB 0x08
-#define HARRIER_ITSZ_2MB 0x09
-#define HARRIER_ITSZ_4MB 0x0A
-#define HARRIER_ITSZ_8MB 0x0B
-#define HARRIER_ITSZ_16MB 0x0C
-#define HARRIER_ITSZ_32MB 0x0D
-#define HARRIER_ITSZ_64MB 0x0E
-#define HARRIER_ITSZ_128MB 0x0F
-#define HARRIER_ITSZ_256MB 0x10
-#define HARRIER_ITSZ_512MB 0x11
-#define HARRIER_ITSZ_1GB 0x12
-#define HARRIER_ITSZ_2GB 0x13
-
-/* inbound translation offset */
-#define HARRIER_ITOF_SHIFT 0x10
-#define HARRIER_ITOF_MSK 0xffff
-
-/* inbound translation atttributes */
-#define HARRIER_ITAT_PRE (1<<3)
-#define HARRIER_ITAT_RAE (1<<4)
-#define HARRIER_ITAT_WPE (1<<5)
-#define HARRIER_ITAT_MEM (1<<6)
-#define HARRIER_ITAT_ENA (1<<7)
-#define HARRIER_ITAT_GBL (1<<16)
-
-#define HARRIER_LBA_OFF 0x80
-#define HARRIER_LBA_MSK (1<<31)
-
-#define HARRIER_XCSR_SIZE 1024
-
-/* macros to calculate message passing register offsets */
-#define HARRIER_MP_XCSR(x) ((u32)HARRIER_XCSR_MP_BASE_OFF + (u32)x)
-
-#define HARRIER_MP_PMEP(x) ((u32)HARRIER_PMEP_MP_BASE_OFF + (u32)x)
-
-/*
- * Define PCI configuration space register offsets
- */
-#define HARRIER_MPBAR_OFF PCI_BASE_ADDRESS_0
-#define HARRIER_ITBAR0_OFF PCI_BASE_ADDRESS_1
-#define HARRIER_ITBAR1_OFF PCI_BASE_ADDRESS_2
-#define HARRIER_ITBAR2_OFF PCI_BASE_ADDRESS_3
-#define HARRIER_ITBAR3_OFF PCI_BASE_ADDRESS_4
-
-#define HARRIER_XCSR_CONFIG(x) ((u32)HARRIER_XCSR_TO_PCFS_OFF + (u32)x)
-
-#endif /* __ASMPPC_HARRIER_DEFS_H */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 4862398e05b..bf34c5f4c05 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -39,7 +39,6 @@
#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
#define I2C_DRIVERID_SAA7110 22 /* video decoder */
-#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */
#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
#define I2C_DRIVERID_PCF8583 25 /* real time clock */
#define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */
@@ -95,7 +94,6 @@
#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */
#define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */
#define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */
-#define I2C_HW_B_G400 0x010009 /* Matrox G400 */
#define I2C_HW_B_I810 0x01000a /* Intel I810 */
#define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */
#define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */
diff --git a/include/linux/init.h b/include/linux/init.h
index 11b84e10605..93538b696e3 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[];
extern initcall_t __security_initcall_start[], __security_initcall_end[];
/* Defined in init/main.c */
+extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
extern unsigned int reset_devices;
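do_one_initcall() is the entry point init/main.c uses to run a single initcall; a simplified sketch of the kind of loop that drives it (section symbols as used by init/main.c, error handling omitted):

    #include <linux/init.h>

    extern initcall_t __initcall_start[], __initcall_end[];

    static void __init demo_do_initcalls(void)
    {
            initcall_t *call;

            for (call = __initcall_start; call < __initcall_end; call++)
                    do_one_initcall(*call);
    }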
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index aaa998f65c7..2651f805ba6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -108,6 +108,13 @@ struct completion;
struct pt_regs;
struct user;
+#ifdef CONFIG_PREEMPT_VOLUNTARY
+extern int _cond_resched(void);
+# define might_resched() _cond_resched()
+#else
+# define might_resched() do { } while (0)
+#endif
+
/**
* might_sleep - annotation for functions that can sleep
*
@@ -118,13 +125,6 @@ struct user;
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
-#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
-#else
-# define might_resched() do { } while (0)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line);
# define might_sleep() \
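Moving the might_resched() block above the kerneldoc keeps that comment directly attached to might_sleep(). A minimal illustrative use of the annotation in a helper that may block (names are placeholders):

    #include <linux/kernel.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);

    static void demo_sleepable_helper(void)
    {
            might_sleep();            /* warns if called from atomic context */
            mutex_lock(&demo_lock);   /* may actually block here */
            /* ... */
            mutex_unlock(&demo_lock);
    }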
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
new file mode 100644
index 00000000000..e83c7f2036f
--- /dev/null
+++ b/include/linux/mfd/t7l66xb.h
@@ -0,0 +1,36 @@
+/*
+ * This file contains the definitions for the T7L66XB
+ *
+ * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef MFD_T7L66XB_H
+#define MFD_T7L66XB_H
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+
+struct t7l66xb_platform_data {
+ int (*enable_clk32k)(struct platform_device *dev);
+ void (*disable_clk32k)(struct platform_device *dev);
+ int (*enable)(struct platform_device *dev);
+ int (*disable)(struct platform_device *dev);
+ int (*suspend)(struct platform_device *dev);
+ int (*resume)(struct platform_device *dev);
+
+ int irq_base; /* The base for subdevice irqs */
+
+ struct tmio_nand_data *nand_data;
+};
+
+
+#define IRQ_T7L66XB_MMC (1)
+#define IRQ_T7L66XB_NAND (3)
+
+#define T7L66XB_NR_IRQS 8
+
+#endif
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
new file mode 100644
index 00000000000..fa06e0610b8
--- /dev/null
+++ b/include/linux/mfd/tc6387xb.h
@@ -0,0 +1,23 @@
+/*
+ * This file contains the definitions for the TC6387XB
+ *
+ * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ */
+#ifndef MFD_TC6387XB_H
+#define MFD_TC6387XB_H
+
+struct tc6387xb_platform_data {
+ int (*enable_clk32k)(struct platform_device *dev);
+ void (*disable_clk32k)(struct platform_device *dev);
+
+ int (*enable)(struct platform_device *dev);
+ int (*disable)(struct platform_device *dev);
+ int (*suspend)(struct platform_device *dev);
+ int (*resume)(struct platform_device *dev);
+};
+
+#endif
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index 7cc824a58f7..fec7b3f7a81 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -14,8 +14,8 @@
* published by the Free Software Foundation.
*/
-#ifndef TC6393XB_H
-#define TC6393XB_H
+#ifndef MFD_TC6393XB_H
+#define MFD_TC6393XB_H
/* Also one should provide the CK3P6MI clock */
struct tc6393xb_platform_data {
@@ -29,7 +29,7 @@ struct tc6393xb_platform_data {
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
- int irq_base; /* a base for cascaded irq */
+ int irq_base; /* base for subdevice irqs */
int gpio_base;
struct tmio_nand_data *nand_data;
@@ -40,9 +40,6 @@ struct tc6393xb_platform_data {
*/
#define IRQ_TC6393_NAND 0
#define IRQ_TC6393_MMC 1
-#define IRQ_TC6393_OHCI 2
-#define IRQ_TC6393_SERIAL 3
-#define IRQ_TC6393_FB 4
#define TC6393XB_NR_IRQS 8
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 9438d8c9ac1..ec612e66391 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -1,6 +1,21 @@
#ifndef MFD_TMIO_H
#define MFD_TMIO_H
+#define tmio_ioread8(addr) readb(addr)
+#define tmio_ioread16(addr) readw(addr)
+#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
+#define tmio_ioread32(addr) \
+ (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16))
+
+#define tmio_iowrite8(val, addr) writeb((val), (addr))
+#define tmio_iowrite16(val, addr) writew((val), (addr))
+#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
+#define tmio_iowrite32(val, addr) \
+ do { \
+ writew((val), (addr)); \
+ writew((val) >> 16, (addr) + 2); \
+ } while (0)
+
/*
* data for the NAND controller
*/
@@ -10,8 +25,4 @@ struct tmio_nand_data {
unsigned int num_partitions;
};
-#define TMIO_NAND_CONFIG "tmio-nand-config"
-#define TMIO_NAND_CONTROL "tmio-nand-control"
-#define TMIO_NAND_IRQ "tmio-nand"
-
#endif
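The new tmio_ioread*/tmio_iowrite* accessors wrap readb/readw/writeb/writew, with the 32-bit forms built from two 16-bit accesses (low half first). A sketch of a driver-side access, using a made-up register offset:

    #include <linux/io.h>
    #include <linux/mfd/tmio.h>

    #define DEMO_CTL        0x10    /* hypothetical 32-bit register */

    static u32 demo_read_ctl(void __iomem *base)
    {
            return tmio_ioread32(base + DEMO_CTL);   /* two 16-bit reads */
    }

    static void demo_write_ctl(void __iomem *base, u32 val)
    {
            tmio_iowrite32(val, base + DEMO_CTL);    /* two 16-bit writes */
    }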
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 335288bff1b..fa651609b65 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -834,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
-#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
/*
* get_user_pages_fast provides equivalent functionality to get_user_pages,
* operating on current and current->mm (force=0 and doesn't return any vmas).
@@ -848,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
-#else
-/*
- * Should probably be moved to asm-generic, and architectures can include it if
- * they don't implement their own get_user_pages_fast.
- */
-#define get_user_pages_fast(start, nr_pages, write, pages) \
-({ \
- struct mm_struct *mm = current->mm; \
- int ret; \
- \
- down_read(&mm->mmap_sem); \
- ret = get_user_pages(current, mm, start, nr_pages, \
- write, 0, pages, NULL); \
- up_read(&mm->mmap_sem); \
- \
- ret; \
-})
-#endif
-
/*
* A callback you can register to apply pressure to ageable caches.
*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 825be3878f6..c0e14008a3c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -641,6 +641,7 @@ int pci_restore_state(struct pci_dev *dev);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
+void pci_pme_active(struct pci_dev *dev, bool enable);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
@@ -680,10 +681,12 @@ void pci_enable_bridges(struct pci_bus *bus);
/* Proper probing supporting hot-pluggable devices */
int __must_check __pci_register_driver(struct pci_driver *, struct module *,
const char *mod_name);
-static inline int __must_check pci_register_driver(struct pci_driver *driver)
-{
- return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME);
-}
+
+/*
+ * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
+ */
+#define pci_register_driver(driver) \
+ __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
void pci_unregister_driver(struct pci_driver *dev);
void pci_remove_behind_bridge(struct pci_dev *dev);
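With pci_register_driver() turned into a macro, KBUILD_MODNAME expands in the calling module rather than inside pci.h, so each driver registers under its own name. Caller code is unchanged; a sketch with placeholder IDs:

    #include <linux/pci.h>
    #include <linux/module.h>

    static struct pci_device_id demo_ids[] = {
            { PCI_DEVICE(0x1234, 0x5678) },  /* placeholder vendor/device */
            { 0, }
    };
    MODULE_DEVICE_TABLE(pci, demo_ids);

    static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            return pci_enable_device(pdev);
    }

    static struct pci_driver demo_driver = {
            .name           = "demo-pci",
            .id_table       = demo_ids,
            .probe          = demo_probe,
    };

    static int __init demo_init(void)
    {
            /* macro form: KBUILD_MODNAME expands to this module's name */
            return pci_register_driver(&demo_driver);
    }
    module_init(demo_init);

    static void __exit demo_exit(void)
    {
            pci_unregister_driver(&demo_driver);
    }
    module_exit(demo_exit);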
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 35a78415acc..9ec2bcce8e8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2177,8 +2177,6 @@
#define PCI_DEVICE_ID_HERC_WIN 0x5732
#define PCI_DEVICE_ID_HERC_UNI 0x5832
-#define PCI_VENDOR_ID_RDC 0x17f3
-
#define PCI_VENDOR_ID_SITECOM 0x182d
#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5270d449ff9..5850bfb968a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
extern unsigned long long sched_clock(void);
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
- return sched_clock();
-}
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}
@@ -1572,28 +1566,11 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
-
-#ifdef CONFIG_NO_HZ
-static inline void sched_clock_tick_stop(int cpu)
-{
-}
-
-static inline void sched_clock_tick_start(int cpu)
-{
-}
-#endif
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
+#else
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-#ifdef CONFIG_NO_HZ
-extern void sched_clock_tick_stop(int cpu);
-extern void sched_clock_tick_start(int cpu);
#endif
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
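sched_clock_init() and sched_clock_cpu() are now declared unconditionally, so callers no longer depend on CONFIG_HAVE_UNSTABLE_SCHED_CLOCK to see them. A trivial sketch of timing a section with sched_clock():

    #include <linux/sched.h>

    static u64 demo_elapsed_ns(void)
    {
            u64 start = sched_clock();

            /* ... the work being timed ... */

            return sched_clock() - start;   /* ns, may drift across CPUs */
    }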
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index a66304a0995..a1783b229ef 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mutex.h>
+#include <linux/cpumask.h>
+#include <linux/nodemask.h>
struct seq_operations;
struct file;
@@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *);
int seq_dentry(struct seq_file *, struct dentry *, char *);
int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *esc);
+int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits);
+static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask)
+{
+ return seq_bitmap(m, mask->bits, NR_CPUS);
+}
+
+static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
+{
+ return seq_bitmap(m, mask->bits, MAX_NUMNODES);
+}
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_release(struct inode *, struct file *);
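seq_cpumask() and seq_nodemask() are thin wrappers around the new seq_bitmap(). A sketch of a single_open()-style show routine printing the online cpumask (the file plumbing is omitted):

    #include <linux/seq_file.h>
    #include <linux/cpumask.h>

    static int demo_cpus_show(struct seq_file *m, void *v)
    {
            seq_cpumask(m, &cpu_online_map);
            seq_putc(m, '\n');
            return 0;
    }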
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5bad61a93f6..2f5c16b1aac 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -46,6 +46,7 @@ struct kmem_cache_cpu {
struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
+ unsigned long min_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
diff --git a/include/linux/swab.h b/include/linux/swab.h
new file mode 100644
index 00000000000..270d5c208a8
--- /dev/null
+++ b/include/linux/swab.h
@@ -0,0 +1,309 @@
+#ifndef _LINUX_SWAB_H
+#define _LINUX_SWAB_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+/*
+ * casts are necessary for constants, because we never know for sure
+ * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define __const_swab16(x) ((__u16)( \
+ (((__u16)(x) & (__u16)0x00ffU) << 8) | \
+ (((__u16)(x) & (__u16)0xff00U) >> 8)))
+
+#define __const_swab32(x) ((__u32)( \
+ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
+ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
+ (((__u32)(x) & (__u32)0xff000000UL) >> 24)))
+
+#define __const_swab64(x) ((__u64)( \
+ (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+ (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+ (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+ (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
+ (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+ (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+ (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+ (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
+
+#define __const_swahw32(x) ((__u32)( \
+ (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
+ (((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
+
+#define __const_swahb32(x) ((__u32)( \
+ (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
+ (((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
+
+/*
+ * Implement the following as inlines, but define the interface using
+ * macros to allow constant folding when possible:
+ * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
+ */
+
+static inline __attribute_const__ __u16 ___swab16(__u16 val)
+{
+#ifdef __arch_swab16
+ return __arch_swab16(val);
+#elif defined(__arch_swab16p)
+ return __arch_swab16p(&val);
+#else
+ return __const_swab16(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swab32(__u32 val)
+{
+#ifdef __arch_swab32
+ return __arch_swab32(val);
+#elif defined(__arch_swab32p)
+ return __arch_swab32p(&val);
+#else
+ return __const_swab32(val);
+#endif
+}
+
+static inline __attribute_const__ __u64 ___swab64(__u64 val)
+{
+#ifdef __arch_swab64
+ return __arch_swab64(val);
+#elif defined(__arch_swab64p)
+ return __arch_swab64p(&val);
+#elif defined(__SWAB_64_THRU_32__)
+ __u32 h = val >> 32;
+ __u32 l = val & ((1ULL << 32) - 1);
+ return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h)));
+#else
+ return __const_swab64(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swahw32(__u32 val)
+{
+#ifdef __arch_swahw32
+ return __arch_swahw32(val);
+#elif defined(__arch_swahw32p)
+ return __arch_swahw32p(&val);
+#else
+ return __const_swahw32(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swahb32(__u32 val)
+{
+#ifdef __arch_swahb32
+ return __arch_swahb32(val);
+#elif defined(__arch_swahb32p)
+ return __arch_swahb32p(&val);
+#else
+ return __const_swahb32(val);
+#endif
+}
+
+/**
+ * __swab16 - return a byteswapped 16-bit value
+ * @x: value to byteswap
+ */
+#define __swab16(x) \
+ (__builtin_constant_p((__u16)(x)) ? \
+ __const_swab16((x)) : \
+ ___swab16((x)))
+
+/**
+ * __swab32 - return a byteswapped 32-bit value
+ * @x: value to byteswap
+ */
+#define __swab32(x) \
+ (__builtin_constant_p((__u32)(x)) ? \
+ __const_swab32((x)) : \
+ ___swab32((x)))
+
+/**
+ * __swab64 - return a byteswapped 64-bit value
+ * @x: value to byteswap
+ */
+#define __swab64(x) \
+ (__builtin_constant_p((__u64)(x)) ? \
+ __const_swab64((x)) : \
+ ___swab64((x)))
+
+/**
+ * __swahw32 - return a word-swapped 32-bit value
+ * @x: value to wordswap
+ *
+ * __swahw32(0x12340000) is 0x00001234
+ */
+#define __swahw32(x) \
+ (__builtin_constant_p((__u32)(x)) ? \
+ __const_swahw32((x)) : \
+ ___swahw32((x)))
+
+/**
+ * __swahb32 - return a high and low byte-swapped 32-bit value
+ * @x: value to byteswap
+ *
+ * __swahb32(0x12345678) is 0x34127856
+ */
+#define __swahb32(x) \
+ (__builtin_constant_p((__u32)(x)) ? \
+ __const_swahb32((x)) : \
+ ___swahb32((x)))
+
+/**
+ * __swab16p - return a byteswapped 16-bit value from a pointer
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline __u16 __swab16p(const __u16 *p)
+{
+#ifdef __arch_swab16p
+ return __arch_swab16p(p);
+#else
+ return __swab16(*p);
+#endif
+}
+
+/**
+ * __swab32p - return a byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline __u32 __swab32p(const __u32 *p)
+{
+#ifdef __arch_swab32p
+ return __arch_swab32p(p);
+#else
+ return __swab32(*p);
+#endif
+}
+
+/**
+ * __swab64p - return a byteswapped 64-bit value from a pointer
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline __u64 __swab64p(const __u64 *p)
+{
+#ifdef __arch_swab64p
+ return __arch_swab64p(p);
+#else
+ return __swab64(*p);
+#endif
+}
+
+/**
+ * __swahw32p - return a wordswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping.
+ */
+static inline __u32 __swahw32p(const __u32 *p)
+{
+#ifdef __arch_swahw32p
+ return __arch_swahw32p(p);
+#else
+ return __swahw32(*p);
+#endif
+}
+
+/**
+ * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high/low byteswapping.
+ */
+static inline __u32 __swahb32p(const __u32 *p)
+{
+#ifdef __arch_swahb32p
+ return __arch_swahb32p(p);
+#else
+ return __swahb32(*p);
+#endif
+}
+
+/**
+ * __swab16s - byteswap a 16-bit value in-place
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline void __swab16s(__u16 *p)
+{
+#ifdef __arch_swab16s
+ __arch_swab16s(p);
+#else
+ *p = __swab16p(p);
+#endif
+}
+/**
+ * __swab32s - byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline void __swab32s(__u32 *p)
+{
+#ifdef __arch_swab32s
+ __arch_swab32s(p);
+#else
+ *p = __swab32p(p);
+#endif
+}
+
+/**
+ * __swab64s - byteswap a 64-bit value in-place
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline void __swab64s(__u64 *p)
+{
+#ifdef __arch_swab64s
+ __arch_swab64s(p);
+#else
+ *p = __swab64p(p);
+#endif
+}
+
+/**
+ * __swahw32s - wordswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping
+ */
+static inline void __swahw32s(__u32 *p)
+{
+#ifdef __arch_swahw32s
+ __arch_swahw32s(p);
+#else
+ *p = __swahw32p(p);
+#endif
+}
+
+/**
+ * __swahb32s - high and low byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high and low byte swapping
+ */
+static inline void __swahb32s(__u32 *p)
+{
+#ifdef __arch_swahb32s
+ __arch_swahb32s(p);
+#else
+ *p = __swahb32p(p);
+#endif
+}
+
+#ifdef __KERNEL__
+# define swab16 __swab16
+# define swab32 __swab32
+# define swab64 __swab64
+# define swahw32 __swahw32
+# define swahb32 __swahb32
+# define swab16p __swab16p
+# define swab32p __swab32p
+# define swab64p __swab64p
+# define swahw32p __swahw32p
+# define swahb32p __swahb32p
+# define swab16s __swab16s
+# define swab32s __swab32s
+# define swab64s __swab64s
+# define swahw32s __swahw32s
+# define swahb32s __swahb32s
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SWAB_H */
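The __builtin_constant_p() tests in the __swab*() macros let constant arguments fold at compile time, while variables go through the (possibly arch-optimized) inlines. The expected results for one sample value:

    #include <linux/swab.h>

    static void demo_swaps(void)
    {
            u32 x = 0x12345678;

            u32 b = swab32(x);     /* 0x78563412: full byte swap            */
            u32 w = swahw32(x);    /* 0x56781234: swap 16-bit half-words    */
            u32 h = swahb32(x);    /* 0x34127856: swap bytes within halves  */

            (void)b; (void)w; (void)h;
    }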