Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bug.h            |  10
-rw-r--r--  include/asm-generic/sections.h       |   6
-rw-r--r--  include/asm-mips/cevt-r4k.h          |  46
-rw-r--r--  include/asm-mips/irqflags.h          |  26
-rw-r--r--  include/asm-mips/mipsregs.h          |   6
-rw-r--r--  include/asm-mips/pgtable-32.h        |   2
-rw-r--r--  include/asm-mips/smtc.h              |   8
-rw-r--r--  include/asm-mips/sn/mapped_kernel.h  |   8
-rw-r--r--  include/asm-mips/stackframe.h        |  72
-rw-r--r--  include/asm-parisc/sections.h        |   5
-rw-r--r--  include/asm-x86/acpi.h               |   2
-rw-r--r--  include/asm-x86/cpufeature.h         |   1
-rw-r--r--  include/asm-x86/idle.h               |   2
-rw-r--r--  include/asm-x86/kgdb.h               |  24
-rw-r--r--  include/asm-x86/uaccess_64.h         |   1
-rw-r--r--  include/linux/ata.h                  |   2
-rw-r--r--  include/linux/blkdev.h               |   2
-rw-r--r--  include/linux/cnt32_to_63.h          |  80
-rw-r--r--  include/linux/completion.h           |  41
-rw-r--r--  include/linux/cpuset.h               |   2
-rw-r--r--  include/linux/hrtimer.h              |  18
-rw-r--r--  include/linux/ide.h                  |   4
-rw-r--r--  include/linux/ioport.h               |   4
-rw-r--r--  include/linux/memstick.h             |  97
-rw-r--r--  include/linux/mlx4/device.h          |   4
-rw-r--r--  include/linux/mmzone.h               |  12
-rw-r--r--  include/linux/pci.h                  |   8
-rw-r--r--  include/linux/pnp.h                  |   7
-rw-r--r--  include/linux/proportions.h          |   2
-rw-r--r--  include/linux/ramfs.h                |   1
-rw-r--r--  include/linux/sched.h                |   6
-rw-r--r--  include/linux/smb.h                  |   2
-rw-r--r--  include/linux/stacktrace.h           |   2
-rw-r--r--  include/net/9p/9p.h                  |   1
-rw-r--r--  include/net/9p/transport.h           |   9
-rw-r--r--  include/net/bluetooth/hci_core.h     |   3
-rw-r--r--  include/net/inet_timewait_sock.h     |   3
-rw-r--r--  include/net/netlink.h                |   2
-rw-r--r--  include/net/sctp/sm.h                |   3
-rw-r--r--  include/scsi/scsi.h                  |  14
40 files changed, 433 insertions, 115 deletions
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index a3f738cffdb..edc6ba82e09 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -97,6 +97,16 @@ extern void warn_slowpath(const char *file, const int line,
unlikely(__ret_warn_once); \
})
+#define WARN_ONCE(condition, format...) ({ \
+ static int __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once)) \
+ if (WARN(!__warned, format)) \
+ __warned = 1; \
+ unlikely(__ret_warn_once); \
+})
+
#define WARN_ON_RATELIMIT(condition, state) \
WARN_ON((condition) && __ratelimit(state))
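
The new WARN_ONCE() evaluates to the condition value, so it can both gate an error path and limit the log spam to the first occurrence. A minimal usage sketch (struct mydev, STATUS and dev->id are hypothetical, not from this patch):

static int read_status(struct mydev *dev)
{
	u32 val = readl(dev->regs + STATUS);

	/* A full WARN splat is printed only the first time this fires. */
	if (WARN_ONCE(val == 0xffffffff, "mydev%d: bus returned all-ones\n",
		      dev->id))
		return -EIO;

	return val;
}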
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 8feeae1f236..79a7ff925bf 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,4 +14,10 @@ extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __initdata_begin[], __initdata_end[];
extern char __start_rodata[], __end_rodata[];
+/* function descriptor handling (if any). Override
+ * in asm/sections.h */
+#ifndef dereference_function_descriptor
+#define dereference_function_descriptor(p) (p)
+#endif
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
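
A sketch of how generic code is expected to use the new hook: resolve a function pointer to its text address before symbol lookup. On most architectures this is a no-op, while descriptor ABIs (e.g. 64-bit parisc below) override it; the handler pointer fn is illustrative:

char sym[KSYM_SYMBOL_LEN];
void *text = dereference_function_descriptor((void *)fn);

sprint_symbol(sym, (unsigned long)text);
printk(KERN_INFO "handler resolves to %s\n", sym);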
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h
new file mode 100644
index 00000000000..fa4328f9124
--- /dev/null
+++ b/include/asm-mips/cevt-r4k.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
+#endif /* __ASM_CEVT_R4K_H */
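
handle_perf_irq() encodes a calling convention rather than doing the work itself; a simplified sketch of a timer interrupt handler consulting it (not the actual arch/mips/kernel/cevt-r4k.c code):

static irqreturn_t timer_interrupt_sketch(int irq, void *dev_id)
{
	/* r2 == 0: we cannot tell perf and timer interrupt causes apart */
	if (handle_perf_irq(0))
		return IRQ_HANDLED;

	/* otherwise check CP0 Count/Compare and run the clockevent
	 * event_handler as usual */
	return IRQ_HANDLED;
}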
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index 881e8866501..701ec0ba8fa 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -38,8 +38,17 @@ __asm__(
" .set pop \n"
" .endm");
+extern void smtc_ipi_replay(void);
+
static inline void raw_local_irq_enable(void)
{
+#ifdef CONFIG_MIPS_MT_SMTC
+ /*
+ * SMTC kernel needs to do a software replay of queued
+ * IPIs, at the cost of call overhead on each local_irq_enable()
+ */
+ smtc_ipi_replay();
+#endif
__asm__ __volatile__(
"raw_local_irq_enable"
: /* no outputs */
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void)
: "memory");
}
+
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@ __asm__(
" .set pop \n"
" .endm \n");
-extern void smtc_ipi_replay(void);
static inline void raw_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+#ifdef CONFIG_MIPS_MT_SMTC
/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+ * SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags)
: "memory");
}
+static inline void __raw_local_irq_restore(unsigned long flags)
+{
+ unsigned long __tmp1;
+
+ __asm__ __volatile__(
+ "raw_local_irq_restore\t%0"
+ : "=r" (__tmp1)
+ : "0" (flags)
+ : "memory");
+}
+
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index a46f8e258e6..979866000da 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \
{ \
unsigned int res; \
unsigned int omt; \
- unsigned int flags; \
+ unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \
{ \
unsigned int res; \
unsigned int omt; \
- unsigned int flags; \
+ unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \
{ \
unsigned int res; \
unsigned int omt; \
- unsigned int flags; \
+ unsigned long flags; \
\
local_irq_save(flags); \
\
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4396e9ffd41..55813d6150c 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#define PMD_ORDER 1
#define PTE_ORDER 0
-#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index 3639b28f80d..ea60bf08dcb 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -6,6 +6,7 @@
*/
#include <asm/mips_mt.h>
+#include <asm/smtc_ipi.h>
/*
* System-wide SMTC status information
@@ -38,14 +39,15 @@ struct mm_struct;
struct task_struct;
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-
+void self_ipi(struct smtc_ipi *);
void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
+
/*
* Sharing the TLB between multiple VPEs means that the
* "random" index selection function is not allowed to
diff --git a/include/asm-mips/sn/mapped_kernel.h b/include/asm-mips/sn/mapped_kernel.h
index c3dd5d0d525..721496a0bb9 100644
--- a/include/asm-mips/sn/mapped_kernel.h
+++ b/include/asm-mips/sn/mapped_kernel.h
@@ -5,6 +5,8 @@
#ifndef __ASM_SN_MAPPED_KERNEL_H
#define __ASM_SN_MAPPED_KERNEL_H
+#include <linux/mmzone.h>
+
/*
* Note on how mapped kernels work: the text and data section is
* compiled at cksseg segment (LOADADDR = 0xc001c000), and the
@@ -29,10 +31,8 @@
#define MAPPED_ADDR_RO_TO_PHYS(x) (x - REP_BASE)
#define MAPPED_ADDR_RW_TO_PHYS(x) (x - REP_BASE - 16777216)
-#define MAPPED_KERN_RO_PHYSBASE(n) \
- (PLAT_NODE_DATA(n)->kern_vars.kv_ro_baseaddr)
-#define MAPPED_KERN_RW_PHYSBASE(n) \
- (PLAT_NODE_DATA(n)->kern_vars.kv_rw_baseaddr)
+#define MAPPED_KERN_RO_PHYSBASE(n) (hub_data(n)->kern_vars.kv_ro_baseaddr)
+#define MAPPED_KERN_RW_PHYSBASE(n) (hub_data(n)->kern_vars.kv_rw_baseaddr)
#define MAPPED_KERN_RO_TO_PHYS(x) \
((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 051e1af0bb9..4c37c4e5f72 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -297,14 +297,31 @@
#ifdef CONFIG_MIPS_MT_SMTC
.set mips32r2
/*
- * This may not really be necessary if ints are already
- * inhibited here.
+ * We need to make sure the read-modify-write
+ * of Status below isn't perturbed by an interrupt
+ * or cross-TC access, so we need to do at least a DMT,
+ * protected by an interrupt-inhibit. But setting IXMT
+ * also creates a few-cycle window where an IPI could
+ * be queued and not be detected before potentially
+ * returning to a WAIT or user-mode loop. It must be
+ * replayed.
+ *
+ * We're in the middle of a context switch, and
+ * we can't dispatch it directly without trashing
+ * some registers, so we'll try to detect this unlikely
+ * case and program a software interrupt in the VPE,
+ * as would be done for a cross-VPE IPI. To accommodate
+ * the handling of that case, we're doing a DVPE instead
+ * of just a DMT here to protect against other threads.
+ * This is a lot of cruft to cover a tiny window.
+ * If you can find a better design, implement it!
+ *
*/
mfc0 v0, CP0_TCSTATUS
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
- DMT 5 # dmt a1
+ DVPE 5 # dvpe a1
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 a0, CP0_STATUS
@@ -325,17 +342,50 @@
*/
LONG_L v1, PT_TCSTATUS(sp)
_ehb
- mfc0 v0, CP0_TCSTATUS
+ mfc0 a0, CP0_TCSTATUS
andi v1, TCSTATUS_IXMT
- /* We know that TCStatua.IXMT should be set from above */
- xori v0, v0, TCSTATUS_IXMT
- or v0, v0, v1
- mtc0 v0, CP0_TCSTATUS
- _ehb
- andi a1, a1, VPECONTROL_TE
+ bnez v1, 0f
+
+/*
+ * We'd like to detect any IPIs queued in the tiny window
+ * above and request a software interrupt to service them
+ * when we ERET.
+ *
+ * Computing the offset into the IPIQ array of the executing
+ * TC's IPI queue in-line would be tedious. We use part of
+ * the TCContext register to hold 16 bits of offset that we
+ * can add in-line to find the queue head.
+ */
+ mfc0 v0, CP0_TCCONTEXT
+ la a2, IPIQ
+ srl v0, v0, 16
+ addu a2, a2, v0
+ LONG_L v0, 0(a2)
+ beqz v0, 0f
+/*
+ * If we have a queue, provoke dispatch within the VPE by setting C_SW1
+ */
+ mfc0 v0, CP0_CAUSE
+ ori v0, v0, C_SW1
+ mtc0 v0, CP0_CAUSE
+0:
+ /*
+ * This test should really never branch but
+ * let's be prudent here. Having atomized
+ * the shared register modifications, we can
+ * now EVPE, and must do so before interrupts
+ * are potentially re-enabled.
+ */
+ andi a1, a1, MVPCONTROL_EVP
beqz a1, 1f
- emt
+ evpe
1:
+ /* We know that TCStatus.IXMT should be set from above */
+ xori a0, a0, TCSTATUS_IXMT
+ or a0, a0, v1
+ mtc0 a0, CP0_TCSTATUS
+ _ehb
+
.set mips0
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_L v1, PT_EPC(sp)
diff --git a/include/asm-parisc/sections.h b/include/asm-parisc/sections.h
index fdd43ec42ec..9d13c3507ad 100644
--- a/include/asm-parisc/sections.h
+++ b/include/asm-parisc/sections.h
@@ -4,4 +4,9 @@
/* nothing to see, move along */
#include <asm-generic/sections.h>
+#ifdef CONFIG_64BIT
+#undef dereference_function_descriptor
+void *dereference_function_descriptor(void *);
+#endif
+
#endif
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 635d764dc13..35d1743b57a 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_mask < 0x0A)
return 1;
+ else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+ return 1;
else
return max_cstate;
}
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 9489283a4bc..cfcfb0a806b 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -81,6 +81,7 @@
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index d240e5b30a4..cbb64912361 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n);
void enter_idle(void);
void exit_idle(void);
+void c1e_remove_cpu(int cpu);
+
#endif
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
index 484c47554f3..94d63db1036 100644
--- a/include/asm-x86/kgdb.h
+++ b/include/asm-x86/kgdb.h
@@ -39,12 +39,13 @@ enum regnames {
GDB_FS, /* 14 */
GDB_GS, /* 15 */
};
+#define NUMREGBYTES ((GDB_GS+1)*4)
#else /* ! CONFIG_X86_32 */
-enum regnames {
+enum regnames64 {
GDB_AX, /* 0 */
- GDB_DX, /* 1 */
+ GDB_BX, /* 1 */
GDB_CX, /* 2 */
- GDB_BX, /* 3 */
+ GDB_DX, /* 3 */
GDB_SI, /* 4 */
GDB_DI, /* 5 */
GDB_BP, /* 6 */
@@ -58,18 +59,15 @@ enum regnames {
GDB_R14, /* 14 */
GDB_R15, /* 15 */
GDB_PC, /* 16 */
- GDB_PS, /* 17 */
};
-#endif /* CONFIG_X86_32 */
-/*
- * Number of bytes of registers:
- */
-#ifdef CONFIG_X86_32
-# define NUMREGBYTES 64
-#else
-# define NUMREGBYTES ((GDB_PS+1)*8)
-#endif
+enum regnames32 {
+ GDB_PS = 34,
+ GDB_CS,
+ GDB_SS,
+};
+#define NUMREGBYTES ((GDB_SS+1)*4)
+#endif /* CONFIG_X86_32 */
static inline void arch_kgdb_breakpoint(void)
{
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 515d4dce96b..45806d60bcb 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
+#include <linux/lockdep.h>
#include <asm/page.h>
/*
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 1ce19c1ef0e..8a12d718c16 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -745,7 +745,7 @@ static inline int ata_ok(u8 status)
static inline int lba_28_ok(u64 block, u32 n_block)
{
/* check the ending block number */
- return ((block + n_block - 1) < ((u64)1 << 28)) && (n_block <= 256);
+ return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256);
}
static inline int lba_48_ok(u64 block, u32 n_block)
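
A worked boundary check for the tightened lba_28_ok() test (values chosen purely for illustration):

/* block = 0x0FFFFFFE, n_block = 1: block + n_block = 0x0FFFFFFF <  1 << 28 -> 1 (LBA28 ok)  */
/* block = 0x0FFFFFFE, n_block = 2: block + n_block = 0x10000000 !< 1 << 28 -> 0 (fall back) */

In other words, a request that would touch sector 0x0FFFFFFF is now left to lba_48_ok().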
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 44710d7e7bf..53ea933cf60 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -843,8 +843,6 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
*/
extern int blk_verify_command(struct blk_cmd_filter *filter,
unsigned char *cmd, int has_write_perm);
-extern int blk_register_filter(struct gendisk *disk);
-extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define MAX_PHYS_SEGMENTS 128
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
new file mode 100644
index 00000000000..8c0f9505b48
--- /dev/null
+++ b/include/linux/cnt32_to_63.h
@@ -0,0 +1,80 @@
+/*
+ * Extend a 32-bit counter to 63 bits
+ *
+ * Author: Nicolas Pitre
+ * Created: December 3, 2006
+ * Copyright: MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_CNT32_TO_63_H__
+#define __LINUX_CNT32_TO_63_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* this is used only to give gcc a clue about good code generation */
+union cnt32_to_63 {
+ struct {
+#if defined(__LITTLE_ENDIAN)
+ u32 lo, hi;
+#elif defined(__BIG_ENDIAN)
+ u32 hi, lo;
+#endif
+ };
+ u64 val;
+};
+
+
+/**
+ * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
+ * @cnt_lo: The low part of the counter
+ *
+ * Many hardware clock counters are only 32 bits wide and therefore have
+ * a relatively short period making wrap-arounds rather frequent. This
+ * is a problem when implementing sched_clock() for example, where a 64-bit
+ * non-wrapping monotonic value is expected to be returned.
+ *
+ * To overcome that limitation, let's extend a 32-bit counter to 63 bits
+ * in a completely lock free fashion. Bits 0 to 31 of the clock are provided
+ * by the hardware while bits 32 to 62 are stored in memory. The top bit in
+ * memory is used to synchronize with the hardware clock half-period. When
+ * the top bit of both counters (hardware and in memory) differ then the
+ * memory is updated with a new value, incrementing it when the hardware
+ * counter wraps around.
+ *
+ * Because a word store in memory is atomic, the incremented value will
+ * always be in sync with the top bit, indicating to any potential concurrent
+ * reader whether the value in memory is up to date with regard to the
+ * needed increment. Any race in updating the value in memory is harmless,
+ * as the same value would simply be stored more than once.
+ *
+ * The only restriction for the algorithm to work properly is that this
+ * code must be executed at least once per half period of the 32-bit
+ * counter to properly update the state bit in memory. This is usually not a
+ * problem in practice, but if it is, a kernel timer can be scheduled
+ * to ensure this code is executed often enough.
+ *
+ * Note that the top bit (bit 63) in the returned value should be considered
+ * as garbage. It is not cleared here because callers are likely to use a
+ * multiplier on the returned value which can get rid of the top bit
+ * implicitly by making the multiplier even, therefore saving on a runtime
+ * clear-bit instruction. Otherwise the caller must remember to clear the top
+ * bit explicitly.
+ */
+#define cnt32_to_63(cnt_lo) \
+({ \
+ static volatile u32 __m_cnt_hi; \
+ union cnt32_to_63 __x; \
+ __x.hi = __m_cnt_hi; \
+ __x.lo = (cnt_lo); \
+ if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
+ __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
+ __x.val; \
+})
+
+#endif
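
A sketch of the intended use: a sched_clock() built on a hypothetical 32-bit free-running counter (read_my_counter() and the 500 MHz rate are illustrative, not a real API):

static unsigned long long my_sched_clock(void)
{
	u64 cyc = cnt32_to_63(read_my_counter());

	/* The multiplier is even, so the garbage bit 63 is discarded
	 * implicitly, as recommended in the comment above. */
	return cyc * 2;		/* 500 MHz counter -> nanoseconds */
}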
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 02ef8835999..4a6b604ef7e 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,6 +10,18 @@
#include <linux/wait.h>
+/**
+ * struct completion - structure used to maintain state for a "completion"
+ *
+ * This is the opaque structure used to maintain the state for a "completion".
+ * Completions currently use a FIFO to queue threads that have to wait for
+ * the "completion" event.
+ *
+ * See also: complete(), wait_for_completion() (and friends _timeout,
+ * _interruptible, _interruptible_timeout, and _killable), init_completion(),
+ * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
+ * INIT_COMPLETION().
+ */
struct completion {
unsigned int done;
wait_queue_head_t wait;
@@ -21,6 +33,14 @@ struct completion {
#define COMPLETION_INITIALIZER_ONSTACK(work) \
({ init_completion(&work); work; })
+/**
+ * DECLARE_COMPLETION: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure. Generally used
+ * for static declarations. You should use the _ONSTACK variant for automatic
+ * variables.
+ */
#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)
@@ -29,6 +49,13 @@ struct completion {
* completions - so we use the _ONSTACK() variant for those that
* are on the kernel stack:
*/
+/**
+ * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure on the kernel
+ * stack.
+ */
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
@@ -36,6 +63,13 @@ struct completion {
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
#endif
+/**
+ * init_completion: - Initialize a dynamically allocated completion
+ * @x: completion structure that is to be initialized
+ *
+ * This inline function will initialize a dynamically created completion
+ * structure.
+ */
static inline void init_completion(struct completion *x)
{
x->done = 0;
@@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
+/**
+ * INIT_COMPLETION: - reinitialize a completion structure
+ * @x: completion structure to be reinitialized
+ *
+ * This macro should be used to reinitialize a completion structure so it can
+ * be reused. This is especially important after complete_all() is used.
+ */
#define INIT_COMPLETION(x) ((x).done = 0)
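
A minimal usage sketch tying the documented pieces together (the waiter/finisher split is hypothetical):

static DECLARE_COMPLETION(setup_done);

static int waiter_thread(void *unused)
{
	wait_for_completion(&setup_done);	/* sleeps until complete() */
	/* ... setup is now finished ... */
	return 0;
}

static void setup_finished(void)
{
	complete(&setup_done);			/* wakes one waiter */
}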
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e8f450c499b..2691926fb50 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -160,7 +160,7 @@ static inline int current_cpuset_is_being_rebound(void)
static inline void rebuild_sched_domains(void)
{
- partition_sched_domains(0, NULL, NULL);
+ partition_sched_domains(1, NULL, NULL);
}
#endif /* !CONFIG_CPUSETS */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 6d93dce61cb..2f245fe63bd 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -47,14 +47,22 @@ enum hrtimer_restart {
* HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
* HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
* does not restart the timer
- * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context
- * Special mode for tick emultation
+ * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
+ * Special mode for tick emulation and
+ * scheduler timer. Such timers are per
+ * cpu and not allowed to be migrated on
+ * cpu unplug.
+ * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
+ * with timer->base lock unlocked
+ * used for timers which call wakeup to
+ * avoid lock order problems with rq->lock
*/
enum hrtimer_cb_mode {
HRTIMER_CB_SOFTIRQ,
HRTIMER_CB_IRQSAFE,
HRTIMER_CB_IRQSAFE_NO_RESTART,
- HRTIMER_CB_IRQSAFE_NO_SOFTIRQ,
+ HRTIMER_CB_IRQSAFE_PERCPU,
+ HRTIMER_CB_IRQSAFE_UNLOCKED,
};
/*
@@ -67,9 +75,10 @@ enum hrtimer_cb_mode {
* 0x02 callback function running
* 0x04 callback pending (high resolution mode)
*
- * Special case:
+ * Special cases:
* 0x03 callback function running and enqueued
* (was requeued on another CPU)
+ * 0x09 timer was migrated on CPU hotunplug
* The "callback function running and enqueued" status is only possible on
* SMP. It happens for example when a posix timer expired and the callback
* queued a signal. Between dropping the lock which protects the posix timer
@@ -87,6 +96,7 @@ enum hrtimer_cb_mode {
#define HRTIMER_STATE_ENQUEUED 0x01
#define HRTIMER_STATE_CALLBACK 0x02
#define HRTIMER_STATE_PENDING 0x04
+#define HRTIMER_STATE_MIGRATE 0x08
/**
* struct hrtimer - the basic hrtimer structure
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 1524829f73f..6514db8fd2e 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -366,7 +366,9 @@ enum {
/* Currently on a filemark */
IDE_AFLAG_FILEMARK = (1 << 25),
/* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = (1 << 26)
+ IDE_AFLAG_MEDIUM_PRESENT = (1 << 26),
+
+ IDE_AFLAG_NO_AUTOCLOSE = (1 << 27),
};
struct ide_drive_s {
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 8d3b7a9afd1..350033e8f4e 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -159,9 +159,9 @@ extern struct resource * __devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name);
-#define devm_release_region(start,n) \
+#define devm_release_region(dev, start, n) \
__devm_release_region(dev, &ioport_resource, (start), (n))
-#define devm_release_mem_region(start,n) \
+#define devm_release_mem_region(dev, start, n) \
__devm_release_region(dev, &iomem_resource, (start), (n))
extern void __devm_release_region(struct device *dev, struct resource *parent,
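
With this fix callers must pass the owning struct device explicitly, matching __devm_release_region(); a one-line sketch (pdev, res_start and res_len are illustrative):

devm_release_mem_region(&pdev->dev, res_start, res_len);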
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index a9f998a3f48..d0c37e68223 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -21,30 +21,30 @@
struct ms_status_register {
unsigned char reserved;
unsigned char interrupt;
-#define MEMSTICK_INT_CMDNAK 0x0001
-#define MEMSTICK_INT_IOREQ 0x0008
-#define MEMSTICK_INT_IOBREQ 0x0010
-#define MEMSTICK_INT_BREQ 0x0020
-#define MEMSTICK_INT_ERR 0x0040
-#define MEMSTICK_INT_CED 0x0080
+#define MEMSTICK_INT_CMDNAK 0x01
+#define MEMSTICK_INT_IOREQ 0x08
+#define MEMSTICK_INT_IOBREQ 0x10
+#define MEMSTICK_INT_BREQ 0x20
+#define MEMSTICK_INT_ERR 0x40
+#define MEMSTICK_INT_CED 0x80
unsigned char status0;
-#define MEMSTICK_STATUS0_WP 0x0001
-#define MEMSTICK_STATUS0_SL 0x0002
-#define MEMSTICK_STATUS0_BF 0x0010
-#define MEMSTICK_STATUS0_BE 0x0020
-#define MEMSTICK_STATUS0_FB0 0x0040
-#define MEMSTICK_STATUS0_MB 0x0080
+#define MEMSTICK_STATUS0_WP 0x01
+#define MEMSTICK_STATUS0_SL 0x02
+#define MEMSTICK_STATUS0_BF 0x10
+#define MEMSTICK_STATUS0_BE 0x20
+#define MEMSTICK_STATUS0_FB0 0x40
+#define MEMSTICK_STATUS0_MB 0x80
unsigned char status1;
-#define MEMSTICK_STATUS1_UCFG 0x0001
-#define MEMSTICK_STATUS1_FGER 0x0002
-#define MEMSTICK_STATUS1_UCEX 0x0004
-#define MEMSTICK_STATUS1_EXER 0x0008
-#define MEMSTICK_STATUS1_UCDT 0x0010
-#define MEMSTICK_STATUS1_DTER 0x0020
-#define MEMSTICK_STATUS1_FBI 0x0040
-#define MEMSTICK_STATUS1_MB 0x0080
+#define MEMSTICK_STATUS1_UCFG 0x01
+#define MEMSTICK_STATUS1_FGER 0x02
+#define MEMSTICK_STATUS1_UCEX 0x04
+#define MEMSTICK_STATUS1_EXER 0x08
+#define MEMSTICK_STATUS1_UCDT 0x10
+#define MEMSTICK_STATUS1_DTER 0x20
+#define MEMSTICK_STATUS1_FB1 0x40
+#define MEMSTICK_STATUS1_MB 0x80
} __attribute__((packed));
struct ms_id_register {
@@ -56,32 +56,32 @@ struct ms_id_register {
struct ms_param_register {
unsigned char system;
-#define MEMSTICK_SYS_ATEN 0xc0
-#define MEMSTICK_SYS_BAMD 0x80
#define MEMSTICK_SYS_PAM 0x08
+#define MEMSTICK_SYS_BAMD 0x80
unsigned char block_address_msb;
unsigned short block_address;
unsigned char cp;
-#define MEMSTICK_CP_BLOCK 0x0000
-#define MEMSTICK_CP_PAGE 0x0020
-#define MEMSTICK_CP_EXTRA 0x0040
-#define MEMSTICK_CP_OVERWRITE 0x0080
+#define MEMSTICK_CP_BLOCK 0x00
+#define MEMSTICK_CP_PAGE 0x20
+#define MEMSTICK_CP_EXTRA 0x40
+#define MEMSTICK_CP_OVERWRITE 0x80
unsigned char page_address;
} __attribute__((packed));
struct ms_extra_data_register {
unsigned char overwrite_flag;
-#define MEMSTICK_OVERWRITE_UPDATA 0x0010
-#define MEMSTICK_OVERWRITE_PAGE 0x0060
-#define MEMSTICK_OVERWRITE_BLOCK 0x0080
+#define MEMSTICK_OVERWRITE_UDST 0x10
+#define MEMSTICK_OVERWRITE_PGST1 0x20
+#define MEMSTICK_OVERWRITE_PGST0 0x40
+#define MEMSTICK_OVERWRITE_BKST 0x80
unsigned char management_flag;
-#define MEMSTICK_MANAGEMENT_SYSTEM 0x0004
-#define MEMSTICK_MANAGEMENT_TRANS_TABLE 0x0008
-#define MEMSTICK_MANAGEMENT_COPY 0x0010
-#define MEMSTICK_MANAGEMENT_ACCESS 0x0020
+#define MEMSTICK_MANAGEMENT_SYSFLG 0x04
+#define MEMSTICK_MANAGEMENT_ATFLG 0x08
+#define MEMSTICK_MANAGEMENT_SCMS1 0x10
+#define MEMSTICK_MANAGEMENT_SCMS0 0x20
unsigned short logical_address;
} __attribute__((packed));
@@ -96,9 +96,9 @@ struct ms_register {
struct mspro_param_register {
unsigned char system;
-#define MEMSTICK_SYS_SERIAL 0x80
#define MEMSTICK_SYS_PAR4 0x00
#define MEMSTICK_SYS_PAR8 0x40
+#define MEMSTICK_SYS_SERIAL 0x80
unsigned short data_count;
unsigned int data_address;
@@ -147,7 +147,7 @@ struct ms_register_addr {
unsigned char w_length;
} __attribute__((packed));
-enum {
+enum memstick_tpc {
MS_TPC_READ_MG_STATUS = 0x01,
MS_TPC_READ_LONG_DATA = 0x02,
MS_TPC_READ_SHORT_DATA = 0x03,
@@ -167,7 +167,7 @@ enum {
MS_TPC_SET_CMD = 0x0e
};
-enum {
+enum memstick_command {
MS_CMD_BLOCK_END = 0x33,
MS_CMD_RESET = 0x3c,
MS_CMD_BLOCK_WRITE = 0x55,
@@ -201,8 +201,6 @@ enum {
/*** Driver structures and functions ***/
-#define MEMSTICK_PART_SHIFT 3
-
enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE };
#define MEMSTICK_POWER_OFF 0
@@ -215,24 +213,27 @@ enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE };
struct memstick_host;
struct memstick_driver;
+struct memstick_device_id {
+ unsigned char match_flags;
#define MEMSTICK_MATCH_ALL 0x01
+ unsigned char type;
#define MEMSTICK_TYPE_LEGACY 0xff
#define MEMSTICK_TYPE_DUO 0x00
#define MEMSTICK_TYPE_PRO 0x01
+ unsigned char category;
#define MEMSTICK_CATEGORY_STORAGE 0xff
#define MEMSTICK_CATEGORY_STORAGE_DUO 0x00
+#define MEMSTICK_CATEGORY_IO 0x01
+#define MEMSTICK_CATEGORY_IO_PRO 0x10
-#define MEMSTICK_CLASS_GENERIC 0xff
-#define MEMSTICK_CLASS_GENERIC_DUO 0x00
-
-
-struct memstick_device_id {
- unsigned char match_flags;
- unsigned char type;
- unsigned char category;
unsigned char class;
+#define MEMSTICK_CLASS_FLASH 0xff
+#define MEMSTICK_CLASS_DUO 0x00
+#define MEMSTICK_CLASS_ROM 0x01
+#define MEMSTICK_CLASS_RO 0x02
+#define MEMSTICK_CLASS_WP 0x03
};
struct memstick_request {
@@ -319,9 +320,9 @@ void memstick_suspend_host(struct memstick_host *host);
void memstick_resume_host(struct memstick_host *host);
void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
- struct scatterlist *sg);
+ const struct scatterlist *sg);
void memstick_init_req(struct memstick_request *mrq, unsigned char tpc,
- void *buf, size_t length);
+ const void *buf, size_t length);
int memstick_next_req(struct memstick_host *host,
struct memstick_request **mrq);
void memstick_new_req(struct memstick_host *host);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 655ea0d1ee1..b2f94446831 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -141,6 +141,10 @@ enum {
MLX4_STAT_RATE_OFFSET = 5
};
+enum {
+ MLX4_MTT_FLAG_PRESENT = 1
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 443bc7cd8c6..428328a05fa 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -751,8 +751,9 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
*
* This function returns the next zone at or below a given zone index that is
* within the allowed nodemask using a cursor as the starting point for the
- * search. The zoneref returned is a cursor that is used as the next starting
- * point for future calls to next_zones_zonelist().
+ * search. The zoneref returned is a cursor that represents the current zone
+ * being examined. It should be advanced by one before calling
+ * next_zones_zonelist again.
*/
struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
@@ -768,9 +769,8 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
- * used to iterate the zonelist with next_zones_zonelist. The cursor should
- * not be used by the caller as it does not match the value of the zone
- * returned.
+ * used to iterate the zonelist with next_zones_zonelist by advancing it by
+ * one before calling.
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
@@ -795,7 +795,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone; \
- z = next_zones_zonelist(z, highidx, nodemask, &zone)) \
+ z = next_zones_zonelist(++z, highidx, nodemask, &zone)) \
/**
* for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
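
Under the new cursor contract the returned zoneref points at the zone just examined, so an open-coded walk has to advance it itself, just as the updated for_each_zone_zonelist_nodemask() does with ++z. A sketch (zonelist, highidx and nodemask assumed in scope):

struct zoneref *z;
struct zone *zone;

z = first_zones_zonelist(zonelist, highidx, nodemask, &zone);
while (zone) {
	/* ... use zone ... */
	z = next_zones_zonelist(++z, highidx, nodemask, &zone);
}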
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c0e14008a3c..98dc6243a70 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -534,7 +534,7 @@ extern void pci_sort_breadthfirst(void);
#ifdef CONFIG_PCI_LEGACY
struct pci_dev __deprecated *pci_find_device(unsigned int vendor,
unsigned int device,
- const struct pci_dev *from);
+ struct pci_dev *from);
struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
unsigned int devfn);
#endif /* CONFIG_PCI_LEGACY */
@@ -550,7 +550,7 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
- const struct pci_dev *from);
+ struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
@@ -816,7 +816,7 @@ _PCI_NOP_ALL(write,)
static inline struct pci_dev *pci_find_device(unsigned int vendor,
unsigned int device,
- const struct pci_dev *from)
+ struct pci_dev *from)
{
return NULL;
}
@@ -838,7 +838,7 @@ static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
unsigned int device,
unsigned int ss_vendor,
unsigned int ss_device,
- const struct pci_dev *from)
+ struct pci_dev *from)
{
return NULL;
}
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 1ce54b63085..be764e514e3 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -21,7 +21,14 @@ struct pnp_dev;
/*
* Resource Management
*/
+#ifdef CONFIG_PNP
struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int);
+#else
+static inline struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned int type, unsigned int num)
+{
+ return NULL;
+}
+#endif
static inline int pnp_resource_valid(struct resource *res)
{
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5afc1b23346..cf793bbbd05 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -104,8 +104,8 @@ struct prop_local_single {
* snapshot of the last seen global state
* and a lock protecting this state
*/
- int shift;
unsigned long period;
+ int shift;
spinlock_t lock; /* protect the snapshot state */
};
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index b160fb18e8d..37aaf2b3986 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data, struct vfsmount *mnt);
#ifndef CONFIG_MMU
+extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad1..d8e699b5585 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -451,8 +451,8 @@ struct signal_struct {
* - everyone except group_exit_task is stopped during signal delivery
* of fatal signals, group_exit_task processes the signal.
*/
- struct task_struct *group_exit_task;
int notify_count;
+ struct task_struct *group_exit_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
@@ -897,7 +897,7 @@ struct sched_class {
void (*yield_task) (struct rq *rq);
int (*select_task_rq)(struct task_struct *p, int sync);
- void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
struct task_struct * (*pick_next_task) (struct rq *rq);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@@ -1010,8 +1010,8 @@ struct sched_entity {
struct sched_rt_entity {
struct list_head run_list;
- unsigned int time_slice;
unsigned long timeout;
+ unsigned int time_slice;
int nr_cpus_allowed;
struct sched_rt_entity *back;
diff --git a/include/linux/smb.h b/include/linux/smb.h
index caa43b2370c..82fefddc598 100644
--- a/include/linux/smb.h
+++ b/include/linux/smb.h
@@ -11,7 +11,9 @@
#include <linux/types.h>
#include <linux/magic.h>
+#ifdef __KERNEL__
#include <linux/time.h>
+#endif
enum smb_protocol {
SMB_PROTOCOL_NONE,
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 5da9794b2d7..b106fd8e0d5 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_STACKTRACE_H
#define __LINUX_STACKTRACE_H
+struct task_struct;
+
#ifdef CONFIG_STACKTRACE
struct stack_trace {
unsigned int nr_entries, max_entries;
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index b3d3e27c629..c3626c0ba9d 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -596,4 +596,5 @@ int p9_idpool_check(int id, struct p9_idpool *p);
int p9_error_init(void);
int p9_errstr2errno(char *, int);
int p9_trans_fd_init(void);
+void p9_trans_fd_exit(void);
#endif /* NET_9P_H */
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 0db3a4038dc..3ca737120a9 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -26,6 +26,8 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
+#include <linux/module.h>
+
/**
* enum p9_trans_status - different states of underlying transports
* @Connected: transport is connected and healthy
@@ -91,9 +93,12 @@ struct p9_trans_module {
int maxsize; /* max message size of transport */
int def; /* this transport should be default */
struct p9_trans * (*create)(const char *, char *, int, unsigned char);
+ struct module *owner;
};
void v9fs_register_trans(struct p9_trans_module *m);
-struct p9_trans_module *v9fs_match_trans(const substring_t *name);
-struct p9_trans_module *v9fs_default_trans(void);
+void v9fs_unregister_trans(struct p9_trans_module *m);
+struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name);
+struct p9_trans_module *v9fs_get_default_trans(void);
+void v9fs_put_trans(struct p9_trans_module *m);
#endif /* NET_9P_TRANSPORT_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index cbf75109468..46a43b721dd 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -325,7 +325,8 @@ int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
+struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type);
+int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_auth(struct hci_conn *conn);
int hci_conn_encrypt(struct hci_conn *conn);
int hci_conn_change_link_key(struct hci_conn *conn);
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 95c660c9719..91324908fcc 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -208,6 +208,9 @@ extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
struct inet_timewait_death_row *twdr);
+extern void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
+ struct inet_timewait_death_row *twdr, int family);
+
static inline
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 18024b8cecb..208fe5a3854 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -702,7 +702,7 @@ static inline int nla_len(const struct nlattr *nla)
*/
static inline int nla_ok(const struct nlattr *nla, int remaining)
{
- return remaining >= sizeof(*nla) &&
+ return remaining >= (int) sizeof(*nla) &&
nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
}
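
A worked illustration of why the cast matters once the remaining length has gone negative (sizeof(*nla) is 4 here):

/* remaining = -4
 * before: (size_t)-4 >= sizeof(*nla)  -> true,  attribute wrongly accepted
 * after:  -4 >= (int)sizeof(*nla)     -> false, iteration stops safely
 */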
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 24811732bdb..029a54a0239 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -227,6 +227,9 @@ struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
const struct sctp_chunk *,
const __u8 *,
const size_t );
+struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *,
+ const struct sctp_chunk *,
+ struct sctp_paramhdr *);
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *,
const struct sctp_transport *,
const void *payload,
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 5c40cc537d4..192f8716aa9 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -309,6 +309,20 @@ struct scsi_lun {
};
/*
+ * The Well Known LUNS (SAM-3) in our int representation of a LUN
+ */
+#define SCSI_W_LUN_BASE 0xc100
+#define SCSI_W_LUN_REPORT_LUNS (SCSI_W_LUN_BASE + 1)
+#define SCSI_W_LUN_ACCESS_CONTROL (SCSI_W_LUN_BASE + 2)
+#define SCSI_W_LUN_TARGET_LOG_PAGE (SCSI_W_LUN_BASE + 3)
+
+static inline int scsi_is_wlun(unsigned int lun)
+{
+ return (lun & 0xff00) == SCSI_W_LUN_BASE;
+}
+
+
+/*
* MESSAGE CODES
*/
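
A sketch of how the new helper is meant to be used during LUN scanning (simplified, not the actual scsi_scan.c logic):

if (scsi_is_wlun(lun) && lun != SCSI_W_LUN_REPORT_LUNS)
	return 0;	/* only the REPORT LUNS well-known LUN is handled */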