-rw-r--r--  Documentation/kernel-parameters.txt | 37
-rw-r--r--  Documentation/networking/xfrm_sysctl.txt | 4
-rw-r--r--  Documentation/vm/slub.txt | 135
-rw-r--r--  arch/i386/kernel/smpboot.c | 5
-rw-r--r--  arch/m68k/Kconfig | 13
-rw-r--r--  arch/m68k/Makefile | 1
-rw-r--r--  arch/m68k/kernel/Makefile | 3
-rw-r--r--  arch/m68k/kernel/module.c | 31
-rw-r--r--  arch/m68k/kernel/module.lds | 7
-rw-r--r--  arch/m68k/kernel/setup.c | 37
-rw-r--r--  arch/m68k/kernel/vmlinux-std.lds | 5
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds | 5
-rw-r--r--  arch/m68k/mm/init.c | 119
-rw-r--r--  arch/m68k/mm/memory.c | 73
-rw-r--r--  arch/m68k/mm/motorola.c | 104
-rw-r--r--  arch/m68k/sun3/config.c | 2
-rw-r--r--  arch/sparc/lib/atomic32.c | 4
-rw-r--r--  arch/sparc64/kernel/entry.S | 72
-rw-r--r--  crypto/cryptd.c | 4
-rw-r--r--  drivers/ata/libata-core.c | 1
-rw-r--r--  drivers/char/drm/drm_drawable.c | 41
-rw-r--r--  drivers/char/drm/drm_pciids.h | 7
-rw-r--r--  drivers/char/drm/i915_irq.c | 2
-rw-r--r--  drivers/net/cassini.c | 2
-rw-r--r--  drivers/pci/quirks.c | 18
-rw-r--r--  drivers/pcmcia/at91_cf.c | 13
-rw-r--r--  drivers/sbus/char/flash.c | 1
-rw-r--r--  drivers/video/neofb.c | 30
-rw-r--r--  fs/afs/internal.h | 2
-rw-r--r--  fs/ntfs/inode.c | 2
-rw-r--r--  include/asm-m68k/mmzone.h | 9
-rw-r--r--  include/asm-m68k/module.h | 34
-rw-r--r--  include/asm-m68k/motorola_pgtable.h | 10
-rw-r--r--  include/asm-m68k/page.h | 77
-rw-r--r--  include/asm-m68k/pgalloc.h | 3
-rw-r--r--  include/asm-m68k/pgtable.h | 17
-rw-r--r--  include/asm-m68k/sun3_pgtable.h | 4
-rw-r--r--  include/asm-m68k/virtconvert.h | 49
-rw-r--r--  include/asm-sparc64/hypervisor.h | 168
-rw-r--r--  include/linux/ipv6.h | 3
-rw-r--r--  include/linux/netdevice.h | 27
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/net/tcp.h | 6
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/slub.c | 7
-rw-r--r--  net/bridge/br_fdb.c | 14
-rw-r--r--  net/bridge/br_stp.c | 3
-rw-r--r--  net/bridge/br_stp_timer.c | 2
-rw-r--r--  net/core/sysctl_net_core.c | 9
-rw-r--r--  net/core/utils.c | 6
-rw-r--r--  net/ipv4/fib_frontend.c | 11
-rw-r--r--  net/ipv4/tcp.c | 5
-rw-r--r--  net/ipv4/tcp_probe.c | 5
-rw-r--r--  net/ipv4/tcp_timer.c | 4
-rw-r--r--  net/ipv4/xfrm4_input.c | 6
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 2
-rw-r--r--  net/ipv6/ah6.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 9
-rw-r--r--  net/ipv6/xfrm6_input.c | 6
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 1
-rw-r--r--  net/mac80211/ieee80211.c | 6
-rw-r--r--  net/mac80211/ieee80211_sta.c | 4
-rw-r--r--  net/packet/af_packet.c | 56
-rw-r--r--  net/xfrm/xfrm_policy.c | 3
-rw-r--r--  net/xfrm/xfrm_state.c | 15
66 files changed, 930 insertions, 438 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index aae2282600c..ce91560229f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1132,9 +1132,9 @@ and is between 256 and 4096 characters. It is defined in the file
when set.
Format: <int>
- noaliencache [MM, NUMA] Disables the allcoation of alien caches in
- the slab allocator. Saves per-node memory, but will
- impact performance on real NUMA hardware.
+ noaliencache [MM, NUMA, SLAB] Disables the allocation of alien
+ caches in the slab allocator. Saves per-node memory,
+ but will impact performance.
noalign [KNL,ARM]
@@ -1613,6 +1613,37 @@ and is between 256 and 4096 characters. It is defined in the file
slram= [HW,MTD]
+ slub_debug [MM, SLUB]
+ Enabling slub_debug allows one to determine the culprit
+ if slab objects become corrupted. It creates guard zones
+ around objects, poisons objects when not in use and also
+ tracks the last alloc / free.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_max_order= [MM, SLUB]
+ Determines the maximum allowed order for slabs. Setting
+ this too high may cause fragmentation.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_min_objects= [MM, SLUB]
+ The minimum number of objects per slab. SLUB will increase the
+ slab order up to slub_max_order to generate a
+ sufficiently big slab to satisfy the number of objects.
+ The higher the number of objects the smaller the overhead
+ of tracking slabs.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_min_order= [MM, SLUB]
+ Determines the minimum page order for slabs. Must be
+ lower than slub_max_order.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_nomerge [MM, SLUB]
+ Disable merging of slabs of similar size. May be
+ necessary if there is some reason to distinguish
+ allocs to different slabs.
+ For more information see Documentation/vm/slub.txt.
+
smart2= [HW]
Format: <io1>[,<io2>[,...,<io8>]]
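Taken together, the SLUB parameters documented above are simply appended to the kernel command line; a purely illustrative combination (the values below are assumptions, not taken from this patch) would be:

	slub_debug slub_min_objects=16 slub_max_order=3

which enables the debug checks for all slab caches and asks SLUB to fit at least 16 objects into each slab while never using pages above order 3.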
diff --git a/Documentation/networking/xfrm_sysctl.txt b/Documentation/networking/xfrm_sysctl.txt
new file mode 100644
index 00000000000..5bbd16792fe
--- /dev/null
+++ b/Documentation/networking/xfrm_sysctl.txt
@@ -0,0 +1,4 @@
+/proc/sys/net/core/xfrm_* Variables:
+
+xfrm_acq_expires - INTEGER
+ default 30 - hard timeout in seconds for acquire requests
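Assuming the /proc path documented above, the timeout can be changed at run time with an ordinary procfs write, e.g. (illustrative value):

	echo 60 > /proc/sys/net/core/xfrm_acq_expires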
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 727c8d81aea..1523320abd8 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -1,13 +1,9 @@
Short users guide for SLUB
--------------------------
-First of all slub should transparently replace SLAB. If you enable
-SLUB then everything should work the same (Note the word "should".
-There is likely not much value in that word at this point).
-
The basic philosophy of SLUB is very different from SLAB. SLAB
requires rebuilding the kernel to activate debug options for all
-SLABS. SLUB always includes full debugging but its off by default.
+slab caches. SLUB always includes full debugging but it is off by default.
SLUB can enable debugging only for selected slabs in order to avoid
an impact on overall system performance which may make a bug more
difficult to find.
@@ -76,13 +72,28 @@ of objects.
Careful with tracing: It may spew out lots of information and never stop if
used on the wrong slab.
-SLAB Merging
+Slab merging
------------
-If no debugging is specified then SLUB may merge similar slabs together
+If no debug options are specified then SLUB may merge similar slabs together
in order to reduce overhead and increase cache hotness of objects.
slabinfo -a displays which slabs were merged together.
+Slab validation
+---------------
+
+SLUB can validate all objects if the kernel was booted with slub_debug. In
+order to do so you must have the slabinfo tool. Then you can do
+
+slabinfo -v
+
+which will test all objects. Output will be generated to the syslog.
+
+This also works in a more limited way if the kernel was booted without slub_debug.
+In that case slabinfo -v simply tests all reachable objects. Usually
+these are in the cpu slabs and the partial slabs. Full slabs are not
+tracked by SLUB in a non debug situation.
+
Getting more performance
------------------------
@@ -91,9 +102,9 @@ list_lock once in a while to deal with partial slabs. That overhead is
governed by the order of the allocation for each slab. The allocations
can be influenced by kernel parameters:
-slub_min_objects=x (default 8)
+slub_min_objects=x (default 4)
slub_min_order=x (default 0)
-slub_max_order=x (default 4)
+slub_max_order=x (default 1)
slub_min_objects allows to specify how many objects must at least fit
into one slab in order for the allocation order to be acceptable.
@@ -109,5 +120,107 @@ longer be checked. This is useful to avoid SLUB trying to generate
super large order pages to fit slub_min_objects of a slab cache with
large object sizes into one high order page.
-
-Christoph Lameter, <clameter@sgi.com>, April 10, 2007
+SLUB Debug output
+-----------------
+
+Here is a sample of slub debug output:
+
+*** SLUB kmalloc-8: Redzone Active@0xc90f6d20 slab 0xc528c530 offset=3360 flags=0x400000c3 inuse=61 freelist=0xc90f6d58
+ Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
+ Redzone 0xc90f6d28: 00 cc cc cc .
+FreePointer 0xc90f6d2c -> 0xc90f6d58
+Last alloc: get_modalias+0x61/0xf5 jiffies_ago=53 cpu=1 pid=554
+Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+ [<c010523d>] dump_trace+0x63/0x1eb
+ [<c01053df>] show_trace_log_lvl+0x1a/0x2f
+ [<c010601d>] show_trace+0x12/0x14
+ [<c0106035>] dump_stack+0x16/0x18
+ [<c017e0fa>] object_err+0x143/0x14b
+ [<c017e2cc>] check_object+0x66/0x234
+ [<c017eb43>] __slab_free+0x239/0x384
+ [<c017f446>] kfree+0xa6/0xc6
+ [<c02e2335>] get_modalias+0xb9/0xf5
+ [<c02e23b7>] dmi_dev_uevent+0x27/0x3c
+ [<c027866a>] dev_uevent+0x1ad/0x1da
+ [<c0205024>] kobject_uevent_env+0x20a/0x45b
+ [<c020527f>] kobject_uevent+0xa/0xf
+ [<c02779f1>] store_uevent+0x4f/0x58
+ [<c027758e>] dev_attr_store+0x29/0x2f
+ [<c01bec4f>] sysfs_write_file+0x16e/0x19c
+ [<c0183ba7>] vfs_write+0xd1/0x15a
+ [<c01841d7>] sys_write+0x3d/0x72
+ [<c0104112>] sysenter_past_esp+0x5f/0x99
+ [<b7f7b410>] 0xb7f7b410
+ =======================
+@@@ SLUB kmalloc-8: Restoring redzone (0xcc) from 0xc90f6d28-0xc90f6d2b
+
+
+
+If SLUB encounters a corrupted object then it will perform the following
+actions:
+
+1. Isolation and report of the issue
+
+This will be a message in the system log starting with
+
+*** SLUB <slab cache affected>: <What went wrong>@<object address>
+offset=<offset of object into slab> flags=<slabflags>
+inuse=<objects in use in this slab> freelist=<first free object in slab>
+
+2. Report on how the problem was dealt with in order to ensure the continued
+operation of the system.
+
+These are messages in the system log beginning with
+
+@@@ SLUB <slab cache affected>: <corrective action taken>
+
+
+In the above sample SLUB found that the Redzone of an active object has
+been overwritten. Here a string of 8 characters was written into a slab
+object that has a length of 8 characters. However, an 8 character string
+needs a terminating 0. That zero has overwritten the first byte of the
+Redzone field. After reporting the details of the issue encountered, the
+@@@ SLUB message tells us that SLUB has restored the redzone to its proper
+value and system operation then continues.
+
+Various types of lines can follow the @@@ SLUB line:
+
+Bytes b4 <address> : <bytes>
+ Shows a few bytes before the object where the problem was detected.
+ Can be useful if the corruption does not stop with the start of the
+ object.
+
+Object <address> : <bytes>
+ The bytes of the object. If the object is inactive then the bytes
+ typically contain poisoning values. Any non-poison value shows a
+ corruption by a write after free.
+
+Redzone <address> : <bytes>
+ The redzone following the object. The redzone is used to detect
+ writes after the object. All bytes should always have the same
+ value. If there is any deviation then it is due to a write after
+ the object boundary.
+
+Freepointer
+ The pointer to the next free object in the slab. May become
+ corrupted if overwriting continues after the red zone.
+
+Last alloc:
+Last free:
+ Shows the address from which the object was allocated/freed last.
+ We note the pid, the time and the CPU that did so. This is usually
+ the most useful information to figure out where things went wrong.
+ Here get_modalias() did a kmalloc(8) instead of a kmalloc(9).
+
+Filler <address> : <bytes>
+ Unused data to fill up the space in order to get the next object
+ properly aligned. In the debug case we make sure that there are
+ at least 4 bytes of filler. This allows for the detection of writes
+ before the object.
+
+Following the filler will be a stackdump. That stackdump describes the
+location where the error was detected. The cause of the corruption is more
+likely to be found by looking at the information about the last alloc / free.
+
+Christoph Lameter, <clameter@sgi.com>, May 23, 2007
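As a worked illustration of how the order parameters described in the slub.txt hunk above interact (the object and page sizes here are assumptions, not part of the patch): with 4 KiB pages and a cache of 256 byte objects, an order-0 slab already holds 16 objects and thus satisfies the default slub_min_objects=4. Raising slub_min_objects to 64 would require a 16 KiB (order-2) slab, which the default slub_max_order=1 forbids, so SLUB would stop at order 1 with 32 objects per slab.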
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 08f07a74a9d..88baed1e7e8 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -943,10 +943,9 @@ exit:
static void smp_tune_scheduling(void)
{
- unsigned long cachesize; /* kB */
-
if (cpu_khz) {
- cachesize = boot_cpu_data.x86_cache_size;
+ /* cache size in kB */
+ long cachesize = boot_cpu_data.x86_cache_size;
if (cachesize > 0)
max_cache_size = cachesize * 1024;
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b8536c7c087..85cdd23b044 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -355,8 +355,9 @@ config RMW_INSNS
adventurous.
config SINGLE_MEMORY_CHUNK
- bool "Use one physical chunk of memory only"
- depends on ADVANCED && !SUN3
+ bool "Use one physical chunk of memory only" if ADVANCED && !SUN3
+ default y if SUN3
+ select NEED_MULTIPLE_NODES
help
Ignore all but the first contiguous chunk of physical memory for VM
purposes. This will save a few bytes kernel size and may speed up
@@ -377,6 +378,14 @@ config 060_WRITETHROUGH
is hardwired on. The 53c710 SCSI driver is known to suffer from
this problem.
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool !SINGLE_MEMORY_CHUNK
+
+config NODES_SHIFT
+ int
+ default "3"
+ depends on !SINGLE_MEMORY_CHUNK
+
source "mm/Kconfig"
endmenu
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index c20831a7e1a..aa383a5ea7a 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -19,6 +19,7 @@ COMPILE_ARCH = $(shell uname -m)
# override top level makefile
AS += -m68020
LDFLAGS := -m m68kelf
+LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
ifneq ($(COMPILE_ARCH),$(ARCH))
# prefix for cross-compiling binaries
CROSS_COMPILE = m68k-linux-gnu-
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 0b68ab8d63d..a806208c7fb 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -9,13 +9,12 @@ else
endif
extra-y += vmlinux.lds
-obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \
+obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
devres-y = ../../../kernel/irq/devres.o
obj-$(CONFIG_PCI) += bios32.o
-obj-$(CONFIG_MODULES) += module.o
obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
EXTRA_AFLAGS := -traditional
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index 3b1a2ff61dd..774862bc697 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -1,3 +1,9 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
@@ -11,6 +17,8 @@
#define DEBUGP(fmt...)
#endif
+#ifdef CONFIG_MODULES
+
void *module_alloc(unsigned long size)
{
if (size == 0)
@@ -118,11 +126,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
- struct module *me)
+ struct module *mod)
{
+ module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
+
return 0;
}
void module_arch_cleanup(struct module *mod)
{
}
+
+#endif /* CONFIG_MODULES */
+
+void module_fixup(struct module *mod, struct m68k_fixup_info *start,
+ struct m68k_fixup_info *end)
+{
+ struct m68k_fixup_info *fixup;
+
+ for (fixup = start; fixup < end; fixup++) {
+ switch (fixup->type) {
+ case m68k_fixup_memoffset:
+ *(u32 *)fixup->addr = m68k_memoffset;
+ break;
+ case m68k_fixup_vnode_shift:
+ *(u16 *)fixup->addr += m68k_virt_to_node_shift;
+ break;
+ }
+ }
+}
diff --git a/arch/m68k/kernel/module.lds b/arch/m68k/kernel/module.lds
new file mode 100644
index 00000000000..fda94fa3824
--- /dev/null
+++ b/arch/m68k/kernel/module.lds
@@ -0,0 +1,7 @@
+SECTIONS {
+ .m68k_fixup : {
+ __start_fixup = .;
+ *(.m68k_fixup)
+ __stop_fixup = .;
+ }
+}
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 61031935669..215c7bd4392 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -60,14 +60,12 @@ extern unsigned long availmem;
int m68k_num_memory;
int m68k_realnum_memory;
EXPORT_SYMBOL(m68k_realnum_memory);
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long m68k_memoffset;
EXPORT_SYMBOL(m68k_memoffset);
-#endif
struct mem_info m68k_memory[NUM_MEMINFO];
EXPORT_SYMBOL(m68k_memory);
-static struct mem_info m68k_ramdisk;
+struct mem_info m68k_ramdisk;
static char m68k_command_line[CL_SIZE];
@@ -208,9 +206,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
extern int _etext, _edata, _end;
-#ifndef CONFIG_SUN3
- unsigned long endmem, startmem;
-#endif
int i;
/* The bootinfo is located right after the kernel bss */
@@ -320,30 +315,16 @@ void __init setup_arch(char **cmdline_p)
panic("No configuration setup");
}
-#ifndef CONFIG_SUN3
- startmem= m68k_memory[0].addr;
- endmem = startmem + m68k_memory[0].size;
- high_memory = (void *)PAGE_OFFSET;
- for (i = 0; i < m68k_num_memory; i++) {
- m68k_memory[i].size &= MASK_256K;
- if (m68k_memory[i].addr < startmem)
- startmem = m68k_memory[i].addr;
- if (m68k_memory[i].addr+m68k_memory[i].size > endmem)
- endmem = m68k_memory[i].addr+m68k_memory[i].size;
- high_memory += m68k_memory[i].size;
- }
-
- availmem += init_bootmem_node(NODE_DATA(0), availmem >> PAGE_SHIFT,
- startmem >> PAGE_SHIFT, endmem >> PAGE_SHIFT);
-
- for (i = 0; i < m68k_num_memory; i++)
- free_bootmem(m68k_memory[i].addr, m68k_memory[i].size);
-
- reserve_bootmem(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
+ paging_init();
+#ifndef CONFIG_SUN3
+ for (i = 1; i < m68k_num_memory; i++)
+ free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
+ m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
- reserve_bootmem(m68k_ramdisk.addr, m68k_ramdisk.size);
+ reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
+ m68k_ramdisk.addr, m68k_ramdisk.size);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
@@ -362,8 +343,6 @@ void __init setup_arch(char **cmdline_p)
#endif /* !CONFIG_SUN3 */
- paging_init();
-
/* set ISA defs early as possible */
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
#if defined(CONFIG_Q40)
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 78f139226a1..40f02b128f2 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -60,6 +60,11 @@ SECTIONS
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
+ .m68k_fixup : {
+ __start_fixup = .;
+ *(.m68k_fixup)
+ __stop_fixup = .;
+ }
SECURITY_INIT
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(8192);
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index c8999b2db23..f06425b6d20 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -54,6 +54,11 @@ __init_begin = .;
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
+ .m68k_fixup : {
+ __start_fixup = .;
+ *(.m68k_fixup)
+ __stop_fixup = .;
+ }
SECURITY_INIT
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(8192);
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index ab90213e5c5..f1de19e1dde 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -7,6 +7,7 @@
* to motorola.c and sun3mmu.c
*/
+#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -31,6 +32,37 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static bootmem_data_t __initdata bootmem_data[MAX_NUMNODES];
+
+pg_data_t pg_data_map[MAX_NUMNODES];
+EXPORT_SYMBOL(pg_data_map);
+
+int m68k_virt_to_node_shift;
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+pg_data_t *pg_data_table[65];
+EXPORT_SYMBOL(pg_data_table);
+#endif
+
+void m68k_setup_node(int node)
+{
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+ struct mem_info *info = m68k_memory + node;
+ int i, end;
+
+ i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
+ end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
+ for (; i <= end; i++) {
+ if (pg_data_table[i])
+ printk("overlap at %u for chunk %u\n", i, node);
+ pg_data_table[i] = pg_data_map + node;
+ }
+#endif
+ pg_data_map[node].bdata = bootmem_data + node;
+ node_set_online(node);
+}
+
+
/*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
@@ -40,52 +72,51 @@ void *empty_zero_page;
void show_mem(void)
{
- unsigned long i;
- int free = 0, total = 0, reserved = 0, shared = 0;
- int cached = 0;
-
- printk("\nMem-info:\n");
- show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (!page_count(mem_map+i))
- free++;
- else
- shared += page_count(mem_map+i) - 1;
- }
- printk("%d pages of RAM\n",total);
- printk("%d free pages\n",free);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
+ pg_data_t *pgdat;
+ int free = 0, total = 0, reserved = 0, shared = 0;
+ int cached = 0;
+ int i;
+
+ printk("\nMem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ for_each_online_pgdat(pgdat) {
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat->node_mem_map + i;
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (!page_count(page))
+ free++;
+ else
+ shared += page_count(page) - 1;
+ }
+ }
+ printk("%d pages of RAM\n",total);
+ printk("%d free pages\n",free);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
}
extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
+extern char _text[], _etext[];
+extern char __init_begin[], __init_end[];
extern pmd_t *zero_pgtable;
void __init mem_init(void)
{
+ pg_data_t *pgdat;
int codepages = 0;
int datapages = 0;
int initpages = 0;
- unsigned long tmp;
-#ifndef CONFIG_SUN3
int i;
-#endif
-
- max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
@@ -93,19 +124,25 @@ void __init mem_init(void)
#endif
/* this will put all memory onto the freelists */
- totalram_pages = free_all_bootmem();
-
- for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
- if (PageReserved(virt_to_page(tmp))) {
- if (tmp >= (unsigned long)&_text
- && tmp < (unsigned long)&_etext)
+ totalram_pages = num_physpages = 0;
+ for_each_online_pgdat(pgdat) {
+ num_physpages += pgdat->node_present_pages;
+
+ totalram_pages += free_all_bootmem_node(pgdat);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat->node_mem_map + i;
+ char *addr = page_to_virt(page);
+
+ if (!PageReserved(page))
+ continue;
+ if (addr >= _text &&
+ addr < _etext)
codepages++;
- else if (tmp >= (unsigned long) &__init_begin
- && tmp < (unsigned long) &__init_end)
+ else if (addr >= __init_begin &&
+ addr < __init_end)
initpages++;
else
datapages++;
- continue;
}
}
@@ -124,7 +161,7 @@ void __init mem_init(void)
printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
+ totalram_pages << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10));
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 13c0b4ad01e..b7473525b43 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -127,67 +127,6 @@ int free_pointer_table (pmd_t *ptable)
return 0;
}
-#ifdef DEBUG_INVALID_PTOV
-int mm_inv_cnt = 5;
-#endif
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-/*
- * The following two routines map from a physical address to a kernel
- * virtual address and vice versa.
- */
-unsigned long mm_vtop(unsigned long vaddr)
-{
- int i=0;
- unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
-
- do {
- if (voff < m68k_memory[i].size) {
-#ifdef DEBUGPV
- printk ("VTOP(%p)=%lx\n", vaddr,
- m68k_memory[i].addr + voff);
-#endif
- return m68k_memory[i].addr + voff;
- }
- voff -= m68k_memory[i].size;
- } while (++i < m68k_num_memory);
-
- /* As a special case allow `__pa(high_memory)'. */
- if (voff == 0)
- return m68k_memory[i-1].addr + m68k_memory[i-1].size;
-
- return -1;
-}
-EXPORT_SYMBOL(mm_vtop);
-
-unsigned long mm_ptov (unsigned long paddr)
-{
- int i = 0;
- unsigned long poff, voff = PAGE_OFFSET;
-
- do {
- poff = paddr - m68k_memory[i].addr;
- if (poff < m68k_memory[i].size) {
-#ifdef DEBUGPV
- printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
-#endif
- return poff + voff;
- }
- voff += m68k_memory[i].size;
- } while (++i < m68k_num_memory);
-
-#ifdef DEBUG_INVALID_PTOV
- if (mm_inv_cnt > 0) {
- mm_inv_cnt--;
- printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
- paddr, __builtin_return_address(0));
- }
-#endif
- return -1;
-}
-EXPORT_SYMBOL(mm_ptov);
-#endif
-
/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
@@ -354,15 +293,3 @@ void cache_push (unsigned long paddr, int len)
}
EXPORT_SYMBOL(cache_push);
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-int mm_end_of_chunk (unsigned long addr, int len)
-{
- int i;
-
- for (i = 0; i < m68k_num_memory; i++)
- if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
- return 1;
- return 0;
-}
-EXPORT_SYMBOL(mm_end_of_chunk);
-#endif
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index afcccdc6ad4..7d571a2b44d 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -43,6 +43,11 @@ unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
+/* size of memory already mapped in head.S */
+#define INIT_MAPPED_SIZE (4UL<<20)
+
+extern unsigned long availmem;
+
static pte_t * __init kernel_page_table(void)
{
pte_t *ptablep;
@@ -98,19 +103,20 @@ static pmd_t * __init kernel_ptr_table(void)
return last_pgtable;
}
-static unsigned long __init
-map_chunk (unsigned long addr, long size)
+static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
- static unsigned long virtaddr = PAGE_OFFSET;
- unsigned long physaddr;
+ unsigned long physaddr, virtaddr, size;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
- physaddr = (addr | m68k_supervisor_cachemode |
- _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ size = m68k_memory[node].size;
+ physaddr = m68k_memory[node].addr;
+ virtaddr = (unsigned long)phys_to_virt(physaddr);
+ physaddr |= m68k_supervisor_cachemode |
+ _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
if (CPU_IS_040_OR_060)
physaddr |= _PAGE_GLOBAL040;
@@ -190,8 +196,6 @@ map_chunk (unsigned long addr, long size)
#ifdef DEBUG
printk("\n");
#endif
-
- return virtaddr;
}
/*
@@ -200,15 +204,16 @@ map_chunk (unsigned long addr, long size)
*/
void __init paging_init(void)
{
- int chunk;
- unsigned long mem_avail = 0;
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+ unsigned long min_addr, max_addr;
+ unsigned long addr, size, end;
+ int i;
#ifdef DEBUG
{
extern unsigned long availmem;
- printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
- kernel_pg_dir, availmem, start_mem, end_mem);
+ printk ("start of paging_init (%p, %lx)\n",
+ kernel_pg_dir, availmem);
}
#endif
@@ -222,24 +227,62 @@ void __init paging_init(void)
pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
}
+ min_addr = m68k_memory[0].addr;
+ max_addr = min_addr + m68k_memory[0].size;
+ for (i = 1; i < m68k_num_memory;) {
+ if (m68k_memory[i].addr < min_addr) {
+ printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
+ m68k_memory[i].addr, m68k_memory[i].size);
+ printk("Fix your bootloader or use a memfile to make use of this area!\n");
+ m68k_num_memory--;
+ memmove(m68k_memory + i, m68k_memory + i + 1,
+ (m68k_num_memory - i) * sizeof(struct mem_info));
+ continue;
+ }
+ addr = m68k_memory[i].addr + m68k_memory[i].size;
+ if (addr > max_addr)
+ max_addr = addr;
+ i++;
+ }
+ m68k_memoffset = min_addr - PAGE_OFFSET;
+ m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
+
+ module_fixup(NULL, __start_fixup, __stop_fixup);
+ flush_icache();
+
+ high_memory = phys_to_virt(max_addr);
+
+ min_low_pfn = availmem >> PAGE_SHIFT;
+ max_low_pfn = max_addr >> PAGE_SHIFT;
+
+ for (i = 0; i < m68k_num_memory; i++) {
+ addr = m68k_memory[i].addr;
+ end = addr + m68k_memory[i].size;
+ m68k_setup_node(i);
+ availmem = PAGE_ALIGN(availmem);
+ availmem += init_bootmem_node(NODE_DATA(i),
+ availmem >> PAGE_SHIFT,
+ addr >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
+ }
+
/*
* Map the physical memory available into the kernel virtual
- * address space. It may allocate some memory for page
- * tables and thus modify availmem.
+ * address space. First initialize the bootmem allocator with
+ * the memory we already mapped, so map_node() has something
+ * to allocate.
*/
+ addr = m68k_memory[0].addr;
+ size = m68k_memory[0].size;
+ free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
+ map_node(0);
+ if (size > INIT_MAPPED_SIZE)
+ free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);
- for (chunk = 0; chunk < m68k_num_memory; chunk++) {
- mem_avail = map_chunk (m68k_memory[chunk].addr,
- m68k_memory[chunk].size);
-
- }
+ for (i = 1; i < m68k_num_memory; i++)
+ map_node(i);
flush_tlb_all();
-#ifdef DEBUG
- printk ("memory available is %ldKB\n", mem_avail >> 10);
- printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
-#endif
/*
* initialize the bad page table and bad page to point
@@ -256,14 +299,11 @@ void __init paging_init(void)
#ifdef DEBUG
printk ("before free_area_init\n");
#endif
- zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ?
- (mach_max_dma_address+1) : (unsigned long)high_memory);
- zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[0];
-
- zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] >>= PAGE_SHIFT;
-
- free_area_init(zones_size);
+ for (i = 0; i < m68k_num_memory; i++) {
+ zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
+ free_area_init_node(i, pg_data_map + i, zones_size,
+ m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+ }
}
extern char __init_begin, __init_end;
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 4851b8437a8..c0fbd278fbb 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -21,6 +21,7 @@
#include <asm/contregs.h>
#include <asm/movs.h>
#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/sun3-head.h>
#include <asm/sun3mmu.h>
#include <asm/rtc.h>
@@ -127,6 +128,7 @@ void __init sun3_bootmem_alloc(unsigned long memory_start, unsigned long memory_
high_memory = (void *)memory_end;
availmem = memory_start;
+ m68k_setup_node(0);
availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages);
availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 617d29832e1..cbddeb38ffd 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -124,10 +124,10 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
unsigned long flags;
u32 prev;
- spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+ spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
if ((prev = *ptr) == old)
*ptr = new;
- spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
return (unsigned long)prev;
}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 8f10dda0f5c..ed712e0b337 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -2498,3 +2498,75 @@ sun4v_vintr_set_target:
retl
nop
.size sun4v_vintr_set_target, .-sun4v_vintr_set_target
+
+ /* %o0: NCS sub-function
+ * %o1: sub-function arg real-address
+ * %o2: sub-function arg size
+ *
+ * returns %o0: status
+ */
+ .globl sun4v_ncs_request
+ .type sun4v_ncs_request,#function
+sun4v_ncs_request:
+ mov HV_FAST_NCS_REQUEST, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_ncs_request, .-sun4v_ncs_request
+
+ .globl sun4v_svc_send
+ .type sun4v_svc_send,#function
+sun4v_svc_send:
+ save %sp, -192, %sp
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ mov HV_FAST_SVC_SEND, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%i3]
+ ret
+ restore
+ .size sun4v_svc_send, .-sun4v_svc_send
+
+ .globl sun4v_svc_recv
+ .type sun4v_svc_recv,#function
+sun4v_svc_recv:
+ save %sp, -192, %sp
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ mov HV_FAST_SVC_RECV, %o5
+ ta HV_FAST_TRAP
+ stx %o1, [%i3]
+ ret
+ restore
+ .size sun4v_svc_recv, .-sun4v_svc_recv
+
+ .globl sun4v_svc_getstatus
+ .type sun4v_svc_getstatus,#function
+sun4v_svc_getstatus:
+ mov HV_FAST_SVC_GETSTATUS, %o5
+ mov %o1, %o4
+ ta HV_FAST_TRAP
+ stx %o1, [%o4]
+ retl
+ nop
+ .size sun4v_svc_getstatus, .-sun4v_svc_getstatus
+
+ .globl sun4v_svc_setstatus
+ .type sun4v_svc_setstatus,#function
+sun4v_svc_setstatus:
+ mov HV_FAST_SVC_SETSTATUS, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_svc_setstatus, .-sun4v_svc_setstatus
+
+ .globl sun4v_svc_clrstatus
+ .type sun4v_svc_clrstatus,#function
+sun4v_svc_clrstatus:
+ mov HV_FAST_SVC_CLRSTATUS, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ .size sun4v_svc_clrstatus, .-sun4v_svc_clrstatus
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 3ff4e1f0f03..ac6dce2e759 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -298,7 +298,7 @@ static inline int cryptd_create_thread(struct cryptd_state *state,
mutex_init(&state->mutex);
crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);
- state->task = kthread_create(fn, state, name);
+ state->task = kthread_run(fn, state, name);
if (IS_ERR(state->task))
return PTR_ERR(state->task);
@@ -316,6 +316,8 @@ static int cryptd_thread(void *data)
struct cryptd_state *state = data;
int stop;
+ current->flags |= PF_NOFREEZE;
+
do {
struct crypto_async_request *req, *backlog;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3ca9c610c11..af625147df6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3783,6 +3783,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* NCQ is broken */
{ "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
+ { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
/* NCQ hard hangs device under heavier load, needs hard power cycle */
{ "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
/* Blacklist entries taken from Silicon Image 3124/3132
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index de37d5f7456..b33313be254 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -172,38 +172,49 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
bitfield_length = idx + 1;
- if (idx != id / (8 * sizeof(*bitfield)))
- bitfield = drm_alloc(bitfield_length *
- sizeof(*bitfield), DRM_MEM_BUFS);
+ bitfield = NULL;
- if (!bitfield && bitfield_length) {
- bitfield = dev->drw_bitfield;
- bitfield_length = dev->drw_bitfield_length;
+ if (bitfield_length) {
+ if (bitfield_length != dev->drw_bitfield_length)
+ bitfield = drm_alloc(bitfield_length *
+ sizeof(*bitfield),
+ DRM_MEM_BUFS);
+
+ if (!bitfield) {
+ bitfield = dev->drw_bitfield;
+ bitfield_length = dev->drw_bitfield_length;
+ }
}
}
if (bitfield != dev->drw_bitfield) {
info_length = 8 * sizeof(*bitfield) * bitfield_length;
- info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS);
+ if (info_length) {
+ info = drm_alloc(info_length * sizeof(*info),
+ DRM_MEM_BUFS);
- if (!info && info_length) {
- info = dev->drw_info;
- info_length = dev->drw_info_length;
- }
+ if (!info) {
+ info = dev->drw_info;
+ info_length = dev->drw_info_length;
+ }
+ } else
+ info = NULL;
spin_lock_irqsave(&dev->drw_lock, irqflags);
- memcpy(bitfield, dev->drw_bitfield, bitfield_length *
- sizeof(*bitfield));
+ if (bitfield)
+ memcpy(bitfield, dev->drw_bitfield, bitfield_length *
+ sizeof(*bitfield));
drm_free(dev->drw_bitfield, sizeof(*bitfield) *
dev->drw_bitfield_length, DRM_MEM_BUFS);
dev->drw_bitfield = bitfield;
dev->drw_bitfield_length = bitfield_length;
if (info != dev->drw_info) {
- memcpy(info, dev->drw_info, info_length *
- sizeof(*info));
+ if (info)
+ memcpy(info, dev->drw_info, info_length *
+ sizeof(*info));
drm_free(dev->drw_info, sizeof(*info) *
dev->drw_info_length, DRM_MEM_BUFS);
dev->drw_info = info;
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 31cdde83713..177ccc07f96 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -102,13 +102,20 @@
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 78c1ae28f17..b92062a239f 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -582,7 +582,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9fe3a38883e..59b9943b077 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4920,7 +4920,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
if (pci_set_mwi(pdev))
- printk(KERN_WARNING PFX "Could enable MWI for %s\n",
+ printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
pci_name(pdev));
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6ccc2e95930..1cff65fb9c4 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1625,18 +1625,20 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
#ifdef CONFIG_PCI_MSI
-/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely
- * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other busses controlled by the chipset even if Linux is not aware of it.
- * Instead of setting the flag on all busses in the machine, simply disable MSI
- * globally.
+/* Some chipsets do not support MSI. We cannot easily rely on setting
+ * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
+ * some other busses controlled by the chipset even if Linux is not
+ * aware of it. Instead of setting the flag on all busses in the
+ * machine, simply disable MSI globally.
*/
-static void __init quirk_svw_msi(struct pci_dev *dev)
+static void __init quirk_disable_all_msi(struct pci_dev *dev)
{
pci_no_msi();
printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void __devinit quirk_disable_msi(struct pci_dev *dev)
@@ -1649,8 +1651,6 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 948efc775a7..eb6abd3f922 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -336,16 +336,21 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
enable_irq_wake(board->det_pin);
if (board->irq_pin)
enable_irq_wake(board->irq_pin);
- } else {
- disable_irq_wake(board->det_pin);
- if (board->irq_pin)
- disable_irq_wake(board->irq_pin);
}
return 0;
}
static int at91_cf_resume(struct platform_device *pdev)
{
+ struct at91_cf_socket *cf = platform_get_drvdata(pdev);
+ struct at91_cf_data *board = cf->board;
+
+ if (device_may_wakeup(&pdev->dev)) {
+ disable_irq_wake(board->det_pin);
+ if (board->irq_pin)
+ disable_irq_wake(board->irq_pin);
+ }
+
pcmcia_socket_dev_resume(&pdev->dev);
return 0;
}
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 262f01e6859..44e039865aa 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
+#include <linux/mm.h>
#include <asm/system.h>
#include <asm/uaccess.h>
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index bd30aba242d..731d7a5c5aa 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -1286,34 +1286,36 @@ static int neofb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
if (regno >= fb->cmap.len || regno > 255)
return -EINVAL;
- switch (fb->var.bits_per_pixel) {
- case 8:
+ if (fb->var.bits_per_pixel <= 8) {
outb(regno, 0x3c8);
outb(red >> 10, 0x3c9);
outb(green >> 10, 0x3c9);
outb(blue >> 10, 0x3c9);
- break;
- case 16:
- ((u32 *) fb->pseudo_palette)[regno] =
+ } else if (regno < 16) {
+ switch (fb->var.bits_per_pixel) {
+ case 16:
+ ((u32 *) fb->pseudo_palette)[regno] =
((red & 0xf800)) | ((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
- break;
- case 24:
- ((u32 *) fb->pseudo_palette)[regno] =
+ break;
+ case 24:
+ ((u32 *) fb->pseudo_palette)[regno] =
((red & 0xff00) << 8) | ((green & 0xff00)) |
((blue & 0xff00) >> 8);
- break;
+ break;
#ifdef NO_32BIT_SUPPORT_YET
- case 32:
- ((u32 *) fb->pseudo_palette)[regno] =
+ case 32:
+ ((u32 *) fb->pseudo_palette)[regno] =
((transp & 0xff00) << 16) | ((red & 0xff00) << 8) |
((green & 0xff00)) | ((blue & 0xff00) >> 8);
- break;
+ break;
#endif
- default:
- return 1;
+ default:
+ return 1;
+ }
}
+
return 0;
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 2dac3ad2c44..2c55dd94a1d 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -17,6 +17,8 @@
#include <linux/rxrpc.h>
#include <linux/key.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
+
#include "afs.h"
#include "afs_vl.h"
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 074791ce4ab..b532a730cec 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -140,7 +140,7 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
if (!ni->name)
return -ENOMEM;
memcpy(ni->name, na->name, i);
- ni->name[i] = 0;
+ ni->name[na->name_len] = 0;
}
return 0;
}
diff --git a/include/asm-m68k/mmzone.h b/include/asm-m68k/mmzone.h
new file mode 100644
index 00000000000..e1f1ec7b700
--- /dev/null
+++ b/include/asm-m68k/mmzone.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_M68K_MMZONE_H_
+#define _ASM_M68K_MMZONE_H_
+
+extern pg_data_t pg_data_map[];
+
+#define NODE_DATA(nid) (&pg_data_map[nid])
+#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
+
+#endif /* _ASM_M68K_MMZONE_H_ */
diff --git a/include/asm-m68k/module.h b/include/asm-m68k/module.h
index c6d75af2d8d..382d20a6fc1 100644
--- a/include/asm-m68k/module.h
+++ b/include/asm-m68k/module.h
@@ -1,7 +1,39 @@
#ifndef _ASM_M68K_MODULE_H
#define _ASM_M68K_MODULE_H
-struct mod_arch_specific { };
+
+struct mod_arch_specific {
+ struct m68k_fixup_info *fixup_start, *fixup_end;
+};
+
+#define MODULE_ARCH_INIT { \
+ .fixup_start = __start_fixup, \
+ .fixup_end = __stop_fixup, \
+}
+
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
+
+
+enum m68k_fixup_type {
+ m68k_fixup_memoffset,
+ m68k_fixup_vnode_shift,
+};
+
+struct m68k_fixup_info {
+ enum m68k_fixup_type type;
+ void *addr;
+};
+
+#define m68k_fixup(type, addr) \
+ " .section \".m68k_fixup\",\"aw\"\n" \
+ " .long " #type "," #addr "\n" \
+ " .previous\n"
+
+extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
+
+struct module;
+extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
+ struct m68k_fixup_info *end);
+
#endif /* _ASM_M68K_MODULE_H */
diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h
index 61e4406ed96..b5b78c01eb6 100644
--- a/include/asm-m68k/motorola_pgtable.h
+++ b/include/asm-m68k/motorola_pgtable.h
@@ -130,7 +130,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; })
-#define pte_page(pte) (mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pte_page(pte) virt_to_page(__va(pte_val(pte)))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
@@ -143,7 +143,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
while (--__i >= 0) \
*__ptr++ = 0; \
})
-#define pmd_page(pmd) (mem_map + ((unsigned long)(__va(pmd_val(pmd)) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define pgd_none(pgd) (!pgd_val(pgd))
@@ -223,10 +223,10 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
-#define pte_offset_map(pmdp,address) ((pte_t *)kmap(pmd_page(*pmdp)) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
-#define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
+#define pte_unmap(pte) ((void)0)
+#define pte_unmap_nested(pte) ((void)0)
/*
* Allocate and free page tables. The xxx_kernel() versions are
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h
index fcc165ddd09..9e6d0d6debd 100644
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -27,6 +27,8 @@
#ifndef __ASSEMBLY__
+#include <asm/module.h>
+
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
@@ -114,18 +116,33 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#ifndef __ASSEMBLY__
+extern unsigned long m68k_memoffset;
+
#ifndef CONFIG_SUN3
#define WANT_PAGE_VIRTUAL
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
-extern unsigned long m68k_memoffset;
-#define __pa(vaddr) ((unsigned long)(vaddr)+m68k_memoffset)
-#define __va(paddr) ((void *)((unsigned long)(paddr)-m68k_memoffset))
-#else
-#define __pa(vaddr) virt_to_phys((void *)(vaddr))
-#define __va(paddr) phys_to_virt((unsigned long)(paddr))
-#endif
+static inline unsigned long ___pa(void *vaddr)
+{
+ unsigned long paddr;
+ asm (
+ "1: addl #0,%0\n"
+ m68k_fixup(%c2, 1b+2)
+ : "=r" (paddr)
+ : "0" (vaddr), "i" (m68k_fixup_memoffset));
+ return paddr;
+}
+#define __pa(vaddr) ___pa((void *)(vaddr))
+static inline void *__va(unsigned long paddr)
+{
+ void *vaddr;
+ asm (
+ "1: subl #0,%0\n"
+ m68k_fixup(%c2, 1b+2)
+ : "=r" (vaddr)
+ : "0" (paddr), "i" (m68k_fixup_memoffset));
+ return vaddr;
+}
#else /* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
@@ -161,11 +178,47 @@ static inline void *__va(unsigned long x)
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+extern int m68k_virt_to_node_shift;
+
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+#define __virt_to_node(addr) (&pg_data_map[0])
+#else
+extern struct pglist_data *pg_data_table[];
+
+static inline __attribute_const__ int __virt_to_node_shift(void)
+{
+ int shift;
+
+ asm (
+ "1: moveq #0,%0\n"
+ m68k_fixup(%c1, 1b)
+ : "=d" (shift)
+ : "i" (m68k_fixup_vnode_shift));
+ return shift;
+}
+
+#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
+#endif
-#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn))
-#define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
+#define virt_to_page(addr) ({ \
+ pfn_to_page(virt_to_pfn(addr)); \
+})
+#define page_to_virt(page) ({ \
+ pfn_to_virt(page_to_pfn(page)); \
+})
+
+#define pfn_to_page(pfn) ({ \
+ unsigned long __pfn = (pfn); \
+ struct pglist_data *pgdat; \
+ pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
+ pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
+})
+#define page_to_pfn(_page) ({ \
+ struct page *__p = (_page); \
+ struct pglist_data *pgdat; \
+ pgdat = &pg_data_map[page_to_nid(__p)]; \
+ ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
+})
#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
diff --git a/include/asm-m68k/pgalloc.h b/include/asm-m68k/pgalloc.h
index a9cfb4b99d8..4cb1a57ab76 100644
--- a/include/asm-m68k/pgalloc.h
+++ b/include/asm-m68k/pgalloc.h
@@ -8,11 +8,12 @@
#include <asm/virtconvert.h>
-
#ifdef CONFIG_SUN3
#include <asm/sun3_pgalloc.h>
#else
#include <asm/motorola_pgalloc.h>
#endif
+extern void m68k_setup_node(int node);
+
#endif /* M68K_PGALLOC_H */
diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h
index 555b87a1f7e..778a4c538eb 100644
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
@@ -107,22 +107,7 @@ extern void *empty_zero_page;
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2
-/*
- * Check if the addr/len goes up to the end of a physical
- * memory chunk. Used for DMA functions.
- */
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
-/*
- * It makes no sense to consider whether we cross a memory boundary if
- * we support just one physical chunk of memory.
- */
-static inline int mm_end_of_chunk(unsigned long addr, int len)
-{
- return 0;
-}
-#else
-int mm_end_of_chunk (unsigned long addr, int len);
-#endif
+#define mm_end_of_chunk(addr, len) 0
extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index 5156a28a18d..b9e62c1e7ae 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -132,8 +132,8 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
-#define pte_page(pte) (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT))
-#define pmd_page(pmd) (mem_map+((__pmd_page(pmd) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pte_page(pte) virt_to_page(__pte_page(pte))
+#define pmd_page(pmd) virt_to_page(__pmd_page(pmd))
static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
diff --git a/include/asm-m68k/virtconvert.h b/include/asm-m68k/virtconvert.h
index 83a87c9b1a1..dea32fbc7e5 100644
--- a/include/asm-m68k/virtconvert.h
+++ b/include/asm-m68k/virtconvert.h
@@ -8,56 +8,35 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
+#include <linux/mmzone.h>
#include <asm/setup.h>
#include <asm/page.h>
-#ifdef CONFIG_AMIGA
-#include <asm/amigahw.h>
-#endif
-
/*
* Change virtual addresses to physical addresses and vv.
*/
-#ifndef CONFIG_SUN3
-extern unsigned long mm_vtop(unsigned long addr) __attribute_const__;
-extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
-#else
-static inline unsigned long mm_vtop(unsigned long vaddr)
-{
- return __pa(vaddr);
-}
-
-static inline unsigned long mm_ptov(unsigned long paddr)
-{
- return (unsigned long)__va(paddr);
-}
-#endif
-
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
-static inline unsigned long virt_to_phys(void *vaddr)
-{
- return (unsigned long)vaddr - PAGE_OFFSET + m68k_memory[0].addr;
-}
-
-static inline void * phys_to_virt(unsigned long paddr)
-{
- return (void *)(paddr - m68k_memory[0].addr + PAGE_OFFSET);
-}
-#else
static inline unsigned long virt_to_phys(void *address)
{
- return mm_vtop((unsigned long)address);
+ return __pa(address);
}
static inline void *phys_to_virt(unsigned long address)
{
- return (void *) mm_ptov(address);
+ return __va(address);
}
-#endif
/* Permanent address of a page. */
-#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+#define page_to_phys(page) \
+ __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
+#else
+#define page_to_phys(_page) ({ \
+ struct page *__page = _page; \
+ struct pglist_data *pgdat; \
+ pgdat = pg_data_table[page_to_nid(__page)]; \
+ page_to_pfn(__page) << PAGE_SHIFT; \
+})
+#endif
/*
* IO bus memory addresses are 1:1 with the physical address,
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
index 5cdb1ff0483..4a43075a061 100644
--- a/include/asm-sparc64/hypervisor.h
+++ b/include/asm-sparc64/hypervisor.h
@@ -1097,6 +1097,80 @@ extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
*/
#define HV_FAST_MACH_GET_SOFT_STATE 0x71
+/* svc_send()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_SVC_SEND
+ * ARG0: service ID
+ * ARG1: buffer real address
+ * ARG2: buffer size
+ * RET0: STATUS
+ * RET1: sent_bytes
+ *
+ * Be careful, all output registers are clobbered by this operation,
+ * so for example it is not possible to save away a value in %o4
+ * across the trap.
+ */
+#define HV_FAST_SVC_SEND 0x80
+
+/* svc_recv()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_SVC_RECV
+ * ARG0: service ID
+ * ARG1: buffer real address
+ * ARG2: buffer size
+ * RET0: STATUS
+ * RET1: recv_bytes
+ *
+ * Be careful, all output registers are clobbered by this operation,
+ * so for example it is not possible to save away a value in %o4
+ * across the trap.
+ */
+#define HV_FAST_SVC_RECV 0x81
+
+/* svc_getstatus()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_SVC_GETSTATUS
+ * ARG0: service ID
+ * RET0: STATUS
+ * RET1: status bits
+ */
+#define HV_FAST_SVC_GETSTATUS 0x82
+
+/* svc_setstatus()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_SVC_SETSTATUS
+ * ARG0: service ID
+ * ARG1: bits to set
+ * RET0: STATUS
+ */
+#define HV_FAST_SVC_SETSTATUS 0x83
+
+/* svc_clrstatus()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_SVC_CLRSTATUS
+ * ARG0: service ID
+ * ARG1: bits to clear
+ * RET0: STATUS
+ */
+#define HV_FAST_SVC_CLRSTATUS 0x84
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_svc_send(unsigned long svc_id,
+ unsigned long buffer,
+ unsigned long buffer_size,
+ unsigned long *sent_bytes);
+extern unsigned long sun4v_svc_recv(unsigned long svc_id,
+ unsigned long buffer,
+ unsigned long buffer_size,
+ unsigned long *recv_bytes);
+extern unsigned long sun4v_svc_getstatus(unsigned long svc_id,
+ unsigned long *status_bits);
+extern unsigned long sun4v_svc_setstatus(unsigned long svc_id,
+ unsigned long status_bits);
+extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
+ unsigned long status_bits);
+#endif
+
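An illustrative sketch, not part of the patch, of how a sparc64 driver might use the sun4v_svc_send() wrapper declared above. example_svc_write(), the buffer handling and the -EIO mapping are hypothetical; HV_EOK is the usual success status from this header, and the second argument is the buffer's real address, as the API description requires:

static int example_svc_write(unsigned long svc_id, void *buf,
			     unsigned long len)
{
	unsigned long sent, status;

	status = sun4v_svc_send(svc_id, __pa(buf), len, &sent);
	if (status != HV_EOK)
		return -EIO;	/* treat any non-HV_EOK status as an error */

	return sent;		/* bytes accepted by the hypervisor */
}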
/* Trap trace services.
*
* The hypervisor provides a trap tracing capability for privileged
@@ -2724,6 +2798,100 @@ struct hv_mmu_statistics {
*/
#define HV_FAST_MMUSTAT_INFO 0x103
+/* NCS crypto services */
+
+/* ncs_request() sub-function numbers */
+#define HV_NCS_QCONF 0x01
+#define HV_NCS_QTAIL_UPDATE 0x02
+
+#ifndef __ASSEMBLY__
+struct hv_ncs_queue_entry {
+ /* MAU Control Register */
+ unsigned long mau_control;
+#define MAU_CONTROL_INV_PARITY 0x0000000000002000
+#define MAU_CONTROL_STRAND 0x0000000000001800
+#define MAU_CONTROL_BUSY 0x0000000000000400
+#define MAU_CONTROL_INT 0x0000000000000200
+#define MAU_CONTROL_OP 0x00000000000001c0
+#define MAU_CONTROL_OP_SHIFT 6
+#define MAU_OP_LOAD_MA_MEMORY 0x0
+#define MAU_OP_STORE_MA_MEMORY 0x1
+#define MAU_OP_MODULAR_MULT 0x2
+#define MAU_OP_MODULAR_REDUCE 0x3
+#define MAU_OP_MODULAR_EXP_LOOP 0x4
+#define MAU_CONTROL_LEN 0x000000000000003f
+#define MAU_CONTROL_LEN_SHIFT 0
+
+ /* Real address of the bytes to load into or store out of
+ * the MAU.
+ */
+ unsigned long mau_mpa;
+
+ /* Modular Arithmetic MA Offset Register. */
+ unsigned long mau_ma;
+
+ /* Modular Arithmetic N Prime Register. */
+ unsigned long mau_np;
+};
+
+struct hv_ncs_qconf_arg {
+ unsigned long mid; /* MAU ID, 1 per core on Niagara */
+ unsigned long base; /* Real address base of queue */
+ unsigned long end; /* Real address end of queue */
+ unsigned long num_ents; /* Number of entries in queue */
+};
+
+struct hv_ncs_qtail_update_arg {
+ unsigned long mid; /* MAU ID, 1 per core on Niagara */
+ unsigned long tail; /* New tail index to use */
+ unsigned long syncflag; /* only SYNCFLAG_SYNC is implemented */
+#define HV_NCS_SYNCFLAG_SYNC 0x00
+#define HV_NCS_SYNCFLAG_ASYNC 0x01
+};
+#endif
+
+/* ncs_request()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_NCS_REQUEST
+ * ARG0: NCS sub-function
+ * ARG1: sub-function argument real address
+ * ARG2: size in bytes of sub-function argument
+ * RET0: status
+ *
+ * The MAU chip of the Niagara processor is not directly accessible
+ * to privileged code; instead it is programmed indirectly via this
+ * hypervisor API.
+ *
+ * The interface defines a queue of MAU operations to perform.
+ * Privileged code registers a queue with the hypervisor by invoking
+ * this HVAPI with the HV_NCS_QCONF sub-function, which defines the
+ * base, end, and number of entries of the queue. Each queue entry
+ * contains a MAU register struct block.
+ *
+ * The privileged code then adds entries to the queue and invokes
+ * the HV_NCS_QTAIL_UPDATE sub-function.  Since only
+ * synchronous operations are supported by the current hypervisor,
+ * HV_NCS_QTAIL_UPDATE will run all the pending queue entries to
+ * completion and return HV_EOK, or return an error code.
+ *
+ * The real address of the sub-function argument must be aligned on at
+ * least an 8-byte boundary.
+ *
+ * The tail argument of HV_NCS_QTAIL_UPDATE is an index, not a byte
+ * offset, into the queue and must be less than or equal to the
+ * 'num_ents' argument given in the HV_NCS_QCONF call.
+ */
+#define HV_FAST_NCS_REQUEST 0x110
+
+#ifndef __ASSEMBLY__
+extern unsigned long sun4v_ncs_request(unsigned long request,
+ unsigned long arg_ra,
+ unsigned long arg_size);
+#endif
+
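An illustrative sketch, not part of the patch, of registering an NCS queue through the interface documented above. example_ncs_queue_config() is hypothetical and the base/end encoding of the queue bounds is an assumption; the argument block is passed by real address and must be at least 8-byte aligned:

static int example_ncs_queue_config(unsigned long mau_id,
				    struct hv_ncs_queue_entry *ents,
				    unsigned long num_ents)
{
	struct hv_ncs_qconf_arg arg = {
		.mid		= mau_id,
		.base		= __pa(ents),			/* assumed encoding */
		.end		= __pa(ents + num_ents),	/* assumed encoding */
		.num_ents	= num_ents,
	};

	/* An on-stack struct of unsigned longs is 8-byte aligned on sparc64,
	 * which satisfies the alignment rule for the sub-function argument.
	 */
	if (sun4v_ncs_request(HV_NCS_QCONF, __pa(&arg), sizeof(arg)) != HV_EOK)
		return -EIO;
	return 0;
}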
+#define HV_FAST_FIRE_GET_PERFREG 0x120
+#define HV_FAST_FIRE_SET_PERFREG 0x121
+
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 09ea01a8a99..648bd1f0912 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -209,9 +209,8 @@ enum {
DEVCONF_RTR_PROBE_INTERVAL,
DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN,
DEVCONF_PROXY_NDP,
- __DEVCONF_OPTIMISTIC_DAD,
- DEVCONF_ACCEPT_SOURCE_ROUTE,
DEVCONF_OPTIMISTIC_DAD,
+ DEVCONF_ACCEPT_SOURCE_ROUTE,
DEVCONF_MAX
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f671cd2f133..3a70f553b28 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -910,6 +910,17 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
return 0;
}
+/* same as netif_rx_complete, except that local_irq_save(flags)
+ * has already been issued
+ */
+static inline void __netif_rx_complete(struct net_device *dev)
+{
+ BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
+ list_del(&dev->poll_list);
+ smp_mb__before_clear_bit();
+ clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
/* Remove interface from poll list: it must be in the poll list
* on current cpu. This primitive is called by dev->poll(), when
* it completes the work. The device cannot be out of poll list at this
@@ -920,10 +931,7 @@ static inline void netif_rx_complete(struct net_device *dev)
unsigned long flags;
local_irq_save(flags);
- BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
- list_del(&dev->poll_list);
- smp_mb__before_clear_bit();
- clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
+ __netif_rx_complete(dev);
local_irq_restore(flags);
}
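An illustrative sketch, not part of the patch, of the dev->poll() pattern these helpers serve in this kernel generation; example_poll(), example_clean_rx() and example_enable_rx_irq() are hypothetical driver functions:

static int example_poll(struct net_device *dev, int *budget)
{
	int quota = min(dev->quota, *budget);
	int done  = example_clean_rx(dev, quota);	/* packets processed */

	*budget    -= done;
	dev->quota -= done;

	if (done < quota) {
		/* All pending work finished: leave the poll list and let
		 * the device interrupt us again.
		 */
		netif_rx_complete(dev);
		example_enable_rx_irq(dev);
		return 0;
	}
	return 1;	/* more work remains, stay on the poll list */
}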
@@ -940,17 +948,6 @@ static inline void netif_poll_enable(struct net_device *dev)
clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
-/* same as netif_rx_complete, except that local_irq_save(flags)
- * has already been issued
- */
-static inline void __netif_rx_complete(struct net_device *dev)
-{
- BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
- list_del(&dev->poll_list);
- smp_mb__before_clear_bit();
- clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
-
static inline void netif_tx_lock(struct net_device *dev)
{
spin_lock(&dev->_xmit_lock);
diff --git a/include/net/sock.h b/include/net/sock.h
index 689b886038d..dfeb8b13024 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,13 +218,13 @@ struct sock {
atomic_t sk_rmem_alloc;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
+ int sk_sndbuf;
struct sk_buff_head sk_receive_queue;
struct sk_buff_head sk_write_queue;
struct sk_buff_head sk_async_wait_queue;
int sk_wmem_queued;
int sk_forward_alloc;
gfp_t sk_allocation;
- int sk_sndbuf;
int sk_route_caps;
int sk_gso_type;
int sk_rcvlowat;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e22b4f0305a..a8af9ae0017 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -254,6 +254,12 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
return seq3 - seq2 >= seq1 - seq2;
}
+static inline int tcp_too_many_orphans(struct sock *sk, int num)
+{
+ return (num > sysctl_tcp_max_orphans) ||
+ (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+}
extern struct proto tcp_prot;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 39ef925d39d..90185e8b335 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -237,7 +237,6 @@ extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
-#define XFRM_ACQ_EXPIRES 30
struct xfrm_tmpl;
extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d8970623c56..bd8e33582d2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2689,7 +2689,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
map = alloc_bootmem_node(pgdat, size);
pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
-#ifdef CONFIG_FLATMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
* With no DISCONTIG, the global mem_map is just set as node 0's
*/
diff --git a/mm/slub.c b/mm/slub.c
index 3e5aefcb407..238c5a6ee89 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2435,6 +2435,7 @@ void __init kmem_cache_init(void)
*/
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
sizeof(struct kmem_cache_node), GFP_KERNEL);
+ kmalloc_caches[0].refcount = -1;
#endif
/* Able to allocate the per node structures */
@@ -2482,6 +2483,12 @@ static int slab_unmergeable(struct kmem_cache *s)
if (s->ctor)
return 1;
+ /*
+ * We may have set a slab to be unmergeable during bootstrap.
+ */
+ if (s->refcount < 0)
+ return 1;
+
return 0;
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 91b017016d5..3fc69729381 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -121,6 +121,7 @@ void br_fdb_cleanup(unsigned long _data)
{
struct net_bridge *br = (struct net_bridge *)_data;
unsigned long delay = hold_time(br);
+ unsigned long next_timer = jiffies + br->forward_delay;
int i;
spin_lock_bh(&br->hash_lock);
@@ -129,14 +130,21 @@ void br_fdb_cleanup(unsigned long _data)
struct hlist_node *h, *n;
hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
- if (!f->is_static &&
- time_before_eq(f->ageing_timer + delay, jiffies))
+ unsigned long this_timer;
+ if (f->is_static)
+ continue;
+ this_timer = f->ageing_timer + delay;
+ if (time_before_eq(this_timer, jiffies))
fdb_delete(f);
+ else if (this_timer < next_timer)
+ next_timer = this_timer;
}
}
spin_unlock_bh(&br->hash_lock);
- mod_timer(&br->gc_timer, jiffies + HZ/10);
+ /* Add HZ/4 to ensure we round the jiffies upwards to be after the next
+ * timer, otherwise we might round down and end up with a no-op run. */
+ mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
}
/* Completely flush all dynamic entries in forwarding database.*/
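A rough worked example, not from the patch, of why the HZ/4 nudge matters: round_jiffies() aligns a timeout to a whole second and rounds down only when the target sits within the first quarter second after a boundary. With HZ=100, if next_timer fell 10 ticks after a second boundary, round_jiffies(next_timer) would land 10 ticks before the oldest entry ages out, so that gc run would delete nothing and need rescheduling; round_jiffies(next_timer + 25) instead rounds up to the following boundary, safely at or after next_timer.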
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 0e035d6162c..e38034aa56f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -178,7 +178,8 @@ void br_transmit_config(struct net_bridge_port *p)
br_send_config_bpdu(p, &bpdu);
p->topology_change_ack = 0;
p->config_pending = 0;
- mod_timer(&p->hold_timer, jiffies + BR_HOLD_TIME);
+ mod_timer(&p->hold_timer,
+ round_jiffies(jiffies + BR_HOLD_TIME));
}
}
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 24e0ca4a313..77f5255e691 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -42,7 +42,7 @@ static void br_hello_timer_expired(unsigned long arg)
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
- mod_timer(&br->hello_timer, jiffies + br->hello_time);
+ mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time));
}
spin_unlock(&br->lock);
}
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index f34aca041a2..6d5ea976204 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -25,6 +25,7 @@ extern int sysctl_core_destroy_delay;
extern u32 sysctl_xfrm_aevent_etime;
extern u32 sysctl_xfrm_aevent_rseqth;
extern int sysctl_xfrm_larval_drop;
+extern u32 sysctl_xfrm_acq_expires;
#endif
ctl_table core_table[] = {
@@ -127,6 +128,14 @@ ctl_table core_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "xfrm_acq_expires",
+ .data = &sysctl_xfrm_acq_expires,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
#endif /* CONFIG_XFRM */
#endif /* CONFIG_NET */
{
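A hedged usage note, not part of the patch: once this entry is registered in core_table, the acquire lifetime that was hard-wired as XFRM_ACQ_EXPIRES (30 seconds) becomes tunable at run time, for example with echo 45 > /proc/sys/net/core/xfrm_acq_expires (the path is inferred from the entry's placement under net.core).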
diff --git a/net/core/utils.c b/net/core/utils.c
index adecfd281ae..2030bb8c2d3 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -139,16 +139,16 @@ int in4_pton(const char *src, int srclen,
while(1) {
int c;
c = xdigit2bin(srclen > 0 ? *s : '\0', delim);
- if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM))) {
+ if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) {
goto out;
}
- if (c & (IN6PTON_DOT | IN6PTON_DELIM)) {
+ if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
if (w == 0)
goto out;
*d++ = w & 0xff;
w = 0;
i++;
- if (c & IN6PTON_DELIM) {
+ if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) {
if (i != 4)
goto out;
break;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 837f2957fa8..9ad1f6252a9 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -250,8 +250,6 @@ e_inval:
return -EINVAL;
}
-#ifndef CONFIG_IP_NOSIOCRT
-
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
@@ -443,15 +441,6 @@ int ip_rt_ioctl(unsigned int cmd, void __user *arg)
return -EINVAL;
}
-#else
-
-int ip_rt_ioctl(unsigned int cmd, void *arg)
-{
- return -EINVAL;
-}
-
-#endif
-
struct nla_policy rtm_ipv4_policy[RTA_MAX+1] __read_mostly = {
[RTA_DST] = { .type = NLA_U32 },
[RTA_SRC] = { .type = NLA_U32 },
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bd4c295f5d7..766314505c0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1674,9 +1674,8 @@ adjudge_to_death:
}
if (sk->sk_state != TCP_CLOSE) {
sk_stream_mem_reclaim(sk);
- if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
- (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
+ if (tcp_too_many_orphans(sk,
+ atomic_read(sk->sk_prot->orphan_count))) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
"sockets\n");
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 3938d5dbdf2..760165a0800 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -80,7 +80,8 @@ static void printl(const char *fmt, ...)
kfifo_put(tcpw.fifo, tbuf, len);
wake_up(&tcpw.wait);
-}
+} __attribute__ ((format (printf, 1, 2)));
+
/*
* Hook inserted to be called before each receive packet.
@@ -95,7 +96,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
/* Only update if port matches */
if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port)
&& (full || tp->snd_cwnd != tcpw.lastcwnd)) {
- printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u\n",
+ printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u %u\n",
NIPQUAD(inet->saddr), ntohs(inet->sport),
NIPQUAD(inet->daddr), ntohs(inet->dport),
skb->len, tp->snd_nxt, tp->snd_una,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 2ca97b20929..e61340150ba 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -78,9 +78,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
if (sk->sk_err_soft)
orphans <<= 1;
- if (orphans >= sysctl_tcp_max_orphans ||
- (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
+ if (tcp_too_many_orphans(sk, orphans)) {
if (net_ratelimit())
printk(KERN_INFO "Out of socket memory\n");
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 5ceca951d73..fa1902dc81b 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -139,10 +139,8 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
nf_reset(skb);
if (decaps) {
- if (!(skb->dev->flags&IFF_LOOPBACK)) {
- dst_release(skb->dst);
- skb->dst = NULL;
- }
+ dst_release(skb->dst);
+ skb->dst = NULL;
netif_rx(skb);
return 0;
} else {
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index a2f2e6a5ec5..9963700e74c 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -85,6 +85,8 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
+ skb->protocol = htons(ETH_P_IP);
+
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
return 0;
}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index b696c840120..128f94c79c6 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -247,7 +247,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
memcpy(tmp_base, top_iph, sizeof(tmp_base));
tmp_ext = NULL;
- extlen = skb_transport_offset(skb) + sizeof(struct ipv6hdr);
+ extlen = skb_transport_offset(skb) - sizeof(struct ipv6hdr);
if (extlen) {
extlen += sizeof(*tmp_ext);
tmp_ext = kmalloc(extlen, GFP_ATOMIC);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ca08ee88d07..662a7d9681f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -619,14 +619,6 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
ins = &fn->leaf;
- if (fn->fn_flags&RTN_TL_ROOT &&
- fn->leaf == &ip6_null_entry &&
- !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){
- fn->leaf = rt;
- rt->u.dst.rt6_next = NULL;
- goto out;
- }
-
for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
/*
* Search for duplicates
@@ -666,7 +658,6 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
* insert node
*/
-out:
rt->u.dst.rt6_next = iter;
*ins = rt;
rt->rt6i_node = fn;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index d7ed8aa56ec..c858537cec4 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -104,10 +104,8 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi)
nf_reset(skb);
if (decaps) {
- if (!(skb->dev->flags&IFF_LOOPBACK)) {
- dst_release(skb->dst);
- skb->dst = NULL;
- }
+ dst_release(skb->dst);
+ skb->dst = NULL;
netif_rx(skb);
return -1;
} else {
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index a6c0cdf46ad..9fc95bc6509 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -80,6 +80,7 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT);
ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+ skb->protocol = htons(ETH_P_IPV6);
return 0;
}
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 6e36df67f8d..4e84f24fd43 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -2474,6 +2474,8 @@ static int ieee80211_open(struct net_device *dev)
if (sdata->type == IEEE80211_IF_TYPE_STA &&
!local->user_space_mlme)
netif_carrier_off(dev);
+ else
+ netif_carrier_on(dev);
netif_start_queue(dev);
return 0;
@@ -3278,8 +3280,10 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
return TXRX_DROP;
}
}
- while ((skb = __skb_dequeue(&entry->skb_list)))
+ while ((skb = __skb_dequeue(&entry->skb_list))) {
memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
+ dev_kfree_skb(skb);
+ }
/* Complete frame has been reassembled - process it now */
rx->fragmented = 1;
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index 3e07e9d6fa4..9f30ae4c2ab 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -1155,6 +1155,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev,
if (status_code != WLAN_STATUS_SUCCESS) {
printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
dev->name, status_code);
+ if (status_code == WLAN_STATUS_REASSOC_NO_ASSOC)
+ ifsta->prev_bssid_set = 0;
return;
}
@@ -2995,7 +2997,7 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct sta_info *sta;
- struct ieee80211_sub_if_data *sdata = NULL;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
/* TODO: Could consider removing the least recently used entry and
* allow new one to be added. */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 02e401cd683..f8b83014ccc 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -83,22 +83,6 @@
#include <net/inet_common.h>
#endif
-#define CONFIG_SOCK_PACKET 1
-
-/*
- Proposed replacement for SIOC{ADD,DEL}MULTI and
- IFF_PROMISC, IFF_ALLMULTI flags.
-
- It is more expensive, but I believe,
- it is really correct solution: reentereble, safe and fault tolerant.
-
- IFF_PROMISC/IFF_ALLMULTI/SIOC{ADD/DEL}MULTI are faked by keeping
- reference count and global flag, so that real status is
- (gflag|(count != 0)), so that we can use obsolete faulty interface
- not harming clever users.
- */
-#define CONFIG_PACKET_MULTICAST 1
-
/*
Assumptions:
- if device has no dev->hard_header routine, it adds and removes ll header
@@ -159,7 +143,6 @@ static atomic_t packet_socks_nr;
/* Private packet socket structures. */
-#ifdef CONFIG_PACKET_MULTICAST
struct packet_mclist
{
struct packet_mclist *next;
@@ -179,7 +162,7 @@ struct packet_mreq_max
unsigned short mr_alen;
unsigned char mr_address[MAX_ADDR_LEN];
};
-#endif
+
#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
#endif
@@ -205,9 +188,7 @@ struct packet_sock {
origdev:1;
int ifindex; /* bound device */
__be16 num;
-#ifdef CONFIG_PACKET_MULTICAST
struct packet_mclist *mclist;
-#endif
#ifdef CONFIG_PACKET_MMAP
atomic_t mapped;
unsigned int pg_vec_order;
@@ -263,7 +244,6 @@ static void packet_sock_destruct(struct sock *sk)
static const struct proto_ops packet_ops;
-#ifdef CONFIG_SOCK_PACKET
static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
@@ -435,7 +415,6 @@ out_unlock:
dev_put(dev);
return err;
}
-#endif
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
unsigned int res)
@@ -851,9 +830,7 @@ static int packet_release(struct socket *sock)
__sock_put(sk);
}
-#ifdef CONFIG_PACKET_MULTICAST
packet_flush_mclist(sk);
-#endif
#ifdef CONFIG_PACKET_MMAP
if (po->pg_vec) {
@@ -936,8 +913,6 @@ out_unlock:
* Bind a packet socket to a device
*/
-#ifdef CONFIG_SOCK_PACKET
-
static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk=sock->sk;
@@ -960,7 +935,6 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
}
return err;
}
-#endif
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
@@ -1012,11 +986,8 @@ static int packet_create(struct socket *sock, int protocol)
if (!capable(CAP_NET_RAW))
return -EPERM;
- if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
-#ifdef CONFIG_SOCK_PACKET
- && sock->type != SOCK_PACKET
-#endif
- )
+ if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
+ sock->type != SOCK_PACKET)
return -ESOCKTNOSUPPORT;
sock->state = SS_UNCONNECTED;
@@ -1027,10 +998,9 @@ static int packet_create(struct socket *sock, int protocol)
goto out;
sock->ops = &packet_ops;
-#ifdef CONFIG_SOCK_PACKET
if (sock->type == SOCK_PACKET)
sock->ops = &packet_ops_spkt;
-#endif
+
sock_init_data(sock, sk);
po = pkt_sk(sk);
@@ -1046,10 +1016,10 @@ static int packet_create(struct socket *sock, int protocol)
spin_lock_init(&po->bind_lock);
po->prot_hook.func = packet_rcv;
-#ifdef CONFIG_SOCK_PACKET
+
if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;
-#endif
+
po->prot_hook.af_packet_priv = sk;
if (proto) {
@@ -1169,7 +1139,6 @@ out:
return err;
}
-#ifdef CONFIG_SOCK_PACKET
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
@@ -1190,7 +1159,6 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
return 0;
}
-#endif
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
@@ -1221,7 +1189,6 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
return 0;
}
-#ifdef CONFIG_PACKET_MULTICAST
static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
{
switch (i->type) {
@@ -1349,7 +1316,6 @@ static void packet_flush_mclist(struct sock *sk)
}
rtnl_unlock();
}
-#endif
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
@@ -1362,7 +1328,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return -ENOPROTOOPT;
switch(optname) {
-#ifdef CONFIG_PACKET_MULTICAST
case PACKET_ADD_MEMBERSHIP:
case PACKET_DROP_MEMBERSHIP:
{
@@ -1383,7 +1348,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
ret = packet_mc_drop(sk, &mreq);
return ret;
}
-#endif
+
#ifdef CONFIG_PACKET_MMAP
case PACKET_RX_RING:
{
@@ -1506,11 +1471,10 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
switch (msg) {
case NETDEV_UNREGISTER:
-#ifdef CONFIG_PACKET_MULTICAST
if (po->mclist)
packet_dev_mclist(dev, po->mclist, -1);
- // fallthrough
-#endif
+ /* fallthrough */
+
case NETDEV_DOWN:
if (dev->ifindex == po->ifindex) {
spin_lock(&po->bind_lock);
@@ -1856,7 +1820,6 @@ out:
#endif
-#ifdef CONFIG_SOCK_PACKET
static const struct proto_ops packet_ops_spkt = {
.family = PF_PACKET,
.owner = THIS_MODULE,
@@ -1877,7 +1840,6 @@ static const struct proto_ops packet_ops_spkt = {
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
-#endif
static const struct proto_ops packet_ops = {
.family = PF_PACKET,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b8bab89616a..64a375178c5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -26,10 +26,11 @@
#include <net/xfrm.h>
#include <net/ip.h>
#include <linux/audit.h>
+#include <linux/cache.h>
#include "xfrm_hash.h"
-int sysctl_xfrm_larval_drop;
+int sysctl_xfrm_larval_drop __read_mostly;
DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9955ff4da0a..372f06eb8bb 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -21,18 +21,21 @@
#include <linux/cache.h>
#include <asm/uaccess.h>
#include <linux/audit.h>
+#include <linux/cache.h>
#include "xfrm_hash.h"
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);
-u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME;
+u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
-u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE;
+u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
+u32 sysctl_xfrm_acq_expires __read_mostly = 30;
+
/* Each xfrm_state may be linked to two tables:
1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
@@ -622,8 +625,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
hlist_add_head(&x->byspi, xfrm_state_byspi+h);
}
- x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
- x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
+ x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
+ x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
add_timer(&x->timer);
xfrm_state_num++;
xfrm_hash_grow_check(x->bydst.next != NULL);
@@ -772,9 +775,9 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
x->props.family = family;
x->props.mode = mode;
x->props.reqid = reqid;
- x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
+ x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
xfrm_state_hold(x);
- x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
+ x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
add_timer(&x->timer);
hlist_add_head(&x->bydst, xfrm_state_bydst+h);
h = xfrm_src_hash(daddr, saddr, family);