-rw-r--r--  Documentation/hw_random.txt                59
-rw-r--r--  Documentation/kernel-parameters.txt         4
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c            11
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c     8
-rw-r--r--  arch/powerpc/sysdev/ipic.c                  2
-rw-r--r--  arch/sparc64/kernel/ds.c                    3
-rw-r--r--  arch/sparc64/kernel/head.S                  8
-rw-r--r--  arch/sparc64/kernel/process.c               3
-rw-r--r--  arch/sparc64/kernel/smp.c                  17
-rw-r--r--  arch/sparc64/kernel/sys_sparc32.c           3
-rw-r--r--  arch/sparc64/kernel/trampoline.S          188
-rw-r--r--  arch/sparc64/mm/init.c                     38
-rw-r--r--  arch/x86/mm/ioremap.c                       6
-rw-r--r--  drivers/char/hw_random/Kconfig              9
-rw-r--r--  drivers/connector/cn_queue.c                2
-rw-r--r--  drivers/mtd/devices/block2mtd.c             1
-rw-r--r--  drivers/net/bnx2x.c                        36
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c               3
-rw-r--r--  drivers/net/sungem.c                        2
-rw-r--r--  include/asm-sparc64/hvtramp.h               2
-rw-r--r--  include/asm-sparc64/spitfire.h              2
-rw-r--r--  include/asm-x86/io_32.h                     6
-rw-r--r--  include/asm-x86/io_64.h                     6
-rw-r--r--  include/net/sctp/sctp.h                     2
-rw-r--r--  kernel/acct.c                              23
-rw-r--r--  kernel/marker.c                            31
-rw-r--r--  kernel/printk.c                            83
-rw-r--r--  kernel/time/timekeeping.c                   4
-rw-r--r--  lib/iomap.c                                 2
-rw-r--r--  mm/bootmem.c                               25
-rw-r--r--  mm/vmscan.c                                27
-rw-r--r--  net/9p/trans_fd.c                           2
-rw-r--r--  net/atm/clip.c                             19
-rw-r--r--  net/atm/lec.c                               4
-rw-r--r--  net/ipv4/fib_trie.c                         7
-rw-r--r--  net/ipv4/ip_fragment.c                      2
-rw-r--r--  net/ipv4/tcp.c                              4
-rw-r--r--  net/ipv6/ndisc.c                            2
-rw-r--r--  net/sched/sch_htb.c                        13
-rw-r--r--  net/socket.c                                7
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c     2
-rw-r--r--  security/smack/smackfs.c                   35

42 files changed, 328 insertions(+), 385 deletions(-)
diff --git a/Documentation/hw_random.txt b/Documentation/hw_random.txt
index bb58c36b584..690f52550c8 100644
--- a/Documentation/hw_random.txt
+++ b/Documentation/hw_random.txt
@@ -1,33 +1,26 @@
- Hardware driver for Intel/AMD/VIA Random Number Generators (RNG)
- Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
- Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
-
Introduction:
- The hw_random device driver is software that makes use of a
+ The hw_random framework is software that makes use of a
special hardware feature on your CPU or motherboard,
- a Random Number Generator (RNG).
+ a Random Number Generator (RNG). The software has two parts:
+ a core providing the /dev/hw_random character device and its
+ sysfs support, plus a hardware-specific driver that plugs
+ into that core.
- In order to make effective use of this device driver, you
+ To make the most effective use of these mechanisms, you
should download the support software as well. Download the
latest version of the "rng-tools" package from the
hw_random driver's official Web site:
http://sourceforge.net/projects/gkernel/
-About the Intel RNG hardware, from the firmware hub datasheet:
-
- The Firmware Hub integrates a Random Number Generator (RNG)
- using thermal noise generated from inherently random quantum
- mechanical properties of silicon. When not generating new random
- bits the RNG circuitry will enter a low power state. Intel will
- provide a binary software driver to give third party software
- access to our RNG for use as a security feature. At this time,
- the RNG is only to be used with a system in an OS-present state.
+ Those tools use /dev/hw_random to fill the kernel entropy pool,
+ which is used internally and exported by the /dev/urandom and
+ /dev/random special files.
Theory of operation:
- Character driver. Using the standard open()
+ CHARACTER DEVICE. Using the standard open()
and read() system calls, you can read random data from
the hardware RNG device. This data is NOT CHECKED by any
fitness tests, and could potentially be bogus (if the
@@ -36,9 +29,37 @@ Theory of operation:
a security-conscious person would run fitness tests on the
data before assuming it is truly random.
- /dev/hwrandom is char device major 10, minor 183.
+ The rng-tools package uses such tests in "rngd", and lets you
+ run them by hand with a "rngtest" utility.
+
+ /dev/hw_random is char device major 10, minor 183.
+
+ CLASS DEVICE. There is a /sys/class/misc/hw_random node with
+ two unique attributes, "rng_available" and "rng_current". The
+ "rng_available" attribute lists the hardware-specific drivers
+ available, while "rng_current" lists the one which is currently
+ connected to /dev/hw_random. If your system has more than one
+ RNG available, you may change the one used by writing a name from
+ the list in "rng_available" into "rng_current".
+
+==========================================================================
+
+ Hardware driver for Intel/AMD/VIA Random Number Generators (RNG)
+ Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+
+
+About the Intel RNG hardware, from the firmware hub datasheet:
+
+ The Firmware Hub integrates a Random Number Generator (RNG)
+ using thermal noise generated from inherently random quantum
+ mechanical properties of silicon. When not generating new random
+ bits the RNG circuitry will enter a low power state. Intel will
+ provide a binary software driver to give third party software
+ access to our RNG for use as a security feature. At this time,
+ the RNG is only to be used with a system in an OS-present state.
-Driver notes:
+Intel RNG Driver notes:
* FIXME: support poll(2)
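
The open()/read() interface described above is plain POSIX I/O; a minimal
userspace sketch that reads 16 bytes of raw (unchecked) RNG output from the
documented device node:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char buf[16];
            ssize_t n, i;
            int fd = open("/dev/hw_random", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf));  /* raw, unchecked RNG output */
            for (i = 0; i < n; i++)
                    printf("%02x", buf[i]);
            printf("\n");
            close(fd);
            return 0;
    }

Selecting among several generators is likewise just a sysfs write: write a
driver name taken from "rng_available" into
/sys/class/misc/hw_random/rng_current.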
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 650b0d8aa89..508e2a2c986 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1130,6 +1130,10 @@ and is between 256 and 4096 characters. It is defined in the file
memmap=nn[KMG]$ss[KMG]
[KNL,ACPI] Mark specific memory as reserved.
Region of memory to be used, from ss to ss+nn.
+ Example: Exclude memory from 0x18690000-0x1869ffff
+ memmap=64K$0x18690000
+ or
+ memmap=0x10000$0x18690000
meye.*= [HW] Set MotionEye Camera parameters
See Documentation/video4linux/meye.txt.
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 590f1f67c87..a83dfa3cf40 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -351,9 +351,14 @@ static void __init htab_init_page_sizes(void)
mmu_vmalloc_psize = MMU_PAGE_64K;
if (mmu_linear_psize == MMU_PAGE_4K)
mmu_linear_psize = MMU_PAGE_64K;
- if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
- mmu_io_psize = MMU_PAGE_64K;
- else
+ if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+ /*
+ * Don't use 64k pages for ioremap on pSeries, since
+ * that would stop us accessing the HEA ethernet.
+ */
+ if (!machine_is(pseries))
+ mmu_io_psize = MMU_PAGE_64K;
+ } else
mmu_ci_restrictions = 1;
}
#endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index f589999361e..64ec7d62936 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -52,6 +52,10 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
int i, tasknum = -1;
struct bcom_task *tsk;
+ /* Don't try to do anything if bestcomm init failed */
+ if (!bcom_eng)
+ return NULL;
+
/* Get and reserve a task num */
spin_lock(&bcom_eng->lock);
@@ -484,8 +488,8 @@ mpc52xx_bcom_remove(struct of_device *op)
}
static struct of_device_id mpc52xx_bcom_of_match[] = {
- { .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", },
- { .type = "dma-controller", .compatible = "mpc5200-bestcomm", },
+ { .compatible = "fsl,mpc5200-bestcomm", },
+ { .compatible = "mpc5200-bestcomm", },
{},
};
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index ae0dbf4c1d6..0f2dfb0aaa6 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -906,7 +906,7 @@ static int __init init_ipic_sysfs(void)
{
int rc;
- if (!primary_ipic->regs)
+ if (!primary_ipic || !primary_ipic->regs)
return -ENODEV;
printk(KERN_DEBUG "Registering ipic with sysfs...\n");
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index bd76482077b..edb74f5a118 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -972,8 +972,7 @@ static void process_ds_work(void)
LIST_HEAD(todo);
spin_lock_irqsave(&ds_lock, flags);
- list_splice(&ds_work_list, &todo);
- INIT_LIST_HEAD(&ds_work_list);
+ list_splice_init(&ds_work_list, &todo);
spin_unlock_irqrestore(&ds_lock, flags);
list_for_each_entry_safe(qp, tmp, &todo, list) {
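
list_splice_init() is exactly the replaced two-step sequence (splice, then
re-init the donor list) folded into one call that leaves ds_work_list empty.
A simplified userspace sketch of the semantics, modeled on the kernel's
list.h:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
            h->next = h->prev = h;
    }

    /* Splice @list onto the front of @head, then reinitialize @list:
     * the one call replaces list_splice() + INIT_LIST_HEAD().
     */
    static void list_splice_init(struct list_head *list, struct list_head *head)
    {
            if (list->next != list) {       /* source not empty */
                    struct list_head *first = list->next;
                    struct list_head *last = list->prev;
                    struct list_head *at = head->next;

                    first->prev = head;
                    head->next = first;
                    last->next = at;
                    at->prev = last;
            }
            INIT_LIST_HEAD(list);           /* donor list ends up empty */
    }

    int main(void)
    {
            struct list_head work, todo, a, b;

            INIT_LIST_HEAD(&work);
            INIT_LIST_HEAD(&todo);
            /* enqueue a and b on work by hand */
            a.prev = &work; a.next = &b;
            b.prev = &a;    b.next = &work;
            work.next = &a; work.prev = &b;

            list_splice_init(&work, &todo);
            printf("work empty: %d\n", work.next == &work);  /* prints 1 */
            return 0;
    }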
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 44b105c04dd..34f8ff57c56 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -288,8 +288,12 @@ sun4v_chip_type:
/* Leave arg2 as-is, prom_mmu_ihandle_cache */
mov -1, %l3
stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default)
- sethi %hi(8 * 1024 * 1024), %l3
- stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB)
+ /* 4MB align the kernel image size. */
+ set (_end - KERNBASE), %l3
+ set ((4 * 1024 * 1024) - 1), %l4
+ add %l3, %l4, %l3
+ andn %l3, %l4, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB)
sethi %hi(KERNBASE), %l3
stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE)
stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index e116e38b160..acf8c5250aa 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -731,9 +731,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
current_thread_info()->xfsr[0] = 0;
current_thread_info()->fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
}
out:
return error;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index cc454731d87..5a1126b363a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -284,14 +284,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
- extern int bigkernel;
struct hvtramp_descr *hdesc;
unsigned long trampoline_ra;
struct trap_per_cpu *tb;
u64 tte_vaddr, tte_data;
unsigned long hv_err;
+ int i;
- hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+ hdesc = kzalloc(sizeof(*hdesc) +
+ (sizeof(struct hvtramp_mapping) *
+ num_kernel_image_mappings - 1),
+ GFP_KERNEL);
if (!hdesc) {
printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
"hvtramp_descr.\n");
@@ -299,7 +302,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
}
hdesc->cpu = cpu;
- hdesc->num_mappings = (bigkernel ? 2 : 1);
+ hdesc->num_mappings = num_kernel_image_mappings;
tb = &trap_block[cpu];
tb->hdesc = hdesc;
@@ -312,13 +315,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
tte_vaddr = (unsigned long) KERNBASE;
tte_data = kern_locked_tte_data;
- hdesc->maps[0].vaddr = tte_vaddr;
- hdesc->maps[0].tte = tte_data;
- if (bigkernel) {
+ for (i = 0; i < hdesc->num_mappings; i++) {
+ hdesc->maps[i].vaddr = tte_vaddr;
+ hdesc->maps[i].tte = tte_data;
tte_vaddr += 0x400000;
tte_data += 0x400000;
- hdesc->maps[1].vaddr = tte_vaddr;
- hdesc->maps[1].tte = tte_data;
}
trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
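
The kzalloc() size here accounts for the trailing variable-length maps[]
array, declared as maps[1] in hvtramp_descr (see the
include/asm-sparc64/hvtramp.h hunk below), so one entry is already included
in sizeof(*hdesc). A userspace sketch of that allocation idiom, with
hypothetical names:

    #include <stdlib.h>

    struct mapping { unsigned long vaddr, tte; };

    struct descr {
            unsigned int cpu;
            unsigned int num_mappings;
            struct mapping maps[1];  /* really num_mappings entries long */
    };

    static struct descr *descr_alloc(unsigned int n)
    {
            /* maps[1] is already inside sizeof(*d), hence the n - 1 */
            struct descr *d = calloc(1, sizeof(*d) +
                                        sizeof(struct mapping) * (n - 1));
            if (d)
                    d->num_mappings = n;
            return d;
    }

    int main(void)
    {
            struct descr *d = descr_alloc(4);
            unsigned int i;

            for (i = 0; d && i < d->num_mappings; i++)
                    d->maps[i].vaddr = 0x400000UL * i;  /* 4MB steps */
            free(d);
            return 0;
    }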
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index deaba2bd053..2455fa49887 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -678,9 +678,6 @@ asmlinkage long sparc32_execve(struct pt_regs *regs)
current_thread_info()->xfsr[0] = 0;
current_thread_info()->fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
}
out:
return error;
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 4ae2e525d68..56ff5521134 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -105,7 +105,7 @@ startup_continue:
wr %g2, 0, %tick_cmpr
/* Call OBP by hand to lock KERNBASE into i/d tlbs.
- * We lock 2 consequetive entries if we are 'bigkernel'.
+ * We lock 'num_kernel_image_mappings' consecutive entries.
*/
sethi %hi(prom_entry_lock), %g2
1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
@@ -119,6 +119,29 @@ startup_continue:
add %l2, -(192 + 128), %sp
flushw
+ /* Setup the loop variables:
+ * %l3: VADDR base
+ * %l4: TTE base
+ * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+ * %l6: Number of TTE entries to map
+ * %l7: Highest TTE entry number, we count down
+ */
+ sethi %hi(KERNBASE), %l3
+ sethi %hi(kern_locked_tte_data), %l4
+ ldx [%l4 + %lo(kern_locked_tte_data)], %l4
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+ add %l6, 1, %l6
+
+ mov 15, %l7
+ BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+ mov 63, %l7
+2:
+
+3:
+ /* Lock into I-MMU */
sethi %hi(call_method), %g2
or %g2, %lo(call_method), %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
@@ -132,63 +155,26 @@ startup_continue:
sethi %hi(prom_mmu_ihandle_cache), %g2
lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
-
- mov 15, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
- mov 63, %g2
-1:
- stx %g2, [%sp + 2047 + 128 + 0x38]
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x08], %o1
- call %o1
- add %sp, (2047 + 128), %o0
+ /* Each TTE maps 4MB, convert index to offset. */
+ sllx %l5, 22, %g1
- sethi %hi(bigkernel), %g2
- lduw [%g2 + %lo(bigkernel)], %g2
- brz,pt %g2, do_dtlb
- nop
+ add %l3, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR
+ add %l4, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE
- sethi %hi(call_method), %g2
- or %g2, %lo(call_method), %g2
- stx %g2, [%sp + 2047 + 128 + 0x00]
- mov 5, %g2
- stx %g2, [%sp + 2047 + 128 + 0x08]
- mov 1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x10]
- sethi %hi(itlb_load), %g2
- or %g2, %lo(itlb_load), %g2
- stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(prom_mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE + 0x400000), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- sethi %hi(0x400000), %g1
- add %g2, %g1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
-
- mov 14, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
- mov 62, %g2
-1:
+ /* TTE index is highest minus loop index. */
+ sub %l7, %l5, %g2
stx %g2, [%sp + 2047 + 128 + 0x38]
+
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
-do_dtlb:
+ /* Lock into D-MMU */
sethi %hi(call_method), %g2
or %g2, %lo(call_method), %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
@@ -202,65 +188,30 @@ do_dtlb:
sethi %hi(prom_mmu_ihandle_cache), %g2
lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
- mov 15, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+ /* Each TTE maps 4MB, convert index to offset. */
+ sllx %l5, 22, %g1
- mov 63, %g2
-1:
+ add %l3, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28] ! VADDR
+ add %l4, %g1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30] ! TTE
+ /* TTE index is highest minus loop index. */
+ sub %l7, %l5, %g2
stx %g2, [%sp + 2047 + 128 + 0x38]
+
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
- sethi %hi(bigkernel), %g2
- lduw [%g2 + %lo(bigkernel)], %g2
- brz,pt %g2, do_unlock
+ add %l5, 1, %l5
+ cmp %l5, %l6
+ bne,pt %xcc, 3b
nop
- sethi %hi(call_method), %g2
- or %g2, %lo(call_method), %g2
- stx %g2, [%sp + 2047 + 128 + 0x00]
- mov 5, %g2
- stx %g2, [%sp + 2047 + 128 + 0x08]
- mov 1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x10]
- sethi %hi(dtlb_load), %g2
- or %g2, %lo(dtlb_load), %g2
- stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(prom_mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE + 0x400000), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- sethi %hi(0x400000), %g1
- add %g2, %g1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
-
- mov 14, %g2
- BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
- mov 62, %g2
-1:
-
- stx %g2, [%sp + 2047 + 128 + 0x38]
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x08], %o1
- call %o1
- add %sp, (2047 + 128), %o0
-
-do_unlock:
sethi %hi(prom_entry_lock), %g2
stb %g0, [%g2 + %lo(prom_entry_lock)]
membar #StoreStore | #StoreLoad
@@ -269,47 +220,36 @@ do_unlock:
nop
niagara_lock_tlb:
+ sethi %hi(KERNBASE), %l3
+ sethi %hi(kern_locked_tte_data), %l4
+ ldx [%l4 + %lo(kern_locked_tte_data)], %l4
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+ add %l6, 1, %l6
+
+1:
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE), %o0
+ sllx %l5, 22, %g2
+ add %l3, %g2, %o0
clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ add %l4, %g2, %o2
mov HV_MMU_IMMU, %o3
ta HV_FAST_TRAP
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE), %o0
+ sllx %l5, 22, %g2
+ add %l3, %g2, %o0
clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
+ add %l4, %g2, %o2
mov HV_MMU_DMMU, %o3
ta HV_FAST_TRAP
- sethi %hi(bigkernel), %g2
- lduw [%g2 + %lo(bigkernel)], %g2
- brz,pt %g2, after_lock_tlb
+ add %l5, 1, %l5
+ cmp %l5, %l6
+ bne,pt %xcc, 1b
nop
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE + 0x400000), %o0
- clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
- sethi %hi(0x400000), %o3
- add %o2, %o3, %o2
- mov HV_MMU_IMMU, %o3
- ta HV_FAST_TRAP
-
- mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
- sethi %hi(KERNBASE + 0x400000), %o0
- clr %o1
- sethi %hi(kern_locked_tte_data), %o2
- ldx [%o2 + %lo(kern_locked_tte_data)], %o2
- sethi %hi(0x400000), %o3
- add %o2, %o3, %o2
- mov HV_MMU_DMMU, %o3
- ta HV_FAST_TRAP
-
after_lock_tlb:
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
wr %g0, 0, %fprs
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5c30416fda..466fd6cffac 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -166,7 +166,7 @@ unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;
-int bigkernel = 0;
+int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
@@ -572,7 +572,7 @@ static unsigned long kern_large_tte(unsigned long paddr);
static void __init remap_kernel(void)
{
unsigned long phys_page, tte_vaddr, tte_data;
- int tlb_ent = sparc64_highest_locked_tlbent();
+ int i, tlb_ent = sparc64_highest_locked_tlbent();
tte_vaddr = (unsigned long) KERNBASE;
phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -582,27 +582,20 @@ static void __init remap_kernel(void)
/* Now lock us into the TLBs via Hypervisor or OBP. */
if (tlb_type == hypervisor) {
- hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
- hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
- if (bigkernel) {
- tte_vaddr += 0x400000;
- tte_data += 0x400000;
+ for (i = 0; i < num_kernel_image_mappings; i++) {
hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+ tte_vaddr += 0x400000;
+ tte_data += 0x400000;
}
} else {
- prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
- prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
- if (bigkernel) {
- tlb_ent -= 1;
- prom_dtlb_load(tlb_ent,
- tte_data + 0x400000,
- tte_vaddr + 0x400000);
- prom_itlb_load(tlb_ent,
- tte_data + 0x400000,
- tte_vaddr + 0x400000);
+ for (i = 0; i < num_kernel_image_mappings; i++) {
+ prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+ prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+ tte_vaddr += 0x400000;
+ tte_data += 0x400000;
}
- sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+ sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
}
if (tlb_type == cheetah_plus) {
sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
@@ -1352,12 +1345,9 @@ void __init paging_init(void)
shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
real_end = (unsigned long)_end;
- if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
- bigkernel = 1;
- if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
- prom_printf("paging_init: Kernel > 8MB, too large.\n");
- prom_halt();
- }
+ num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+ printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+ num_kernel_image_mappings);
/* Set kernel pgd to upper alias so physical page computations
* work.
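
num_kernel_image_mappings is simply the image size in 4MB units, rounded up;
for instance, a hypothetical 10MB image needs three locked TLB entries, where
the old code refused anything past 8MB:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long image = 10UL << 20;  /* hypothetical 10MB kernel */

            /* 4MB per locked TLB entry: 10MB -> 3 entries */
            printf("%lu\n", DIV_ROUND_UP(image, 1UL << 22));
            return 0;
    }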
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8fe576baa14..4afaba0ed72 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -106,7 +106,7 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
enum ioremap_mode mode)
{
unsigned long pfn, offset, last_addr, vaddr;
@@ -193,13 +193,13 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
*
* Must be freed with iounmap.
*/
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
-void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 6bbd4fa50f3..8d6c2089d2a 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -9,7 +9,14 @@ config HW_RANDOM
Hardware Random Number Generator Core infrastructure.
To compile this driver as a module, choose M here: the
- module will be called rng-core.
+ module will be called rng-core. This provides a device
+ that's usually called /dev/hw_random, and which exposes one
+ of possibly several hardware random number generators.
+
+ These hardware random number generators do not feed directly
+ into the kernel's random number generator. That is usually
+ handled by the "rngd" daemon. Documentation/hw_random.txt
+ has more information.
If unsure, say Y.
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 5732ca3259f..b6fe7e7a2c2 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -146,7 +146,7 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
dev->nls = nls;
- dev->cn_queue = create_workqueue(dev->name);
+ dev->cn_queue = create_singlethread_workqueue(dev->name);
if (!dev->cn_queue) {
kfree(dev);
return NULL;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index eeaaa9dce6e..ad1880c6751 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -408,7 +408,6 @@ static int block2mtd_setup2(const char *val)
if (token[1]) {
ret = parse_num(&erase_size, token[1]);
if (ret) {
- kfree(name);
parse_err("illegal erase size");
}
}
diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c
index 8af142ccf37..de32b3fba32 100644
--- a/drivers/net/bnx2x.c
+++ b/drivers/net/bnx2x.c
@@ -63,8 +63,8 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
-#define DRV_MODULE_VERSION "1.40.22"
-#define DRV_MODULE_RELDATE "2007/11/27"
+#define DRV_MODULE_VERSION "1.42.3"
+#define DRV_MODULE_RELDATE "2008/3/9"
#define BNX2X_BC_VER 0x040200
/* Time in jiffies before concluding the transmitter is hung. */
@@ -8008,38 +8008,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
- switch (cmd->port) {
- case PORT_TP:
- if (!(bp->supported & SUPPORTED_TP)) {
- DP(NETIF_MSG_LINK, "TP not supported\n");
- return -EINVAL;
- }
-
- if (bp->phy_flags & PHY_XGXS_FLAG) {
- bnx2x_link_reset(bp);
- bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
- bnx2x_phy_deassert(bp);
- }
- break;
-
- case PORT_FIBRE:
- if (!(bp->supported & SUPPORTED_FIBRE)) {
- DP(NETIF_MSG_LINK, "FIBRE not supported\n");
- return -EINVAL;
- }
-
- if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
- bnx2x_link_reset(bp);
- bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
- bnx2x_phy_deassert(bp);
- }
- break;
-
- default:
- DP(NETIF_MSG_LINK, "Unknown port type\n");
- return -EINVAL;
- }
-
if (cmd->autoneg == AUTONEG_ENABLE) {
if (!(bp->supported & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 1837584c450..6a3ac4ea97e 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -109,7 +109,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
int irq = irq_of_parse_and_map(child, 0);
if (irq != NO_IRQ) {
const u32 *id = of_get_property(child, "reg", NULL);
- bus->irq[*id] = irq;
+ if (id)
+ bus->irq[*id] = irq;
}
}
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 97212799c51..4291458955e 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -912,7 +912,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
* rx ring - must call napi_disable(), which
* schedule_timeout()'s if polling is already disabled.
*/
- work_done += gem_rx(gp, budget);
+ work_done += gem_rx(gp, budget - work_done);
if (work_done >= budget)
return work_done;
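
The fix matters because work_done may already be non-zero when gem_rx() is
called again on a later pass of the poll loop; passing the remaining budget
keeps the total within what the NAPI core granted. A toy illustration with a
stand-in for gem_rx():

    #include <stdio.h>

    /* Stand-in for gem_rx(): processes at most 'limit' packets. */
    static int fake_rx(int avail, int limit)
    {
            return avail < limit ? avail : limit;
    }

    int main(void)
    {
            int budget = 16, work_done = 0;

            work_done += 10;  /* work already counted on an earlier pass */
            /* Pass the *remaining* budget, as the fix does, so the total
             * can never exceed what the NAPI core granted us.
             */
            work_done += fake_rx(32, budget - work_done);
            printf("%d <= %d\n", work_done, budget);  /* 16 <= 16 */
            return 0;
    }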
diff --git a/include/asm-sparc64/hvtramp.h b/include/asm-sparc64/hvtramp.h
index c7dd6ad056d..b2b9b947b3a 100644
--- a/include/asm-sparc64/hvtramp.h
+++ b/include/asm-sparc64/hvtramp.h
@@ -16,7 +16,7 @@ struct hvtramp_descr {
__u64 fault_info_va;
__u64 fault_info_pa;
__u64 thread_reg;
- struct hvtramp_mapping maps[2];
+ struct hvtramp_mapping maps[1];
};
extern void hv_cpu_startup(unsigned long hvdescr_pa);
diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h
index 63b7040e813..985ea7e3199 100644
--- a/include/asm-sparc64/spitfire.h
+++ b/include/asm-sparc64/spitfire.h
@@ -63,6 +63,8 @@ extern void cheetah_enable_pcache(void);
SPITFIRE_HIGHEST_LOCKED_TLBENT : \
CHEETAH_HIGHEST_LOCKED_TLBENT)
+extern int num_kernel_image_mappings;
+
/* The data cache is write through, so this just invalidates the
* specified line.
*/
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 58d2c45cd0b..d4d8fbd9378 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -114,13 +114,13 @@ static inline void * phys_to_virt(unsigned long address)
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
/*
* The default ioremap() behavior is non-cached:
*/
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index f64a59cc396..db0be2011a3 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -158,13 +158,13 @@ extern void early_iounmap(void *addr, unsigned long size);
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
/*
* The default ioremap() behavior is non-cached:
*/
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 57ed3e323d9..ea806732b08 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -389,7 +389,7 @@ void sctp_v6_del_protocol(void);
#else /* #ifdef defined(CONFIG_IPV6) */
-static inline void sctp_v6_pf_init(void) { return 0; }
+static inline void sctp_v6_pf_init(void) { return; }
static inline void sctp_v6_pf_exit(void) { return; }
static inline int sctp_v6_protosw_init(void) { return 0; }
static inline void sctp_v6_protosw_exit(void) { return; }
diff --git a/kernel/acct.c b/kernel/acct.c
index 521dfa53cb9..91e1cfd734d 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -58,6 +58,7 @@
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <linux/blkdev.h> /* sector_div */
+#include <linux/pid_namespace.h>
/*
* These constants control the amount of freespace that suspend and
@@ -74,7 +75,7 @@ int acct_parm[3] = {4, 2, 30};
/*
* External references and all of the globals.
*/
-static void do_acct_process(struct file *);
+static void do_acct_process(struct pid_namespace *ns, struct file *);
/*
* This structure is used so that all the data protected by lock
@@ -86,6 +87,7 @@ struct acct_glbs {
volatile int active;
volatile int needcheck;
struct file *file;
+ struct pid_namespace *ns;
struct timer_list timer;
};
@@ -175,9 +177,11 @@ out:
static void acct_file_reopen(struct file *file)
{
struct file *old_acct = NULL;
+ struct pid_namespace *old_ns = NULL;
if (acct_globals.file) {
old_acct = acct_globals.file;
+ old_ns = acct_globals.ns;
del_timer(&acct_globals.timer);
acct_globals.active = 0;
acct_globals.needcheck = 0;
@@ -185,6 +189,7 @@ static void acct_file_reopen(struct file *file)
}
if (file) {
acct_globals.file = file;
+ acct_globals.ns = get_pid_ns(task_active_pid_ns(current));
acct_globals.needcheck = 0;
acct_globals.active = 1;
/* It's been deleted if it was used before so this is safe */
@@ -196,8 +201,9 @@ static void acct_file_reopen(struct file *file)
if (old_acct) {
mnt_unpin(old_acct->f_path.mnt);
spin_unlock(&acct_globals.lock);
- do_acct_process(old_acct);
+ do_acct_process(old_ns, old_acct);
filp_close(old_acct, NULL);
+ put_pid_ns(old_ns);
spin_lock(&acct_globals.lock);
}
}
@@ -419,7 +425,7 @@ static u32 encode_float(u64 value)
/*
* do_acct_process does all actual work. Caller holds the reference to file.
*/
-static void do_acct_process(struct file *file)
+static void do_acct_process(struct pid_namespace *ns, struct file *file)
{
struct pacct_struct *pacct = &current->signal->pacct;
acct_t ac;
@@ -481,8 +487,10 @@ static void do_acct_process(struct file *file)
ac.ac_gid16 = current->gid;
#endif
#if ACCT_VERSION==3
- ac.ac_pid = current->tgid;
- ac.ac_ppid = current->real_parent->tgid;
+ ac.ac_pid = task_tgid_nr_ns(current, ns);
+ rcu_read_lock();
+ ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns);
+ rcu_read_unlock();
#endif
spin_lock_irq(&current->sighand->siglock);
@@ -578,6 +586,7 @@ void acct_collect(long exitcode, int group_dead)
void acct_process(void)
{
struct file *file = NULL;
+ struct pid_namespace *ns;
/*
* accelerate the common fastpath:
@@ -592,8 +601,10 @@ void acct_process(void)
return;
}
get_file(file);
+ ns = get_pid_ns(acct_globals.ns);
spin_unlock(&acct_globals.lock);
- do_acct_process(file);
+ do_acct_process(ns, file);
fput(file);
+ put_pid_ns(ns);
}
diff --git a/kernel/marker.c b/kernel/marker.c
index 48a4ea5afff..041c33e3e95 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -104,18 +104,18 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
char ptype;
/*
- * disabling preemption to make sure the teardown of the callbacks can
- * be done correctly when they are in modules and they insure RCU read
- * coherency.
+ * preempt_disable does two things: it makes sure that the teardown
+ * of the callbacks can be done correctly when they are in modules,
+ * and it ensures RCU read coherency.
*/
preempt_disable();
- ptype = ACCESS_ONCE(mdata->ptype);
+ ptype = mdata->ptype;
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependant,
* so we put an explicit smp_rmb() here. */
smp_rmb();
- func = ACCESS_ONCE(mdata->single.func);
+ func = mdata->single.func;
/* Must read the ptr before private data. They are not data
* dependant, so we put an explicit smp_rmb() here. */
smp_rmb();
@@ -133,7 +133,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
* in the fast path, so put the explicit barrier here.
*/
smp_read_barrier_depends();
- multi = ACCESS_ONCE(mdata->multi);
+ multi = mdata->multi;
for (i = 0; multi[i].func; i++) {
va_start(args, fmt);
multi[i].func(multi[i].probe_private, call_private, fmt,
@@ -161,13 +161,13 @@ void marker_probe_cb_noarg(const struct marker *mdata,
char ptype;
preempt_disable();
- ptype = ACCESS_ONCE(mdata->ptype);
+ ptype = mdata->ptype;
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependant,
* so we put an explicit smp_rmb() here. */
smp_rmb();
- func = ACCESS_ONCE(mdata->single.func);
+ func = mdata->single.func;
/* Must read the ptr before private data. They are not data
* dependant, so we put an explicit smp_rmb() here. */
smp_rmb();
@@ -183,7 +183,7 @@ void marker_probe_cb_noarg(const struct marker *mdata,
* in the fast path, so put the explicit barrier here.
*/
smp_read_barrier_depends();
- multi = ACCESS_ONCE(mdata->multi);
+ multi = mdata->multi;
for (i = 0; multi[i].func; i++)
multi[i].func(multi[i].probe_private, call_private, fmt,
&args);
@@ -551,9 +551,9 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
/*
* Disable a marker and its probe callback.
- * Note: only after a synchronize_sched() issued after setting elem->call to the
- * empty function insures that the original callback is not used anymore. This
- * insured by preemption disabling around the call site.
+ * Note: only waiting an RCU period after setting elem->call to the
+ * empty function ensures that the original callback is not used
+ * anymore. This is ensured by preempt_disable around the call site.
*/
static void disable_marker(struct marker *elem)
{
@@ -565,8 +565,8 @@ static void disable_marker(struct marker *elem)
elem->ptype = 0; /* single probe */
/*
* Leave the private data and id there, because removal is racy and
- * should be done only after a synchronize_sched(). These are never used
- * until the next initialization anyway.
+ * should be done only after an RCU period. These are never used until
+ * the next initialization anyway.
*/
}
@@ -601,9 +601,6 @@ void marker_update_probe_range(struct marker *begin,
/*
* Update probes, removing the faulty probes.
- * Issues a synchronize_sched() when no reference to the module passed
- * as parameter is found in the probes so the probe module can be
- * safely unloaded from now on.
*
* Internal callback only changed before the first probe is connected to it.
* Single probe private data can only be changed on 0 -> 1 and 2 -> 1
diff --git a/kernel/printk.c b/kernel/printk.c
index 9adc2a473e6..c46a20a19a1 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -616,6 +616,40 @@ asmlinkage int printk(const char *fmt, ...)
/* cpu currently holding logbuf_lock */
static volatile unsigned int printk_cpu = UINT_MAX;
+/*
+ * Can we actually use the console at this time on this cpu?
+ *
+ * Console drivers may assume that per-cpu resources have
+ * been allocated. So unless they're explicitly marked as
+ * being able to cope (CON_ANYTIME) don't call them until
+ * this CPU is officially up.
+ */
+static inline int can_use_console(unsigned int cpu)
+{
+ return cpu_online(cpu) || have_callable_console();
+}
+
+/*
+ * Try to get console ownership to actually show the kernel
+ * messages from a 'printk'. Return true (and with the
+ * console_semaphore held, and 'console_locked' set) if it
+ * is successful, false otherwise.
+ *
+ * This gets called with the 'logbuf_lock' spinlock held and
+ * interrupts disabled. It should return with 'logbuf_lock'
+ * released but interrupts still disabled.
+ */
+static int acquire_console_semaphore_for_printk(unsigned int cpu)
+{
+ int retval = 0;
+
+ if (can_use_console(cpu))
+ retval = !try_acquire_console_sem();
+ printk_cpu = UINT_MAX;
+ spin_unlock(&logbuf_lock);
+ return retval;
+}
+
const char printk_recursion_bug_msg [] =
KERN_CRIT "BUG: recent printk recursion!\n";
static int printk_recursion_bug;
@@ -725,43 +759,22 @@ asmlinkage int vprintk(const char *fmt, va_list args)
log_level_unknown = 1;
}
- if (!down_trylock(&console_sem)) {
- /*
- * We own the drivers. We can drop the spinlock and
- * let release_console_sem() print the text, maybe ...
- */
- console_locked = 1;
- printk_cpu = UINT_MAX;
- spin_unlock(&logbuf_lock);
+ /*
+ * Try to acquire and then immediately release the
+ * console semaphore. The release will do all the
+ * actual magic (print out buffers, wake up klogd,
+ * etc).
+ *
+ * The acquire_console_semaphore_for_printk() function
+ * will release 'logbuf_lock' regardless of whether it
+ * actually gets the semaphore or not.
+ */
+ if (acquire_console_semaphore_for_printk(this_cpu))
+ release_console_sem();
- /*
- * Console drivers may assume that per-cpu resources have
- * been allocated. So unless they're explicitly marked as
- * being able to cope (CON_ANYTIME) don't call them until
- * this CPU is officially up.
- */
- if (cpu_online(smp_processor_id()) || have_callable_console()) {
- console_may_schedule = 0;
- release_console_sem();
- } else {
- /* Release by hand to avoid flushing the buffer. */
- console_locked = 0;
- up(&console_sem);
- }
- lockdep_on();
- raw_local_irq_restore(flags);
- } else {
- /*
- * Someone else owns the drivers. We drop the spinlock, which
- * allows the semaphore holder to proceed and to call the
- * console drivers with the output which we just produced.
- */
- printk_cpu = UINT_MAX;
- spin_unlock(&logbuf_lock);
- lockdep_on();
+ lockdep_on();
out_restore_irqs:
- raw_local_irq_restore(flags);
- }
+ raw_local_irq_restore(flags);
preempt_enable();
return printed_len;
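
The restructured path is a classic try-lock pattern: whoever wins the
non-blocking acquire flushes everyone's buffered output when it releases the
semaphore, and losers simply rely on the current owner. A userspace analogue,
with a pthread mutex standing in for console_sem:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t console_sem = PTHREAD_MUTEX_INITIALIZER;

    /* Try to become the thread that flushes the buffer; never block,
     * since printk() must work from any context.
     */
    static int try_acquire_console(void)
    {
            return pthread_mutex_trylock(&console_sem) == 0;
    }

    int main(void)
    {
            if (try_acquire_console()) {
                    printf("we own the console, flush the log buffer\n");
                    pthread_mutex_unlock(&console_sem);  /* the "release" step */
            } else {
                    printf("current owner will flush our message for us\n");
            }
            return 0;
    }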
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 671af612b76..a3fa587c350 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -191,8 +191,12 @@ static void change_clocksource(void)
tick_clock_notify();
+ /*
+ * We're holding xtime lock and waking up klogd would deadlock
+ * us on enqueue. So no printing!
printk(KERN_INFO "Time: %s clocksource has been installed.\n",
clock->name);
+ */
}
#else
static inline void change_clocksource(void) { }
diff --git a/lib/iomap.c b/lib/iomap.c
index db004a9ff50..dd6ca48fe6b 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioport_unmap);
* */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
- unsigned long start = pci_resource_start(dev, bar);
+ resource_size_t start = pci_resource_start(dev, bar);
unsigned long len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
diff --git a/mm/bootmem.c b/mm/bootmem.c
index f6ff4337b42..2ccea700968 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -125,6 +125,7 @@ static int __init reserve_bootmem_core(bootmem_data_t *bdata,
BUG_ON(!size);
BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
+ BUG_ON(addr < bdata->node_boot_start);
sidx = PFN_DOWN(addr - bdata->node_boot_start);
eidx = PFN_UP(addr + size - bdata->node_boot_start);
@@ -156,21 +157,31 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
unsigned long sidx, eidx;
unsigned long i;
+ BUG_ON(!size);
+
+ /* out of range */
+ if (addr + size < bdata->node_boot_start ||
+ PFN_DOWN(addr) > bdata->node_low_pfn)
+ return;
/*
* round down end of usable mem, partially free pages are
* considered reserved.
*/
- BUG_ON(!size);
- BUG_ON(PFN_DOWN(addr + size) > bdata->node_low_pfn);
- if (addr < bdata->last_success)
+ if (addr >= bdata->node_boot_start && addr < bdata->last_success)
bdata->last_success = addr;
/*
- * Round up the beginning of the address.
+ * Round up the start index to the range.
*/
- sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+ if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
+ sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
+ else
+ sidx = 0;
+
eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
+ if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+ eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
for (i = sidx; i < eidx; i++) {
if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
@@ -421,7 +432,9 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
void __init free_bootmem(unsigned long addr, unsigned long size)
{
- free_bootmem_core(NODE_DATA(0)->bdata, addr, size);
+ bootmem_data_t *bdata;
+ list_for_each_entry(bdata, &bdata_list, list)
+ free_bootmem_core(bdata, addr, size);
}
unsigned long __init free_all_bootmem(void)
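
The new checks clamp the request to the node's own bitmap instead of
BUG()ing on ranges that merely straddle node_boot_start. A hypothetical
standalone version of the clamping arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    /* Clamp a free request to one node's bitmap, as the patched
     * free_bootmem_core() does (simplified standalone version).
     */
    static void clamp(unsigned long addr, unsigned long size,
                      unsigned long boot_start, unsigned long low_pfn)
    {
            unsigned long sidx = 0, eidx;

            if (addr + size < boot_start || PFN_DOWN(addr) > low_pfn)
                    return;  /* entirely outside this node */
            if (PFN_UP(addr) > PFN_DOWN(boot_start))
                    sidx = PFN_UP(addr) - PFN_DOWN(boot_start);
            eidx = PFN_DOWN(addr + size - boot_start);
            if (eidx > low_pfn - PFN_DOWN(boot_start))
                    eidx = low_pfn - PFN_DOWN(boot_start);
            printf("clear bits [%lu, %lu)\n", sidx, eidx);
    }

    int main(void)
    {
            /* node covers [1MB, 16MB); request straddles its start */
            clamp(0x00080000UL, 0x00200000UL, 0x00100000UL, 0x1000UL);
            return 0;
    }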
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 45711585684..4046434046e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,13 +70,6 @@ struct scan_control {
int order;
- /*
- * Pages that have (or should have) IO pending. If we run into
- * a lot of these, we're better off waiting a little for IO to
- * finish rather than scanning more pages in the VM.
- */
- int nr_io_pages;
-
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
@@ -512,10 +505,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
wait_on_page_writeback(page);
- else {
- sc->nr_io_pages++;
+ else
goto keep_locked;
- }
}
referenced = page_referenced(page, 1, sc->mem_cgroup);
@@ -554,10 +545,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (PageDirty(page)) {
if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
goto keep_locked;
- if (!may_enter_fs) {
- sc->nr_io_pages++;
+ if (!may_enter_fs)
goto keep_locked;
- }
if (!sc->may_writepage)
goto keep_locked;
@@ -568,10 +557,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
case PAGE_ACTIVATE:
goto activate_locked;
case PAGE_SUCCESS:
- if (PageWriteback(page) || PageDirty(page)) {
- sc->nr_io_pages++;
+ if (PageWriteback(page) || PageDirty(page))
goto keep;
- }
/*
* A synchronous write - probably a ramdisk. Go
* ahead and try to reclaim the page.
@@ -1344,7 +1331,6 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
sc->nr_scanned = 0;
- sc->nr_io_pages = 0;
if (!priority)
disable_swap_token();
nr_reclaimed += shrink_zones(priority, zones, sc);
@@ -1379,8 +1365,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
}
/* Take a nap, wait for some writeback to complete */
- if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
- sc->nr_io_pages > sc->swap_cluster_max)
+ if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
congestion_wait(WRITE, HZ/10);
}
/* top priority shrink_caches still had more to do? don't OOM, then */
@@ -1514,7 +1499,6 @@ loop_again:
if (!priority)
disable_swap_token();
- sc.nr_io_pages = 0;
all_zones_ok = 1;
/*
@@ -1607,8 +1591,7 @@ loop_again:
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && priority < DEF_PRIORITY - 2 &&
- sc.nr_io_pages > sc.swap_cluster_max)
+ if (total_scanned && priority < DEF_PRIORITY - 2)
congestion_wait(WRITE, HZ/10);
/*
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 1aa9d517539..4e8d4e724b9 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -861,7 +861,6 @@ static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
static void p9_mux_flush_cb(struct p9_req *freq, void *a)
{
- p9_conn_req_callback cb;
int tag;
struct p9_conn *m;
struct p9_req *req, *rreq, *rptr;
@@ -872,7 +871,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a)
freq->tcall->params.tflush.oldtag);
spin_lock(&m->lock);
- cb = NULL;
tag = freq->tcall->params.tflush.oldtag;
req = NULL;
list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
diff --git a/net/atm/clip.c b/net/atm/clip.c
index d30167c0b48..2ab1e36098f 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -947,6 +947,8 @@ static const struct file_operations arp_seq_fops = {
};
#endif
+static void atm_clip_exit_noproc(void);
+
static int __init atm_clip_init(void)
{
neigh_table_init_no_netlink(&clip_tbl);
@@ -963,18 +965,22 @@ static int __init atm_clip_init(void)
struct proc_dir_entry *p;
p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
+ if (!p) {
+ printk(KERN_ERR "Unable to initialize "
+ "/proc/net/atm/arp\n");
+ atm_clip_exit_noproc();
+ return -ENOMEM;
+ }
}
#endif
return 0;
}
-static void __exit atm_clip_exit(void)
+static void atm_clip_exit_noproc(void)
{
struct net_device *dev, *next;
- remove_proc_entry("arp", atm_proc_root);
-
unregister_inetaddr_notifier(&clip_inet_notifier);
unregister_netdevice_notifier(&clip_dev_notifier);
@@ -1005,6 +1011,13 @@ static void __exit atm_clip_exit(void)
clip_tbl_hook = NULL;
}
+static void __exit atm_clip_exit(void)
+{
+ remove_proc_entry("arp", atm_proc_root);
+
+ atm_clip_exit_noproc();
+}
+
module_init(atm_clip_init);
module_exit(atm_clip_exit);
MODULE_AUTHOR("Werner Almesberger");
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 0e450d12f03..a2efa7ff41f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1250,6 +1250,10 @@ static int __init lane_module_init(void)
struct proc_dir_entry *p;
p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
+ if (!p) {
+ printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n");
+ return -ENOMEM;
+ }
#endif
register_atm_ioctl(&lane_ioctl_ops);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1ff446d0fa8..f6cdc012eec 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -177,10 +177,13 @@ static inline struct tnode *node_parent_rcu(struct node *node)
return rcu_dereference(ret);
}
+/* Same as rcu_assign_pointer(), but that macro assumes
+ * the value being assigned is a plain pointer.
+ */
static inline void node_set_parent(struct node *node, struct tnode *ptr)
{
- rcu_assign_pointer(node->parent,
- (unsigned long)ptr | NODE_TYPE(node));
+ smp_wmb();
+ node->parent = (unsigned long)ptr | NODE_TYPE(node);
}
static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
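
rcu_assign_pointer() is a write barrier followed by a store; it cannot be
used here because node->parent is an unsigned long carrying tag bits rather
than a plain pointer, so the patch open-codes the same publish pattern. A
C11 userspace analogue using a release store (names hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tnode { int dummy; };

    static _Atomic uintptr_t parent;

    static void node_set_parent(struct tnode *ptr, uintptr_t type)
    {
            /* release ordering = smp_wmb() before the plain store: all
             * initialization of *ptr is visible before the publish
             */
            atomic_store_explicit(&parent, (uintptr_t)ptr | type,
                                  memory_order_release);
    }

    int main(void)
    {
            static struct tnode t;

            node_set_parent(&t, 1);  /* low bit tags the node type */
            printf("tagged parent: %#lx\n",
                   (unsigned long)atomic_load_explicit(&parent,
                                                       memory_order_acquire));
            return 0;
    }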
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a2e92f9709d..3b2e5adca83 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -568,7 +568,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
- net = skb->dev->nd_net;
+ net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net;
/* Start by cleaning up the memory. */
if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
ip_evictor(net);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 071e83a894a..39b629ac240 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -735,7 +735,7 @@ new_segment:
if (!(psize -= copy))
goto out;
- if (skb->len < mss_now || (flags & MSG_OOB))
+ if (skb->len < size_goal || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
@@ -981,7 +981,7 @@ new_segment:
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
- if (skb->len < mss_now || (flags & MSG_OOB))
+ if (skb->len < size_goal || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0d33a7d3212..51557c27a0c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1420,7 +1420,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
u8 *opt;
int rd_len;
int err;
- int hlen;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
dev = skb->dev;
@@ -1491,7 +1490,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
return;
}
- hlen = 0;
skb_reserve(buff, LL_RESERVED_SPACE(dev));
ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 795c761ad99..66148cc4759 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -711,9 +711,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
*/
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
- int i;
-
- for (i = 0; i < 500; i++) {
+ /* don't run for longer than 2 jiffies; 2 is used instead of
+ 1 to simplify things when the jiffies counter is about to
+ be incremented */
+ unsigned long stop_at = jiffies + 2;
+ while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
long diff;
struct rb_node *p = rb_first(&q->wait_pq[level]);
@@ -731,9 +733,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
- if (net_ratelimit())
- printk(KERN_WARNING "htb: too many events !\n");
- return q->now + PSCHED_TICKS_PER_SEC / 10;
+ /* too much load - let's continue on the next jiffy */
+ return q->now + PSCHED_TICKS_PER_SEC / HZ;
}
/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
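
time_before() makes the two-jiffy deadline safe across counter wraparound by
comparing signed differences rather than raw values; a small demonstration of
the idiom:

    #include <stdio.h>

    /* Wrap-safe "a is before b" on a free-running unsigned counter,
     * as the kernel's time_before() is defined.
     */
    #define time_before(a, b)  ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long jiffies = (unsigned long)-1;  /* about to wrap */
            unsigned long stop_at = jiffies + 2;        /* wraps to 1 */

            /* a naive 'jiffies < stop_at' would be false here; the
             * signed-difference form still says we have time left
             */
            printf("%d\n", time_before(jiffies, stop_at));  /* prints 1 */
            return 0;
    }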
diff --git a/net/socket.c b/net/socket.c
index b6d35cd72a5..9d3fbfbc853 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -909,11 +909,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!dlci_ioctl_hook)
request_module("dlci");
- if (dlci_ioctl_hook) {
- mutex_lock(&dlci_ioctl_mutex);
+ mutex_lock(&dlci_ioctl_mutex);
+ if (dlci_ioctl_hook)
err = dlci_ioctl_hook(cmd, argp);
- mutex_unlock(&dlci_ioctl_mutex);
- }
+ mutex_unlock(&dlci_ioctl_mutex);
break;
default:
err = sock->ops->ioctl(sock, cmd, arg);
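
Taking dlci_ioctl_mutex before testing the hook closes the window in which a
concurrent module unload could clear dlci_ioctl_hook between the check and
the call. A userspace sketch of the lock-then-check pattern, with
hypothetical stand-ins:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t dlci_ioctl_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int (*dlci_ioctl_hook)(unsigned int cmd, void *arg);

    /* Check the hook only while holding the mutex: an unregister path
     * that clears the pointer under the same mutex can then never race
     * with a call through a stale value.
     */
    static int call_dlci_hook(unsigned int cmd, void *arg)
    {
            int err = -25;  /* -ENOTTY, spelled out for a standalone build */

            pthread_mutex_lock(&dlci_ioctl_mutex);
            if (dlci_ioctl_hook)
                    err = dlci_ioctl_hook(cmd, arg);
            pthread_mutex_unlock(&dlci_ioctl_mutex);
            return err;
    }

    int main(void)
    {
            printf("%d\n", call_dlci_hook(0, NULL));  /* hook unset: -25 */
            return 0;
    }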
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index ab54a736486..971271602dd 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -237,14 +237,12 @@ static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
-#ifdef RDMA_TRANSPORT_IWARP
if ((RDMA_TRANSPORT_IWARP ==
rdma_node_get_transport(xprt->sc_cm_id->
device->node_type))
&& sge_count > 1)
return 1;
else
-#endif
return min_t(int, sge_count, xprt->sc_max_sge);
}
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index afe7c9b0732..cfae8afcc26 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -74,11 +74,6 @@ struct smk_list_entry *smack_list;
#define SEQ_READ_FINISHED 1
/*
- * Disable concurrent writing open() operations
- */
-static struct semaphore smack_write_sem;
-
-/*
* Values for parsing cipso rules
* SMK_DIGITLEN: Length of a digit field in a rule.
* SMK_CIPSOMIN: Minimum possible cipso rule length.
@@ -168,32 +163,7 @@ static struct seq_operations load_seq_ops = {
*/
static int smk_open_load(struct inode *inode, struct file *file)
{
- if ((file->f_flags & O_ACCMODE) == O_RDONLY)
- return seq_open(file, &load_seq_ops);
-
- if (down_interruptible(&smack_write_sem))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-/**
- * smk_release_load - release() for /smack/load
- * @inode: inode structure representing file
- * @file: "load" file pointer
- *
- * For a reading session, use the seq_file release
- * implementation.
- * Otherwise, we are at the end of a writing session so
- * clean everything up.
- */
-static int smk_release_load(struct inode *inode, struct file *file)
-{
- if ((file->f_flags & O_ACCMODE) == O_RDONLY)
- return seq_release(inode, file);
-
- up(&smack_write_sem);
- return 0;
+ return seq_open(file, &load_seq_ops);
}
/**
@@ -341,7 +311,7 @@ static const struct file_operations smk_load_ops = {
.read = seq_read,
.llseek = seq_lseek,
.write = smk_write_load,
- .release = smk_release_load,
+ .release = seq_release,
};
/**
@@ -1011,7 +981,6 @@ static int __init init_smk_fs(void)
}
}
- sema_init(&smack_write_sem, 1);
smk_cipso_doi();
smk_unlbl_ambient(NULL);