Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig         | 46
-rw-r--r--  init/do_mounts.c     |  1
-rw-r--r--  init/do_mounts.h     |  1
-rw-r--r--  init/do_mounts_md.c  |  5
-rw-r--r--  init/initramfs.c     | 12
-rw-r--r--  init/main.c          | 11
6 files changed, 56 insertions, 20 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 26ac8772464..d4e9671347e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -208,6 +208,12 @@ config POSIX_MQUEUE
If unsure, say Y.
+config POSIX_MQUEUE_SYSCTL
+ bool
+ depends on POSIX_MQUEUE
+ depends on SYSCTL
+ default y
+
config BSD_PROCESS_ACCT
bool "BSD Process Accounting"
help
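The POSIX_MQUEUE_SYSCTL option added in the hunk above gates the sysctl knobs for POSIX message queues. A minimal userspace sketch, assuming the conventional /proc/sys/fs/mqueue/msg_max file is present (it is only exposed when the option is enabled):

/* Sketch: read one of the mqueue sysctl knobs exposed when
 * CONFIG_POSIX_MQUEUE_SYSCTL is enabled.  The path is the conventional
 * one; adjust if your kernel differs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/fs/mqueue/msg_max", "r");
	long msg_max;

	if (!f) {
		perror("fopen");	/* option disabled or procfs not mounted */
		return 1;
	}
	if (fscanf(f, "%ld", &msg_max) == 1)
		printf("max messages per queue: %ld\n", msg_max);
	fclose(f);
	return 0;
}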
@@ -531,7 +537,7 @@ config CGROUP_DEVICE
config CPUSETS
bool "Cpuset support"
- depends on SMP && CGROUPS
+ depends on CGROUPS
help
This option will let you create and manage CPUSETs which
allow dynamically partitioning a system into sets of CPUs and
@@ -565,7 +571,7 @@ config CGROUP_MEM_RES_CTLR
select MM_OWNER
help
Provides a memory resource controller that manages both anonymous
- memory and page cache. (See Documentation/controllers/memory.txt)
+ memory and page cache. (See Documentation/cgroups/memory.txt)
Note that setting this option increases fixed memory overhead
associated with each page of memory in the system. By this,
@@ -597,6 +603,8 @@ config CGROUP_MEM_RES_CTLR_SWAP
is disabled by boot option, this will be automatically disabled and
there will be no overhead from this. Even when you set this config=y,
if boot option "noswapaccount" is set, swap will not be accounted.
+ Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
+ size is 4096bytes, 512k per 1Gbytes of swap.
endif # CGROUPS
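The overhead figure quoted in the new help text is plain arithmetic; a small sketch that reproduces the 512k-per-1G number under the stated assumptions (2 bytes per entry, 4096-byte swap pages):

/* Reproduce the estimate from the help text above:
 * one 2-byte swap_cgroup record per 4096-byte swap page. */
#include <stdio.h>

int main(void)
{
	unsigned long swap_bytes = 1UL << 30;		/* 1 GiB of swap */
	unsigned long page_size = 4096;			/* assumed swap page size */
	unsigned long entries = swap_bytes / page_size;	/* 262144 entries */
	unsigned long overhead = entries * 2;		/* 2 bytes each */

	printf("%lu KiB of swap_cgroup data per GiB of swap\n",
	       overhead / 1024);			/* prints 512 */
	return 0;
}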
@@ -668,10 +676,10 @@ config UTS_NS
config IPC_NS
bool "IPC namespace"
- depends on NAMESPACES && SYSVIPC
+ depends on NAMESPACES && (SYSVIPC || POSIX_MQUEUE)
help
In this namespace tasks work with IPC ids which correspond to
- different IPC objects in different namespaces
+ different IPC objects in different namespaces.
config USER_NS
bool "User namespace (EXPERIMENTAL)"
@@ -687,7 +695,7 @@ config PID_NS
depends on NAMESPACES && EXPERIMENTAL
help
Support process id namespaces. This allows having multiple
- process with the same pid as long as they are in different
+ processes with the same pid as long as they are in different
pid namespaces. This is a building block of containers.
Unless you want to work with an experimental feature
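For the PID namespace help text corrected above, a small sketch of the property being described: a child created with CLONE_NEWPID sees itself as pid 1 inside its namespace while the parent sees an ordinary pid (root required, CONFIG_PID_NS enabled):

/* Sketch: the child runs in a fresh PID namespace and reports pid 1,
 * while the parent observes a normal pid for it. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static char child_stack[64 * 1024];

static int child(void *arg)
{
	printf("child: getpid() = %ld\n", (long)getpid());	/* prints 1 */
	return 0;
}

int main(void)
{
	pid_t pid = clone(child, child_stack + sizeof(child_stack),
			  CLONE_NEWPID | SIGCHLD, NULL);

	if (pid == -1) {
		perror("clone");
		return 1;
	}
	printf("parent: child pid = %ld\n", (long)pid);
	waitpid(pid, NULL, 0);
	return 0;
}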
@@ -800,6 +808,14 @@ config KALLSYMS_EXTRA_PASS
you wait for kallsyms to be fixed.
+config STRIP_ASM_SYMS
+ bool "Strip assembler-generated symbols during link"
+ default n
+ help
+ Strip internal assembler-generated symbols during a link (symbols
+ that look like '.Lxxx') so they don't pollute the output of
+ get_wchan() and suchlike.
+
config HOTPLUG
bool "Support for hot-pluggable devices" if EMBEDDED
default y
@@ -952,7 +968,7 @@ config COMPAT_BRK
Randomizing heap placement makes heap exploits harder, but it
also breaks ancient binaries (including anything libc5 based).
This option changes the bootup default to heap randomization
- disabled, and can be overriden runtime by setting
+ disabled, and can be overridden at runtime by setting
/proc/sys/kernel/randomize_va_space to 2.
On non-ancient distros (post-2000 ones) N is usually a safe choice.
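The corrected sentence refers to the runtime override; a minimal sketch (root only) that sets /proc/sys/kernel/randomize_va_space to 2, i.e. full randomization including the heap:

/* Sketch: override the COMPAT_BRK bootup default at runtime by
 * writing 2 to randomize_va_space. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("2\n", f);
	fclose(f);
	return 0;
}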
@@ -1005,13 +1021,27 @@ config TRACEPOINTS
config MARKERS
bool "Activate markers"
- depends on TRACEPOINTS
+ select TRACEPOINTS
help
Place an empty function call at each marker site. Can be
dynamically changed for a probe function.
source "arch/Kconfig"
+config SLOW_WORK
+ default n
+ bool
+ help
+ The slow work thread pool provides a number of dynamically allocated
+ threads that can be used by the kernel to perform operations that
+ take a relatively long time.
+
+ An example of this would be CacheFiles doing a path lookup followed
+ by a series of mkdirs and a create call, all of which have to touch
+ disk.
+
+ See Documentation/slow-work.txt.
+
endmenu # General setup
config HAVE_GENERIC_DMA_COHERENT
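The SLOW_WORK pool added in the hunk above is an in-kernel facility. The sketch below follows the interface described in Documentation/slow-work.txt (a slow_work_ops table, slow_work_init() and slow_work_enqueue()); the exact signatures are an assumption of this sketch and should be checked against include/linux/slow-work.h:

/* Hedged sketch of a SLOW_WORK user, modelled on Documentation/slow-work.txt.
 * Function and field names are assumptions; verify against the header. */
#include <linux/module.h>
#include <linux/slow-work.h>

static struct slow_work my_work;

static int my_get_ref(struct slow_work *work)
{
	return 0;			/* pin the object that owns 'work' */
}

static void my_put_ref(struct slow_work *work)
{
	/* drop the reference taken in my_get_ref() */
}

static void my_execute(struct slow_work *work)
{
	/* the slow, sleepable operation (e.g. a disk-touching lookup) */
}

static const struct slow_work_ops my_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

static int __init my_init(void)
{
	int ret = slow_work_register_user();	/* join the shared pool */

	if (ret < 0)
		return ret;
	slow_work_init(&my_work, &my_ops);
	return slow_work_enqueue(&my_work);	/* queue for execution */
}

static void __exit my_exit(void)
{
	slow_work_unregister_user();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");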
@@ -1110,7 +1140,7 @@ config INIT_ALL_POSSIBLE
cpu_possible_map, some of them chose to initialize cpu_possible_map
with all 1s, and others with all 0s. When they were centralised,
it was better to provide this option than to break all the archs
- and have several arch maintainers persuing me down dark alleys.
+ and have several arch maintainers pursuing me down dark alleys.
config STOP_MACHINE
bool
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 8d4ff5afc1d..dd7ee5f203f 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -14,6 +14,7 @@
#include <linux/fs.h>
#include <linux/initrd.h>
#include <linux/async.h>
+#include <linux/fs_struct.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
diff --git a/init/do_mounts.h b/init/do_mounts.h
index 9aa968d5432..f5b978a9bb9 100644
--- a/init/do_mounts.h
+++ b/init/do_mounts.h
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 9bdddbcb3d6..69aebbf8fd2 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -1,5 +1,6 @@
#include <linux/delay.h>
-#include <linux/raid/md.h>
+#include <linux/raid/md_u.h>
+#include <linux/raid/md_p.h>
#include "do_mounts.h"
@@ -112,8 +113,6 @@ static int __init md_setup(char *str)
return 1;
}
-#define MdpMinorShift 6
-
static void __init md_setup_drive(void)
{
int minor, i, ent, partitioned;
diff --git a/init/initramfs.c b/init/initramfs.c
index 619c1baf770..9ee7b781041 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -310,7 +310,8 @@ static int __init do_name(void)
if (wfd >= 0) {
sys_fchown(wfd, uid, gid);
sys_fchmod(wfd, mode);
- sys_ftruncate(wfd, body_len);
+ if (body_len)
+ sys_ftruncate(wfd, body_len);
vcollected = kstrdup(collected, GFP_KERNEL);
state = CopyFile;
}
@@ -515,6 +516,7 @@ skip:
initrd_end = 0;
}
+#ifdef CONFIG_BLK_DEV_RAM
#define BUF_SIZE 1024
static void __init clean_rootfs(void)
{
@@ -561,6 +563,7 @@ static void __init clean_rootfs(void)
sys_close(fd);
kfree(buf);
}
+#endif
static int __init populate_rootfs(void)
{
@@ -571,11 +574,11 @@ static int __init populate_rootfs(void)
if (initrd_start) {
#ifdef CONFIG_BLK_DEV_RAM
int fd;
- printk(KERN_INFO "checking if image is initramfs...");
+ printk(KERN_INFO "checking if image is initramfs...\n");
err = unpack_to_rootfs((char *)initrd_start,
initrd_end - initrd_start);
if (!err) {
- printk(" it is\n");
+ printk(KERN_INFO "rootfs image is initramfs; unpacking...\n");
free_initrd();
return 0;
} else {
@@ -583,7 +586,8 @@ static int __init populate_rootfs(void)
unpack_to_rootfs(__initramfs_start,
__initramfs_end - __initramfs_start);
}
- printk("it isn't (%s); looks like an initrd\n", err);
+ printk(KERN_INFO "rootfs image is not initramfs (%s)"
+ "; looks like an initrd\n", err);
fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
if (fd >= 0) {
sys_write(fd, (char *)initrd_start,
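The reworded messages above come from the initramfs-versus-initrd decision in populate_rootfs(). A hedged userspace illustration of the same idea, checking only the gzip and newc-cpio magic numbers (the kernel's unpack_to_rootfs() does considerably more than this):

/* Illustration only: classify an image the way the messages above
 * describe -- a (possibly gzipped) newc cpio archive is treated as an
 * initramfs, anything else as an old-style initrd. */
#include <stdio.h>
#include <string.h>

static const char *classify(const unsigned char *buf, size_t len)
{
	if (len >= 2 && buf[0] == 0x1f && buf[1] == 0x8b)
		return "gzip-compressed; likely a packed initramfs image";
	if (len >= 6 && (memcmp(buf, "070701", 6) == 0 ||
			 memcmp(buf, "070702", 6) == 0))
		return "newc cpio archive; an uncompressed initramfs";
	return "not initramfs; looks like an initrd";
}

int main(int argc, char **argv)
{
	unsigned char buf[6];
	FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

	if (!f) {
		fprintf(stderr, "usage: %s <image>\n", argv[0]);
		return 1;
	}
	printf("%s\n", classify(buf, fread(buf, 1, sizeof(buf), f)));
	fclose(f);
	return 0;
}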
diff --git a/init/main.c b/init/main.c
index 6bf83afd654..3585f073d63 100644
--- a/init/main.c
+++ b/init/main.c
@@ -71,6 +71,7 @@
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
+#include <trace/kmemtrace.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
@@ -407,8 +408,7 @@ static void __init smp_init(void)
* Set up the current CPU as possible to migrate to.
* The other ones will be done by cpu_up/cpu_down()
*/
- cpu = smp_processor_id();
- cpu_set(cpu, cpu_active_map);
+ set_cpu_active(smp_processor_id(), true);
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
@@ -649,6 +649,7 @@ asmlinkage void __init start_kernel(void)
enable_debug_pagealloc();
cpu_hotplug_init();
kmem_cache_init();
+ kmemtrace_init();
debug_objects_mem_init();
idr_init_cache();
setup_per_cpu_pageset();
@@ -770,6 +771,7 @@ static void __init do_basic_setup(void)
{
rcu_init_sched(); /* needed by module_init stage. */
init_workqueues();
+ cpuset_init_smp();
usermodehelper_init();
driver_init();
init_irq_proc();
@@ -794,6 +796,7 @@ static void run_init_process(char *init_filename)
* makes it inline to init() and it becomes part of init.text section
*/
static noinline int init_post(void)
+ __releases(kernel_lock)
{
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
@@ -842,7 +845,7 @@ static int __init kernel_init(void * unused)
/*
* init can run on any cpu.
*/
- set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+ set_cpus_allowed_ptr(current, cpu_all_mask);
/*
* Tell the world that we're going to be the grim
* reaper of innocent orphaned children.
@@ -863,8 +866,6 @@ static int __init kernel_init(void * unused)
smp_init();
sched_init_smp();
- cpuset_init_smp();
-
do_basic_setup();
/*