Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig             57
-rw-r--r--  init/do_mounts.c          3
-rw-r--r--  init/do_mounts_initrd.c   1
-rw-r--r--  init/initramfs.c         12
-rw-r--r--  init/main.c              69
5 files changed, 55 insertions, 87 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 38416a199de..3b36a1d5365 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -214,6 +214,17 @@ config CPUSETS
Say N if unsure.
+config RELAY
+ bool "Kernel->user space relay support (formerly relayfs)"
+ help
+ This option enables support for the relay interface in
+ certain file systems (such as debugfs).
+ It is designed to provide an efficient mechanism for tools and
+ facilities to relay large amounts of data from kernel space to
+ user space.
+
+ If unsure, say N.
+
source "usr/Kconfig"
config UID16
@@ -354,43 +365,6 @@ config SHMEM
option replaces shmem and tmpfs with the much simpler ramfs code,
which may be appropriate on small systems without swap.
-config CC_ALIGN_FUNCTIONS
- int "Function alignment" if EMBEDDED
- default 0
- help
- Align the start of functions to the next power-of-two greater than n,
- skipping up to n bytes. For instance, 32 aligns functions
- to the next 32-byte boundary, but 24 would align to the next
- 32-byte boundary only if this can be done by skipping 23 bytes or less.
- Zero means use compiler's default.
-
-config CC_ALIGN_LABELS
- int "Label alignment" if EMBEDDED
- default 0
- help
- Align all branch targets to a power-of-two boundary, skipping
- up to n bytes like ALIGN_FUNCTIONS. This option can easily
- make code slower, because it must insert dummy operations for
- when the branch target is reached in the usual flow of the code.
- Zero means use compiler's default.
-
-config CC_ALIGN_LOOPS
- int "Loop alignment" if EMBEDDED
- default 0
- help
- Align loops to a power-of-two boundary, skipping up to n bytes.
- Zero means use compiler's default.
-
-config CC_ALIGN_JUMPS
- int "Jump alignment" if EMBEDDED
- default 0
- help
- Align branch targets to a power-of-two boundary, for branch
- targets where the targets can only be reached by jumping,
- skipping up to n bytes like ALIGN_FUNCTIONS. In this case,
- no dummy operations need be executed.
- Zero means use compiler's default.
-
config SLAB
default y
bool "Use full SLAB allocator" if EMBEDDED
@@ -459,15 +433,6 @@ config MODULE_FORCE_UNLOAD
rmmod). This is mainly for kernel developers and desperate users.
If unsure, say N.
-config OBSOLETE_MODPARM
- bool
- default y
- depends on MODULES
- help
- You need this option to use module parameters on modules which
- have not been converted to the new module parameter system yet.
- If unsure, say Y.
-
config MODVERSIONS
bool "Module versioning support"
depends on MODULES
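
Note on the CONFIG_RELAY option added above: the relay interface streams large amounts of data from kernel space to user space through per-CPU buffers exposed as files, typically under debugfs. The following is a minimal, hypothetical sketch of the kernel-side API (relay_open()/relay_write()/relay_close() plus a create_buf_file() callback backed by debugfs). All example_* names are invented for illustration, and the exact relay_open() and callback signatures have varied across kernel versions.

/*
 * Minimal, hypothetical sketch of a CONFIG_RELAY user: stream data to
 * user space through debugfs-backed relay buffers. All example_* names
 * are invented; the 6-argument relay_open() shown here is the later form
 * and may differ on older kernels.
 */
#include <linux/module.h>
#include <linux/relay.h>
#include <linux/debugfs.h>

static struct rchan *example_chan;

/* Create one debugfs file per relay buffer (one buffer per CPU). */
static struct dentry *example_create_buf_file(const char *filename,
					      struct dentry *parent,
					      umode_t mode,
					      struct rchan_buf *buf,
					      int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int example_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks example_relay_callbacks = {
	.create_buf_file = example_create_buf_file,
	.remove_buf_file = example_remove_buf_file,
};

static int __init example_relay_init(void)
{
	/* 8 sub-buffers of 16 KiB each; files appear as example0, example1, ... */
	example_chan = relay_open("example", NULL, 16 * 1024, 8,
				  &example_relay_callbacks, NULL);
	if (!example_chan)
		return -ENOMEM;

	relay_write(example_chan, "hello from the kernel\n", 22);
	return 0;
}

static void __exit example_relay_exit(void)
{
	relay_close(example_chan);
}

module_init(example_relay_init);
module_exit(example_relay_exit);
MODULE_LICENSE("GPL");
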
diff --git a/init/do_mounts.c b/init/do_mounts.c
index b27c1106440..adb7cad3e6e 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -19,11 +19,10 @@ extern int get_filesystem_list(char * buf);
int __initdata rd_doload; /* 1 = load RAM disk, 0 = don't load */
-int root_mountflags = MS_RDONLY | MS_VERBOSE;
+int root_mountflags = MS_RDONLY | MS_SILENT;
char * __initdata root_device_name;
static char __initdata saved_root_name[64];
-/* this is initialized in init/main.c */
dev_t ROOT_DEV;
static int __init load_ramdisk(char *str)
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index a05cabd0fd1..405f9031af8 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -56,6 +56,7 @@ static void __init handle_initrd(void)
sys_chroot(".");
mount_devfs_fs ();
+ current->flags |= PF_NOFREEZE;
pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
if (pid > 0) {
while (pid != sys_wait4(-1, NULL, 0, NULL))
diff --git a/init/initramfs.c b/init/initramfs.c
index 637344b0598..679d870d991 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -506,6 +506,7 @@ void __init populate_rootfs(void)
panic(err);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
+#ifdef CONFIG_BLK_DEV_RAM
int fd;
printk(KERN_INFO "checking if image is initramfs...");
err = unpack_to_rootfs((char *)initrd_start,
@@ -518,13 +519,22 @@ void __init populate_rootfs(void)
return;
}
printk("it isn't (%s); looks like an initrd\n", err);
- fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 700);
+ fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700);
if (fd >= 0) {
sys_write(fd, (char *)initrd_start,
initrd_end - initrd_start);
sys_close(fd);
free_initrd();
}
+#else
+ printk(KERN_INFO "Unpacking initramfs...");
+ err = unpack_to_rootfs((char *)initrd_start,
+ initrd_end - initrd_start, 0);
+ if (err)
+ panic(err);
+ printk(" done\n");
+ free_initrd();
+#endif
}
#endif
}
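
The initramfs.c hunk above also fixes the mode passed to sys_open() from decimal 700 to octal 0700 when saving /initrd.image. Mode arguments are bit masks conventionally written in octal: decimal 700 equals octal 01274, which sets the sticky bit and a stray mix of group/other bits instead of the intended owner rwx. A small user-space illustration (plain C, not kernel code):

/* Decimal 700 and octal 0700 are different permission bit patterns. */
#include <stdio.h>

int main(void)
{
	printf("decimal 700 in octal: %o\n", 700);   /* prints 1274 */
	printf("octal 0700 in octal:  %o\n", 0700);  /* prints 700  */
	return 0;
}
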
diff --git a/init/main.c b/init/main.c
index 4c194c47395..4a2f0898dda 100644
--- a/init/main.c
+++ b/init/main.c
@@ -306,8 +306,6 @@ static int __init rdinit_setup(char *str)
}
__setup("rdinit=", rdinit_setup);
-extern void setup_arch(char **);
-
#ifndef CONFIG_SMP
#ifdef CONFIG_X86_LOCAL_APIC
@@ -325,7 +323,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#else
#ifdef __GENERIC_PER_CPU
-unsigned long __per_cpu_offset[NR_CPUS];
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
@@ -333,6 +331,7 @@ static void __init setup_per_cpu_areas(void)
{
unsigned long size, i;
char *ptr;
+ unsigned long nr_possible_cpus = num_possible_cpus();
/* Copy section for each CPU (we discard the original) */
size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
@@ -340,12 +339,12 @@ static void __init setup_per_cpu_areas(void)
if (size < PERCPU_ENOUGH_ROOM)
size = PERCPU_ENOUGH_ROOM;
#endif
- ptr = alloc_bootmem(size * NR_CPUS);
-
- for (i = 0; i < NR_CPUS; i++, ptr += size) {
+ ptr = alloc_bootmem(size * nr_possible_cpus);
+ for_each_possible_cpu(i) {
__per_cpu_offset[i] = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ ptr += size;
}
}
#endif /* !__GENERIC_PER_CPU */
@@ -438,6 +437,15 @@ void __init parse_early_param(void)
* Activate the first processor.
*/
+static void __init boot_cpu_init(void)
+{
+ int cpu = smp_processor_id();
+ /* Mark the boot cpu "present", "online" etc for SMP and UP case */
+ cpu_set(cpu, cpu_online_map);
+ cpu_set(cpu, cpu_present_map);
+ cpu_set(cpu, cpu_possible_map);
+}
+
asmlinkage void __init start_kernel(void)
{
char * command_line;
@@ -447,17 +455,13 @@ asmlinkage void __init start_kernel(void)
* enable them
*/
lock_kernel();
+ boot_cpu_init();
page_address_init();
printk(KERN_NOTICE);
printk(linux_banner);
setup_arch(&command_line);
setup_per_cpu_areas();
-
- /*
- * Mark the boot cpu "online" so that it can call console drivers in
- * printk() and can access its per-cpu storage.
- */
- smp_prepare_boot_cpu();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
/*
* Set up the scheduler prior starting any interrupts (such as the
@@ -565,17 +569,23 @@ static void __init do_initcalls(void)
int count = preempt_count();
for (call = __initcall_start; call < __initcall_end; call++) {
- char *msg;
+ char *msg = NULL;
+ char msgbuf[40];
+ int result;
if (initcall_debug) {
- printk(KERN_DEBUG "Calling initcall 0x%p", *call);
- print_fn_descriptor_symbol(": %s()", (unsigned long) *call);
+ printk("Calling initcall 0x%p", *call);
+ print_fn_descriptor_symbol(": %s()",
+ (unsigned long) *call);
printk("\n");
}
- (*call)();
+ result = (*call)();
- msg = NULL;
+ if (result && (result != -ENODEV || initcall_debug)) {
+ sprintf(msgbuf, "error code %d", result);
+ msg = msgbuf;
+ }
if (preempt_count() != count) {
msg = "preemption imbalance";
preempt_count() = count;
@@ -585,8 +595,10 @@ static void __init do_initcalls(void)
local_irq_enable();
}
if (msg) {
- printk(KERN_WARNING "error in initcall at 0x%p: "
- "returned with %s\n", *call, msg);
+ printk(KERN_WARNING "initcall at 0x%p", *call);
+ print_fn_descriptor_symbol(": %s()",
+ (unsigned long) *call);
+ printk(": returned with %s\n", msg);
}
}
@@ -633,24 +645,6 @@ static void run_init_process(char *init_filename)
execve(init_filename, argv_init, envp_init);
}
-static inline void fixup_cpu_present_map(void)
-{
-#ifdef CONFIG_SMP
- int i;
-
- /*
- * If arch is not hotplug ready and did not populate
- * cpu_present_map, just make cpu_present_map same as cpu_possible_map
- * for other cpu bringup code to function as normal. e.g smp_init() etc.
- */
- if (cpus_empty(cpu_present_map)) {
- for_each_cpu(i) {
- cpu_set(i, cpu_present_map);
- }
- }
-#endif
-}
-
static int init(void * unused)
{
lock_kernel();
@@ -672,7 +666,6 @@ static int init(void * unused)
do_pre_smp_initcalls();
- fixup_cpu_present_map();
smp_init();
sched_init_smp();
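
The do_initcalls() change above records each initcall's return value and reports failures together with the function's symbol: any non-zero result other than -ENODEV is formatted into msgbuf as "error code %d", while -ENODEV is treated as a routine "no such device" answer and reported only when booting with initcall_debug. A hypothetical initcall sketch showing what now gets logged (all example_* names are invented):

/*
 * Hypothetical initcall illustrating the new error reporting in
 * do_initcalls(). All example_* names are invented for this sketch.
 */
#include <linux/init.h>
#include <linux/errno.h>

static int example_hw_present;		/* pretend result of a hardware probe */

static int example_reserve_resources(void)
{
	return 0;			/* pretend the resource is already claimed */
}

static int __init example_driver_init(void)
{
	if (!example_hw_present)
		return -ENODEV;		/* silent unless initcall_debug is set */

	if (!example_reserve_resources())
		return -EBUSY;		/* logged as "error code -16" together
					 * with the initcall's symbol name */
	return 0;
}
__initcall(example_driver_init);
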