Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.preempt   4
-rw-r--r--  kernel/module.c         10
-rw-r--r--  kernel/relay.c           2
-rw-r--r--  kernel/sys.c             2
-rw-r--r--  kernel/wait.c            2
5 files changed, 10 insertions, 10 deletions
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 0b46a5dff4c..c64ce9c1420 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -23,7 +23,7 @@ config PREEMPT_VOLUNTARY
"explicit preemption points" to the kernel code. These new
preemption points have been selected to reduce the maximum
latency of rescheduling, providing faster application reactions,
- at the cost of slighly lower throughput.
+ at the cost of slightly lower throughput.
This allows reaction to interactive events by allowing a
low priority process to voluntarily preempt itself even if it
@@ -43,7 +43,7 @@ config PREEMPT
even if it is in kernel mode executing a system call and would
otherwise not be about to reach a natural preemption point.
This allows applications to run more 'smoothly' even when the
- system is under load, at the cost of slighly lower throughput
+ system is under load, at the cost of slightly lower throughput
and a slight runtime overhead to kernel code.
Select this if you are building a kernel for a desktop or
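
[Note: the "explicit preemption points" the help text above refers to are, in practice, cond_resched() calls placed in long-running kernel loops (might_sleep() implies one under PREEMPT_VOLUNTARY). A minimal sketch of the pattern follows; struct item and process_item() are hypothetical stand-ins for real per-iteration work, while cond_resched() is the actual kernel primitive.]

#include <linux/sched.h>

struct item;				/* hypothetical work unit */
extern void process_item(struct item *);	/* hypothetical */

static void process_many_items(struct item **items, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		process_item(items[i]);
		/*
		 * Explicit (voluntary) preemption point: reschedule
		 * here if a higher-priority task is waiting, instead
		 * of holding the CPU until the whole loop finishes.
		 */
		cond_resched();
	}
}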
diff --git a/kernel/module.c b/kernel/module.c
index d36e45477fa..9bd93de01f4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -96,9 +96,9 @@ static inline void add_taint_module(struct module *mod, unsigned flag)
mod->taints |= flag;
}
-/* A thread that wants to hold a reference to a module only while it
- * is running can call ths to safely exit.
- * nfsd and lockd use this.
+/*
+ * A thread that wants to hold a reference to a module only while it
+ * is running can call this to safely exit. nfsd and lockd use this.
*/
void __module_put_and_exit(struct module *mod, long code)
{
@@ -1199,7 +1199,7 @@ static int __unlink_module(void *_mod)
return 0;
}
-/* Free a module, remove from lists, etc (must hold module mutex). */
+/* Free a module, remove from lists, etc (must hold module_mutex). */
static void free_module(struct module *mod)
{
/* Delete from various lists */
@@ -1246,7 +1246,7 @@ EXPORT_SYMBOL_GPL(__symbol_get);
/*
* Ensure that an exported symbol [global namespace] does not already exist
- * in the Kernel or in some other modules exported symbol table.
+ * in the kernel or in some other module's exported symbol table.
*/
static int verify_export_symbols(struct module *mod)
{
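
[Note: for context on the __module_put_and_exit() comment fixed above — callers normally use the module_put_and_exit() wrapper, which passes THIS_MODULE. A hedged sketch of the pattern the comment attributes to nfsd and lockd; the daemon loop and the should_exit()/do_work() helpers are hypothetical.]

#include <linux/module.h>

extern int should_exit(void);		/* hypothetical */
extern void do_work(void);		/* hypothetical */

/*
 * The creator takes a reference (e.g. __module_get(THIS_MODULE))
 * before starting this thread, so the module cannot be unloaded
 * while the thread is running.
 */
static int my_daemon(void *unused)
{
	while (!should_exit())
		do_work();

	/*
	 * Drop the module reference and terminate the thread in one
	 * step: module_put_and_exit(code) expands to
	 * __module_put_and_exit(THIS_MODULE, code) and never returns,
	 * so the thread never runs module code without holding a ref.
	 */
	module_put_and_exit(0);
	/* not reached */
}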
diff --git a/kernel/relay.c b/kernel/relay.c
index 61a504900ea..4311101b0ca 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -310,7 +310,7 @@ static struct rchan_callbacks default_channel_callbacks = {
/**
* wakeup_readers - wake up readers waiting on a channel
- * @data: contains the the channel buffer
+ * @data: contains the channel buffer
*
* This is the timer function used to defer reader waking.
*/
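
[Note: the wakeup_readers() kerneldoc above describes a reader wakeup deferred through a kernel timer. A sketch of that idiom using the timer API of this era; the buffer type and field names below are illustrative, not relay's own rchan_buf.]

#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

struct my_buf {
	wait_queue_head_t read_wait;
	struct timer_list timer;
};

/* timer callback: the deferred wakeup itself */
static void my_wakeup_readers(unsigned long data)
{
	struct my_buf *buf = (struct my_buf *)data;

	wake_up_interruptible(&buf->read_wait);
}

static void my_buf_init(struct my_buf *buf)
{
	init_waitqueue_head(&buf->read_wait);
	setup_timer(&buf->timer, my_wakeup_readers, (unsigned long)buf);
}

/*
 * Write path: schedule the wakeup for the next tick rather than
 * waking readers inline, keeping the hot path cheap.
 */
static void my_defer_wakeup(struct my_buf *buf)
{
	mod_timer(&buf->timer, jiffies + 1);
}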
diff --git a/kernel/sys.c b/kernel/sys.c
index d4985df21b6..cdb7e9457ba 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1352,7 +1352,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
}
/*
- * Samma pÃ¥ svenska..
+ * Samma på svenska..
*/
asmlinkage long sys_setfsgid(gid_t gid)
{
diff --git a/kernel/wait.c b/kernel/wait.c
index 59a82f63275..444ddbfaefc 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(remove_wait_queue);
* The spin_unlock() itself is semi-permeable and only protects
* one way (it only protects stuff inside the critical region and
* stops them from bleeding out - it would still allow subsequent
- * loads to move into the the critical region).
+ * loads to move into the critical region).
*/
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
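
[Note: the ordering rules this comment describes are what make the canonical prepare_to_wait() loop race-free — the task state is set and globally visible before the wait condition is re-checked. A sketch of that loop; the queue, flag, and function names are illustrative.]

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_condition;

static void wait_for_my_condition(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/*
		 * Sets the task state *before* we test the condition;
		 * the ordering described above ensures a waker that
		 * sets my_condition and calls wake_up() cannot slip
		 * in between the test and the sleep.
		 */
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_condition)
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);
}

/* waker side: set the condition, then wake */
static void make_my_condition_true(void)
{
	my_condition = 1;
	wake_up(&my_wq);
}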