Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile         |   4
-rw-r--r--  kernel/capability.c     | 113
-rw-r--r--  kernel/exit.c           |   3
-rw-r--r--  kernel/fork.c           |  20
-rw-r--r--  kernel/hrtimer.c        |   9
-rw-r--r--  kernel/latency.c        | 280
-rw-r--r--  kernel/pm_qos_params.c  | 425
-rw-r--r--  kernel/posix-timers.c   |   9
-rw-r--r--  kernel/power/disk.c     |   4
-rw-r--r--  kernel/power/snapshot.c |   4
-rw-r--r--  kernel/signal.c         |  75
-rw-r--r--  kernel/sys.c            |  15
-rw-r--r--  kernel/sys_ni.c         |   7
-rw-r--r--  kernel/sysctl.c         |  53
-rw-r--r--  kernel/sysctl_check.c   |   7
15 files changed, 606 insertions, 422 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index db9af707ff5..135a1b94344 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,8 +8,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
- utsname.o notifier.o ksysfs.o
+ hrtimer.o rwsem.o nsproxy.o srcu.o \
+ utsname.o notifier.o ksysfs.o pm_qos_params.o
obj-$(CONFIG_SYSCTL) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/kernel/capability.c b/kernel/capability.c
index efbd9cdce13..39e8193b41e 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -22,6 +22,37 @@
static DEFINE_SPINLOCK(task_capability_lock);
/*
+ * Leveraged for setting/resetting capabilities
+ */
+
+const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET;
+const kernel_cap_t __cap_full_set = CAP_FULL_SET;
+const kernel_cap_t __cap_init_eff_set = CAP_INIT_EFF_SET;
+
+EXPORT_SYMBOL(__cap_empty_set);
+EXPORT_SYMBOL(__cap_full_set);
+EXPORT_SYMBOL(__cap_init_eff_set);
+
+/*
+ * More recent versions of libcap are available from:
+ *
+ * http://www.kernel.org/pub/linux/libs/security/linux-privs/
+ */
+
+static void warn_legacy_capability_use(void)
+{
+ static int warned;
+ if (!warned) {
+ char name[sizeof(current->comm)];
+
+ printk(KERN_INFO "warning: `%s' uses 32-bit capabilities"
+ " (legacy support in use)\n",
+ get_task_comm(name, current));
+ warned = 1;
+ }
+}
+
+/*
* For sys_getproccap() and sys_setproccap(), any of the three
* capability set pointers may be NULL -- indicating that that set is
* uninteresting and/or not to be changed.
@@ -42,12 +73,21 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
pid_t pid;
__u32 version;
struct task_struct *target;
- struct __user_cap_data_struct data;
+ unsigned tocopy;
+ kernel_cap_t pE, pI, pP;
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -71,14 +111,47 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
} else
target = current;
- ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
+ ret = security_capget(target, &pE, &pI, &pP);
out:
read_unlock(&tasklist_lock);
spin_unlock(&task_capability_lock);
- if (!ret && copy_to_user(dataptr, &data, sizeof data))
- return -EFAULT;
+ if (!ret) {
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i;
+
+ for (i = 0; i < tocopy; i++) {
+ kdata[i].effective = pE.cap[i];
+ kdata[i].permitted = pP.cap[i];
+ kdata[i].inheritable = pI.cap[i];
+ }
+
+ /*
+ * Note: in the case tocopy < _LINUX_CAPABILITY_U32S,
+ * we silently drop the upper capabilities here. This
+ * has the effect of making older libcap
+ * implementations implicitly drop upper capability
+ * bits when they perform a capget/modify/capset
+ * sequence.
+ *
+ * This behavior is considered fail-safe
+ * behavior. Upgrading the application to a newer
+ * version of libcap will enable access to the newer
+ * capabilities.
+ *
+ * An alternative would be to return an error here
+ * (-ERANGE), but that causes legacy applications to
+ * unexpectedly fail; the capget/modify/capset aborts
+ * before modification is attempted and the application
+ * fails.
+ */
+
+ if (copy_to_user(dataptr, kdata, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
+ return -EFAULT;
+ }
+ }
return ret;
}
@@ -167,6 +240,8 @@ static inline int cap_set_all(kernel_cap_t *effective,
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i, tocopy;
kernel_cap_t inheritable, permitted, effective;
__u32 version;
struct task_struct *target;
@@ -176,7 +251,15 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -188,10 +271,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP))
return -EPERM;
- if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
- copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
- copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
+ if (copy_from_user(&kdata, data, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
return -EFAULT;
+ }
+
+ for (i = 0; i < tocopy; i++) {
+ effective.cap[i] = kdata[i].effective;
+ permitted.cap[i] = kdata[i].permitted;
+ inheritable.cap[i] = kdata[i].inheritable;
+ }
+ while (i < _LINUX_CAPABILITY_U32S) {
+ effective.cap[i] = 0;
+ permitted.cap[i] = 0;
+ inheritable.cap[i] = 0;
+ i++;
+ }
spin_lock(&task_capability_lock);
read_lock(&tasklist_lock);
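For orientation (not part of the patch): a minimal user-space sketch of exercising the new 64-bit capability ABI. It assumes a linux/capability.h that exposes _LINUX_CAPABILITY_VERSION_2 and _LINUX_CAPABILITY_U32S_2, and uses a raw syscall(2) rather than libcap.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
	/* Version 2 header: the kernel copies two u32 blocks per set. */
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_2,
		.pid = 0,				/* current task */
	};
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_2];

	if (syscall(SYS_capget, &hdr, data) != 0) {
		perror("capget");
		return 1;
	}
	/* data[1] carries the capabilities above bit 31. */
	printf("pE = %#x:%#x  pP = %#x:%#x\n",
	       data[1].effective, data[0].effective,
	       data[1].permitted, data[0].permitted);
	return 0;
}

A legacy caller that still passes _LINUX_CAPABILITY_VERSION_1 hits warn_legacy_capability_use() once, and only the low 32 bits are copied, as the comment in sys_capget() above explains.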
diff --git a/kernel/exit.c b/kernel/exit.c
index 9e459fefda7..9d3d0f0b27d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1083,11 +1083,12 @@ do_group_exit(int exit_code)
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
- if (sig->flags & SIGNAL_GROUP_EXIT)
+ if (signal_group_exit(sig))
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
+ sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
diff --git a/kernel/fork.c b/kernel/fork.c
index 05e0b6f4365..2b55b74cd99 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -325,7 +325,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)
static inline void mm_free_pgd(struct mm_struct * mm)
{
- pgd_free(mm->pgd);
+ pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
@@ -1118,6 +1118,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_SECURITY
p->security = NULL;
#endif
+ p->cap_bset = current->cap_bset;
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
@@ -1450,6 +1451,23 @@ long do_fork(unsigned long clone_flags,
int trace = 0;
long nr;
+ /*
+ * We hope to recycle these flags after 2.6.26
+ */
+ if (unlikely(clone_flags & CLONE_STOPPED)) {
+ static int __read_mostly count = 100;
+
+ if (count > 0 && printk_ratelimit()) {
+ char comm[TASK_COMM_LEN];
+
+ count--;
+ printk(KERN_INFO "fork(): process `%s' used deprecated "
+ "clone flags 0x%lx\n",
+ get_task_comm(comm, current),
+ clone_flags & CLONE_STOPPED);
+ }
+ }
+
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (trace)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1069998fe25..668f3967eb3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -306,7 +306,7 @@ EXPORT_SYMBOL_GPL(ktime_sub_ns);
/*
* Divide a ktime value by a nanosecond value
*/
-unsigned long ktime_divns(const ktime_t kt, s64 div)
+u64 ktime_divns(const ktime_t kt, s64 div)
{
u64 dclc, inc, dns;
int sft = 0;
@@ -321,7 +321,7 @@ unsigned long ktime_divns(const ktime_t kt, s64 div)
dclc >>= sft;
do_div(dclc, (unsigned long) div);
- return (unsigned long) dclc;
+ return dclc;
}
#endif /* BITS_PER_LONG >= 64 */
@@ -656,10 +656,9 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
* Forward the timer expiry so it will expire in the future.
* Returns the number of overruns.
*/
-unsigned long
-hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
- unsigned long orun = 1;
+ u64 orun = 1;
ktime_t delta;
delta = ktime_sub(now, timer->expires);
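Why widening the return type from unsigned long to u64 matters on 32-bit machines: an accumulated overrun count can exceed 2^32. A purely illustrative calculation (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* ~500 days of missed expirations of a 10 usec periodic timer. */
	uint64_t elapsed_ns  = 500ULL * 24 * 3600 * 1000000000ULL;
	uint64_t interval_ns = 10000;
	uint64_t orun = elapsed_ns / interval_ns;	/* ~4.3e12 overruns */

	printf("true overruns: %llu, truncated to 32 bits: %lu\n",
	       (unsigned long long)orun, (unsigned long)(uint32_t)orun);
	return 0;
}

POSIX timers still report overruns as an int to user space, which is why the kernel/posix-timers.c hunks below cast the u64 result back to unsigned int.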
diff --git a/kernel/latency.c b/kernel/latency.c
deleted file mode 100644
index e63fcacb61a..00000000000
--- a/kernel/latency.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * latency.c: Explicit system-wide latency-expectation infrastructure
- *
- * The purpose of this infrastructure is to allow device drivers to set
- * latency constraint they have and to collect and summarize these
- * expectations globally. The cummulated result can then be used by
- * power management and similar users to make decisions that have
- * tradoffs with a latency component.
- *
- * An example user of this are the x86 C-states; each higher C state saves
- * more power, but has a higher exit latency. For the idle loop power
- * code to make a good decision which C-state to use, information about
- * acceptable latencies is required.
- *
- * An example announcer of latency is an audio driver that knowns it
- * will get an interrupt when the hardware has 200 usec of samples
- * left in the DMA buffer; in that case the driver can set a latency
- * constraint of, say, 150 usec.
- *
- * Multiple drivers can each announce their maximum accepted latency,
- * to keep these appart, a string based identifier is used.
- *
- *
- * (C) Copyright 2006 Intel Corporation
- * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/latency.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/jiffies.h>
-#include <asm/atomic.h>
-
-struct latency_info {
- struct list_head list;
- int usecs;
- char *identifier;
-};
-
-/*
- * locking rule: all modifications to current_max_latency and
- * latency_list need to be done while holding the latency_lock.
- * latency_lock needs to be taken _irqsave.
- */
-static atomic_t current_max_latency;
-static DEFINE_SPINLOCK(latency_lock);
-
-static LIST_HEAD(latency_list);
-static BLOCKING_NOTIFIER_HEAD(latency_notifier);
-
-/*
- * This function returns the maximum latency allowed, which
- * happens to be the minimum of all maximum latencies on the
- * list.
- */
-static int __find_max_latency(void)
-{
- int min = INFINITE_LATENCY;
- struct latency_info *info;
-
- list_for_each_entry(info, &latency_list, list) {
- if (info->usecs < min)
- min = info->usecs;
- }
- return min;
-}
-
-/**
- * set_acceptable_latency - sets the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function sleeps and can only be called from process
- * context.
- * Calling this function with an existing identifier is valid
- * and will cause the existing latency setting to be changed.
- */
-void set_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *info, *iter;
- unsigned long flags;
- int found_old = 0;
-
- info = kzalloc(sizeof(struct latency_info), GFP_KERNEL);
- if (!info)
- return;
- info->usecs = usecs;
- info->identifier = kstrdup(identifier, GFP_KERNEL);
- if (!info->identifier)
- goto free_info;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier)==0) {
- found_old = 1;
- iter->usecs = usecs;
- break;
- }
- }
- if (!found_old)
- list_add(&info->list, &latency_list);
-
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
-
- spin_unlock_irqrestore(&latency_lock, flags);
-
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-
- /*
- * if we inserted the new one, we're done; otherwise there was
- * an existing one so we need to free the redundant data
- */
- if (!found_old)
- return;
-
- kfree(info->identifier);
-free_info:
- kfree(info);
-}
-EXPORT_SYMBOL_GPL(set_acceptable_latency);
-
-/**
- * modify_acceptable_latency - changes the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- *
- * Due to the atomic nature of this function, the modified latency
- * value will only be used for future decisions; past decisions
- * can still lead to longer latencies in the near future.
- */
-void modify_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *iter;
- unsigned long flags;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- iter->usecs = usecs;
- break;
- }
- }
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(modify_acceptable_latency);
-
-/**
- * remove_acceptable_latency - removes the maximum latency acceptable
- * @identifier: string that identifies this driver
- *
- * This function removes a previously set maximum latency setting
- * for the driver and frees up any resources associated with the
- * bookkeeping needed for this.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- */
-void remove_acceptable_latency(char *identifier)
-{
- unsigned long flags;
- int newmax = 0;
- struct latency_info *iter, *temp;
-
- spin_lock_irqsave(&latency_lock, flags);
-
- list_for_each_entry_safe(iter, temp, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- list_del(&iter->list);
- newmax = iter->usecs;
- kfree(iter->identifier);
- kfree(iter);
- break;
- }
- }
-
- /* If we just deleted the system wide value, we need to
- * recalculate with a full search
- */
- if (newmax == atomic_read(&current_max_latency)) {
- newmax = __find_max_latency();
- atomic_set(&current_max_latency, newmax);
- }
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(remove_acceptable_latency);
-
-/**
- * system_latency_constraint - queries the system wide latency maximum
- *
- * This function returns the system wide maximum latency in
- * microseconds.
- *
- * This function does not sleep and can be called in any context.
- */
-int system_latency_constraint(void)
-{
- return atomic_read(&current_max_latency);
-}
-EXPORT_SYMBOL_GPL(system_latency_constraint);
-
-/**
- * synchronize_acceptable_latency - recalculates all latency decisions
- *
- * This function will cause a callback to various kernel pieces that
- * will make those pieces rethink their latency decisions. This implies
- * that if there are overlong latencies in hardware state already, those
- * latencies get taken right now. When this call completes no overlong
- * latency decisions should be active anymore.
- *
- * Typical usecase of this is after a modify_acceptable_latency() call,
- * which in itself is non-blocking and non-synchronizing.
- *
- * This function blocks and should not be called with locks held.
- */
-
-void synchronize_acceptable_latency(void)
-{
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-}
-EXPORT_SYMBOL_GPL(synchronize_acceptable_latency);
-
-/*
- * Latency notifier: this notifier gets called when a non-atomic new
- * latency value gets set. The expectation nof the caller of the
- * non-atomic set is that when the call returns, future latencies
- * are within bounds, so the functions on the notifier list are
- * expected to take the overlong latencies immediately, inside the
- * callback, and not make a overlong latency decision anymore.
- *
- * The callback gets called when the new latency value is made
- * active so system_latency_constraint() returns the new latency.
- */
-int register_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_register(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(register_latency_notifier);
-
-int unregister_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_unregister(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_latency_notifier);
-
-static __init int latency_init(void)
-{
- atomic_set(&current_max_latency, INFINITE_LATENCY);
- /*
- * we don't want by default to have longer latencies than 2 ticks,
- * since that would cause lost ticks
- */
- set_acceptable_latency("kernel", 2*1000000/HZ);
- return 0;
-}
-
-module_init(latency_init);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
new file mode 100644
index 00000000000..0afe32be4c8
--- /dev/null
+++ b/kernel/pm_qos_params.c
@@ -0,0 +1,425 @@
+/*
+ * This module exposes the interface to kernel space for specifying
+ * QoS dependencies. It provides infrastructure for registration of:
+ *
+ * Dependents on a QoS value : register requirements
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ *
+ * There are 3 basic classes of QoS parameter: latency, timeout, throughput;
+ * each has defined units:
+ * latency: usec
+ * timeout: usec <-- currently not used.
+ * throughput: kbs (kilo byte / sec)
+ *
+ * There are lists of pm_qos_objects, each one wrapping requirements and notifiers.
+ *
+ * User mode requirements on a QOS parameter register themselves to the
+ * subsystem by opening the device node /dev/... and writing there request to
+ * the node. As long as the process holds a file handle open to the node the
+ * client continues to be accounted for. Upon file release the usermode
+ * requirement is removed and a new qos target is computed. This way when the
+ * requirement that the application has is cleaned up when closes the file
+ * pointer or exits the pm_qos_object will get an opportunity to clean up.
+ *
+ * mark gross mgross@linux.intel.com
+ */
+
+#include <linux/pm_qos_params.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+
+#include <linux/uaccess.h>
+
+/*
+ * locking rule: all changes to target_value or requirements or notifiers lists
+ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+ * held, taken with _irqsave. One lock to rule them all
+ */
+struct requirement_list {
+ struct list_head list;
+ union {
+ s32 value;
+ s32 usec;
+ s32 kbps;
+ };
+ char *name;
+};
+
+static s32 max_compare(s32 v1, s32 v2);
+static s32 min_compare(s32 v1, s32 v2);
+
+struct pm_qos_object {
+ struct requirement_list requirements;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice pm_qos_power_miscdev;
+ char *name;
+ s32 default_value;
+ s32 target_value;
+ s32 (*comparitor)(s32, s32);
+};
+
+static struct pm_qos_object null_pm_qos;
+static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+static struct pm_qos_object cpu_dma_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(cpu_dma_pm_qos.requirements.list)},
+ .notifiers = &cpu_dma_lat_notifier,
+ .name = "cpu_dma_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+static struct pm_qos_object network_lat_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(network_lat_pm_qos.requirements.list)},
+ .notifiers = &network_lat_notifier,
+ .name = "network_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+static struct pm_qos_object network_throughput_pm_qos = {
+ .requirements =
+ {LIST_HEAD_INIT(network_throughput_pm_qos.requirements.list)},
+ .notifiers = &network_throughput_notifier,
+ .name = "network_throughput",
+ .default_value = 0,
+ .target_value = 0,
+ .comparitor = max_compare
+};
+
+
+static struct pm_qos_object *pm_qos_array[] = {
+ &null_pm_qos,
+ &cpu_dma_pm_qos,
+ &network_lat_pm_qos,
+ &network_throughput_pm_qos
+};
+
+static DEFINE_SPINLOCK(pm_qos_lock);
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos);
+static int pm_qos_power_open(struct inode *inode, struct file *filp);
+static int pm_qos_power_release(struct inode *inode, struct file *filp);
+
+static const struct file_operations pm_qos_power_fops = {
+ .write = pm_qos_power_write,
+ .open = pm_qos_power_open,
+ .release = pm_qos_power_release,
+};
+
+/* static helper functions */
+static s32 max_compare(s32 v1, s32 v2)
+{
+ return max(v1, v2);
+}
+
+static s32 min_compare(s32 v1, s32 v2)
+{
+ return min(v1, v2);
+}
+
+
+static void update_target(int target)
+{
+ s32 extreme_value;
+ struct requirement_list *node;
+ unsigned long flags;
+ int call_notifier = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ extreme_value = pm_qos_array[target]->default_value;
+ list_for_each_entry(node,
+ &pm_qos_array[target]->requirements.list, list) {
+ extreme_value = pm_qos_array[target]->comparitor(
+ extreme_value, node->value);
+ }
+ if (pm_qos_array[target]->target_value != extreme_value) {
+ call_notifier = 1;
+ pm_qos_array[target]->target_value = extreme_value;
+ pr_debug("new target for qos %d is %d\n", target,
+ pm_qos_array[target]->target_value);
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ if (call_notifier)
+ blocking_notifier_call_chain(pm_qos_array[target]->notifiers,
+ (unsigned long) extreme_value, NULL);
+}
+
+static int register_pm_qos_misc(struct pm_qos_object *qos)
+{
+ qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->pm_qos_power_miscdev.name = qos->name;
+ qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
+
+ return misc_register(&qos->pm_qos_power_miscdev);
+}
+
+static int find_pm_qos_object_by_minor(int minor)
+{
+ int pm_qos_class;
+
+ for (pm_qos_class = 0;
+ pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
+ if (minor ==
+ pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
+ return pm_qos_class;
+ }
+ return -1;
+}
+
+/**
+ * pm_qos_requirement - returns current system wide qos expectation
+ * @pm_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value in an atomic manner.
+ */
+int pm_qos_requirement(int pm_qos_class)
+{
+ int ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ ret_val = pm_qos_array[pm_qos_class]->target_value;
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ return ret_val;
+}
+EXPORT_SYMBOL_GPL(pm_qos_requirement);
+
+/**
+ * pm_qos_add_requirement - inserts new qos request into the list
+ * @pm_qos_class: identifies which list of qos request to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the pm_qos_class list of requested qos
+ * performance characteristics. It recomputes the aggregate QoS expectations for
+ * the pm_qos_class of parameters.
+ */
+int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
+{
+ struct requirement_list *dep;
+ unsigned long flags;
+
+ dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ if (dep) {
+ if (value == PM_QOS_DEFAULT_VALUE)
+ dep->value = pm_qos_array[pm_qos_class]->default_value;
+ else
+ dep->value = value;
+ dep->name = kstrdup(name, GFP_KERNEL);
+ if (!dep->name)
+ goto cleanup;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_add(&dep->list,
+ &pm_qos_array[pm_qos_class]->requirements.list);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ update_target(pm_qos_class);
+
+ return 0;
+ }
+
+cleanup:
+ kfree(dep);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
+
+/**
+ * pm_qos_update_requirement - modifies an existing qos request
+ * @pm_qos_class: identifies which list of qos request to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * Updates an existing qos requirement for the pm_qos_class of parameters along
+ * with updating the target pm_qos_class value.
+ *
+ * If the named request isn't in the list then no change is made.
+ */
+int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ if (new_value == PM_QOS_DEFAULT_VALUE)
+ node->value =
+ pm_qos_array[pm_qos_class]->default_value;
+ else
+ node->value = new_value;
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
+
+/**
+ * pm_qos_remove_requirement - removes an existing qos request
+ * @pm_qos_class: identifies which list of qos request to use
+ * @name: identifies the request
+ *
+ * Will remove the named qos request from the pm_qos_class list of parameters and
+ * recompute the current target value for the pm_qos_class.
+ */
+void pm_qos_remove_requirement(int pm_qos_class, char *name)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ kfree(node->name);
+ list_del(&node->list);
+ kfree(node);
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
+
+/**
+ * pm_qos_add_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
+
+/**
+ * pm_qos_remove_notifier - deletes notification entry from chain.
+ * @pm_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
+
+#define PID_NAME_LEN sizeof("process_1234567890")
+static char name[PID_NAME_LEN];
+
+static int pm_qos_power_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ long pm_qos_class;
+
+ pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
+ if (pm_qos_class >= 0) {
+ filp->private_data = (void *)pm_qos_class;
+ sprintf(name, "process_%d", current->pid);
+ ret = pm_qos_add_requirement(pm_qos_class, name,
+ PM_QOS_DEFAULT_VALUE);
+ if (ret >= 0)
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int pm_qos_power_release(struct inode *inode, struct file *filp)
+{
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_remove_requirement(pm_qos_class, name);
+
+ return 0;
+}
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ if (count != sizeof(s32))
+ return -EINVAL;
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_update_requirement(pm_qos_class, name, value);
+
+ return sizeof(s32);
+}
+
+
+static int __init pm_qos_power_init(void)
+{
+ int ret = 0;
+
+ ret = register_pm_qos_misc(&cpu_dma_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_lat_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_throughput_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: network_throughput setup failed\n");
+
+ return ret;
+}
+
+late_initcall(pm_qos_power_init);
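A hedged sketch of the in-kernel usage pattern (the PM_QOS_CPU_DMA_LATENCY class id and PM_QOS_DEFAULT_VALUE come from the accompanying linux/pm_qos_params.h header, which is outside this kernel/ diffstat; the audio-driver framing is invented for illustration):

#include <linux/pm_qos_params.h>

/* Keep CPU exit latency under 150 usec while streaming. */
static int my_audio_stream_start(void)
{
	return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
				      "my_audio", 150);
}

/* Relax the constraint without dropping the entry. */
static void my_audio_stream_pause(void)
{
	pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
				  "my_audio", PM_QOS_DEFAULT_VALUE);
}

/* Drop the entry; the aggregate target is recomputed. */
static void my_audio_stream_stop(void)
{
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "my_audio");
}

User space gets the equivalent by opening the misc device registered above (e.g. /dev/cpu_dma_latency) and writing an s32; the requirement lives only as long as the file descriptor stays open.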
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 36d563fd9e3..122d5c787fe 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -256,8 +256,9 @@ static void schedule_next_timer(struct k_itimer *timr)
if (timr->it.real.interval.tv64 == 0)
return;
- timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
- timr->it.real.interval);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer,
+ timer->base->get_time(),
+ timr->it.real.interval);
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1;
@@ -386,7 +387,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
now = ktime_add(now, kj);
}
#endif
- timr->it_overrun +=
+ timr->it_overrun += (unsigned int)
hrtimer_forward(timer, now,
timr->it.real.interval);
ret = HRTIMER_RESTART;
@@ -662,7 +663,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
*/
if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
- timr->it_overrun += hrtimer_forward(timer, now, iv);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
remaining = ktime_sub(timer->expires, now);
/* Return 0 only, when the timer is expired and not pending */
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d09da089517..859a8e59773 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -26,7 +26,7 @@
static int noresume = 0;
-char resume_file[256] = CONFIG_PM_STD_PARTITION;
+static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
@@ -185,7 +185,7 @@ static void platform_restore_cleanup(int platform_mode)
* reappears in this routine after a restore.
*/
-int create_image(int platform_mode)
+static int create_image(int platform_mode)
{
int error;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f6a5df934f8..95250d7c8d9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1203,7 +1203,7 @@ asmlinkage int swsusp_save(void)
printk(KERN_INFO "PM: Creating hibernation image: \n");
- drain_local_pages();
+ drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
@@ -1221,7 +1221,7 @@ asmlinkage int swsusp_save(void)
/* During allocating of suspend pagedir, new cold pages may appear.
* Kill them.
*/
- drain_local_pages();
+ drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
diff --git a/kernel/signal.c b/kernel/signal.c
index 4333b6dbb42..6a5f97cd337 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -911,27 +911,6 @@ __group_complete_signal(int sig, struct task_struct *p)
} while_each_thread(p, t);
return;
}
-
- /*
- * There will be a core dump. We make all threads other
- * than the chosen one go into a group stop so that nothing
- * happens until it gets scheduled, takes the signal off
- * the shared queue, and does the core dump. This is a
- * little more complicated than strictly necessary, but it
- * keeps the signal state that winds up in the core dump
- * unchanged from the death state, e.g. which thread had
- * the core-dump signal unblocked.
- */
- rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
- rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
- p->signal->group_stop_count = 0;
- p->signal->group_exit_task = t;
- p = t;
- do {
- p->signal->group_stop_count++;
- signal_wake_up(t, t == p);
- } while_each_thread(p, t);
- return;
}
/*
@@ -978,7 +957,6 @@ void zap_other_threads(struct task_struct *p)
{
struct task_struct *t;
- p->signal->flags = SIGNAL_GROUP_EXIT;
p->signal->group_stop_count = 0;
for (t = next_thread(p); t != p; t = next_thread(t)) {
@@ -1709,9 +1687,6 @@ static int do_signal_stop(int signr)
struct signal_struct *sig = current->signal;
int stop_count;
- if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
- return 0;
-
if (sig->group_stop_count > 0) {
/*
* There is a group stop in progress. We don't need to
@@ -1719,12 +1694,15 @@ static int do_signal_stop(int signr)
*/
stop_count = --sig->group_stop_count;
} else {
+ struct task_struct *t;
+
+ if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+ unlikely(sig->group_exit_task))
+ return 0;
/*
* There is no group stop already in progress.
* We must initiate one now.
*/
- struct task_struct *t;
-
sig->group_exit_code = signr;
stop_count = 0;
@@ -1752,47 +1730,6 @@ static int do_signal_stop(int signr)
return 1;
}
-/*
- * Do appropriate magic when group_stop_count > 0.
- * We return nonzero if we stopped, after releasing the siglock.
- * We return zero if we still hold the siglock and should look
- * for another signal without checking group_stop_count again.
- */
-static int handle_group_stop(void)
-{
- int stop_count;
-
- if (current->signal->group_exit_task == current) {
- /*
- * Group stop is so we can do a core dump,
- * We are the initiating thread, so get on with it.
- */
- current->signal->group_exit_task = NULL;
- return 0;
- }
-
- if (current->signal->flags & SIGNAL_GROUP_EXIT)
- /*
- * Group stop is so another thread can do a core dump,
- * or else we are racing against a death signal.
- * Just punt the stop so we can get the next signal.
- */
- return 0;
-
- /*
- * There is a group stop in progress. We stop
- * without any associated signal being in our queue.
- */
- stop_count = --current->signal->group_stop_count;
- if (stop_count == 0)
- current->signal->flags = SIGNAL_STOP_STOPPED;
- current->exit_code = current->signal->group_exit_code;
- set_current_state(TASK_STOPPED);
- spin_unlock_irq(&current->sighand->siglock);
- finish_stop(stop_count);
- return 1;
-}
-
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct pt_regs *regs, void *cookie)
{
@@ -1807,7 +1744,7 @@ relock:
struct k_sigaction *ka;
if (unlikely(current->signal->group_stop_count > 0) &&
- handle_group_stop())
+ do_signal_stop(0))
goto relock;
signr = dequeue_signal(current, mask, info);
diff --git a/kernel/sys.c b/kernel/sys.c
index d1fe71eb454..53de35fc824 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -315,7 +315,7 @@ static void kernel_kexec(void)
#endif
}
-void kernel_shutdown_prepare(enum system_states state)
+static void kernel_shutdown_prepare(enum system_states state)
{
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
@@ -1637,7 +1637,7 @@ asmlinkage long sys_umask(int mask)
mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
return mask;
}
-
+
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
@@ -1742,6 +1742,17 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = prctl_set_seccomp(arg2);
break;
+ case PR_CAPBSET_READ:
+ if (!cap_valid(arg2))
+ return -EINVAL;
+ return !!cap_raised(current->cap_bset, arg2);
+ case PR_CAPBSET_DROP:
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+ return cap_prctl_drop(arg2);
+#else
+ return -EINVAL;
+#endif
+
default:
error = -EINVAL;
break;
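A user-space sketch of the new bounding-set prctl operations (illustrative only; PR_CAPBSET_READ and PR_CAPBSET_DROP are defined in the matching include/linux/prctl.h change, not in this kernel/ diff, and the numeric fallbacks below are assumptions for older headers):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/capability.h>

#ifndef PR_CAPBSET_READ
#define PR_CAPBSET_READ	23	/* assumed values; see linux/prctl.h */
#define PR_CAPBSET_DROP	24
#endif

int main(void)
{
	printf("CAP_NET_RAW in bounding set: %d\n",
	       prctl(PR_CAPBSET_READ, CAP_NET_RAW, 0, 0, 0));

	/* Dropping requires CAP_SETPCAP on the caller. */
	if (prctl(PR_CAPBSET_DROP, CAP_NET_RAW, 0, 0, 0) != 0)
		perror("PR_CAPBSET_DROP");
	return 0;
}

As the #ifdef in sys_prctl() shows, PR_CAPBSET_DROP returns -EINVAL on kernels built without CONFIG_SECURITY_FILE_CAPABILITIES.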
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index beee5b3b68a..5b9b467de07 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -154,7 +154,10 @@ cond_syscall(sys_ioprio_get);
/* New file descriptors */
cond_syscall(sys_signalfd);
-cond_syscall(sys_timerfd);
cond_syscall(compat_sys_signalfd);
-cond_syscall(compat_sys_timerfd);
+cond_syscall(sys_timerfd_create);
+cond_syscall(sys_timerfd_settime);
+cond_syscall(sys_timerfd_gettime);
+cond_syscall(compat_sys_timerfd_settime);
+cond_syscall(compat_sys_timerfd_gettime);
cond_syscall(sys_eventfd);
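For reference, the split replaces the old sys_timerfd with the create/settime/gettime triple. A small user-space sketch, assuming a libc that already exposes the <sys/timerfd.h> wrappers (otherwise raw syscall(2) calls are needed):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	struct itimerspec its = {
		.it_interval = { .tv_sec = 1 },		/* 1 Hz periodic */
		.it_value    = { .tv_sec = 1 },
	};
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}
	/* Each read() reports expirations since the previous read(). */
	if (read(fd, &expirations, sizeof(expirations)) == (ssize_t)sizeof(expirations))
		printf("expired %llu time(s)\n", (unsigned long long)expirations);
	return 0;
}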
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7cb1ac3e6ff..5e2ad5bf88e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -84,8 +84,11 @@ extern int sysctl_stat_interval;
extern int latencytop_enabled;
/* Constants used for minimum and maximum */
-#ifdef CONFIG_DETECT_SOFTLOCKUP
+#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM)
static int one = 1;
+#endif
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
static int sixty = 60;
#endif
@@ -416,15 +419,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_SECURITY_CAPABILITIES
- {
- .procname = "cap-bound",
- .data = &cap_bset,
- .maxlen = sizeof(kernel_cap_t),
- .mode = 0600,
- .proc_handler = &proc_dointvec_bset,
- },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
#ifdef CONFIG_BLK_DEV_INITRD
{
.ctl_name = KERN_REALROOTDEV,
@@ -1150,6 +1144,19 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
#endif
+#ifdef CONFIG_HIGHMEM
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "highmem_is_dirtyable",
+ .data = &vm_highmem_is_dirtyable,
+ .maxlen = sizeof(vm_highmem_is_dirtyable),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -2080,26 +2087,6 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
return 0;
}
-#ifdef CONFIG_SECURITY_CAPABILITIES
-/*
- * init may raise the set.
- */
-
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int op;
-
- if (write && !capable(CAP_SYS_MODULE)) {
- return -EPERM;
- }
-
- op = is_global_init(current) ? OP_SET : OP_AND;
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
- do_proc_dointvec_bset_conv,&op);
-}
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
/*
* Taint values can only be increased
*/
@@ -2513,12 +2500,6 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
return -ENOSYS;
}
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return -ENOSYS;
-}
-
int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c3206fa5004..006365b69ea 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -37,10 +37,6 @@ static struct trans_ctl_table trans_kern_table[] = {
{ KERN_NODENAME, "hostname" },
{ KERN_DOMAINNAME, "domainname" },
-#ifdef CONFIG_SECURITY_CAPABILITIES
- { KERN_CAP_BSET, "cap-bound" },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
{ KERN_PANIC, "panic" },
{ KERN_REALROOTDEV, "real-root-dev" },
@@ -1498,9 +1494,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
(table->strategy == sysctl_ms_jiffies) ||
(table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
-#ifdef CONFIG_SECURITY_CAPABILITIES
- (table->proc_handler == proc_dointvec_bset) ||
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||