Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 33b9b4841ee..2e490271acf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -681,7 +681,7 @@ enum cpu_idle_type {
 #define SCHED_LOAD_SHIFT	10
 #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
 
-#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 5)
+#define SCHED_LOAD_SCALE_FUZZ	(SCHED_LOAD_SCALE >> 1)
 
 #ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
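
For scale: with SCHED_LOAD_SHIFT at 10, SCHED_LOAD_SCALE is 1024, so this hunk
raises the balancing fuzz from 1024 >> 5 = 32 to 1024 >> 1 = 512, i.e. half of
a nice-0 task's load instead of 1/32 of it. A standalone userspace sketch of
the arithmetic, mirroring only the macros shown in the hunk above:

    #include <stdio.h>

    #define SCHED_LOAD_SHIFT	10
    #define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

    int main(void)
    {
    	printf("scale    = %ld\n", SCHED_LOAD_SCALE);		/* 1024 */
    	printf("old fuzz = %ld\n", SCHED_LOAD_SCALE >> 5);	/* 32 */
    	printf("new fuzz = %ld\n", SCHED_LOAD_SCALE >> 1);	/* 512 */
    	return 0;
    }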
@@ -786,6 +786,22 @@ extern int partition_sched_domains(cpumask_t *partition1,
 
 #endif	/* CONFIG_SMP */
 
+/*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+ * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
+ * task of nice 0, or with enough lower priority tasks to bring the
+ * weighted_cpuload up to SCHED_LOAD_SCALE or above.
+ */
+static inline int above_background_load(void)
+{
+	unsigned long cpu;
+
+	for_each_online_cpu(cpu) {
+		if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
+			return 1;
+	}
+	return 0;
+}
 
 struct io_context;	/* See blkdev.h */
 struct cpuset;
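
The helper above depends only on weighted_cpuload() and for_each_online_cpu(),
and this diff does not show any callers. The snippet below is therefore only a
hypothetical illustration of the intended use: gating optional background work
on whether any CPU already carries at least one nice-0 task's worth of load.
do_background_work() is a made-up placeholder, not part of the patch:

    /* Hypothetical caller, not part of this patch. */
    static void run_if_idle_enough(void)
    {
    	if (above_background_load())
    		return;			/* some CPU is busy: back off */

    	do_background_work();		/* placeholder for deferrable work */
    }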
@@ -935,6 +951,11 @@ struct task_struct {
 	struct sched_class *sched_class;
 	struct sched_entity se;
 
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+	/* list of struct preempt_notifier: */
+	struct hlist_head preempt_notifiers;
+#endif
+
 	unsigned short ioprio;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	unsigned int btrace_seq;
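
The hlist added to task_struct above is the per-task anchor for preempt
notifiers; the API itself (struct preempt_ops and
preempt_notifier_init/register/unregister) lands in <linux/preempt.h> in the
same series and is not shown in this diff. Assuming that API, a minimal sketch
of a consumer (KVM was the original one); the callbacks are invoked from the
context-switch path and must not sleep:

    /* Sketch of a preempt-notifier consumer, assuming <linux/preempt.h>
     * from the same series; not part of this diff. */
    #include <linux/preempt.h>
    #include <linux/sched.h>

    static void my_sched_in(struct preempt_notifier *pn, int cpu)
    {
    	/* current was just scheduled in on @cpu */
    }

    static void my_sched_out(struct preempt_notifier *pn,
    			 struct task_struct *next)
    {
    	/* current is being scheduled out in favour of @next */
    }

    static struct preempt_ops my_preempt_ops = {
    	.sched_in	= my_sched_in,
    	.sched_out	= my_sched_out,
    };

    static struct preempt_notifier my_notifier;

    static void my_hook_current_task(void)
    {
    	/* links my_notifier into current->preempt_notifiers (added above) */
    	preempt_notifier_init(&my_notifier, &my_preempt_ops);
    	preempt_notifier_register(&my_notifier);
    }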