Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig          |   25
-rw-r--r--  drivers/base/Makefile         |    2
-rw-r--r--  drivers/base/base.h           |   13
-rw-r--r--  drivers/base/bus.c            |   23
-rw-r--r--  drivers/base/class.c          |   87
-rw-r--r--  drivers/base/core.c           |   48
-rw-r--r--  drivers/base/dd.c             |   42
-rw-r--r--  drivers/base/devtmpfs.c       |  375
-rw-r--r--  drivers/base/dma-coherent.c   |  176
-rw-r--r--  drivers/base/driver.c         |    4
-rw-r--r--  drivers/base/init.c           |    1
-rw-r--r--  drivers/base/node.c           |    5
-rw-r--r--  drivers/base/platform.c       |   92
-rw-r--r--  drivers/base/power/Makefile   |    1
-rw-r--r--  drivers/base/power/main.c     |  199
-rw-r--r--  drivers/base/power/power.h    |   31
-rw-r--r--  drivers/base/power/runtime.c  | 1011
17 files changed, 1955 insertions(+), 180 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 8f006f96ff5..ee377270beb 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -8,6 +8,31 @@ config UEVENT_HELPER_PATH
Path to uevent helper program forked by the kernel for
every uevent.
+config DEVTMPFS
+ bool "Create a kernel maintained /dev tmpfs (EXPERIMENTAL)"
+ depends on HOTPLUG && SHMEM && TMPFS
+ help
+ This creates a tmpfs filesystem instance early at bootup,
+ to be mounted at /dev. The kernel driver core creates device
+ nodes for all registered devices in that filesystem. All device
+ nodes are owned by root and have a default mode of 0600.
+ Userspace can add and delete the nodes as needed. This is
+ intended to simplify bootup, and to make it possible to delay
+ the initial coldplug done by udev in userspace.
+ It should also provide a simpler way for rescue systems
+ to bring up a kernel with dynamic major/minor numbers.
+ Meaningful symlinks, permissions and device ownership must
+ still be handled by userspace.
+ If unsure, say N here.
+
+config DEVTMPFS_MOUNT
+ bool "Automount devtmpfs at /dev"
+ depends on DEVTMPFS
+ help
+ This will mount devtmpfs at /dev if the kernel mounts the root
+ filesystem. It will not affect initramfs-based mounting.
+ If unsure, say N here.
+
config STANDALONE
bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
default y
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b5b8ba512b2..c12c7f2f2a6 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -4,8 +4,10 @@ obj-y := core.o sys.o bus.o dd.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o
+obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b528145a078..2ca7f5b7b82 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -70,6 +70,8 @@ struct class_private {
* @knode_parent - node in sibling list
* @knode_driver - node in driver list
* @knode_bus - node in bus list
+ * @driver_data - private pointer for driver-specific info. Will turn into a
+ * list soon.
* @device - pointer back to the struct class that this structure is
* associated with.
*
@@ -80,6 +82,7 @@ struct device_private {
struct klist_node knode_parent;
struct klist_node knode_driver;
struct klist_node knode_bus;
+ void *driver_data;
struct device *device;
};
#define to_device_private_parent(obj) \
@@ -89,6 +92,8 @@ struct device_private {
#define to_device_private_bus(obj) \
container_of(obj, struct device_private, knode_bus)
+extern int device_private_init(struct device *dev);
+
/* initialisation functions */
extern int devices_init(void);
extern int buses_init(void);
@@ -104,7 +109,7 @@ extern int system_bus_init(void);
extern int cpu_dev_init(void);
extern int bus_add_device(struct device *dev);
-extern void bus_attach_device(struct device *dev);
+extern void bus_probe_device(struct device *dev);
extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
@@ -134,3 +139,9 @@ static inline void module_add_driver(struct module *mod,
struct device_driver *drv) { }
static inline void module_remove_driver(struct device_driver *drv) { }
#endif
+
+#ifdef CONFIG_DEVTMPFS
+extern int devtmpfs_init(void);
+#else
+static inline int devtmpfs_init(void) { return 0; }
+#endif
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 4b04a15146d..973bf2ad4e0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -459,8 +459,9 @@ static inline void remove_deprecated_bus_links(struct device *dev) { }
* bus_add_device - add device to bus
* @dev: device being added
*
+ * - Add device's bus attributes.
+ * - Create links to device's bus.
* - Add the device to its bus's list of devices.
- * - Create link to device's bus.
*/
int bus_add_device(struct device *dev)
{
@@ -483,6 +484,7 @@ int bus_add_device(struct device *dev)
error = make_deprecated_bus_links(dev);
if (error)
goto out_deprecated;
+ klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
}
return 0;
@@ -498,24 +500,19 @@ out_put:
}
/**
- * bus_attach_device - add device to bus
- * @dev: device tried to attach to a driver
+ * bus_probe_device - probe drivers for a new device
+ * @dev: device to probe
*
- * - Add device to bus's list of devices.
- * - Try to attach to driver.
+ * - Automatically probe for a driver if the bus allows it.
*/
-void bus_attach_device(struct device *dev)
+void bus_probe_device(struct device *dev)
{
struct bus_type *bus = dev->bus;
- int ret = 0;
+ int ret;
- if (bus) {
- if (bus->p->drivers_autoprobe)
- ret = device_attach(dev);
+ if (bus && bus->p->drivers_autoprobe) {
+ ret = device_attach(dev);
WARN_ON(ret < 0);
- if (ret >= 0)
- klist_add_tail(&dev->p->knode_bus,
- &bus->p->klist_devices);
}
}
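With this split, registration always links the device into its bus's klist, and probing becomes a separate step. As an illustration only (none of the following is part of this patch; all foo_* names are invented), a minimal sketch of a bus whose devices are autoprobed by bus_probe_device() from within device_add():

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

static int foo_match(struct device *dev, struct device_driver *drv)
{
	return 1;		/* bind any driver on this toy bus */
}

static struct bus_type foo_bus = {
	.name  = "foo",
	.match = foo_match,
};

static void foo_dev_release(struct device *dev)
{
	kfree(dev);
}

static int __init foo_init(void)
{
	struct device *dev;
	int err;

	err = bus_register(&foo_bus);
	if (err)
		return err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev_set_name(dev, "foo0");
	dev->bus = &foo_bus;
	dev->release = foo_dev_release;
	/*
	 * device_add() -> bus_add_device() links the device into the
	 * bus list unconditionally; bus_probe_device() then autoprobes.
	 */
	return device_register(dev);
}
module_init(foo_init);
MODULE_LICENSE("GPL");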
diff --git a/drivers/base/class.c b/drivers/base/class.c
index eb85e431230..161746deab4 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -488,6 +488,93 @@ void class_interface_unregister(struct class_interface *class_intf)
class_put(parent);
}
+struct class_compat {
+ struct kobject *kobj;
+};
+
+/**
+ * class_compat_register - register a compatibility class
+ * @name: the name of the class
+ *
+ * Compatibility classes are meant as a temporary user-space compatibility
+ * workaround when converting a family of class devices to bus devices.
+ */
+struct class_compat *class_compat_register(const char *name)
+{
+ struct class_compat *cls;
+
+ cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
+ if (!cls)
+ return NULL;
+ cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
+ if (!cls->kobj) {
+ kfree(cls);
+ return NULL;
+ }
+ return cls;
+}
+EXPORT_SYMBOL_GPL(class_compat_register);
+
+/**
+ * class_compat_unregister - unregister a compatibility class
+ * @cls: the class to unregister
+ */
+void class_compat_unregister(struct class_compat *cls)
+{
+ kobject_put(cls->kobj);
+ kfree(cls);
+}
+EXPORT_SYMBOL_GPL(class_compat_unregister);
+
+/**
+ * class_compat_create_link - create a compatibility class device link to
+ * a bus device
+ * @cls: the compatibility class
+ * @dev: the target bus device
+ * @device_link: an optional device to which a "device" link should be created
+ */
+int class_compat_create_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link)
+{
+ int error;
+
+ error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
+ if (error)
+ return error;
+
+ /*
+ * Optionally add a "device" link (typically to the parent), as a
+ * class device would have one and we want to provide as much
+ * backwards compatibility as possible.
+ */
+ if (device_link) {
+ error = sysfs_create_link(&dev->kobj, &device_link->kobj,
+ "device");
+ if (error)
+ sysfs_remove_link(cls->kobj, dev_name(dev));
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(class_compat_create_link);
+
+/**
+ * class_compat_remove_link - remove a compatibility class device link to
+ * a bus device
+ * @cls: the compatibility class
+ * @dev: the target bus device
+ * @device_link: an optional device to which a "device" link was previously
+ * created
+ */
+void class_compat_remove_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link)
+{
+ if (device_link)
+ sysfs_remove_link(&dev->kobj, "device");
+ sysfs_remove_link(cls->kobj, dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(class_compat_remove_link);
+
int __init classes_init(void)
{
class_kset = kset_create_and_add("class", NULL, NULL);
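The intended use of the compat API, per the kerneldoc above, is to keep the old /sys/class entries alive while a subsystem moves its devices over to a bus. A hedged sketch under that assumption (the foo_* names and hook points are invented):

#include <linux/device.h>

static struct class_compat *foo_compat;		/* hypothetical */

static int __init foo_subsys_init(void)
{
	foo_compat = class_compat_register("foo");
	return foo_compat ? 0 : -ENOMEM;
}

/* called when a device joins the new bus */
static int foo_dev_added(struct device *dev)
{
	/* mirror the device under /sys/class/foo, with a "device" link */
	return class_compat_create_link(foo_compat, dev, dev->parent);
}

/* called when the device leaves again */
static void foo_dev_removed(struct device *dev)
{
	class_compat_remove_link(foo_compat, dev, dev->parent);
}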
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7ecb1938e59..6bee6af8d8e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -166,13 +166,16 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (MAJOR(dev->devt)) {
const char *tmp;
const char *name;
+ mode_t mode = 0;
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
- name = device_get_nodename(dev, &tmp);
+ name = device_get_devnode(dev, &mode, &tmp);
if (name) {
add_uevent_var(env, "DEVNAME=%s", name);
kfree(tmp);
+ if (mode)
+ add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
}
}
@@ -341,7 +344,7 @@ static void device_remove_attributes(struct device *dev,
}
static int device_add_groups(struct device *dev,
- struct attribute_group **groups)
+ const struct attribute_group **groups)
{
int error = 0;
int i;
@@ -361,7 +364,7 @@ static int device_add_groups(struct device *dev,
}
static void device_remove_groups(struct device *dev,
- struct attribute_group **groups)
+ const struct attribute_group **groups)
{
int i;
@@ -843,6 +846,17 @@ static void device_remove_sys_dev_entry(struct device *dev)
}
}
+int device_private_init(struct device *dev)
+{
+ dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
+ if (!dev->p)
+ return -ENOMEM;
+ dev->p->device = dev;
+ klist_init(&dev->p->klist_children, klist_children_get,
+ klist_children_put);
+ return 0;
+}
+
/**
* device_add - add device to device hierarchy.
* @dev: device.
@@ -868,14 +882,11 @@ int device_add(struct device *dev)
if (!dev)
goto done;
- dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
if (!dev->p) {
- error = -ENOMEM;
- goto done;
+ error = device_private_init(dev);
+ if (error)
+ goto done;
}
- dev->p->device = dev;
- klist_init(&dev->p->klist_children, klist_children_get,
- klist_children_put);
/*
* for statically allocated devices, which should all be converted
@@ -921,6 +932,8 @@ int device_add(struct device *dev)
error = device_create_sys_dev_entry(dev);
if (error)
goto devtattrError;
+
+ devtmpfs_create_node(dev);
}
error = device_add_class_symlinks(dev);
@@ -945,7 +958,7 @@ int device_add(struct device *dev)
BUS_NOTIFY_ADD_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_ADD);
- bus_attach_device(dev);
+ bus_probe_device(dev);
if (parent)
klist_add_tail(&dev->p->knode_parent,
&parent->p->klist_children);
@@ -1067,6 +1080,7 @@ void device_del(struct device *dev)
if (parent)
klist_del(&dev->p->knode_parent);
if (MAJOR(dev->devt)) {
+ devtmpfs_delete_node(dev);
device_remove_sys_dev_entry(dev);
device_remove_file(dev, &devt_attr);
}
@@ -1137,8 +1151,9 @@ static struct device *next_device(struct klist_iter *i)
}
/**
- * device_get_nodename - path of device node file
+ * device_get_devnode - path of device node file
* @dev: device
+ * @mode: returned file access mode
* @tmp: possibly allocated string
*
* Return the relative path of a possible device node.
@@ -1146,21 +1161,22 @@ static struct device *next_device(struct klist_iter *i)
* a name. This memory is returned in tmp and needs to be
* freed by the caller.
*/
-const char *device_get_nodename(struct device *dev, const char **tmp)
+const char *device_get_devnode(struct device *dev,
+ mode_t *mode, const char **tmp)
{
char *s;
*tmp = NULL;
/* the device type may provide a specific name */
- if (dev->type && dev->type->nodename)
- *tmp = dev->type->nodename(dev);
+ if (dev->type && dev->type->devnode)
+ *tmp = dev->type->devnode(dev, mode);
if (*tmp)
return *tmp;
/* the class may provide a specific name */
- if (dev->class && dev->class->nodename)
- *tmp = dev->class->nodename(dev);
+ if (dev->class && dev->class->devnode)
+ *tmp = dev->class->devnode(dev, mode);
if (*tmp)
return *tmp;
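The rename from nodename to devnode comes with a mode return value, which dev_uevent() above exports as DEVMODE and which devtmpfs (below) applies when creating the node. A hedged sketch of a class using the new callback (foo names invented; the devnode signature matches the one introduced by this patch):

#include <linux/device.h>
#include <linux/slab.h>

/* place nodes in a subdirectory and make them group-writable */
static char *foo_devnode(struct device *dev, mode_t *mode)
{
	if (mode)
		*mode = 0660;
	return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
}

static struct class foo_class = {
	.name    = "foo",
	.devnode = foo_devnode,
};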
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0106875f01..979d159b5cd 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -11,8 +11,8 @@
*
* Copyright (c) 2002-5 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
- * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- * Copyright (c) 2007 Novell Inc.
+ * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007-2009 Novell Inc.
*
* This file is released under the GPLv2
*/
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
+#include <linux/pm_runtime.h>
#include "base.h"
#include "power/power.h"
@@ -202,7 +203,10 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
ret = really_probe(dev, drv);
+ pm_runtime_put_sync(dev);
return ret;
}
@@ -245,7 +249,9 @@ int device_attach(struct device *dev)
ret = 0;
}
} else {
+ pm_runtime_get_noresume(dev);
ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
+ pm_runtime_put_sync(dev);
}
up(&dev->sem);
return ret;
@@ -306,6 +312,9 @@ static void __device_release_driver(struct device *dev)
drv = dev->driver;
if (drv) {
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
+
driver_sysfs_remove(dev);
if (dev->bus)
@@ -324,6 +333,8 @@ static void __device_release_driver(struct device *dev)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_UNBOUND_DRIVER,
dev);
+
+ pm_runtime_put_sync(dev);
}
}
@@ -380,3 +391,30 @@ void driver_detach(struct device_driver *drv)
put_device(dev);
}
}
+
+/*
+ * These exports can't be _GPL due to .h files using this within them, and it
+ * might break something that was previously working...
+ */
+void *dev_get_drvdata(const struct device *dev)
+{
+ if (dev && dev->p)
+ return dev->p->driver_data;
+ return NULL;
+}
+EXPORT_SYMBOL(dev_get_drvdata);
+
+void dev_set_drvdata(struct device *dev, void *data)
+{
+ int error;
+
+ if (!dev)
+ return;
+ if (!dev->p) {
+ error = device_private_init(dev);
+ if (error)
+ return;
+ }
+ dev->p->driver_data = data;
+}
+EXPORT_SYMBOL(dev_set_drvdata);
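Driver-facing usage of the accessors is unchanged; only the storage moved from struct device into struct device_private. A hedged sketch (foo_* names invented):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_state {			/* hypothetical per-device state */
	int open_count;
};

static int foo_probe(struct device *dev)
{
	struct foo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	dev_set_drvdata(dev, st);	/* lands in dev->p->driver_data */
	return 0;
}

static int foo_remove(struct device *dev)
{
	struct foo_state *st = dev_get_drvdata(dev);

	dev_set_drvdata(dev, NULL);
	kfree(st);
	return 0;
}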
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
new file mode 100644
index 00000000000..a1cb5afe680
--- /dev/null
+++ b/drivers/base/devtmpfs.c
@@ -0,0 +1,375 @@
+/*
+ * devtmpfs - kernel-maintained tmpfs-based /dev
+ *
+ * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
+ *
+ * During bootup, before any driver core device is registered,
+ * devtmpfs, a tmpfs-based filesystem, is created. Every driver-core
+ * device which requests a device node will add a node in this
+ * filesystem.
+ * By default, all device nodes are named after the name of the
+ * device, are owned by root and have a default mode of 0600.
+ * Subsystems can overwrite the default setting if needed.
+ */
+
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/mount.h>
+#include <linux/device.h>
+#include <linux/genhd.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
+#include <linux/shmem_fs.h>
+#include <linux/cred.h>
+#include <linux/sched.h>
+#include <linux/init_task.h>
+
+static struct vfsmount *dev_mnt;
+
+#if defined CONFIG_DEVTMPFS_MOUNT
+static int dev_mount = 1;
+#else
+static int dev_mount;
+#endif
+
+static int __init mount_param(char *str)
+{
+ dev_mount = simple_strtoul(str, NULL, 0);
+ return 1;
+}
+__setup("devtmpfs.mount=", mount_param);
+
+static int dev_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt);
+}
+
+static struct file_system_type dev_fs_type = {
+ .name = "devtmpfs",
+ .get_sb = dev_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+#ifdef CONFIG_BLOCK
+static inline int is_blockdev(struct device *dev)
+{
+ return dev->class == &block_class;
+}
+#else
+static inline int is_blockdev(struct device *dev) { return 0; }
+#endif
+
+static int dev_mkdir(const char *name, mode_t mode)
+{
+ struct nameidata nd;
+ struct dentry *dentry;
+ int err;
+
+ err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
+ name, LOOKUP_PARENT, &nd);
+ if (err)
+ return err;
+
+ dentry = lookup_create(&nd, 1);
+ if (!IS_ERR(dentry)) {
+ err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
+ dput(dentry);
+ } else {
+ err = PTR_ERR(dentry);
+ }
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+
+ path_put(&nd.path);
+ return err;
+}
+
+static int create_path(const char *nodepath)
+{
+ char *path;
+ struct nameidata nd;
+ int err = 0;
+
+ path = kstrdup(nodepath, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
+ path, LOOKUP_PARENT, &nd);
+ if (err == 0) {
+ struct dentry *dentry;
+
+ /* create directory right away */
+ dentry = lookup_create(&nd, 1);
+ if (!IS_ERR(dentry)) {
+ err = vfs_mkdir(nd.path.dentry->d_inode,
+ dentry, 0755);
+ dput(dentry);
+ }
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+
+ path_put(&nd.path);
+ } else if (err == -ENOENT) {
+ char *s;
+
+ /* parent directories do not exist, create them */
+ s = path;
+ while (1) {
+ s = strchr(s, '/');
+ if (!s)
+ break;
+ s[0] = '\0';
+ err = dev_mkdir(path, 0755);
+ if (err && err != -EEXIST)
+ break;
+ s[0] = '/';
+ s++;
+ }
+ }
+
+ kfree(path);
+ return err;
+}
+
+int devtmpfs_create_node(struct device *dev)
+{
+ const char *tmp = NULL;
+ const char *nodename;
+ const struct cred *curr_cred;
+ mode_t mode = 0;
+ struct nameidata nd;
+ struct dentry *dentry;
+ int err;
+
+ if (!dev_mnt)
+ return 0;
+
+ nodename = device_get_devnode(dev, &mode, &tmp);
+ if (!nodename)
+ return -ENOMEM;
+
+ if (mode == 0)
+ mode = 0600;
+ if (is_blockdev(dev))
+ mode |= S_IFBLK;
+ else
+ mode |= S_IFCHR;
+
+ curr_cred = override_creds(&init_cred);
+ err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
+ nodename, LOOKUP_PARENT, &nd);
+ if (err == -ENOENT) {
+ /* create missing parent directories */
+ create_path(nodename);
+ err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
+ nodename, LOOKUP_PARENT, &nd);
+ if (err)
+ goto out;
+ }
+
+ dentry = lookup_create(&nd, 0);
+ if (!IS_ERR(dentry)) {
+ int umask;
+
+ umask = sys_umask(0000);
+ err = vfs_mknod(nd.path.dentry->d_inode,
+ dentry, mode, dev->devt);
+ sys_umask(umask);
+ /* mark as kernel created inode */
+ if (!err)
+ dentry->d_inode->i_private = &dev_mnt;
+ dput(dentry);
+ } else {
+ err = PTR_ERR(dentry);
+ }
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+
+ path_put(&nd.path);
+out:
+ kfree(tmp);
+ revert_creds(curr_cred);
+ return err;
+}
+
+static int dev_rmdir(const char *name)
+{
+ struct nameidata nd;
+ struct dentry *dentry;
+ int err;
+
+ err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
+ name, LOOKUP_PARENT, &nd);
+ if (err)
+ return err;
+
+ mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+ dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
+ if (!IS_ERR(dentry)) {
+ if (dentry->d_inode)
+ err = vfs_rmdir(nd.path.dentry->d_inode, dentry);
+ else
+ err = -ENOENT;
+ dput(dentry);
+ } else {
+ err = PTR_ERR(dentry);
+ }
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+
+ path_put(&nd.path);
+ return err;
+}
+
+static int delete_path(const char *nodepath)
+{
+ char *path;
+ int err = 0;
+
+ path = kstrdup(nodepath, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ while (1) {
+ char *base;
+
+ base = strrchr(path, '/');
+ if (!base)
+ break;
+ base[0] = '\0';
+ err = dev_rmdir(path);
+ if (err)
+ break;
+ }
+
+ kfree(path);
+ return err;
+}
+
+static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
+{
+ /* did we create it */
+ if (inode->i_private != &dev_mnt)
+ return 0;
+
+ /* does the dev_t match */
+ if (is_blockdev(dev)) {
+ if (!S_ISBLK(stat->mode))
+ return 0;
+ } else {
+ if (!S_ISCHR(stat->mode))
+ return 0;
+ }
+ if (stat->rdev != dev->devt)
+ return 0;
+
+ /* ours */
+ return 1;
+}
+
+int devtmpfs_delete_node(struct device *dev)
+{
+ const char *tmp = NULL;
+ const char *nodename;
+ const struct cred *curr_cred;
+ struct nameidata nd;
+ struct dentry *dentry;
+ struct kstat stat;
+ int deleted = 0;
+ int err;
+
+ if (!dev_mnt)
+ return 0;
+
+ nodename = device_get_devnode(dev, NULL, &tmp);
+ if (!nodename)
+ return -ENOMEM;
+
+ curr_cred = override_creds(&init_cred);
+ err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
+ nodename, LOOKUP_PARENT, &nd);
+ if (err)
+ goto out;
+
+ mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+ dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
+ if (!IS_ERR(dentry)) {
+ if (dentry->d_inode) {
+ err = vfs_getattr(nd.path.mnt, dentry, &stat);
+ if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
+ err = vfs_unlink(nd.path.dentry->d_inode,
+ dentry);
+ if (!err || err == -ENOENT)
+ deleted = 1;
+ }
+ } else {
+ err = -ENOENT;
+ }
+ dput(dentry);
+ } else {
+ err = PTR_ERR(dentry);
+ }
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+
+ path_put(&nd.path);
+ if (deleted && strchr(nodename, '/'))
+ delete_path(nodename);
+out:
+ kfree(tmp);
+ revert_creds(curr_cred);
+ return err;
+}
+
+/*
+ * If configured, or requested by the kernel command line, devtmpfs
+ * will be auto-mounted after the kernel has mounted the root
+ * filesystem.
+ */
+int devtmpfs_mount(const char *mountpoint)
+{
+ struct path path;
+ int err;
+
+ if (!dev_mount)
+ return 0;
+
+ if (!dev_mnt)
+ return 0;
+
+ err = kern_path(mountpoint, LOOKUP_FOLLOW, &path);
+ if (err)
+ return err;
+ err = do_add_mount(dev_mnt, &path, 0, NULL);
+ if (err)
+ printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+ else
+ printk(KERN_INFO "devtmpfs: mounted\n");
+ path_put(&path);
+ return err;
+}
+
+/*
+ * Create the devtmpfs instance; driver-core devices will add their
+ * device nodes here.
+ */
+int __init devtmpfs_init(void)
+{
+ int err;
+ struct vfsmount *mnt;
+
+ err = register_filesystem(&dev_fs_type);
+ if (err) {
+ printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
+ "type %i\n", err);
+ return err;
+ }
+
+ mnt = kern_mount(&dev_fs_type);
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
+ printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
+ unregister_filesystem(&dev_fs_type);
+ return err;
+ }
+ dev_mnt = mnt;
+
+ printk(KERN_INFO "devtmpfs: initialized\n");
+ return 0;
+}
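devtmpfs_mount() is meant to be called exactly once by the early init code, after the root filesystem has been mounted; the actual call site lives outside this diff. A hedged sketch of what such a caller could look like (the wrapper name is invented):

/* hypothetical caller, modeled on the boot code this patch targets */
void __init mount_root_and_dev(void)
{
	/* ... mount the root filesystem and chdir into it ... */

	/*
	 * Mount devtmpfs relative to the new root; this is a no-op
	 * unless CONFIG_DEVTMPFS_MOUNT or devtmpfs.mount=1 enabled it.
	 */
	devtmpfs_mount("dev");

	/* ... switch to the new root and continue booting ... */
}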
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
new file mode 100644
index 00000000000..962a3b574f2
--- /dev/null
+++ b/drivers/base/dma-coherent.c
@@ -0,0 +1,176 @@
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+ void *virt_base;
+ u32 device_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+};
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+{
+ void __iomem *mem_base = NULL;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+ goto out;
+ if (!size)
+ goto out;
+ if (dev->dma_mem)
+ goto out;
+
+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+ mem_base = ioremap(bus_addr, size);
+ if (!mem_base)
+ goto out;
+
+ dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dev->dma_mem)
+ goto out;
+ dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dev->dma_mem->bitmap)
+ goto free1_out;
+
+ dev->dma_mem->virt_base = mem_base;
+ dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->size = pages;
+ dev->dma_mem->flags = flags;
+
+ if (flags & DMA_MEMORY_MAP)
+ return DMA_MEMORY_MAP;
+
+ return DMA_MEMORY_IO;
+
+ free1_out:
+ kfree(dev->dma_mem);
+ out:
+ if (mem_base)
+ iounmap(mem_base);
+ return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+
+ if (!mem)
+ return;
+ dev->dma_mem = NULL;
+ iounmap(mem->virt_base);
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size)
+{
+ struct dma_coherent_mem *mem = dev->dma_mem;
+ int pos, err;
+
+ size += device_addr & ~PAGE_MASK;
+
+ if (!mem)
+ return ERR_PTR(-EINVAL);
+
+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+ if (err != 0)
+ return ERR_PTR(err);
+ return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+/**
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
+ *
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ * of the allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret)
+{
+ struct dma_coherent_mem *mem;
+ int order = get_order(size);
+ int pageno;
+
+ if (!dev)
+ return 0;
+ mem = dev->dma_mem;
+ if (!mem)
+ return 0;
+
+ *ret = NULL;
+
+ if (unlikely(size > (mem->size << PAGE_SHIFT)))
+ goto err;
+
+ pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
+ if (unlikely(pageno < 0))
+ goto err;
+
+ /*
+ * Memory was found in the per-device area.
+ */
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ memset(*ret, 0, size);
+
+ return 1;
+
+err:
+ /*
+ * In the case where the allocation cannot be satisfied from the
+ * per-device area, try to fall back to generic memory if the
+ * constraints allow it.
+ */
+ return mem->flags & DMA_MEMORY_EXCLUSIVE;
+}
+EXPORT_SYMBOL(dma_alloc_from_coherent);
+
+/**
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
+ * @dev: device from which the memory was allocated
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if
+ * dma_release_coherent() should proceed with releasing memory from
+ * generic pools.
+ */
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ if (mem && vaddr >= mem->virt_base && vaddr <
+ (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+ bitmap_release_region(mem->bitmap, page, order);
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dma_release_from_coherent);
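As the kerneldoc above states, these helpers are meant to be called from an architecture's dma_alloc_coherent()/dma_free_coherent(). A hedged sketch of such a wrapper (the foo function names are invented and the fallback path is deliberately simplified):

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <asm/io.h>

void *foo_dma_alloc_coherent(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	void *ret;
	int order = get_order(size);

	/* serve from the per-device pool if one was declared */
	if (dma_alloc_from_coherent(dev, size, handle, &ret))
		return ret;	/* NULL here means DMA_MEMORY_EXCLUSIVE */

	/* otherwise fall back to a generic allocation */
	ret = (void *)__get_free_pages(gfp, order);
	if (ret)
		*handle = virt_to_phys(ret);
	return ret;
}

void foo_dma_free_coherent(struct device *dev, size_t size,
			   void *vaddr, dma_addr_t handle)
{
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;		/* it came from the per-device pool */
	free_pages((unsigned long)vaddr, order);
}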
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 8ae0f63602e..ed2ebd3c287 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -181,7 +181,7 @@ void put_driver(struct device_driver *drv)
EXPORT_SYMBOL_GPL(put_driver);
static int driver_add_groups(struct device_driver *drv,
- struct attribute_group **groups)
+ const struct attribute_group **groups)
{
int error = 0;
int i;
@@ -201,7 +201,7 @@ static int driver_add_groups(struct device_driver *drv,
}
static void driver_remove_groups(struct device_driver *drv,
- struct attribute_group **groups)
+ const struct attribute_group **groups)
{
int i;
diff --git a/drivers/base/init.c b/drivers/base/init.c
index 7bd9b6a5b01..c8a934e7942 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -20,6 +20,7 @@
void __init driver_init(void)
{
/* These are the core pieces */
+ devtmpfs_init();
devices_init();
buses_init();
classes_init();
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 91d4087b403..1fe5536d404 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -85,6 +85,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
"Node %d FilePages: %8lu kB\n"
"Node %d Mapped: %8lu kB\n"
"Node %d AnonPages: %8lu kB\n"
+ "Node %d Shmem: %8lu kB\n"
+ "Node %d KernelStack: %8lu kB\n"
"Node %d PageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
@@ -116,6 +118,9 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
nid, K(node_page_state(nid, NR_ANON_PAGES)),
+ nid, K(node_page_state(nid, NR_SHMEM)),
+ nid, node_page_state(nid, NR_KERNEL_STACK) *
+ THREAD_SIZE / 1024,
nid, K(node_page_state(nid, NR_PAGETABLE)),
nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
nid, K(node_page_state(nid, NR_BOUNCE)),
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 456594bd97b..ed156a13aa4 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -10,6 +10,7 @@
* information.
*/
+#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -17,6 +18,7 @@
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include "base.h"
@@ -212,14 +214,13 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources);
int platform_device_add_data(struct platform_device *pdev, const void *data,
size_t size)
{
- void *d;
+ void *d = kmemdup(data, size, GFP_KERNEL);
- d = kmalloc(size, GFP_KERNEL);
if (d) {
- memcpy(d, data, size);
pdev->dev.platform_data = d;
+ return 0;
}
- return d ? 0 : -ENOMEM;
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(platform_device_add_data);
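With kmemdup() the copy semantics stay the same: the caller's buffer is duplicated, so it may live in initdata or on the stack. A hedged board-code sketch (the foo names and pdata layout are invented):

#include <linux/platform_device.h>

struct foo_platform_data {		/* hypothetical */
	unsigned int gpio;
};

static int __init foo_board_init(void)
{
	static const struct foo_platform_data pdata = { .gpio = 42 };
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc("foo", -1);
	if (!pdev)
		return -ENOMEM;

	/* copied via kmemdup(); 'pdata' itself is not referenced later */
	err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (!err)
		err = platform_device_add(pdev);
	if (err)
		platform_device_put(pdev);
	return err;
}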
@@ -625,30 +626,6 @@ static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
return ret;
}
-static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
-{
- struct platform_driver *pdrv = to_platform_driver(dev->driver);
- struct platform_device *pdev = to_platform_device(dev);
- int ret = 0;
-
- if (dev->driver && pdrv->suspend_late)
- ret = pdrv->suspend_late(pdev, mesg);
-
- return ret;
-}
-
-static int platform_legacy_resume_early(struct device *dev)
-{
- struct platform_driver *pdrv = to_platform_driver(dev->driver);
- struct platform_device *pdev = to_platform_device(dev);
- int ret = 0;
-
- if (dev->driver && pdrv->resume_early)
- ret = pdrv->resume_early(pdev);
-
- return ret;
-}
-
static int platform_legacy_resume(struct device *dev)
{
struct platform_driver *pdrv = to_platform_driver(dev->driver);
@@ -680,6 +657,13 @@ static void platform_pm_complete(struct device *dev)
drv->pm->complete(dev);
}
+#else /* !CONFIG_PM_SLEEP */
+
+#define platform_pm_prepare NULL
+#define platform_pm_complete NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
#ifdef CONFIG_SUSPEND
static int platform_pm_suspend(struct device *dev)
@@ -711,8 +695,6 @@ static int platform_pm_suspend_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->suspend_noirq)
ret = drv->pm->suspend_noirq(dev);
- } else {
- ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
}
return ret;
@@ -747,8 +729,6 @@ static int platform_pm_resume_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->resume_noirq)
ret = drv->pm->resume_noirq(dev);
- } else {
- ret = platform_legacy_resume_early(dev);
}
return ret;
@@ -794,8 +774,6 @@ static int platform_pm_freeze_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->freeze_noirq)
ret = drv->pm->freeze_noirq(dev);
- } else {
- ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
}
return ret;
@@ -830,8 +808,6 @@ static int platform_pm_thaw_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->thaw_noirq)
ret = drv->pm->thaw_noirq(dev);
- } else {
- ret = platform_legacy_resume_early(dev);
}
return ret;
@@ -866,8 +842,6 @@ static int platform_pm_poweroff_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->poweroff_noirq)
ret = drv->pm->poweroff_noirq(dev);
- } else {
- ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
}
return ret;
@@ -902,8 +876,6 @@ static int platform_pm_restore_noirq(struct device *dev)
if (drv->pm) {
if (drv->pm->restore_noirq)
ret = drv->pm->restore_noirq(dev);
- } else {
- ret = platform_legacy_resume_early(dev);
}
return ret;
@@ -922,7 +894,32 @@ static int platform_pm_restore_noirq(struct device *dev)
#endif /* !CONFIG_HIBERNATION */
-static struct dev_pm_ops platform_dev_pm_ops = {
+#ifdef CONFIG_PM_RUNTIME
+
+int __weak platform_pm_runtime_suspend(struct device *dev)
+{
+ return -ENOSYS;
+}
+
+int __weak platform_pm_runtime_resume(struct device *dev)
+{
+ return -ENOSYS;
+}
+
+int __weak platform_pm_runtime_idle(struct device *dev)
+{
+ return -ENOSYS;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define platform_pm_runtime_suspend NULL
+#define platform_pm_runtime_resume NULL
+#define platform_pm_runtime_idle NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops platform_dev_pm_ops = {
.prepare = platform_pm_prepare,
.complete = platform_pm_complete,
.suspend = platform_pm_suspend,
@@ -937,22 +934,17 @@ static struct dev_pm_ops platform_dev_pm_ops = {
.thaw_noirq = platform_pm_thaw_noirq,
.poweroff_noirq = platform_pm_poweroff_noirq,
.restore_noirq = platform_pm_restore_noirq,
+ .runtime_suspend = platform_pm_runtime_suspend,
+ .runtime_resume = platform_pm_runtime_resume,
+ .runtime_idle = platform_pm_runtime_idle,
};
-#define PLATFORM_PM_OPS_PTR (&platform_dev_pm_ops)
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define PLATFORM_PM_OPS_PTR NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
struct bus_type platform_bus_type = {
.name = "platform",
.dev_attrs = platform_dev_attrs,
.match = platform_match,
.uevent = platform_uevent,
- .pm = PLATFORM_PM_OPS_PTR,
+ .pm = &platform_dev_pm_ops,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
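The __weak platform_pm_runtime_* stubs return -ENOSYS until a platform provides strong definitions, typically forwarding to the bound driver's dev_pm_ops and doing platform-level clock or power work around that. A hedged sketch of such an override (the clock handling is only indicated in comments):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* hypothetical strong definitions, e.g. in arch/.../pm_runtime.c */
int platform_pm_runtime_suspend(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int ret = 0;

	if (drv && drv->pm && drv->pm->runtime_suspend)
		ret = drv->pm->runtime_suspend(dev);
	/* on success, a real platform would gate clocks/power here */
	return ret;
}

int platform_pm_runtime_resume(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	/* a real platform would ungate clocks/power first */
	if (drv && drv->pm && drv->pm->runtime_resume)
		return drv->pm->runtime_resume(dev);
	return 0;
}

int platform_pm_runtime_idle(struct device *dev)
{
	/* nothing keeps the device busy: just try to suspend it */
	return pm_runtime_suspend(dev);
}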
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 911208b8925..3ce3519e8f3 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_PM) += sysfs.o
obj-$(CONFIG_PM_SLEEP) += main.o
+obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 58a3e572f2c..e0dc4071e08 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/rwsem.h>
#include <linux/interrupt.h>
@@ -49,7 +50,17 @@ static DEFINE_MUTEX(dpm_list_mtx);
static bool transition_started;
/**
- * device_pm_lock - lock the list of active devices used by the PM core
+ * device_pm_init - Initialize the PM-related part of a device object.
+ * @dev: Device object being initialized.
+ */
+void device_pm_init(struct device *dev)
+{
+ dev->power.status = DPM_ON;
+ pm_runtime_init(dev);
+}
+
+/**
+ * device_pm_lock - Lock the list of active devices used by the PM core.
*/
void device_pm_lock(void)
{
@@ -57,7 +68,7 @@ void device_pm_lock(void)
}
/**
- * device_pm_unlock - unlock the list of active devices used by the PM core
+ * device_pm_unlock - Unlock the list of active devices used by the PM core.
*/
void device_pm_unlock(void)
{
@@ -65,8 +76,8 @@ void device_pm_unlock(void)
}
/**
- * device_pm_add - add a device to the list of active devices
- * @dev: Device to be added to the list
+ * device_pm_add - Add a device to the PM core's list of active devices.
+ * @dev: Device to add to the list.
*/
void device_pm_add(struct device *dev)
{
@@ -92,10 +103,8 @@ void device_pm_add(struct device *dev)
}
/**
- * device_pm_remove - remove a device from the list of active devices
- * @dev: Device to be removed from the list
- *
- * This function also removes the device's PM-related sysfs attributes.
+ * device_pm_remove - Remove a device from the PM core's list of active devices.
+ * @dev: Device to be removed from the list.
*/
void device_pm_remove(struct device *dev)
{
@@ -105,12 +114,13 @@ void device_pm_remove(struct device *dev)
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
+ pm_runtime_remove(dev);
}
/**
- * device_pm_move_before - move device in dpm_list
- * @deva: Device to move in dpm_list
- * @devb: Device @deva should come before
+ * device_pm_move_before - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come before.
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
@@ -124,9 +134,9 @@ void device_pm_move_before(struct device *deva, struct device *devb)
}
/**
- * device_pm_move_after - move device in dpm_list
- * @deva: Device to move in dpm_list
- * @devb: Device @deva should come after
+ * device_pm_move_after - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come after.
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
@@ -140,8 +150,8 @@ void device_pm_move_after(struct device *deva, struct device *devb)
}
/**
- * device_pm_move_last - move device to end of dpm_list
- * @dev: Device to move in dpm_list
+ * device_pm_move_last - Move device to end of the PM core's list of devices.
+ * @dev: Device to move in dpm_list.
*/
void device_pm_move_last(struct device *dev)
{
@@ -152,13 +162,14 @@ void device_pm_move_last(struct device *dev)
}
/**
- * pm_op - execute the PM operation appropiate for given PM event
- * @dev: Device.
- * @ops: PM operations to choose from.
- * @state: PM transition of the system being carried out.
+ * pm_op - Execute the PM operation appropriate for given PM event.
+ * @dev: Device to handle.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
*/
-static int pm_op(struct device *dev, struct dev_pm_ops *ops,
- pm_message_t state)
+static int pm_op(struct device *dev,
+ const struct dev_pm_ops *ops,
+ pm_message_t state)
{
int error = 0;
@@ -212,15 +223,16 @@ static int pm_op(struct device *dev, struct dev_pm_ops *ops,
}
/**
- * pm_noirq_op - execute the PM operation appropiate for given PM event
- * @dev: Device.
- * @ops: PM operations to choose from.
- * @state: PM transition of the system being carried out.
+ * pm_noirq_op - Execute the PM operation appropriate for given PM event.
+ * @dev: Device to handle.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
*
- * The operation is executed with interrupts disabled by the only remaining
- * functional CPU in the system.
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
*/
-static int pm_noirq_op(struct device *dev, struct dev_pm_ops *ops,
+static int pm_noirq_op(struct device *dev,
+ const struct dev_pm_ops *ops,
pm_message_t state)
{
int error = 0;
@@ -315,11 +327,12 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
/*------------------------- Resume routines -------------------------*/
/**
- * device_resume_noirq - Power on one device (early resume).
- * @dev: Device.
- * @state: PM transition of the system being carried out.
+ * device_resume_noirq - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
*
- * Must be called with interrupts disabled.
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
*/
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
@@ -341,20 +354,18 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
/**
- * dpm_resume_noirq - Power on all regular (non-sysdev) devices.
- * @state: PM transition of the system being carried out.
- *
- * Call the "noirq" resume handlers for all devices marked as
- * DPM_OFF_IRQ and enable device drivers to receive interrupts.
+ * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
*
- * Must be called under dpm_list_mtx. Device drivers should not receive
- * interrupts while it's being executed.
+ * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
+ * enable device drivers to receive interrupts.
*/
void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
mutex_lock(&dpm_list_mtx);
+ transition_started = false;
list_for_each_entry(dev, &dpm_list, power.entry)
if (dev->power.status > DPM_OFF) {
int error;
@@ -370,9 +381,9 @@ void dpm_resume_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
- * device_resume - Restore state for one device.
- * @dev: Device.
- * @state: PM transition of the system being carried out.
+ * device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
*/
static int device_resume(struct device *dev, pm_message_t state)
{
@@ -421,11 +432,11 @@ static int device_resume(struct device *dev, pm_message_t state)
}
/**
- * dpm_resume - Resume every device.
- * @state: PM transition of the system being carried out.
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
*
- * Execute the appropriate "resume" callback for all devices the status of
- * which indicates that they are inactive.
+ * Execute the appropriate "resume" callback for all devices whose status
+ * indicates that they are suspended.
*/
static void dpm_resume(pm_message_t state)
{
@@ -433,7 +444,6 @@ static void dpm_resume(pm_message_t state)
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- transition_started = false;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
@@ -462,9 +472,9 @@ static void dpm_resume(pm_message_t state)
}
/**
- * device_complete - Complete a PM transition for given device
- * @dev: Device.
- * @state: PM transition of the system being carried out.
+ * device_complete - Complete a PM transition for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
*/
static void device_complete(struct device *dev, pm_message_t state)
{
@@ -489,11 +499,11 @@ static void device_complete(struct device *dev, pm_message_t state)
}
/**
- * dpm_complete - Complete a PM transition for all devices.
- * @state: PM transition of the system being carried out.
+ * dpm_complete - Complete a PM transition for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
*
- * Execute the ->complete() callbacks for all devices that are not marked
- * as DPM_ON.
+ * Execute the ->complete() callbacks for all devices whose PM status is not
+ * DPM_ON (this allows new devices to be registered).
*/
static void dpm_complete(pm_message_t state)
{
@@ -510,6 +520,7 @@ static void dpm_complete(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
device_complete(dev, state);
+ pm_runtime_put_noidle(dev);
mutex_lock(&dpm_list_mtx);
}
@@ -522,11 +533,11 @@ static void dpm_complete(pm_message_t state)
}
/**
- * dpm_resume_end - Restore state of each device in system.
- * @state: PM transition of the system being carried out.
+ * dpm_resume_end - Execute "resume" callbacks and complete system transition.
+ * @state: PM transition of the system being carried out.
*
- * Resume all the devices, unlock them all, and allow new
- * devices to be registered once again.
+ * Execute "resume" callbacks for all devices and complete the PM transition of
+ * the system.
*/
void dpm_resume_end(pm_message_t state)
{
@@ -540,9 +551,11 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
- * resume_event - return a PM message representing the resume event
- * corresponding to given sleep state.
- * @sleep_state: PM message representing a sleep state.
+ * resume_event - Return a "resume" message for given "suspend" sleep state.
+ * @sleep_state: PM message representing a sleep state.
+ *
+ * Return a PM message representing the resume event corresponding to given
+ * sleep state.
*/
static pm_message_t resume_event(pm_message_t sleep_state)
{
@@ -559,11 +572,12 @@ static pm_message_t resume_event(pm_message_t sleep_state)
}
/**
- * device_suspend_noirq - Shut down one device (late suspend).
- * @dev: Device.
- * @state: PM transition of the system being carried out.
+ * device_suspend_noirq - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
*
- * This is called with interrupts off and only a single CPU running.
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
@@ -580,13 +594,11 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
}
/**
- * dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
- * @state: PM transition of the system being carried out.
- *
- * Prevent device drivers from receiving interrupts and call the "noirq"
- * suspend handlers.
+ * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
*
- * Must be called under dpm_list_mtx.
+ * Prevent device drivers from receiving interrupts and call the "noirq" suspend
+ * handlers for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
{
@@ -611,9 +623,9 @@ int dpm_suspend_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
- * device_suspend - Save state of one device.
- * @dev: Device.
- * @state: PM transition of the system being carried out.
+ * device_suspend - Execute "suspend" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
*/
static int device_suspend(struct device *dev, pm_message_t state)
{
@@ -660,10 +672,8 @@ static int device_suspend(struct device *dev, pm_message_t state)
}
/**
- * dpm_suspend - Suspend every device.
- * @state: PM transition of the system being carried out.
- *
- * Execute the appropriate "suspend" callbacks for all devices.
+ * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
*/
static int dpm_suspend(pm_message_t state)
{
@@ -697,9 +707,12 @@ static int dpm_suspend(pm_message_t state)
}
/**
- * device_prepare - Execute the ->prepare() callback(s) for given device.
- * @dev: Device.
- * @state: PM transition of the system being carried out.
+ * device_prepare - Prepare a device for system power transition.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback(s) for given device. No new children of the
+ * device may be registered after this function has returned.
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
@@ -735,10 +748,10 @@ static int device_prepare(struct device *dev, pm_message_t state)
}
/**
- * dpm_prepare - Prepare all devices for a PM transition.
- * @state: PM transition of the system being carried out.
+ * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
+ * @state: PM transition of the system being carried out.
*
- * Execute the ->prepare() callback for all devices.
+ * Execute the ->prepare() callback(s) for all devices.
*/
static int dpm_prepare(pm_message_t state)
{
@@ -755,7 +768,14 @@ static int dpm_prepare(pm_message_t state)
dev->power.status = DPM_PREPARING;
mutex_unlock(&dpm_list_mtx);
- error = device_prepare(dev, state);
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
+ /* Wake-up requested during system sleep transition. */
+ pm_runtime_put_noidle(dev);
+ error = -EBUSY;
+ } else {
+ error = device_prepare(dev, state);
+ }
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -782,10 +802,11 @@ static int dpm_prepare(pm_message_t state)
}
/**
- * dpm_suspend_start - Save state and stop all devices in system.
- * @state: PM transition of the system being carried out.
+ * dpm_suspend_start - Prepare devices for PM transition and suspend them.
+ * @state: PM transition of the system being carried out.
*
- * Prepare and suspend all devices.
+ * Prepare all non-sysdev devices for system PM transition and execute "suspend"
+ * callbacks for them.
*/
int dpm_suspend_start(pm_message_t state)
{
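The same get_noresume/barrier ... put bracket that dpm_prepare() now uses also appears around probe and unbind in dd.c above. Distilled into a hedged helper (the helper itself is invented; only the call sequence comes from this patch):

#include <linux/pm_runtime.h>

static int run_shielded_from_runtime_pm(struct device *dev,
					int (*op)(struct device *))
{
	int ret;

	pm_runtime_get_noresume(dev);	/* pin the usage count */
	pm_runtime_barrier(dev);	/* flush pending runtime PM requests */
	ret = op(dev);			/* e.g. probe, unbind, prepare */
	pm_runtime_put_sync(dev);	/* drop the reference again */
	return ret;
}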
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c7cb4fc3735..b8fa1aa5225 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,7 +1,14 @@
-static inline void device_pm_init(struct device *dev)
-{
- dev->power.status = DPM_ON;
-}
+#ifdef CONFIG_PM_RUNTIME
+
+extern void pm_runtime_init(struct device *dev);
+extern void pm_runtime_remove(struct device *dev);
+
+#else /* !CONFIG_PM_RUNTIME */
+
+static inline void pm_runtime_init(struct device *dev) {}
+static inline void pm_runtime_remove(struct device *dev) {}
+
+#endif /* !CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
@@ -16,23 +23,33 @@ static inline struct device *to_device(struct list_head *entry)
return container_of(entry, struct device, power.entry);
}
+extern void device_pm_init(struct device *dev);
extern void device_pm_add(struct device *);
extern void device_pm_remove(struct device *);
extern void device_pm_move_before(struct device *, struct device *);
extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
-#else /* CONFIG_PM_SLEEP */
+#else /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_init(struct device *dev)
+{
+ pm_runtime_init(dev);
+}
+
+static inline void device_pm_remove(struct device *dev)
+{
+ pm_runtime_remove(dev);
+}
static inline void device_pm_add(struct device *dev) {}
-static inline void device_pm_remove(struct device *dev) {}
static inline void device_pm_move_before(struct device *deva,
struct device *devb) {}
static inline void device_pm_move_after(struct device *deva,
struct device *devb) {}
static inline void device_pm_move_last(struct device *dev) {}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
new file mode 100644
index 00000000000..38556f6cc22
--- /dev/null
+++ b/drivers/base/power/runtime.c
@@ -0,0 +1,1011 @@
+/*
+ * drivers/base/power/runtime.c - Helper functions for device run-time PM
+ *
+ * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/jiffies.h>
+
+static int __pm_runtime_resume(struct device *dev, bool from_wq);
+static int __pm_request_idle(struct device *dev);
+static int __pm_request_resume(struct device *dev);
+
+/**
+ * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_deactivate_timer(struct device *dev)
+{
+ if (dev->power.timer_expires > 0) {
+ del_timer(&dev->power.suspend_timer);
+ dev->power.timer_expires = 0;
+ }
+}
+
+/**
+ * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_cancel_pending(struct device *dev)
+{
+ pm_runtime_deactivate_timer(dev);
+ /*
+ * In case there's a request pending, make sure its work function will
+ * return without doing anything.
+ */
+ dev->power.request = RPM_REQ_NONE;
+}
+
+/**
+ * __pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_runtime_idle(struct device *dev)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int retval = 0;
+
+ dev_dbg(dev, "__pm_runtime_idle()!\n");
+
+ if (dev->power.runtime_error)
+ retval = -EINVAL;
+ else if (dev->power.idle_notification)
+ retval = -EINPROGRESS;
+ else if (atomic_read(&dev->power.usage_count) > 0
+ || dev->power.disable_depth > 0
+ || dev->power.runtime_status != RPM_ACTIVE)
+ retval = -EAGAIN;
+ else if (!pm_children_suspended(dev))
+ retval = -EBUSY;
+ if (retval)
+ goto out;
+
+ if (dev->power.request_pending) {
+ /*
+ * If an idle notification request is pending, cancel it. Any
+ * other pending request takes precedence over us.
+ */
+ if (dev->power.request == RPM_REQ_IDLE) {
+ dev->power.request = RPM_REQ_NONE;
+ } else if (dev->power.request != RPM_REQ_NONE) {
+ retval = -EAGAIN;
+ goto out;
+ }
+ }
+
+ dev->power.idle_notification = true;
+
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
+ spin_unlock_irq(&dev->power.lock);
+
+ dev->bus->pm->runtime_idle(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ dev->power.idle_notification = false;
+ wake_up_all(&dev->power.wait_queue);
+
+ out:
+ dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval);
+
+ return retval;
+}
+
+/**
+ * pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ */
+int pm_runtime_idle(struct device *dev)
+{
+ int retval;
+
+ spin_lock_irq(&dev->power.lock);
+ retval = __pm_runtime_idle(dev);
+ spin_unlock_irq(&dev->power.lock);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * @dev: Device to suspend.
+ * @from_wq: If set, the function has been called via pm_wq.
+ *
+ * Check if the device can be suspended and run the ->runtime_suspend() callback
+ * provided by its bus type. If another suspend has been started earlier, wait
+ * for it to finish. If an idle notification or suspend request is pending or
+ * scheduled, cancel it.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+int __pm_runtime_suspend(struct device *dev, bool from_wq)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ struct device *parent = NULL;
+ bool notify = false;
+ int retval = 0;
+
+ dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
+ from_wq ? " from workqueue" : "");
+
+ repeat:
+ if (dev->power.runtime_error) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ /* Pending resume requests take precedence over us. */
+ if (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME) {
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ /* Other scheduled or pending requests need to be canceled. */
+ pm_runtime_cancel_pending(dev);
+
+ if (dev->power.runtime_status == RPM_SUSPENDED)
+ retval = 1;
+ else if (dev->power.runtime_status == RPM_RESUMING
+ || dev->power.disable_depth > 0
+ || atomic_read(&dev->power.usage_count) > 0)
+ retval = -EAGAIN;
+ else if (!pm_children_suspended(dev))
+ retval = -EBUSY;
+ if (retval)
+ goto out;
+
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
+ DEFINE_WAIT(wait);
+
+ if (from_wq) {
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ /* Wait for the other suspend running in parallel with us. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_SUSPENDING)
+ break;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ goto repeat;
+ }
+
+ dev->power.runtime_status = RPM_SUSPENDING;
+
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->bus->pm->runtime_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
+ } else {
+ retval = -ENOSYS;
+ }
+
+ if (retval) {
+ dev->power.runtime_status = RPM_ACTIVE;
+ pm_runtime_cancel_pending(dev);
+ dev->power.deferred_resume = false;
+
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ notify = true;
+ dev->power.runtime_error = 0;
+ }
+ } else {
+ dev->power.runtime_status = RPM_SUSPENDED;
+
+ if (dev->parent) {
+ parent = dev->parent;
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ }
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+ if (dev->power.deferred_resume) {
+ dev->power.deferred_resume = false;
+ __pm_runtime_resume(dev, false);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ if (notify)
+ __pm_runtime_idle(dev);
+
+ if (parent && !parent->power.ignore_children) {
+ spin_unlock_irq(&dev->power.lock);
+
+ pm_request_idle(parent);
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ out:
+ dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);
+
+ return retval;
+}
+
+/**
+ * pm_runtime_suspend - Carry out run-time suspend of given device.
+ * @dev: Device to suspend.
+ */
+int pm_runtime_suspend(struct device *dev)
+{
+ int retval;
+
+ spin_lock_irq(&dev->power.lock);
+ retval = __pm_runtime_suspend(dev, false);
+ spin_unlock_irq(&dev->power.lock);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspend);
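+
+/*
+ * Illustration only: pm_runtime_suspend() ends up in the bus type's
+ * ->runtime_suspend() callback from struct dev_pm_ops.  A hypothetical bus
+ * type ("foo" names are made up) might wire it up like this:
+ *
+ *	static int foo_bus_runtime_suspend(struct device *dev)
+ *	{
+ *		... put the device into a low-power state ...
+ *		return 0;
+ *	}
+ *
+ *	static const struct dev_pm_ops foo_bus_pm_ops = {
+ *		.runtime_suspend = foo_bus_runtime_suspend,
+ *	};
+ *
+ * Returning -EBUSY or -EAGAIN from the callback is not treated as a hard
+ * error: as the code above shows, the device goes back to RPM_ACTIVE and
+ * another idle notification follows.
+ */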
+
+/**
+ * __pm_runtime_resume - Carry out run-time resume of given device.
+ * @dev: Device to resume.
+ * @from_wq: If set, the function has been called via pm_wq.
+ *
+ * Check if the device can be woken up and run the ->runtime_resume() callback
+ * provided by its bus type. If another resume has been started earlier, wait
+ * for it to finish. If there's a suspend running in parallel with this
+ * function, wait for it to finish and resume the device. Cancel any scheduled
+ * or pending requests.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+int __pm_runtime_resume(struct device *dev, bool from_wq)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ struct device *parent = NULL;
+ int retval = 0;
+
+ dev_dbg(dev, "__pm_runtime_resume()%s!\n",
+ from_wq ? " from workqueue" : "");
+
+ repeat:
+ if (dev->power.runtime_error) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ pm_runtime_cancel_pending(dev);
+
+ if (dev->power.runtime_status == RPM_ACTIVE)
+ retval = 1;
+ else if (dev->power.disable_depth > 0)
+ retval = -EAGAIN;
+ if (retval)
+ goto out;
+
+ if (dev->power.runtime_status == RPM_RESUMING
+ || dev->power.runtime_status == RPM_SUSPENDING) {
+ DEFINE_WAIT(wait);
+
+ if (from_wq) {
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ dev->power.deferred_resume = true;
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ /* Wait for the operation carried out in parallel with us. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_RESUMING
+ && dev->power.runtime_status != RPM_SUSPENDING)
+ break;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ goto repeat;
+ }
+
+ if (!parent && dev->parent) {
+ /*
+ * Increment the parent's resume counter and resume it if
+ * necessary.
+ */
+ parent = dev->parent;
+ spin_unlock_irq(&dev->power.lock);
+
+ pm_runtime_get_noresume(parent);
+
+ spin_lock_irq(&parent->power.lock);
+ /*
+ * We can resume if the parent's run-time PM is disabled or it
+ * is set to ignore children.
+ */
+ if (!parent->power.disable_depth
+ && !parent->power.ignore_children) {
+ __pm_runtime_resume(parent, false);
+ if (parent->power.runtime_status != RPM_ACTIVE)
+ retval = -EBUSY;
+ }
+ spin_unlock_irq(&parent->power.lock);
+
+ spin_lock_irq(&dev->power.lock);
+ if (retval)
+ goto out;
+ goto repeat;
+ }
+
+ dev->power.runtime_status = RPM_RESUMING;
+
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->bus->pm->runtime_resume(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
+ } else {
+ retval = -ENOSYS;
+ }
+
+ if (retval) {
+ dev->power.runtime_status = RPM_SUSPENDED;
+ pm_runtime_cancel_pending(dev);
+ } else {
+ dev->power.runtime_status = RPM_ACTIVE;
+ if (parent)
+ atomic_inc(&parent->power.child_count);
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+ if (!retval)
+ __pm_request_idle(dev);
+
+ out:
+ if (parent) {
+ spin_unlock_irq(&dev->power.lock);
+
+ pm_runtime_put(parent);
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);
+
+ return retval;
+}
+
+/**
+ * pm_runtime_resume - Carry out run-time resume of given device.
+ * @dev: Device to suspend.
+ */
+int pm_runtime_resume(struct device *dev)
+{
+ int retval;
+
+ spin_lock_irq(&dev->power.lock);
+ retval = __pm_runtime_resume(dev, false);
+ spin_unlock_irq(&dev->power.lock);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_resume);
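+
+/*
+ * Illustration only: a driver may resume its device synchronously before
+ * touching the hardware, although most code should prefer the usage-counter
+ * helpers (see __pm_runtime_get() below).  Note that a return value of 1
+ * (already active) is not an error:
+ *
+ *	error = pm_runtime_resume(dev);
+ *	if (error < 0)
+ *		return error;
+ *	... access the hardware ...
+ */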
+
+/**
+ * pm_runtime_work - Universal run-time PM work function.
+ * @work: Work structure used for scheduling the execution of this function.
+ *
+ * Use @work to get the device object the work is to be done for, determine
+ * what is to be done, and execute the appropriate run-time PM function.
+ */
+static void pm_runtime_work(struct work_struct *work)
+{
+ struct device *dev = container_of(work, struct device, power.work);
+ enum rpm_request req;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (!dev->power.request_pending)
+ goto out;
+
+ req = dev->power.request;
+ dev->power.request = RPM_REQ_NONE;
+ dev->power.request_pending = false;
+
+ switch (req) {
+ case RPM_REQ_NONE:
+ break;
+ case RPM_REQ_IDLE:
+ __pm_runtime_idle(dev);
+ break;
+ case RPM_REQ_SUSPEND:
+ __pm_runtime_suspend(dev, true);
+ break;
+ case RPM_REQ_RESUME:
+ __pm_runtime_resume(dev, true);
+ break;
+ }
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * __pm_request_idle - Submit an idle notification request for given device.
+ * @dev: Device to handle.
+ *
+ * Check if the device's run-time PM status is suitable for suspending the
+ * device and, if so, queue up a request to run __pm_runtime_idle() for it.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_request_idle(struct device *dev)
+{
+ int retval = 0;
+
+ if (dev->power.runtime_error)
+ retval = -EINVAL;
+ else if (atomic_read(&dev->power.usage_count) > 0
+ || dev->power.disable_depth > 0
+ || dev->power.runtime_status == RPM_SUSPENDED
+ || dev->power.runtime_status == RPM_SUSPENDING)
+ retval = -EAGAIN;
+ else if (!pm_children_suspended(dev))
+ retval = -EBUSY;
+ if (retval)
+ return retval;
+
+ if (dev->power.request_pending) {
+ /* Any requests other than RPM_REQ_IDLE take precedence. */
+ if (dev->power.request == RPM_REQ_NONE)
+ dev->power.request = RPM_REQ_IDLE;
+ else if (dev->power.request != RPM_REQ_IDLE)
+ retval = -EAGAIN;
+ return retval;
+ }
+
+ dev->power.request = RPM_REQ_IDLE;
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+
+ return retval;
+}
+
+/**
+ * pm_request_idle - Submit an idle notification request for given device.
+ * @dev: Device to handle.
+ */
+int pm_request_idle(struct device *dev)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = __pm_request_idle(dev);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_request_idle);
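+
+/*
+ * Because the lock is taken with spin_lock_irqsave(), pm_request_idle() may
+ * be called from interrupt context; the idle notification itself then runs
+ * from pm_wq.  Illustrative sketch with hypothetical "foo" names:
+ *
+ *	static irqreturn_t foo_irq(int irq, void *data)
+ *	{
+ *		struct foo_chip *chip = data;
+ *
+ *		if (foo_queue_empty(chip))
+ *			pm_request_idle(chip->dev);
+ *		return IRQ_HANDLED;
+ *	}
+ */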
+
+/**
+ * __pm_request_suspend - Submit a suspend request for given device.
+ * @dev: Device to suspend.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_request_suspend(struct device *dev)
+{
+ int retval = 0;
+
+ if (dev->power.runtime_error)
+ return -EINVAL;
+
+ if (dev->power.runtime_status == RPM_SUSPENDED)
+ retval = 1;
+ else if (atomic_read(&dev->power.usage_count) > 0
+ || dev->power.disable_depth > 0)
+ retval = -EAGAIN;
+ else if (dev->power.runtime_status == RPM_SUSPENDING)
+ retval = -EINPROGRESS;
+ else if (!pm_children_suspended(dev))
+ retval = -EBUSY;
+ if (retval < 0)
+ return retval;
+
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.request_pending) {
+ /*
+ * Pending resume requests take precedence over us, but we can
+ * overtake any other pending request.
+ */
+ if (dev->power.request == RPM_REQ_RESUME)
+ retval = -EAGAIN;
+ else if (dev->power.request != RPM_REQ_SUSPEND)
+ dev->power.request = retval ?
+ RPM_REQ_NONE : RPM_REQ_SUSPEND;
+ return retval;
+ } else if (retval) {
+ return retval;
+ }
+
+ dev->power.request = RPM_REQ_SUSPEND;
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+
+ return 0;
+}
+
+/**
+ * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
+ * @data: Device pointer passed by pm_schedule_suspend().
+ *
+ * Check whether the timer has expired and, if so, execute
+ * __pm_request_suspend().
+ */
+static void pm_suspend_timer_fn(unsigned long data)
+{
+ struct device *dev = (struct device *)data;
+ unsigned long flags;
+ unsigned long expires;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ expires = dev->power.timer_expires;
+ /* If 'expires' is after 'jiffies' we've been called too early. */
+ if (expires > 0 && !time_after(expires, jiffies)) {
+ dev->power.timer_expires = 0;
+ __pm_request_suspend(dev);
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+
+/**
+ * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
+ * @dev: Device to suspend.
+ * @delay: Time to wait before submitting a suspend request, in milliseconds.
+ */
+int pm_schedule_suspend(struct device *dev, unsigned int delay)
+{
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.runtime_error) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (!delay) {
+ retval = __pm_request_suspend(dev);
+ goto out;
+ }
+
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.request_pending) {
+ /*
+ * Pending resume requests take precedence over us, but any
+ * other pending requests have to be canceled.
+ */
+ if (dev->power.request == RPM_REQ_RESUME) {
+ retval = -EAGAIN;
+ goto out;
+ }
+ dev->power.request = RPM_REQ_NONE;
+ }
+
+ if (dev->power.runtime_status == RPM_SUSPENDED)
+ retval = 1;
+ else if (dev->power.runtime_status == RPM_SUSPENDING)
+ retval = -EINPROGRESS;
+ else if (atomic_read(&dev->power.usage_count) > 0
+ || dev->power.disable_depth > 0)
+ retval = -EAGAIN;
+ else if (!pm_children_suspended(dev))
+ retval = -EBUSY;
+ if (retval)
+ goto out;
+
+ dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
+ mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_schedule_suspend);
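+
+/*
+ * Illustration only: a bus type's ->runtime_idle() callback might use this
+ * to let the device sit idle for a while before it is actually suspended,
+ * instead of suspending it immediately ("foo" name is hypothetical):
+ *
+ *	static int foo_bus_runtime_idle(struct device *dev)
+ *	{
+ *		return pm_schedule_suspend(dev, 500);
+ *	}
+ */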
+
+/**
+ * __pm_request_resume - Submit a resume request for given device.
+ * @dev: Device to resume.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int __pm_request_resume(struct device *dev)
+{
+ int retval = 0;
+
+ if (dev->power.runtime_error)
+ return -EINVAL;
+
+ if (dev->power.runtime_status == RPM_ACTIVE)
+ retval = 1;
+ else if (dev->power.runtime_status == RPM_RESUMING)
+ retval = -EINPROGRESS;
+ else if (dev->power.disable_depth > 0)
+ retval = -EAGAIN;
+ if (retval < 0)
+ return retval;
+
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.request_pending) {
+ /* If a non-resume request is pending, we can overtake it. */
+ dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
+ return retval;
+ } else if (retval) {
+ return retval;
+ }
+
+ dev->power.request = RPM_REQ_RESUME;
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+
+ return retval;
+}
+
+/**
+ * pm_request_resume - Submit a resume request for given device.
+ * @dev: Device to resume.
+ */
+int pm_request_resume(struct device *dev)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = __pm_request_resume(dev);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_request_resume);
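+
+/*
+ * Like pm_request_idle(), this is usable from interrupt context, which makes
+ * it a natural fit for remote-wakeup handling.  Hypothetical sketch:
+ *
+ *	static irqreturn_t foo_wakeup_irq(int irq, void *data)
+ *	{
+ *		struct device *dev = data;
+ *
+ *		pm_request_resume(dev);
+ *		return IRQ_HANDLED;
+ *	}
+ */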
+
+/**
+ * __pm_runtime_get - Reference count a device and wake it up, if necessary.
+ * @dev: Device to handle.
+ * @sync: If set and the device is suspended, resume it synchronously.
+ *
+ * Increment the usage count of the device and, if it was zero previously,
+ * resume it or submit a resume request for it, depending on the value of @sync.
+ */
+int __pm_runtime_get(struct device *dev, bool sync)
+{
+ int retval = 1;
+
+ if (atomic_add_return(1, &dev->power.usage_count) == 1)
+ retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_get);
+
+/**
+ * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
+ * @dev: Device to handle.
+ * @sync: If the device's bus type is to be notified, do that synchronously.
+ *
+ * Decrement the usage count of the device and, if it reaches zero, carry out a
+ * synchronous idle notification or submit an idle notification request for it,
+ * depending on the value of @sync.
+ */
+int __pm_runtime_put(struct device *dev, bool sync)
+{
+ int retval = 0;
+
+ if (atomic_dec_and_test(&dev->power.usage_count))
+ retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_put);
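+
+/*
+ * Illustration only: __pm_runtime_get() and __pm_runtime_put() are meant to
+ * bracket periods in which the device must stay active.  Assuming the usual
+ * static inline wrappers around them (pm_runtime_get_sync() and
+ * pm_runtime_put() from <linux/pm_runtime.h>), a driver's I/O path might
+ * look like this:
+ *
+ *	pm_runtime_get_sync(dev);
+ *	... carry out the I/O ...
+ *	pm_runtime_put(dev);
+ */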
+
+/**
+ * __pm_runtime_set_status - Set run-time PM status of a device.
+ * @dev: Device to handle.
+ * @status: New run-time PM status of the device.
+ *
+ * If run-time PM of the device is disabled or its power.runtime_error field is
+ * different from zero, the status may be changed either to RPM_ACTIVE, or to
+ * RPM_SUSPENDED, as long as that reflects the actual state of the device.
+ * However, if the device has a parent and the parent is not active, and the
+ * parent's power.ignore_children flag is unset, the device's status cannot be
+ * set to RPM_ACTIVE, so -EBUSY is returned in that case.
+ *
+ * If successful, __pm_runtime_set_status() clears the power.runtime_error field
+ * and the device parent's counter of unsuspended children is modified to
+ * reflect the new status. If the new status is RPM_SUSPENDED, an idle
+ * notification request for the parent is submitted.
+ */
+int __pm_runtime_set_status(struct device *dev, unsigned int status)
+{
+ struct device *parent = dev->parent;
+ unsigned long flags;
+ bool notify_parent = false;
+ int error = 0;
+
+ if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!dev->power.runtime_error && !dev->power.disable_depth) {
+ error = -EAGAIN;
+ goto out;
+ }
+
+ if (dev->power.runtime_status == status)
+ goto out_set;
+
+ if (status == RPM_SUSPENDED) {
+ /* It is always possible to set the status to 'suspended'. */
+ if (parent) {
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ notify_parent = !parent->power.ignore_children;
+ }
+ goto out_set;
+ }
+
+ if (parent) {
+ spin_lock_irq(&parent->power.lock);
+
+ /*
+ * It is invalid to put an active child under a parent that is
+ * not active, has run-time PM enabled, and has the
+ * 'power.ignore_children' flag unset.
+ */
+ if (!parent->power.disable_depth
+ && !parent->power.ignore_children
+ && parent->power.runtime_status != RPM_ACTIVE) {
+ error = -EBUSY;
+ } else {
+ if (dev->power.runtime_status == RPM_SUSPENDED)
+ atomic_inc(&parent->power.child_count);
+ }
+
+ spin_unlock_irq(&parent->power.lock);
+
+ if (error)
+ goto out;
+ }
+
+ out_set:
+ dev->power.runtime_status = status;
+ dev->power.runtime_error = 0;
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (notify_parent)
+ pm_request_idle(parent);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
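+
+/*
+ * Illustration only: pm_runtime_init() below starts every device out as
+ * RPM_SUSPENDED with run-time PM disabled, so code registering a device that
+ * is already powered up would first correct the status and then enable
+ * run-time PM.  Assuming the pm_runtime_set_active() wrapper around this
+ * function:
+ *
+ *	pm_runtime_set_active(dev);
+ *	pm_runtime_enable(dev);
+ */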
+
+/**
+ * __pm_runtime_barrier - Cancel pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Flush all pending requests for the device from pm_wq and wait for all
+ * run-time PM operations involving the device in progress to complete.
+ *
+ * Should be called under dev->power.lock with interrupts disabled.
+ */
+static void __pm_runtime_barrier(struct device *dev)
+{
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.request_pending) {
+ dev->power.request = RPM_REQ_NONE;
+ spin_unlock_irq(&dev->power.lock);
+
+ cancel_work_sync(&dev->power.work);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.request_pending = false;
+ }
+
+ if (dev->power.runtime_status == RPM_SUSPENDING
+ || dev->power.runtime_status == RPM_RESUMING
+ || dev->power.idle_notification) {
+ DEFINE_WAIT(wait);
+
+ /* Suspend, wake-up or idle notification in progress. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_SUSPENDING
+ && dev->power.runtime_status != RPM_RESUMING
+ && !dev->power.idle_notification)
+ break;
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ }
+}
+
+/**
+ * pm_runtime_barrier - Flush pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Prevent the device from being suspended by incrementing its usage counter
+ * and, if there's a resume request pending for the device, wake the device up.
+ * Next, make sure that all pending requests for the device have been flushed
+ * from pm_wq and wait for all run-time PM operations involving the device in
+ * progress to complete.
+ *
+ * Return value:
+ * 1, if there was a resume request pending and the device had to be woken up,
+ * 0, otherwise
+ */
+int pm_runtime_barrier(struct device *dev)
+{
+ int retval = 0;
+
+ pm_runtime_get_noresume(dev);
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME) {
+ __pm_runtime_resume(dev, false);
+ retval = 1;
+ }
+
+ __pm_runtime_barrier(dev);
+
+ spin_unlock_irq(&dev->power.lock);
+ pm_runtime_put_noidle(dev);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_barrier);
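+
+/*
+ * Illustration only: a typical caller is code about to unbind a driver, for
+ * instance, which must make sure no run-time PM callbacks are running or
+ * queued for the device before ->remove() is invoked:
+ *
+ *	pm_runtime_barrier(dev);
+ *	... unbind the driver from dev ...
+ */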
+
+/**
+ * __pm_runtime_disable - Disable run-time PM of a device.
+ * @dev: Device to handle.
+ * @check_resume: If set, check if there's a resume request for the device.
+ *
+ * Increment power.disable_depth for the device and, if it was zero previously,
+ * cancel all pending run-time PM requests for the device and wait for all
+ * operations in progress to complete. The device can be either active or
+ * suspended after its run-time PM has been disabled.
+ *
+ * If @check_resume is set and there's a resume request pending when
+ * __pm_runtime_disable() is called and power.disable_depth is zero, the
+ * function will wake up the device before disabling its run-time PM.
+ */
+void __pm_runtime_disable(struct device *dev, bool check_resume)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.disable_depth > 0) {
+ dev->power.disable_depth++;
+ goto out;
+ }
+
+ /*
+ * Wake up the device if there's a resume request pending, because that
+ * means there probably is some I/O to process and disabling run-time PM
+ * shouldn't prevent the device from processing the I/O.
+ */
+ if (check_resume && dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME) {
+ /*
+ * Prevent suspends and idle notifications from being carried
+ * out after we have woken up the device.
+ */
+ pm_runtime_get_noresume(dev);
+
+ __pm_runtime_resume(dev, false);
+
+ pm_runtime_put_noidle(dev);
+ }
+
+ if (!dev->power.disable_depth++)
+ __pm_runtime_barrier(dev);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_disable);
+
+/**
+ * pm_runtime_enable - Enable run-time PM of a device.
+ * @dev: Device to handle.
+ */
+void pm_runtime_enable(struct device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.disable_depth > 0)
+ dev->power.disable_depth--;
+ else
+ dev_warn(dev, "Unbalanced %s!\n", __func__);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_enable);
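+
+/*
+ * Illustration only: pm_runtime_init() leaves disable_depth at 1, so every
+ * device starts with run-time PM disabled and it has to be enabled
+ * explicitly, typically once the device is known to be functional.
+ * Assuming the pm_runtime_disable() wrapper for __pm_runtime_disable():
+ *
+ *	probe:
+ *		pm_runtime_set_active(dev);
+ *		pm_runtime_enable(dev);
+ *
+ *	remove:
+ *		pm_runtime_disable(dev);
+ */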
+
+/**
+ * pm_runtime_init - Initialize run-time PM fields in given device object.
+ * @dev: Device object to initialize.
+ */
+void pm_runtime_init(struct device *dev)
+{
+ spin_lock_init(&dev->power.lock);
+
+ dev->power.runtime_status = RPM_SUSPENDED;
+ dev->power.idle_notification = false;
+
+ dev->power.disable_depth = 1;
+ atomic_set(&dev->power.usage_count, 0);
+
+ dev->power.runtime_error = 0;
+
+ atomic_set(&dev->power.child_count, 0);
+ pm_suspend_ignore_children(dev, false);
+
+ dev->power.request_pending = false;
+ dev->power.request = RPM_REQ_NONE;
+ dev->power.deferred_resume = false;
+ INIT_WORK(&dev->power.work, pm_runtime_work);
+
+ dev->power.timer_expires = 0;
+ setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
+ (unsigned long)dev);
+
+ init_waitqueue_head(&dev->power.wait_queue);
+}
+
+/**
+ * pm_runtime_remove - Prepare for removing a device from device hierarchy.
+ * @dev: Device object being removed from device hierarchy.
+ */
+void pm_runtime_remove(struct device *dev)
+{
+ __pm_runtime_disable(dev, false);
+
+ /* Change the status back to 'suspended' to match the initial status. */
+ if (dev->power.runtime_status == RPM_ACTIVE)
+ pm_runtime_set_suspended(dev);
+}