author    Trond Myklebust <Trond.Myklebust@netapp.com>  2007-02-12 22:43:25 -0800
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2007-02-12 22:43:25 -0800
commit    d9bc125caf592b7d081021f32ce5b717efdf70c8 (patch)
tree      263b7066ba22ddce21db610c0300f6eaac6f2064 /lib
parent    43d78ef2ba5bec26d0315859e8324bfc0be23766 (diff)
parent    ec2f9d1331f658433411c58077871e1eef4ee1b4 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Conflicts:
	net/sunrpc/auth_gss/gss_krb5_crypto.c
	net/sunrpc/auth_gss/gss_spkm3_token.c
	net/sunrpc/clnt.c

Merge with mainline and fix conflicts.
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig          |   9
-rw-r--r--  lib/Kconfig.debug    |  18
-rw-r--r--  lib/Makefile         |   8
-rw-r--r--  lib/bitmap.c         |   8
-rw-r--r--  lib/bust_spinlocks.c |  12
-rw-r--r--  lib/cmdline.c        |   8
-rw-r--r--  lib/devres.c         | 300
-rw-r--r--  lib/idr.c            |   4
-rw-r--r--  lib/iomap.c          |   3
-rw-r--r--  lib/kobject.c        |  78
-rw-r--r--  lib/sha1.c           |   9
-rw-r--r--  lib/sort.c           |   2
-rw-r--r--  lib/string.c         |   8
-rw-r--r--  lib/swiotlb.c        | 298
-rw-r--r--  lib/textsearch.c     |   2
-rw-r--r--  lib/vsprintf.c       |  15
16 files changed, 616 insertions, 166 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 9b03581cdec..38424991504 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -101,9 +101,14 @@ config TEXTSEARCH_FSM
config PLIST
boolean
-config IOMAP_COPY
+config HAS_IOMEM
boolean
- depends on !UML
+ depends on !NO_IOMEM
+ default y
+
+config HAS_IOPORT
+ boolean
+ depends on HAS_IOMEM && !NO_IOPORT
default y
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5c2681875b9..63f04c15e6f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -77,6 +77,15 @@ config DEBUG_KERNEL
Say Y here if you are developing drivers or trying to debug and
identify kernel problems.
+config DEBUG_SHIRQ
+ bool "Debug shared IRQ handlers"
+ depends on DEBUG_KERNEL && GENERIC_HARDIRQS
+ help
+ Enable this to generate a spurious interrupt as soon as a shared
+ interrupt handler is registered, and just before one is deregistered.
+ Drivers ought to be able to handle interrupts coming in at those
+ points; some don't and need to be caught.
+
config LOG_BUF_SHIFT
int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
range 12 21
@@ -181,19 +190,11 @@ config DEBUG_MUTEXES
This feature allows mutex semantics violations to be detected and
reported.
-config DEBUG_RWSEMS
- bool "RW-sem debugging: basic checks"
- depends on DEBUG_KERNEL
- help
- This feature allows read-write semaphore semantics violations to
- be detected and reported.
-
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
- select DEBUG_RWSEMS
select LOCKDEP
help
This feature will check whether any held lock (spinlock, rwlock,
@@ -209,7 +210,6 @@ config PROVE_LOCKING
select LOCKDEP
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
- select DEBUG_RWSEMS
select DEBUG_LOCK_ALLOC
default n
help
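The DEBUG_SHIRQ option added above fires a spurious interrupt when a shared handler is registered and again just before it is freed, so it only helps drivers whose handlers already check whether their own device raised the line. A minimal sketch of that pattern, assuming a hypothetical "foo" device with made-up register offsets:

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical register layout for an imaginary "foo" device. */
#define FOO_IRQ_STATUS   0x04
#define FOO_IRQ_PENDING  0x01

struct foo_priv {
	void __iomem *regs;
};

/*
 * Shared handler: return IRQ_NONE when our device has nothing pending.
 * This is exactly the path DEBUG_SHIRQ exercises around request_irq()
 * and free_irq().
 */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u32 status = readl(priv->regs + FOO_IRQ_STATUS);

	if (!(status & FOO_IRQ_PENDING))
		return IRQ_NONE;	/* not ours */

	writel(FOO_IRQ_PENDING, priv->regs + FOO_IRQ_STATUS);	/* ack */
	/* ... service the device ... */
	return IRQ_HANDLED;
}

Such a handler would be registered with IRQF_SHARED; one that touches its hardware unconditionally is what the new option is meant to catch.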
diff --git a/lib/Makefile b/lib/Makefile
index 77b4bad7d44..992a39ef9ff 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -3,7 +3,7 @@
#
lib-y := ctype.o string.o vsprintf.o cmdline.o \
- bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
+ rbtree.o radix-tree.o dump_stack.o \
idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o
@@ -12,14 +12,15 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o kobject_uevent.o klist.o
-obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o
+obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
endif
-obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o
+obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
@@ -41,7 +42,6 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
-obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 037fa9aa2ed..ee6e58fce8f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -95,7 +95,7 @@ void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
}
EXPORT_SYMBOL(__bitmap_complement);
-/*
+/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
* @dst - destination bitmap
* @src - source bitmap
@@ -139,7 +139,7 @@ void __bitmap_shift_right(unsigned long *dst,
EXPORT_SYMBOL(__bitmap_shift_right);
-/*
+/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
* @dst - destination bitmap
* @src - source bitmap
@@ -529,7 +529,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
}
EXPORT_SYMBOL(bitmap_parselist);
-/*
+/**
* bitmap_pos_to_ord(buf, pos, bits)
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @bits)
@@ -804,7 +804,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
* @pos: beginning of bit region to release
* @order: region size (log base 2 of number of bits) to release
*
- * This is the complement to __bitmap_find_free_region and releases
+ * This is the complement to __bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*
* No return value.
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index a2055bc3ef6..accb3565816 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -14,24 +14,16 @@
#include <linux/vt_kern.h>
-void bust_spinlocks(int yes)
+void __attribute__((weak)) bust_spinlocks(int yes)
{
if (yes) {
oops_in_progress = 1;
} else {
- int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
unblank_screen();
#endif
oops_in_progress = 0;
- /*
- * OK, the message is on the console. Now we call printk()
- * without oops_in_progress set so that printk() will give klogd
- * and the blanked console a poke. Hold onto your hats...
- */
- console_loglevel = 15; /* NMI oopser may have shut the console up */
- printk(" ");
- console_loglevel = loglevel_save;
+ wake_up_klogd();
}
}
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8a5b5303bd4..f596c08d213 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -43,10 +43,10 @@ static int get_range(char **str, int *pint)
* comma as well.
*
* Return values:
- * 0 : no int in string
- * 1 : int found, no subsequent comma
- * 2 : int found including a subsequent comma
- * 3 : hyphen found to denote a range
+ * 0 - no int in string
+ * 1 - int found, no subsequent comma
+ * 2 - int found including a subsequent comma
+ * 3 - hyphen found to denote a range
*/
int get_option (char **str, int *pint)
diff --git a/lib/devres.c b/lib/devres.c
new file mode 100644
index 00000000000..2a668dd7cac
--- /dev/null
+++ b/lib/devres.c
@@ -0,0 +1,300 @@
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+static void devm_ioremap_release(struct device *dev, void *res)
+{
+ iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap_nocache(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+ iounmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+ (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+#ifdef CONFIG_HAS_IOPORT
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+ ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+ void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+ unsigned int nr)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioport_map(port, nr);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap(). @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+ ioport_unmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+ devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+#ifdef CONFIG_PCI
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @dev. If iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pcim_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_region;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_iomap;
+ }
+
+ return 0;
+
+ err_iomap:
+ pcim_iounmap(pdev, iomap[i]);
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+#endif
+#endif
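For orientation, here is a hedged sketch of how a PCI driver's probe routine might consume the managed helpers introduced in this file; the foo_* names and the choice of BAR 0 are hypothetical, and unrelated error handling is omitted:

#include <linux/pci.h>
#include <linux/io.h>

#define FOO_BAR	0	/* hypothetical register BAR */

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/*
	 * Request and iomap BAR 0 in one call; the region and the mapping
	 * are both released automatically on driver detach, so no explicit
	 * error/remove path is needed for them.
	 */
	rc = pcim_iomap_regions(pdev, 1 << FOO_BAR, "foo");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[FOO_BAR];

	/* ... program the device through 'regs' ... */
	return 0;
}

Note that pci_enable_device() itself is not managed by this file; only the region and iomap bookkeeping shown here is undone on detach.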
diff --git a/lib/idr.c b/lib/idr.c
index 71853531d3b..305117ca2d4 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -329,8 +329,8 @@ static void sub_remove(struct idr *idp, int shift, int id)
/**
* idr_remove - remove the given id and free it's slot
- * idp: idr handle
- * id: uniqueue key
+ * @idp: idr handle
+ * @id: unique key
*/
void idr_remove(struct idr *idp, int id)
{
diff --git a/lib/iomap.c b/lib/iomap.c
index d6ccdd85df5..4d43f37c015 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -4,8 +4,9 @@
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/pci.h>
+#include <linux/io.h>
+
#include <linux/module.h>
-#include <asm/io.h>
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
diff --git a/lib/kobject.c b/lib/kobject.c
index 7ce6dc138e9..2782f49e906 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -44,11 +44,11 @@ static int populate_dir(struct kobject * kobj)
return error;
}
-static int create_dir(struct kobject * kobj)
+static int create_dir(struct kobject * kobj, struct dentry *shadow_parent)
{
int error = 0;
if (kobject_name(kobj)) {
- error = sysfs_create_dir(kobj);
+ error = sysfs_create_dir(kobj, shadow_parent);
if (!error) {
if ((error = populate_dir(kobj)))
sysfs_remove_dir(kobj);
@@ -97,11 +97,12 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
}
/**
- * kobject_get_path - generate and return the path associated with a given kobj
- * and kset pair. The result must be freed by the caller with kfree().
+ * kobject_get_path - generate and return the path associated with a given kobj and kset pair.
*
* @kobj: kobject in question, with which to build the path
* @gfp_mask: the allocation type used to allocate the path
+ *
+ * The result must be freed by the caller with kfree().
*/
char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
{
@@ -126,6 +127,8 @@ EXPORT_SYMBOL_GPL(kobject_get_path);
*/
void kobject_init(struct kobject * kobj)
{
+ if (!kobj)
+ return;
kref_init(&kobj->kref);
INIT_LIST_HEAD(&kobj->entry);
init_waitqueue_head(&kobj->poll);
@@ -156,9 +159,10 @@ static void unlink(struct kobject * kobj)
/**
* kobject_add - add an object to the hierarchy.
* @kobj: object.
+ * @shadow_parent: sysfs directory to add to.
*/
-int kobject_add(struct kobject * kobj)
+int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
{
int error = 0;
struct kobject * parent;
@@ -189,12 +193,11 @@ int kobject_add(struct kobject * kobj)
}
kobj->parent = parent;
- error = create_dir(kobj);
+ error = create_dir(kobj, shadow_parent);
if (error) {
/* unlink does the kobject_put() for us */
unlink(kobj);
- if (parent)
- kobject_put(parent);
+ kobject_put(parent);
/* be noisy on error issues */
if (error == -EEXIST)
@@ -211,6 +214,15 @@ int kobject_add(struct kobject * kobj)
return error;
}
+/**
+ * kobject_add - add an object to the hierarchy.
+ * @kobj: object.
+ */
+int kobject_add(struct kobject * kobj)
+{
+ return kobject_shadow_add(kobj, NULL);
+}
+
/**
* kobject_register - initialize and add an object.
@@ -303,7 +315,29 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
- error = sysfs_rename_dir(kobj, new_name);
+ if (!kobj->parent)
+ return -EINVAL;
+ error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
+ kobject_put(kobj);
+
+ return error;
+}
+
+/**
+ * kobject_rename - change the name of an object
+ * @kobj: object in question.
+ * @new_name: object's new name
+ */
+
+int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent,
+ const char *new_name)
+{
+ int error = 0;
+
+ kobj = kobject_get(kobj);
+ if (!kobj)
+ return -EINVAL;
+ error = sysfs_rename_dir(kobj, new_parent, new_name);
kobject_put(kobj);
return error;
@@ -312,7 +346,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
/**
* kobject_move - move object to another parent
* @kobj: object in question.
- * @new_parent: object's new parent
+ * @new_parent: object's new parent (can be NULL)
*/
int kobject_move(struct kobject *kobj, struct kobject *new_parent)
@@ -328,8 +362,8 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
return -EINVAL;
new_parent = kobject_get(new_parent);
if (!new_parent) {
- error = -EINVAL;
- goto out;
+ if (kobj->kset)
+ new_parent = kobject_get(&kobj->kset->kobj);
}
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
@@ -366,6 +400,8 @@ out:
void kobject_del(struct kobject * kobj)
{
+ if (!kobj)
+ return;
sysfs_remove_dir(kobj);
unlink(kobj);
}
@@ -377,6 +413,8 @@ void kobject_del(struct kobject * kobj)
void kobject_unregister(struct kobject * kobj)
{
+ if (!kobj)
+ return;
pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
kobject_uevent(kobj, KOBJ_REMOVE);
kobject_del(kobj);
@@ -414,8 +452,7 @@ void kobject_cleanup(struct kobject * kobj)
t->release(kobj);
if (s)
kset_put(s);
- if (parent)
- kobject_put(parent);
+ kobject_put(parent);
}
static void kobject_release(struct kref *kref)
@@ -523,6 +560,8 @@ int kset_add(struct kset * k)
int kset_register(struct kset * k)
{
+ if (!k)
+ return -EINVAL;
kset_init(k);
return kset_add(k);
}
@@ -535,6 +574,8 @@ int kset_register(struct kset * k)
void kset_unregister(struct kset * k)
{
+ if (!k)
+ return;
kobject_unregister(&k->kobj);
}
@@ -586,6 +627,9 @@ int subsystem_register(struct subsystem * s)
{
int error;
+ if (!s)
+ return -EINVAL;
+
subsystem_init(s);
pr_debug("subsystem %s: registering\n",s->kset.kobj.name);
@@ -598,6 +642,8 @@ int subsystem_register(struct subsystem * s)
void subsystem_unregister(struct subsystem * s)
{
+ if (!s)
+ return;
pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name);
kset_unregister(&s->kset);
}
@@ -612,6 +658,10 @@ void subsystem_unregister(struct subsystem * s)
int subsys_create_file(struct subsystem * s, struct subsys_attribute * a)
{
int error = 0;
+
+ if (!s || !a)
+ return -EINVAL;
+
if (subsys_get(s)) {
error = sysfs_create_file(&s->kset.kobj,&a->attr);
subsys_put(s);
diff --git a/lib/sha1.c b/lib/sha1.c
index 1cdabe3065f..4c45fd50e91 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -20,8 +20,8 @@
#define K3 0x8F1BBCDCL /* Rounds 40-59: sqrt(5) * 2^30 */
#define K4 0xCA62C1D6L /* Rounds 60-79: sqrt(10) * 2^30 */
-/*
- * sha_transform: single block SHA1 transform
+/**
+ * sha_transform - single block SHA1 transform
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
@@ -80,9 +80,8 @@ void sha_transform(__u32 *digest, const char *in, __u32 *W)
}
EXPORT_SYMBOL(sha_transform);
-/*
- * sha_init: initialize the vectors for a SHA1 digest
- *
+/**
+ * sha_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
void sha_init(__u32 *buf)
diff --git a/lib/sort.c b/lib/sort.c
index 488788b341c..961567894d1 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -27,7 +27,7 @@ static void generic_swap(void *a, void *b, int size)
} while (--size > 0);
}
-/*
+/**
* sort - sort an array of elements
* @base: pointer to data to sort
* @num: number of elements
diff --git a/lib/string.c b/lib/string.c
index a485d75962a..bab440fb0df 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(strcat);
* @src: The string to append to it
* @count: The maximum numbers of bytes to copy
*
- * Note that in contrast to strncpy, strncat ensures the result is
+ * Note that in contrast to strncpy(), strncat() ensures the result is
* terminated.
*/
char *strncat(char *dest, const char *src, size_t count)
@@ -366,8 +366,7 @@ EXPORT_SYMBOL(strnlen);
#ifndef __HAVE_ARCH_STRSPN
/**
- * strspn - Calculate the length of the initial substring of @s which only
- * contain letters in @accept
+ * strspn - Calculate the length of the initial substring of @s which only contain letters in @accept
* @s: The string to be searched
* @accept: The string to search for
*/
@@ -394,8 +393,7 @@ EXPORT_SYMBOL(strspn);
#ifndef __HAVE_ARCH_STRCSPN
/**
- * strcspn - Calculate the length of the initial substring of @s which does
- * not contain letters in @reject
+ * strcspn - Calculate the length of the initial substring of @s which does not contain letters in @reject
* @s: The string to be searched
* @reject: The string to avoid
*/
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eef..623a68af8b1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
/*
* Dynamic DMA mapping support.
*
- * This implementation is for IA-64 and EM64T platforms that do not support
+ * This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -28,6 +28,7 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>
+#include <asm/swiotlb.h>
#include <linux/init.h>
#include <linux/bootmem.h>
@@ -35,8 +36,10 @@
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
+#ifndef SG_ENT_VIRT_ADDRESS
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
+#endif
/*
* Maximum allowable number of contiguous slabs to map,
@@ -101,13 +104,25 @@ static unsigned int io_tlb_index;
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
-static unsigned char **io_tlb_orig_addr;
+#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
+typedef char *io_tlb_addr_t;
+#define swiotlb_orig_addr_null(buffer) (!(buffer))
+#define ptr_to_io_tlb_addr(ptr) (ptr)
+#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
+#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
+#endif
+static io_tlb_addr_t *io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
+#ifdef SWIOTLB_EXTRA_VARIABLES
+SWIOTLB_EXTRA_VARIABLES;
+#endif
+
+#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
static int __init
setup_io_tlb_npages(char *str)
{
@@ -122,30 +137,50 @@ setup_io_tlb_npages(char *str)
swiotlb_force = 1;
return 1;
}
+#endif
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
+#ifndef swiotlb_adjust_size
+#define swiotlb_adjust_size(size) ((void)0)
+#endif
+
+#ifndef swiotlb_adjust_seg
+#define swiotlb_adjust_seg(start, size) ((void)0)
+#endif
+
+#ifndef swiotlb_print_info
+#define swiotlb_print_info(bytes) \
+ printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
+ "0x%lx\n", bytes >> 20, \
+ virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
+#endif
+
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
-void
-swiotlb_init_with_default_size (size_t default_size)
+void __init
+swiotlb_init_with_default_size(size_t default_size)
{
- unsigned long i;
+ unsigned long i, bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
+ swiotlb_adjust_size(io_tlb_nslabs);
+ swiotlb_adjust_size(io_tlb_overflow);
+
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ io_tlb_start = alloc_bootmem_low_pages(bytes);
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
- io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+ io_tlb_end = io_tlb_start + bytes;
/*
* Allocate and initialize the free list array. This array is used
@@ -153,34 +188,45 @@ swiotlb_init_with_default_size (size_t default_size)
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
- for (i = 0; i < io_tlb_nslabs; i++)
+ for (i = 0; i < io_tlb_nslabs; i++) {
+ if ( !(i % IO_TLB_SEGSIZE) )
+ swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
+ IO_TLB_SEGSIZE << IO_TLB_SHIFT);
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ }
io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+ io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
/*
* Get the overflow emergency buffer
*/
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
- printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
- virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+ if (!io_tlb_overflow_buffer)
+ panic("Cannot allocate SWIOTLB overflow buffer!\n");
+ swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
+
+ swiotlb_print_info(bytes);
}
+#ifndef __swiotlb_init_with_default_size
+#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
+#endif
-void
-swiotlb_init (void)
+void __init
+swiotlb_init(void)
{
- swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+ __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
}
+#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int
-swiotlb_late_init_with_default_size (size_t default_size)
+swiotlb_late_init_with_default_size(size_t default_size)
{
- unsigned long i, req_nslabs = io_tlb_nslabs;
+ unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
unsigned int order;
if (!io_tlb_nslabs) {
@@ -191,8 +237,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
/*
* Get IO TLB memory from the low pages
*/
- order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +252,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
if (!io_tlb_start)
goto cleanup1;
- if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+ if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
}
- io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
- memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ io_tlb_end = io_tlb_start + bytes;
+ memset(io_tlb_start, 0, bytes);
/*
* Allocate and initialize the free list array. This array is used
@@ -227,12 +275,12 @@ swiotlb_late_init_with_default_size (size_t default_size)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs * sizeof(char *)));
+ io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
+ get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
if (!io_tlb_orig_addr)
goto cleanup3;
- memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+ memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
/*
* Get the overflow emergency buffer
@@ -242,30 +290,30 @@ swiotlb_late_init_with_default_size (size_t default_size)
if (!io_tlb_overflow_buffer)
goto cleanup4;
- printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
- "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
- virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+ swiotlb_print_info(bytes);
return 0;
cleanup4:
- free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
- sizeof(char *)));
+ free_pages((unsigned long)io_tlb_orig_addr,
+ get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
io_tlb_orig_addr = NULL;
cleanup3:
- free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
- sizeof(int)));
+ free_pages((unsigned long)io_tlb_list,
+ get_order(io_tlb_nslabs * sizeof(int)));
io_tlb_list = NULL;
- io_tlb_end = NULL;
cleanup2:
+ io_tlb_end = NULL;
free_pages((unsigned long)io_tlb_start, order);
io_tlb_start = NULL;
cleanup1:
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
+#endif
-static inline int
+#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
+static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
dma_addr_t mask = 0xffffffff;
@@ -275,11 +323,35 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
return (addr & ~mask) != 0;
}
+static inline int range_needs_mapping(const void *ptr, size_t size)
+{
+ return swiotlb_force;
+}
+
+static inline int order_needs_mapping(unsigned int order)
+{
+ return 0;
+}
+#endif
+
+static void
+__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
+{
+#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
+ if (dir == DMA_TO_DEVICE)
+ memcpy(dma_addr, buffer, size);
+ else
+ memcpy(buffer, dma_addr, size);
+#else
+ __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
+#endif
+}
+
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
-map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
{
unsigned long flags;
char *dma_addr;
@@ -352,7 +424,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
*/
io_tlb_orig_addr[index] = buffer;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- memcpy(dma_addr, buffer, size);
+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
return dma_addr;
}
@@ -366,17 +438,18 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
+ io_tlb_addr_t buffer = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
- if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ if (!swiotlb_orig_addr_null(buffer)
+ && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
/*
* bounce... copy the data back into the original buffer * and
* delete the bounce buffer.
*/
- memcpy(buffer, dma_addr, size);
+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
@@ -409,18 +482,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
int dir, int target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
+ io_tlb_addr_t buffer = io_tlb_orig_addr[index];
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- memcpy(buffer, dma_addr, size);
+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- memcpy(dma_addr, buffer, size);
+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
@@ -429,11 +502,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
}
}
+#ifdef SWIOTLB_ARCH_NEED_ALLOC
+
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- unsigned long dev_addr;
+ dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
@@ -444,8 +519,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
flags |= GFP_DMA;
- ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+ if (!order_needs_mapping(order))
+ ret = (void *)__get_free_pages(flags, order);
+ else
+ ret = NULL;
+ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -465,22 +543,24 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (swiotlb_dma_mapping_error(handle))
return NULL;
- ret = phys_to_virt(handle);
+ ret = bus_to_virt(handle);
}
memset(ret, 0, size);
- dev_addr = virt_to_phys(ret);
+ dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
if (address_needs_mapping(hwdev, dev_addr)) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
- (unsigned long long)*hwdev->dma_mask, dev_addr);
+ printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+ (unsigned long long)*hwdev->dma_mask,
+ (unsigned long long)dev_addr);
panic("swiotlb_alloc_coherent: allocated memory is out of "
"range for device");
}
*dma_handle = dev_addr;
return ret;
}
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -493,6 +573,9 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
}
+EXPORT_SYMBOL(swiotlb_free_coherent);
+
+#endif
static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -504,7 +587,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* When the mapping is small enough return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
- printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
+ printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
"device %s\n", size, dev ? dev->bus_id : "?");
if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +608,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
- unsigned long dev_addr = virt_to_phys(ptr);
+ dma_addr_t dev_addr = virt_to_bus(ptr);
void *map;
BUG_ON(dir == DMA_NONE);
@@ -534,19 +617,20 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ if (!range_needs_mapping(ptr, size)
+ && !address_needs_mapping(hwdev, dev_addr))
return dev_addr;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
- map = map_single(hwdev, ptr, size, dir);
+ map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
if (!map) {
swiotlb_full(hwdev, size, dir, 1);
map = io_tlb_overflow_buffer;
}
- dev_addr = virt_to_phys(map);
+ dev_addr = virt_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -558,25 +642,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
}
/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
- unsigned long pg_addr, end;
-
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page(pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
- }
-}
-
-/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
* match what was provided for in a previous swiotlb_map_single call. All
* other usages are undefined.
@@ -588,13 +653,13 @@ void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
int dir)
{
- char *dma_addr = phys_to_virt(dev_addr);
+ char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
unmap_single(hwdev, dma_addr, size, dir);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
+ dma_mark_clean(dma_addr, size);
}
/*
@@ -607,17 +672,17 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
* address back to the card, you must first perform a
* swiotlb_dma_sync_for_device, and then the device again owns the buffer
*/
-static inline void
+static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, int target)
{
- char *dma_addr = phys_to_virt(dev_addr);
+ char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
+ dma_mark_clean(dma_addr, size);
}
void
@@ -637,18 +702,18 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
/*
* Same as above, but for a sub-range of the mapping.
*/
-static inline void
+static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size,
int dir, int target)
{
- char *dma_addr = phys_to_virt(dev_addr) + offset;
+ char *dma_addr = bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
+ dma_mark_clean(dma_addr, size);
}
void
@@ -687,18 +752,16 @@ int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
int dir)
{
- void *addr;
- unsigned long dev_addr;
+ dma_addr_t dev_addr;
int i;
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++) {
- addr = SG_ENT_VIRT_ADDRESS(sg);
- dev_addr = virt_to_phys(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
- void *map = map_single(hwdev, addr, sg->length, dir);
- sg->dma_address = virt_to_bus(map);
+ dev_addr = SG_ENT_PHYS_ADDRESS(sg);
+ if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
+ || address_needs_mapping(hwdev, dev_addr)) {
+ void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
@@ -707,6 +770,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
sg[0].dma_length = 0;
return 0;
}
+ sg->dma_address = virt_to_bus(map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
@@ -728,9 +792,10 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+ unmap_single(hwdev, bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+ dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
/*
@@ -740,7 +805,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
* The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
* and usage.
*/
-static inline void
+static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
int nelems, int dir, int target)
{
@@ -750,8 +815,10 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, (void *) sg->dma_address,
+ sync_single(hwdev, bus_to_virt(sg->dma_address),
sg->dma_length, dir, target);
+ else if (dir == DMA_FROM_DEVICE)
+ dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
void
@@ -768,10 +835,48 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
+#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
+
+dma_addr_t
+swiotlb_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_addr_t dev_addr;
+ char *map;
+
+ dev_addr = page_to_bus(page) + offset;
+ if (address_needs_mapping(hwdev, dev_addr)) {
+ map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
+ if (!map) {
+ swiotlb_full(hwdev, size, direction, 1);
+ map = io_tlb_overflow_buffer;
+ }
+ dev_addr = virt_to_bus(map);
+ }
+
+ return dev_addr;
+}
+
+void
+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ char *dma_addr = bus_to_virt(dev_addr);
+
+ BUG_ON(direction == DMA_NONE);
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ unmap_single(hwdev, dma_addr, size, direction);
+ else if (direction == DMA_FROM_DEVICE)
+ dma_mark_clean(dma_addr, size);
+}
+
+#endif
+
int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
- return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
/*
@@ -780,10 +885,13 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
+#ifndef __swiotlb_dma_supported
+#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
+#endif
int
-swiotlb_dma_supported (struct device *hwdev, u64 mask)
+swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return (virt_to_phys (io_tlb_end) - 1) <= mask;
+ return __swiotlb_dma_supported(hwdev, mask);
}
EXPORT_SYMBOL(swiotlb_init);
@@ -798,6 +906,4 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);
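As a rough illustration of the bounce-buffer path reworked above, a hedged sketch of a driver-side mapping sequence; hwdev, buf and foo_send() are placeholders, the include reflects whatever architecture header declares the swiotlb entry points, and real drivers normally go through the generic dma_map_single()/dma_unmap_single() wrappers instead of calling swiotlb directly:

#include <asm/swiotlb.h>
#include <linux/dma-mapping.h>

static int foo_send(struct device *hwdev, void *buf, size_t len)
{
	/*
	 * If 'buf' is not reachable under the device's DMA mask (or
	 * swiotlb_force is set), a bounce buffer from the IO TLB is
	 * substituted and the data is copied into it.
	 */
	dma_addr_t dma = swiotlb_map_single(hwdev, buf, len, DMA_TO_DEVICE);

	if (swiotlb_dma_mapping_error(dma))
		return -ENOMEM;	/* only the emergency overflow buffer was left */

	/* ... hand 'dma' to the hardware and wait for completion ... */

	swiotlb_unmap_single(hwdev, dma, len, DMA_TO_DEVICE);
	return 0;
}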
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 98bcadc0118..9e2a002c5b5 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -218,7 +218,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
* Call textsearch_next() to retrieve subsequent matches.
*
* Returns the position of first occurrence of the pattern or
- * UINT_MAX if no occurrence was found.
+ * %UINT_MAX if no occurrence was found.
*/
unsigned int textsearch_find_continuous(struct ts_config *conf,
struct ts_state *state,
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index bed7229378f..b025864d2e4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -247,12 +247,12 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
- * (not including the trailing '\0'), use vscnprintf. If the
+ * (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*
* Call this function if you are already dealing with a va_list.
- * You probably want snprintf instead.
+ * You probably want snprintf() instead.
*/
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
@@ -509,7 +509,7 @@ EXPORT_SYMBOL(vsnprintf);
* returns 0.
*
* Call this function if you are already dealing with a va_list.
- * You probably want scnprintf instead.
+ * You probably want scnprintf() instead.
*/
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
@@ -554,8 +554,7 @@ EXPORT_SYMBOL(snprintf);
* @...: Arguments for the format string
*
* The return value is the number of characters written into @buf not including
- * the trailing '\0'. If @size is <= 0 the function returns 0. If the return is
- * greater than or equal to @size, the resulting string is truncated.
+ * the trailing '\0'. If @size is <= 0 the function returns 0.
*/
int scnprintf(char * buf, size_t size, const char *fmt, ...)
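The wording changes in this file keep leaning on the snprintf()/scnprintf() distinction; a small hedged sketch of the difference in return values (buffer and format are arbitrary):

#include <linux/kernel.h>

static void foo_show_lengths(void)
{
	char buf[8];
	int n;

	/* snprintf() reports the length that would have been written,
	 * so it can exceed the buffer size when output is truncated. */
	n = snprintf(buf, sizeof(buf), "%s", "0123456789");
	printk(KERN_INFO "snprintf returned %d\n", n);	/* 10 */

	/* scnprintf() reports what actually landed in 'buf',
	 * not counting the trailing '\0'. */
	n = scnprintf(buf, sizeof(buf), "%s", "0123456789");
	printk(KERN_INFO "scnprintf returned %d\n", n);	/* 7 */
}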
@@ -577,11 +576,11 @@ EXPORT_SYMBOL(scnprintf);
* @args: Arguments for the format string
*
* The function returns the number of characters written
- * into @buf. Use vsnprintf or vscnprintf in order to avoid
+ * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
* buffer overflows.
*
* Call this function if you are already dealing with a va_list.
- * You probably want sprintf instead.
+ * You probably want sprintf() instead.
*/
int vsprintf(char *buf, const char *fmt, va_list args)
{
@@ -597,7 +596,7 @@ EXPORT_SYMBOL(vsprintf);
* @...: Arguments for the format string
*
* The function returns the number of characters written
- * into @buf. Use snprintf or scnprintf in order to avoid
+ * into @buf. Use snprintf() or scnprintf() in order to avoid
* buffer overflows.
*/
int sprintf(char * buf, const char *fmt, ...)