Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             |  19
-rw-r--r--  lib/Kconfig.debug       |   3
-rw-r--r--  lib/Makefile            |  13
-rw-r--r--  lib/bitmap.c            |   3
-rw-r--r--  lib/genalloc.c          | 188
-rw-r--r--  lib/idr.c               |   2
-rw-r--r--  lib/kernel_lock.c       |  55
-rw-r--r--  lib/klist.c             | 265
-rw-r--r--  lib/kobject.c           |   2
-rw-r--r--  lib/kobject_uevent.c    |   6
-rw-r--r--  lib/sha1.c              |   2
-rw-r--r--  lib/smp_processor_id.c  |  55
-rw-r--r--  lib/textsearch.c        | 317
-rw-r--r--  lib/ts_fsm.c            | 338
-rw-r--r--  lib/ts_kmp.c            | 145
15 files changed, 1345 insertions(+), 68 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index eeb45225248..eeb429a5215 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -40,6 +40,12 @@ config ZLIB_DEFLATE
tristate
#
+# Generic allocator support is selected if needed
+#
+config GENERIC_ALLOCATOR
+ boolean
+
+#
# reed solomon support is select'ed if needed
#
config REED_SOLOMON
@@ -57,5 +63,16 @@ config REED_SOLOMON_ENC16
config REED_SOLOMON_DEC16
boolean
-endmenu
+#
+# Textsearch support is select'ed if needed
+#
+config TEXTSEARCH
+ boolean
+
+config TEXTSEARCH_KMP
+ tristate
+config TEXTSEARCH_FSM
+ tristate
+
+endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ac23847ce0e..0c421295e61 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -151,7 +151,8 @@ config DEBUG_FS
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
- depends on DEBUG_KERNEL && ((X86 && !X86_64) || CRIS || M68K || M68KNOMMU || FRV)
+ depends on DEBUG_KERNEL && ((X86 && !X86_64) || CRIS || M68K || M68KNOMMU || FRV || UML)
+ default y if DEBUG_INFO && UML
help
If you say Y here the resulting kernel image will be slightly larger
and slower, but it will give very useful debugging information.
diff --git a/lib/Makefile b/lib/Makefile
index 7c70db79c0e..beed1585294 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -4,9 +4,10 @@
lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
- kobject.o kref.o idr.o div64.o int_sqrt.o \
- bitmap.o extable.o kobject_uevent.o prio_tree.o sha1.o \
- halfmd4.o
+ idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
+ sha1.o halfmd4.o
+
+lib-y += kobject.o kref.o kobject_uevent.o klist.o
obj-y += sort.o parser.o
@@ -19,6 +20,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
@@ -28,11 +30,16 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
+obj-$(CONFIG_TEXTSEARCH) += textsearch.o
+obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
+obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index d1388a5ce89..fb9371fdd44 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -289,7 +289,6 @@ EXPORT_SYMBOL(__bitmap_weight);
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
-#define roundup_power2(val,modulus) (((val) + (modulus) - 1) & ~((modulus) - 1))
#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
@@ -316,7 +315,7 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
if (chunksz == 0)
chunksz = CHUNKSZ;
- i = roundup_power2(nmaskbits, CHUNKSZ) - CHUNKSZ;
+ i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
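
The ALIGN(x, a) macro used here is the kernel-wide ((x) + (a) - 1) & ~((a) - 1),
which rounds x up to the next multiple of the power-of-two a, exactly what the
removed local roundup_power2() did; for instance:

    ALIGN(33, 32) == 64    /* nmaskbits = 33: the scan starts at bit 32 */
    ALIGN(32, 32) == 32    /* already aligned: unchanged */
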
diff --git a/lib/genalloc.c b/lib/genalloc.c
new file mode 100644
index 00000000000..d6d30d2e716
--- /dev/null
+++ b/lib/genalloc.c
@@ -0,0 +1,188 @@
+/*
+ * Basic general purpose allocator for managing special purpose memory
+ * not managed by the regular kmalloc/kfree interface.
+ * Uses for this include on-device special memory, uncached memory,
+ * etc.
+ *
+ * This code is based on the buddy allocator found in the sym53c8xx_2
+ * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
+ * and adapted for general purpose use.
+ *
+ * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+
+#include <asm/page.h>
+
+
+struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
+ unsigned long (*fp)(struct gen_pool *),
+ unsigned long data)
+{
+ struct gen_pool *poolp;
+ unsigned long tmp;
+ int i;
+
+ /*
+ * This is really an arbitrary limit; +10 is enough for
+ * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a larger limit
+ * this can be increased without problems.
+ */
+ if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
+ ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
+ return NULL;
+
+ if (!max_chunk_shift)
+ max_chunk_shift = PAGE_SHIFT;
+
+ poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
+ if (!poolp)
+ return NULL;
+ memset(poolp, 0, sizeof(struct gen_pool));
+ poolp->h = kmalloc(sizeof(struct gen_pool_link) *
+ (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
+ GFP_KERNEL);
+ if (!poolp->h) {
+ printk(KERN_WARNING "gen_pool_create() failed to allocate\n");
+ kfree(poolp);
+ return NULL;
+ }
+ memset(poolp->h, 0, sizeof(struct gen_pool_link) *
+ (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
+
+ spin_lock_init(&poolp->lock);
+ poolp->get_new_chunk = fp;
+ poolp->max_chunk_shift = max_chunk_shift;
+ poolp->private = data;
+
+ for (i = 0; i < nr_chunks; i++) {
+ tmp = poolp->get_new_chunk(poolp);
+ printk(KERN_INFO "allocated %lx\n", tmp);
+ if (!tmp)
+ break;
+ gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
+ }
+
+ return poolp;
+}
+EXPORT_SYMBOL(gen_pool_create);
+
+
+/*
+ * Simple power of two buddy-like generic allocator.
+ * Provides naturally aligned memory chunks.
+ */
+unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+{
+ int j, i, s, max_chunk_size;
+ unsigned long a, flags;
+ struct gen_pool_link *h = poolp->h;
+
+ max_chunk_size = 1 << poolp->max_chunk_shift;
+
+ if (size > max_chunk_size)
+ return 0;
+
+ i = 0;
+
+ size = max(size, 1 << ALLOC_MIN_SHIFT);
+ s = roundup_pow_of_two(size);
+
+ j = i;
+
+ spin_lock_irqsave(&poolp->lock, flags);
+ while (!h[j].next) {
+ if (s == max_chunk_size) {
+ struct gen_pool_link *ptr;
+ spin_unlock_irqrestore(&poolp->lock, flags);
+ ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
+ spin_lock_irqsave(&poolp->lock, flags);
+ h[j].next = ptr;
+ if (h[j].next)
+ h[j].next->next = NULL;
+ break;
+ }
+ j++;
+ s <<= 1;
+ }
+ a = (unsigned long) h[j].next;
+ if (a) {
+ h[j].next = h[j].next->next;
+ /*
+ * This should be split into a separate function doing
+ * the chunk split in order to support custom
+ * handling of memory not physically accessible by the host.
+ */
+ while (j > i) {
+ j -= 1;
+ s >>= 1;
+ h[j].next = (struct gen_pool_link *) (a + s);
+ h[j].next->next = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&poolp->lock, flags);
+ return a;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
+
+
+/*
+ * Counter-part of the generic allocator.
+ */
+void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+{
+ struct gen_pool_link *q;
+ struct gen_pool_link *h = poolp->h;
+ unsigned long a, b, flags;
+ int i, s, max_chunk_size;
+
+ max_chunk_size = 1 << poolp->max_chunk_shift;
+
+ if (size > max_chunk_size)
+ return;
+
+ i = 0;
+
+ size = max(size, 1 << ALLOC_MIN_SHIFT);
+ s = roundup_pow_of_two(size);
+
+ a = ptr;
+
+ spin_lock_irqsave(&poolp->lock, flags);
+ while (1) {
+ if (s == max_chunk_size) {
+ ((struct gen_pool_link *)a)->next = h[i].next;
+ h[i].next = (struct gen_pool_link *)a;
+ break;
+ }
+ b = a ^ s;
+ q = &h[i];
+
+ while (q->next && q->next != (struct gen_pool_link *)b)
+ q = q->next;
+
+ if (!q->next) {
+ ((struct gen_pool_link *)a)->next = h[i].next;
+ h[i].next = (struct gen_pool_link *)a;
+ break;
+ }
+ q->next = q->next->next;
+ a = a & b;
+ s <<= 1;
+ i++;
+ }
+ spin_unlock_irqrestore(&poolp->lock, flags);
+}
+EXPORT_SYMBOL(gen_pool_free);
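
A usage sketch for the new allocator (illustrative only: SRAM_CHUNK_SHIFT,
sram_virt and the chunk bookkeeping are hypothetical). The allocator keeps its
free-list links inside the chunks themselves, so the chunk-getter must return
CPU-addressable (kernel virtual) addresses:

    #define SRAM_CHUNK_SHIFT 16        /* 64 KB chunks (hypothetical) */

    static unsigned long sram_virt;    /* ioremap()ed base, set up elsewhere */
    static unsigned long sram_chunks;  /* chunks handed out so far */

    static unsigned long sram_get_chunk(struct gen_pool *pool)
    {
            if (sram_chunks >= 4)      /* region holds four chunks */
                    return 0;
            return sram_virt + (sram_chunks++ << SRAM_CHUNK_SHIFT);
    }

    ...
    struct gen_pool *pool;
    unsigned long buf;

    /* gen_pool_create() seeds the pool by calling the getter nr_chunks times */
    pool = gen_pool_create(4, SRAM_CHUNK_SHIFT, sram_get_chunk, 0);
    if (pool) {
            buf = gen_pool_alloc(pool, 256);  /* rounded up to a power of two */
            if (buf)
                    gen_pool_free(pool, buf, 256);
    }
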
diff --git a/lib/idr.c b/lib/idr.c
index 81fc430602e..c5be889de44 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -175,7 +175,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) {
+ while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
if (!p->count)
continue;
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 99b0ae3d51d..bd2bc5d887b 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,61 +9,6 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
-#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
- defined(CONFIG_DEBUG_PREEMPT)
-
-/*
- * Debugging check.
- */
-unsigned int smp_processor_id(void)
-{
- unsigned long preempt_count = preempt_count();
- int this_cpu = __smp_processor_id();
- cpumask_t this_mask;
-
- if (likely(preempt_count))
- goto out;
-
- if (irqs_disabled())
- goto out;
-
- /*
- * Kernel threads bound to a single CPU can safely use
- * smp_processor_id():
- */
- this_mask = cpumask_of_cpu(this_cpu);
-
- if (cpus_equal(current->cpus_allowed, this_mask))
- goto out;
-
- /*
- * It is valid to assume CPU-locality during early bootup:
- */
- if (system_state != SYSTEM_RUNNING)
- goto out;
-
- /*
- * Avoid recursion:
- */
- preempt_disable();
-
- if (!printk_ratelimit())
- goto out_enable;
-
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
- print_symbol("caller is %s\n", (long)__builtin_return_address(0));
- dump_stack();
-
-out_enable:
- preempt_enable_no_resched();
-out:
- return this_cpu;
-}
-
-EXPORT_SYMBOL(smp_processor_id);
-
-#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
-
#ifdef CONFIG_PREEMPT_BKL
/*
* The 'big kernel semaphore'
diff --git a/lib/klist.c b/lib/klist.c
new file mode 100644
index 00000000000..738ab810160
--- /dev/null
+++ b/lib/klist.c
@@ -0,0 +1,265 @@
+/*
+ * klist.c - Routines for manipulating klists.
+ *
+ *
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and
+ * list "node" (struct klist_node) objects. For struct klist, a spinlock
+ * is included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
+ *
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
+ *
+ * It works using a 3rd object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the reference
+ * count of the previous klist_node and increments the count of the next
+ * klist_node. It then drops the lock and returns.
+ *
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block
+ * until it is actually removed. This is useful for objects (like devices)
+ * that have been removed from the system and must be freed (but must wait
+ * until all accessors have finished).
+ *
+ * Copyright (C) 2005 Patrick Mochel
+ *
+ * This file is released under the GPL v2.
+ */
+
+#include <linux/klist.h>
+#include <linux/module.h>
+
+
+/**
+ * klist_init - Initialize a klist structure.
+ * @k: The klist we're initializing.
+ */
+
+void klist_init(struct klist * k)
+{
+ INIT_LIST_HEAD(&k->k_list);
+ spin_lock_init(&k->k_lock);
+}
+
+EXPORT_SYMBOL_GPL(klist_init);
+
+
+static void add_head(struct klist * k, struct klist_node * n)
+{
+ spin_lock(&k->k_lock);
+ list_add(&n->n_node, &k->k_list);
+ spin_unlock(&k->k_lock);
+}
+
+static void add_tail(struct klist * k, struct klist_node * n)
+{
+ spin_lock(&k->k_lock);
+ list_add_tail(&n->n_node, &k->k_list);
+ spin_unlock(&k->k_lock);
+}
+
+
+static void klist_node_init(struct klist * k, struct klist_node * n)
+{
+ INIT_LIST_HEAD(&n->n_node);
+ init_completion(&n->n_removed);
+ kref_init(&n->n_ref);
+ n->n_klist = k;
+}
+
+
+/**
+ * klist_add_head - Initialize a klist_node and add it to front.
+ * @k: klist it's going on.
+ * @n: node we're adding.
+ */
+
+void klist_add_head(struct klist * k, struct klist_node * n)
+{
+ klist_node_init(k, n);
+ add_head(k, n);
+}
+
+EXPORT_SYMBOL_GPL(klist_add_head);
+
+
+/**
+ * klist_add_tail - Initialize a klist_node and add it to back.
+ * @k: klist it's going on.
+ * @n: node we're adding.
+ */
+
+void klist_add_tail(struct klist * k, struct klist_node * n)
+{
+ klist_node_init(k, n);
+ add_tail(k, n);
+}
+
+EXPORT_SYMBOL_GPL(klist_add_tail);
+
+
+static void klist_release(struct kref * kref)
+{
+ struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+ list_del(&n->n_node);
+ complete(&n->n_removed);
+ n->n_klist = NULL;
+}
+
+static int klist_dec_and_del(struct klist_node * n)
+{
+ return kref_put(&n->n_ref, klist_release);
+}
+
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+
+void klist_del(struct klist_node * n)
+{
+ struct klist * k = n->n_klist;
+
+ spin_lock(&k->k_lock);
+ klist_dec_and_del(n);
+ spin_unlock(&k->k_lock);
+}
+
+EXPORT_SYMBOL_GPL(klist_del);
+
+
+/**
+ * klist_remove - Decrement the refcount of node and wait for it to go away.
+ * @n: node we're removing.
+ */
+
+void klist_remove(struct klist_node * n)
+{
+ struct klist * k = n->n_klist;
+ spin_lock(&k->k_lock);
+ klist_dec_and_del(n);
+ spin_unlock(&k->k_lock);
+ wait_for_completion(&n->n_removed);
+}
+
+EXPORT_SYMBOL_GPL(klist_remove);
+
+
+/**
+ * klist_node_attached - Say whether a node is bound to a list or not.
+ * @n: Node that we're testing.
+ */
+
+int klist_node_attached(struct klist_node * n)
+{
+ return (n->n_klist != NULL);
+}
+
+EXPORT_SYMBOL_GPL(klist_node_attached);
+
+
+/**
+ * klist_iter_init_node - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter we're filling.
+ * @n: node to start with.
+ *
+ * Similar to klist_iter_init(), but starts the action off with @n,
+ * instead of with the list head.
+ */
+
+void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
+{
+ i->i_klist = k;
+ i->i_head = &k->k_list;
+ i->i_cur = n;
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_init_node);
+
+
+/**
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
+ *
+ * Similar to klist_iter_init_node(), but starts with the list head.
+ */
+
+void klist_iter_init(struct klist * k, struct klist_iter * i)
+{
+ klist_iter_init_node(k, i, NULL);
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_init);
+
+
+/**
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
+ *
+ * Must be called when done iterating over list, as it decrements the
+ * refcount of the current node. Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
+ */
+
+void klist_iter_exit(struct klist_iter * i)
+{
+ if (i->i_cur) {
+ klist_del(i->i_cur);
+ i->i_cur = NULL;
+ }
+}
+
+EXPORT_SYMBOL_GPL(klist_iter_exit);
+
+
+static struct klist_node * to_klist_node(struct list_head * n)
+{
+ return container_of(n, struct klist_node, n_node);
+}
+
+
+/**
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
+ */
+
+struct klist_node * klist_next(struct klist_iter * i)
+{
+ struct list_head * next;
+ struct klist_node * knode = NULL;
+
+ spin_lock(&i->i_klist->k_lock);
+ if (i->i_cur) {
+ next = i->i_cur->n_node.next;
+ klist_dec_and_del(i->i_cur);
+ } else
+ next = i->i_head->next;
+
+ if (next != i->i_head) {
+ knode = to_klist_node(next);
+ kref_get(&knode->n_ref);
+ }
+ i->i_cur = knode;
+ spin_unlock(&i->i_klist->k_lock);
+ return knode;
+}
+
+EXPORT_SYMBOL_GPL(klist_next);
+
+
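
A sketch of the iteration pattern the header comment describes (struct my_dev
is hypothetical; the kref taken by klist_next() keeps each node valid while it
is inspected, even if it is klist_del()'d concurrently):

    struct my_dev {
            struct klist_node knode;
            const char *name;
    };

    static void print_all(struct klist *k)
    {
            struct klist_iter iter;
            struct klist_node *n;

            klist_iter_init(k, &iter);
            while ((n = klist_next(&iter))) {
                    struct my_dev *dev = container_of(n, struct my_dev, knode);
                    printk(KERN_INFO "%s\n", dev->name);
            }
            /* drops the ref on the current node if we broke out early */
            klist_iter_exit(&iter);
    }
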
diff --git a/lib/kobject.c b/lib/kobject.c
index 94048826624..dd0917dd9fa 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(kobject_set_name);
* @new_name: object's new name
*/
-int kobject_rename(struct kobject * kobj, char *new_name)
+int kobject_rename(struct kobject * kobj, const char *new_name)
{
int error = 0;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2a4e7671eaf..8e49d21057e 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -197,7 +197,7 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
int i = 0;
int retval;
char *kobj_path = NULL;
- char *name = NULL;
+ const char *name = NULL;
char *action_string;
u64 seq;
struct kobject *top_kobj = kobj;
@@ -246,10 +246,10 @@ void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
if (hotplug_ops->name)
name = hotplug_ops->name(kset, kobj);
if (name == NULL)
- name = kset->kobj.name;
+ name = kobject_name(&kset->kobj);
argv [0] = hotplug_path;
- argv [1] = name;
+ argv [1] = (char *)name; /* won't be changed but 'const' has to go */
argv [2] = NULL;
/* minimal command environment */
diff --git a/lib/sha1.c b/lib/sha1.c
index 2f7f1148dfd..1cdabe3065f 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -41,7 +41,7 @@ void sha_transform(__u32 *digest, const char *in, __u32 *W)
__u32 a, b, c, d, e, t, i;
for (i = 0; i < 16; i++)
- W[i] = be32_to_cpu(((const __u32 *)in)[i]);
+ W[i] = be32_to_cpu(((const __be32 *)in)[i]);
for (i = 0; i < 64; i++)
W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
new file mode 100644
index 00000000000..42c08ef828c
--- /dev/null
+++ b/lib/smp_processor_id.c
@@ -0,0 +1,55 @@
+/*
+ * lib/smp_processor_id.c
+ *
+ * DEBUG_PREEMPT variant of smp_processor_id().
+ */
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+unsigned int debug_smp_processor_id(void)
+{
+ unsigned long preempt_count = preempt_count();
+ int this_cpu = raw_smp_processor_id();
+ cpumask_t this_mask;
+
+ if (likely(preempt_count))
+ goto out;
+
+ if (irqs_disabled())
+ goto out;
+
+ /*
+ * Kernel threads bound to a single CPU can safely use
+ * smp_processor_id():
+ */
+ this_mask = cpumask_of_cpu(this_cpu);
+
+ if (cpus_equal(current->cpus_allowed, this_mask))
+ goto out;
+
+ /*
+ * It is valid to assume CPU-locality during early bootup:
+ */
+ if (system_state != SYSTEM_RUNNING)
+ goto out;
+
+ /*
+ * Avoid recursion:
+ */
+ preempt_disable();
+
+ if (!printk_ratelimit())
+ goto out_enable;
+
+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+
+out_enable:
+ preempt_enable_no_resched();
+out:
+ return this_cpu;
+}
+
+EXPORT_SYMBOL(debug_smp_processor_id);
+
diff --git a/lib/textsearch.c b/lib/textsearch.c
new file mode 100644
index 00000000000..1e934c196f0
--- /dev/null
+++ b/lib/textsearch.c
@@ -0,0 +1,317 @@
+/*
+ * lib/textsearch.c Generic text search interface
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ * Pablo Neira Ayuso <pablo@eurodev.net>
+ *
+ * ==========================================================================
+ *
+ * INTRODUCTION
+ *
+ * The textsearch infrastructure provides text searching facilities for
+ * both linear and non-linear data. Individual search algorithms are
+ * implemented in modules and chosen by the user.
+ *
+ * ARCHITECTURE
+ *
+ * User
+ * +----------------+
+ * | finish()|<--------------(6)-----------------+
+ * |get_next_block()|<--------------(5)---------------+ |
+ * | | Algorithm | |
+ * | | +------------------------------+
+ * | | | init() find() destroy() |
+ * | | +------------------------------+
+ * | | Core API ^ ^ ^
+ * | | +---------------+ (2) (4) (8)
+ * | (1)|----->| prepare() |---+ | |
+ * | (3)|----->| find()/next() |-----------+ |
+ * | (7)|----->| destroy() |----------------------+
+ * +----------------+ +---------------+
+ *
+ * (1) User configures a search by calling _prepare() specifying the
+ * search parameters such as the pattern and algorithm name.
+ * (2) Core requests the algorithm to allocate and initialize a search
+ * configuration according to the specified parameters.
+ * (3) User starts the search(es) by calling _find() or _next() to
+ * fetch subsequent occurrences. A state variable is provided
+ * to the algorithm to store persistent variables.
+ * (4) Core eventually resets the search offset and forwards the find()
+ * request to the algorithm.
+ * (5) Algorithm calls get_next_block() provided by the user continuously
+ * to fetch the data to be searched in, block by block.
+ * (6) Algorithm invokes finish() after the last call to get_next_block
+ * to clean up any leftovers from get_next_block. (Optional)
+ * (7) User destroys the configuration by calling _destroy().
+ * (8) Core notifies the algorithm to destroy algorithm-specific
+ * allocations. (Optional)
+ *
+ * USAGE
+ *
+ * Before a search can be performed, a configuration must be created
+ * by calling textsearch_prepare(), specifying the search algorithm and
+ * the pattern to look for. The returned configuration may then be used
+ * an arbitrary number of times, even in parallel, as long as a
+ * separate struct ts_state variable is provided to every instance.
+ *
+ * The actual search is performed by either calling
+ * textsearch_find_continuous() for linear data or by providing your own
+ * get_next_block() implementation and calling textsearch_find(). Both
+ * functions return the position of the first occurrence of the pattern
+ * or UINT_MAX if no match was found. Subsequent occurrences can be
+ * found by calling textsearch_next() regardless of the linearity of
+ * the data.
+ *
+ * Once you're done using a configuration, it must be given back via
+ * textsearch_destroy().
+ *
+ * EXAMPLE
+ *
+ * unsigned int pos;
+ * int err;
+ * struct ts_config *conf;
+ * struct ts_state state;
+ * const char *pattern = "chicken";
+ * const char *example = "We dance the funky chicken";
+ *
+ * conf = textsearch_prepare("kmp", pattern, strlen(pattern),
+ * GFP_KERNEL, TS_AUTOLOAD);
+ * if (IS_ERR(conf)) {
+ * err = PTR_ERR(conf);
+ * goto errout;
+ * }
+ *
+ * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
+ * if (pos != UINT_MAX)
+ * panic("Oh my god, dancing chickens at %d\n", pos);
+ *
+ * textsearch_destroy(conf);
+ *
+ * ==========================================================================
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/err.h>
+#include <linux/textsearch.h>
+
+static LIST_HEAD(ts_ops);
+static DEFINE_SPINLOCK(ts_mod_lock);
+
+static inline struct ts_ops *lookup_ts_algo(const char *name)
+{
+ struct ts_ops *o;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(o, &ts_ops, list) {
+ if (!strcmp(name, o->name)) {
+ if (!try_module_get(o->owner))
+ o = NULL;
+ rcu_read_unlock();
+ return o;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/**
+ * textsearch_register - register a textsearch module
+ * @ops: operations lookup table
+ *
+ * This function must be called by textsearch modules to announce
+ * their presence. The specified @ops must have %name set to a
+ * unique identifier and the callbacks find(), init(), get_pattern(),
+ * and get_pattern_len() must be implemented.
+ *
+ * Returns 0 or -EEXIST if another module has already registered
+ * with same name.
+ */
+int textsearch_register(struct ts_ops *ops)
+{
+ int err = -EEXIST;
+ struct ts_ops *o;
+
+ if (ops->name == NULL || ops->find == NULL || ops->init == NULL ||
+ ops->get_pattern == NULL || ops->get_pattern_len == NULL)
+ return -EINVAL;
+
+ spin_lock(&ts_mod_lock);
+ list_for_each_entry(o, &ts_ops, list) {
+ if (!strcmp(ops->name, o->name))
+ goto errout;
+ }
+
+ list_add_tail_rcu(&ops->list, &ts_ops);
+ err = 0;
+errout:
+ spin_unlock(&ts_mod_lock);
+ return err;
+}
+
+/**
+ * textsearch_unregister - unregister a textsearch module
+ * @ops: operations lookup table
+ *
+ * This function must be called by textsearch modules to announce
+ * their disappearance, for example when the module gets unloaded.
+ * The @ops parameter must be the same as the one used during
+ * registration.
+ *
+ * Returns 0 on success or -ENOENT if no matching textsearch
+ * registration was found.
+ */
+int textsearch_unregister(struct ts_ops *ops)
+{
+ int err = 0;
+ struct ts_ops *o;
+
+ spin_lock(&ts_mod_lock);
+ list_for_each_entry(o, &ts_ops, list) {
+ if (o == ops) {
+ list_del_rcu(&o->list);
+ goto out;
+ }
+ }
+
+ err = -ENOENT;
+out:
+ spin_unlock(&ts_mod_lock);
+ return err;
+}
+
+struct ts_linear_state
+{
+ unsigned int len;
+ const void *data;
+};
+
+static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
+ struct ts_config *conf,
+ struct ts_state *state)
+{
+ struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
+
+ if (likely(consumed < st->len)) {
+ *dst = st->data + consumed;
+ return st->len - consumed;
+ }
+
+ return 0;
+}
+
+/**
+ * textsearch_find_continuous - search a pattern in continuous/linear data
+ * @conf: search configuration
+ * @state: search state
+ * @data: data to search in
+ * @len: length of data
+ *
+ * A simplified version of textsearch_find() for continuous/linear data.
+ * Call textsearch_next() to retrieve subsequent matches.
+ *
+ * Returns the position of first occurrence of the pattern or
+ * UINT_MAX if no occurrence was found.
+ */
+unsigned int textsearch_find_continuous(struct ts_config *conf,
+ struct ts_state *state,
+ const void *data, unsigned int len)
+{
+ struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
+
+ conf->get_next_block = get_linear_data;
+ st->data = data;
+ st->len = len;
+
+ return textsearch_find(conf, state);
+}
+
+/**
+ * textsearch_prepare - Prepare a search
+ * @algo: name of search algorithm
+ * @pattern: pattern data
+ * @len: length of pattern
+ * @gfp_mask: allocation mask
+ * @flags: search flags
+ *
+ * Looks up the search algorithm module and creates a new textsearch
+ * configuration for the specified pattern. Upon completion all
+ * necessary refcnts are held and the configuration must be released
+ * using textsearch_destroy() after usage.
+ *
+ * Note: The format of the pattern may not be compatible between
+ * the various search algorithms.
+ *
+ * Returns a new textsearch configuration according to the specified
+ * parameters or an ERR_PTR().
+ */
+struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
+ unsigned int len, int gfp_mask, int flags)
+{
+ int err = -ENOENT;
+ struct ts_config *conf;
+ struct ts_ops *ops;
+
+ ops = lookup_ts_algo(algo);
+#ifdef CONFIG_KMOD
+ /*
+ * Why not always autoload, you may ask. Some users are
+ * in a situation where requesting a module may deadlock,
+ * especially when the module is located on an NFS mount.
+ */
+ if (ops == NULL && flags & TS_AUTOLOAD) {
+ request_module("ts_%s", algo);
+ ops = lookup_ts_algo(algo);
+ }
+#endif
+
+ if (ops == NULL)
+ goto errout;
+
+ conf = ops->init(pattern, len, gfp_mask);
+ if (IS_ERR(conf)) {
+ err = PTR_ERR(conf);
+ goto errout;
+ }
+
+ conf->ops = ops;
+ return conf;
+
+errout:
+ if (ops)
+ module_put(ops->owner);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * textsearch_destroy - destroy a search configuration
+ * @conf: search configuration
+ *
+ * Releases all references of the configuration and frees
+ * up the memory.
+ */
+void textsearch_destroy(struct ts_config *conf)
+{
+ if (conf->ops) {
+ if (conf->ops->destroy)
+ conf->ops->destroy(conf);
+ module_put(conf->ops->owner);
+ }
+
+ kfree(conf);
+}
+
+EXPORT_SYMBOL(textsearch_register);
+EXPORT_SYMBOL(textsearch_unregister);
+EXPORT_SYMBOL(textsearch_prepare);
+EXPORT_SYMBOL(textsearch_find_continuous);
+EXPORT_SYMBOL(textsearch_destroy);
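
For non-linear data the caller installs its own get_next_block() and calls
textsearch_find() directly. A minimal sketch for a hypothetical two-fragment
buffer (struct two_frags is invented here and must fit into the state->cb
scratch area, just as ts_linear_state does above):

    struct two_frags {
            const u8 *frag[2];
            unsigned int len[2];
    };

    static unsigned int frag_next_block(unsigned int consumed, const u8 **dst,
                                        struct ts_config *conf,
                                        struct ts_state *state)
    {
            struct two_frags *f = (struct two_frags *) state->cb;

            if (consumed < f->len[0]) {
                    *dst = f->frag[0] + consumed;
                    return f->len[0] - consumed;
            }
            consumed -= f->len[0];
            if (consumed < f->len[1]) {
                    *dst = f->frag[1] + consumed;
                    return f->len[1] - consumed;
            }
            return 0;  /* no more data */
    }

    /* then: conf->get_next_block = frag_next_block; fill in state->cb;
     * pos = textsearch_find(conf, &state); */
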
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
new file mode 100644
index 00000000000..d27c0a07294
--- /dev/null
+++ b/lib/ts_fsm.c
@@ -0,0 +1,338 @@
+/*
+ * lib/ts_fsm.c A naive finite state machine text search approach
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ *
+ * ==========================================================================
+ *
+ * A finite state machine consists of n states (struct ts_fsm_token)
+ * representing the pattern as a finite automaton. The data is read
+ * sequentially on an octet basis. Every state token specifies the
+ * number of recurrences and the type of value accepted, which can be
+ * either a specific character or a ctype-based set of characters. The
+ * available types of recurrence include 1, (0|1), [0 n], and [1 n].
+ *
+ * The algorithm distinguishes between strict and non-strict mode,
+ * specifying whether the pattern has to start at the first octet.
+ * Strict mode is enabled by default and can be disabled by inserting
+ * TS_FSM_HEAD_IGNORE as the first token in the chain.
+ *
+ * The runtime performance of the algorithm should be around O(n);
+ * in strict mode the average runtime can be better.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/textsearch.h>
+#include <linux/textsearch_fsm.h>
+
+struct ts_fsm
+{
+ unsigned int ntokens;
+ struct ts_fsm_token tokens[0];
+};
+
+/* other values derived from ctype.h */
+#define _A 0x100 /* ascii */
+#define _W 0x200 /* wildcard */
+
+/* Map to _ctype flags and some magic numbers */
+static u16 token_map[TS_FSM_TYPE_MAX+1] = {
+ [TS_FSM_SPECIFIC] = 0,
+ [TS_FSM_WILDCARD] = _W,
+ [TS_FSM_CNTRL] = _C,
+ [TS_FSM_LOWER] = _L,
+ [TS_FSM_UPPER] = _U,
+ [TS_FSM_PUNCT] = _P,
+ [TS_FSM_SPACE] = _S,
+ [TS_FSM_DIGIT] = _D,
+ [TS_FSM_XDIGIT] = _D | _X,
+ [TS_FSM_ALPHA] = _U | _L,
+ [TS_FSM_ALNUM] = _U | _L | _D,
+ [TS_FSM_PRINT] = _P | _U | _L | _D | _SP,
+ [TS_FSM_GRAPH] = _P | _U | _L | _D,
+ [TS_FSM_ASCII] = _A,
+};
+
+static u16 token_lookup_tbl[256] = {
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
+_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */
+_W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */
+_W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */
+_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */
+_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */
+_W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */
+_W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */
+_W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */
+_W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */
+_W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */
+_W, _W, _W, _W, /* 128-131 */
+_W, _W, _W, _W, /* 132-135 */
+_W, _W, _W, _W, /* 136-139 */
+_W, _W, _W, _W, /* 140-143 */
+_W, _W, _W, _W, /* 144-147 */
+_W, _W, _W, _W, /* 148-151 */
+_W, _W, _W, _W, /* 152-155 */
+_W, _W, _W, _W, /* 156-159 */
+_W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */
+_W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */
+_W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */
+_W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */
+_W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */
+
+static inline int match_token(struct ts_fsm_token *t, u8 d)
+{
+ if (t->type)
+ return (token_lookup_tbl[d] & t->type) != 0;
+ else
+ return t->value == d;
+}
+
+static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
+{
+ struct ts_fsm *fsm = ts_config_priv(conf);
+ struct ts_fsm_token *cur = NULL, *next;
+ unsigned int match_start, block_idx = 0, tok_idx;
+ unsigned block_len = 0, strict, consumed = state->offset;
+ const u8 *data;
+
+#define GET_NEXT_BLOCK() \
+({ consumed += block_idx; \
+ block_idx = 0; \
+ block_len = conf->get_next_block(consumed, &data, conf, state); })
+
+#define TOKEN_MISMATCH() \
+ do { \
+ if (strict) \
+ goto no_match; \
+ block_idx++; \
+ goto startover; \
+ } while(0)
+
+#define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK())
+
+ if (end_of_data())
+ goto no_match;
+
+ strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE;
+
+startover:
+ match_start = consumed + block_idx;
+
+ for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) {
+ cur = &fsm->tokens[tok_idx];
+
+ if (likely(tok_idx < (fsm->ntokens - 1)))
+ next = &fsm->tokens[tok_idx + 1];
+ else
+ next = NULL;
+
+ switch (cur->recur) {
+ case TS_FSM_SINGLE:
+ if (end_of_data())
+ goto no_match;
+
+ if (!match_token(cur, data[block_idx]))
+ TOKEN_MISMATCH();
+ break;
+
+ case TS_FSM_PERHAPS:
+ if (end_of_data() ||
+ !match_token(cur, data[block_idx]))
+ continue;
+ break;
+
+ case TS_FSM_MULTI:
+ if (end_of_data())
+ goto no_match;
+
+ if (!match_token(cur, data[block_idx]))
+ TOKEN_MISMATCH();
+
+ block_idx++;
+ /* fall through */
+
+ case TS_FSM_ANY:
+ if (next == NULL)
+ goto found_match;
+
+ if (end_of_data())
+ continue;
+
+ while (!match_token(next, data[block_idx])) {
+ if (!match_token(cur, data[block_idx]))
+ TOKEN_MISMATCH();
+ block_idx++;
+ if (end_of_data())
+ goto no_match;
+ }
+ continue;
+
+ /*
+ * Optimization: Prefer small local loop over jumping
+ * back and forth until garbage at head is munched.
+ */
+ case TS_FSM_HEAD_IGNORE:
+ if (end_of_data())
+ continue;
+
+ while (!match_token(next, data[block_idx])) {
+ /*
+ * Special case, don't start over upon
+ * a mismatch, give the user the
+ * chance to specify the type of data
+ * allowed to be ignored.
+ */
+ if (!match_token(cur, data[block_idx]))
+ goto no_match;
+
+ block_idx++;
+ if (end_of_data())
+ goto no_match;
+ }
+
+ match_start = consumed + block_idx;
+ continue;
+ }
+
+ block_idx++;
+ }
+
+ if (end_of_data())
+ goto found_match;
+
+no_match:
+ return UINT_MAX;
+
+found_match:
+ state->offset = consumed + block_idx;
+ return match_start;
+}
+
+static struct ts_config *fsm_init(const void *pattern, unsigned int len,
+ int gfp_mask)
+{
+ int i, err = -EINVAL;
+ struct ts_config *conf;
+ struct ts_fsm *fsm;
+ struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern;
+ unsigned int ntokens = len / sizeof(*tokens);
+ size_t priv_size = sizeof(*fsm) + len;
+
+ if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
+ goto errout;
+
+ for (i = 0; i < ntokens; i++) {
+ struct ts_fsm_token *t = &tokens[i];
+
+ if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX)
+ goto errout;
+
+ if (t->recur == TS_FSM_HEAD_IGNORE &&
+ (i != 0 || i == (ntokens - 1)))
+ goto errout;
+ }
+
+ conf = alloc_ts_config(priv_size, gfp_mask);
+ if (IS_ERR(conf))
+ return conf;
+
+ fsm = ts_config_priv(conf);
+ fsm->ntokens = ntokens;
+ memcpy(fsm->tokens, pattern, len);
+
+ for (i = 0; i < fsm->ntokens; i++) {
+ struct ts_fsm_token *t = &fsm->tokens[i];
+ t->type = token_map[t->type];
+ }
+
+ return conf;
+
+errout:
+ return ERR_PTR(err);
+}
+
+static void *fsm_get_pattern(struct ts_config *conf)
+{
+ struct ts_fsm *fsm = ts_config_priv(conf);
+ return fsm->tokens;
+}
+
+static unsigned int fsm_get_pattern_len(struct ts_config *conf)
+{
+ struct ts_fsm *fsm = ts_config_priv(conf);
+ return fsm->ntokens * sizeof(struct ts_fsm_token);
+}
+
+static struct ts_ops fsm_ops = {
+ .name = "fsm",
+ .find = fsm_find,
+ .init = fsm_init,
+ .get_pattern = fsm_get_pattern,
+ .get_pattern_len = fsm_get_pattern_len,
+ .owner = THIS_MODULE,
+ .list = LIST_HEAD_INIT(fsm_ops.list)
+};
+
+static int __init init_fsm(void)
+{
+ return textsearch_register(&fsm_ops);
+}
+
+static void __exit exit_fsm(void)
+{
+ textsearch_unregister(&fsm_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_fsm);
+module_exit(exit_fsm);
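
A sketch of how a pattern might be expressed for this module, matching the
literal "foo" followed by one or more digits (the struct ts_fsm_token field
names are assumed from linux/textsearch_fsm.h):

    static struct ts_fsm_token pattern[] = {
            { .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = 'f' },
            { .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = 'o' },
            { .type = TS_FSM_SPECIFIC, .recur = TS_FSM_SINGLE, .value = 'o' },
            { .type = TS_FSM_DIGIT,    .recur = TS_FSM_MULTI },  /* 1..n digits */
    };

    conf = textsearch_prepare("fsm", pattern, sizeof(pattern),
                              GFP_KERNEL, TS_AUTOLOAD);

fsm_init() then maps each .type through token_map[], so callers pass the
TS_FSM_* constants, not the raw ctype flags.
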
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
new file mode 100644
index 00000000000..73266b97558
--- /dev/null
+++ b/lib/ts_kmp.c
@@ -0,0 +1,145 @@
+/*
+ * lib/ts_kmp.c Knuth-Morris-Pratt text search implementation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ *
+ * ==========================================================================
+ *
+ * Implements a linear-time string-matching algorithm due to Knuth,
+ * Morris, and Pratt [1]. Their algorithm avoids the explicit
+ * computation of the transition function DELTA altogether. Its
+ * matching time is O(n), for n being length(text), using just an
+ * auxiliary function PI[1..m], for m being length(pattern),
+ * precomputed from the pattern in time O(m). The array PI allows
+ * the transition function DELTA to be computed efficiently
+ * "on the fly" as needed. Roughly speaking, for any state
+ * "q" = 0,1,...,m and any character "a" in SIGMA, the value
+ * PI["q"] contains the information that is independent of "a" and
+ * is needed to compute DELTA("q", "a") [2]. Since the array PI
+ * has only m entries, whereas DELTA has O(m|SIGMA|) entries, we
+ * save a factor of |SIGMA| in the preprocessing time by computing
+ * PI rather than DELTA.
+ *
+ * [1] Cormen, Leiserson, Rivest, Stein
+ * Introduction to Algorithms, 2nd Edition, MIT Press
+ * [2] See finite automata theory
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/textsearch.h>
+
+struct ts_kmp
+{
+ u8 * pattern;
+ unsigned int pattern_len;
+ unsigned int prefix_tbl[0];
+};
+
+static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
+{
+ struct ts_kmp *kmp = ts_config_priv(conf);
+ unsigned int i, q = 0, text_len, consumed = state->offset;
+ const u8 *text;
+
+ for (;;) {
+ text_len = conf->get_next_block(consumed, &text, conf, state);
+
+ if (unlikely(text_len == 0))
+ break;
+
+ for (i = 0; i < text_len; i++) {
+ while (q > 0 && kmp->pattern[q] != text[i])
+ q = kmp->prefix_tbl[q - 1];
+ if (kmp->pattern[q] == text[i])
+ q++;
+ if (unlikely(q == kmp->pattern_len)) {
+ state->offset = consumed + i + 1;
+ return state->offset - kmp->pattern_len;
+ }
+ }
+
+ consumed += text_len;
+ }
+
+ return UINT_MAX;
+}
+
+static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
+ unsigned int *prefix_tbl)
+{
+ unsigned int k, q;
+
+ for (k = 0, q = 1; q < len; q++) {
+ while (k > 0 && pattern[k] != pattern[q])
+ k = prefix_tbl[k-1];
+ if (pattern[k] == pattern[q])
+ k++;
+ prefix_tbl[q] = k;
+ }
+}
+
+static struct ts_config *kmp_init(const void *pattern, unsigned int len,
+ int gfp_mask)
+{
+ struct ts_config *conf;
+ struct ts_kmp *kmp;
+ unsigned int prefix_tbl_len = len * sizeof(unsigned int);
+ size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
+
+ conf = alloc_ts_config(priv_size, gfp_mask);
+ if (IS_ERR(conf))
+ return conf;
+
+ kmp = ts_config_priv(conf);
+ kmp->pattern_len = len;
+ compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
+ kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
+ memcpy(kmp->pattern, pattern, len);
+
+ return conf;
+}
+
+static void *kmp_get_pattern(struct ts_config *conf)
+{
+ struct ts_kmp *kmp = ts_config_priv(conf);
+ return kmp->pattern;
+}
+
+static unsigned int kmp_get_pattern_len(struct ts_config *conf)
+{
+ struct ts_kmp *kmp = ts_config_priv(conf);
+ return kmp->pattern_len;
+}
+
+static struct ts_ops kmp_ops = {
+ .name = "kmp",
+ .find = kmp_find,
+ .init = kmp_init,
+ .get_pattern = kmp_get_pattern,
+ .get_pattern_len = kmp_get_pattern_len,
+ .owner = THIS_MODULE,
+ .list = LIST_HEAD_INIT(kmp_ops.list)
+};
+
+static int __init init_kmp(void)
+{
+ return textsearch_register(&kmp_ops);
+}
+
+static void __exit exit_kmp(void)
+{
+ textsearch_unregister(&kmp_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_kmp);
+module_exit(exit_kmp);
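
As a worked example of the table compute_prefix_tbl() builds, take the CLRS
pattern "ababaca" (PI[0] is 0 by definition; the loop starts at q = 1 and
relies on the configuration buffer being zero-filled):

    q:           0  1  2  3  4  5  6
    pattern[q]:  a  b  a  b  a  c  a
    PI[q]:       0  0  1  2  3  0  1

PI[4] = 3 because "aba" is the longest proper prefix of "ababa" that is also a
suffix of it. On a mismatch after matching five characters, kmp_find() falls
back to state q = 3 via prefix_tbl[4] and continues without re-reading any
text, which keeps the search O(n).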