author     Dmitry Torokhov <dtor@insightbb.com>  2007-02-10 01:26:32 -0500
committer  Dmitry Torokhov <dtor@insightbb.com>  2007-02-10 01:26:32 -0500
commit     b22364c8eec89e6b0c081a237f3b6348df87796f (patch)
tree       233a923281fb640106465d076997ff511efb6edf /include/asm-sh
parent     2c8dc071517ec2843869024dc82be2e246f41064 (diff)
parent     66efc5a7e3061c3597ac43a8bb1026488d57e66b (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/asm-sh')
-rw-r--r--  include/asm-sh/apm.h                 |  46
-rw-r--r--  include/asm-sh/atomic-irq.h          |  71
-rw-r--r--  include/asm-sh/atomic-llsc.h         | 107
-rw-r--r--  include/asm-sh/atomic.h              | 153
-rw-r--r--  include/asm-sh/bug.h                 |  53
-rw-r--r--  include/asm-sh/bugs.h                |  12
-rw-r--r--  include/asm-sh/checksum.h            |  69
-rw-r--r--  include/asm-sh/cpu-sh2/cacheflush.h  |   2
-rw-r--r--  include/asm-sh/cpu-sh3/cacheflush.h  |   3
-rw-r--r--  include/asm-sh/cpu-sh4/cache.h       |   2
-rw-r--r--  include/asm-sh/cpu-sh4/cacheflush.h  |   1
-rw-r--r--  include/asm-sh/cpu-sh4/freq.h        |   2
-rw-r--r--  include/asm-sh/dma-mapping.h         |  10
-rw-r--r--  include/asm-sh/irq.h                 |   5
-rw-r--r--  include/asm-sh/pgtable.h             |  47
-rw-r--r--  include/asm-sh/processor.h           |   8
-rw-r--r--  include/asm-sh/push-switch.h         |   3
-rw-r--r--  include/asm-sh/termbits.h            |  11
-rw-r--r--  include/asm-sh/thread_info.h         |   2
19 files changed, 348 insertions, 259 deletions
diff --git a/include/asm-sh/apm.h b/include/asm-sh/apm.h
deleted file mode 100644
index 8b091e93651..00000000000
--- a/include/asm-sh/apm.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright 2006 (c) Andriy Skulysh <askulysh@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-
-#ifndef __ASM_SH_APM_H
-#define __ASM_SH_APM_H
-
-#define APM_AC_OFFLINE 0
-#define APM_AC_ONLINE 1
-#define APM_AC_BACKUP 2
-#define APM_AC_UNKNOWN 0xff
-
-#define APM_BATTERY_STATUS_HIGH 0
-#define APM_BATTERY_STATUS_LOW 1
-#define APM_BATTERY_STATUS_CRITICAL 2
-#define APM_BATTERY_STATUS_CHARGING 3
-#define APM_BATTERY_STATUS_NOT_PRESENT 4
-#define APM_BATTERY_STATUS_UNKNOWN 0xff
-
-#define APM_BATTERY_LIFE_UNKNOWN 0xFFFF
-#define APM_BATTERY_LIFE_MINUTES 0x8000
-#define APM_BATTERY_LIFE_VALUE_MASK 0x7FFF
-
-#define APM_BATTERY_FLAG_HIGH (1 << 0)
-#define APM_BATTERY_FLAG_LOW (1 << 1)
-#define APM_BATTERY_FLAG_CRITICAL (1 << 2)
-#define APM_BATTERY_FLAG_CHARGING (1 << 3)
-#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7)
-#define APM_BATTERY_FLAG_UNKNOWN 0xff
-
-#define APM_UNITS_MINS 0
-#define APM_UNITS_SECS 1
-#define APM_UNITS_UNKNOWN -1
-
-
-extern int (*apm_get_info)(char *buf, char **start, off_t fpos, int length);
-extern int apm_suspended;
-
-void apm_queue_event(apm_event_t event);
-
-#endif
diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h
new file mode 100644
index 00000000000..74f7943cff6
--- /dev/null
+++ b/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH_ATOMIC_IRQ_H
+#define __ASM_SH_ATOMIC_IRQ_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v += i;
+ local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v -= i;
+ local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp += i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long temp, flags;
+
+ local_irq_save(flags);
+ temp = *(long *)v;
+ temp -= i;
+ *(long *)v = temp;
+ local_irq_restore(flags);
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v &= ~mask;
+ local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(long *)v |= mask;
+ local_irq_restore(flags);
+}
+
+#endif /* __ASM_SH_ATOMIC_IRQ_H */
diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h
new file mode 100644
index 00000000000..4b00b78e3f4
--- /dev/null
+++ b/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add_return \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub_return \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_clear_mask \n"
+" and %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (~mask), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_set_mask \n"
+" or %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (mask), "r" (&v->counter)
+ : "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
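
The movli.l/movco.l pairs above are SH-4A's load-locked/store-conditional instructions, so each routine is a retry loop: if the reservation set up by movli.l is lost before the store, movco.l leaves the T bit clear and "bf 1b" retries. A rough C-level sketch of the same control flow, illustrative only and not part of this patch (atomic_add_return_sketch is a made-up name, and a generic __sync_val_compare_and_swap builtin stands in for the ll/sc pair):

        /*
         * Illustrative sketch only: what the movli.l/movco.l retry loop in
         * atomic_add_return() amounts to, expressed with a compare-and-swap
         * builtin instead of the real load-locked/store-conditional pair.
         */
        static inline int atomic_add_return_sketch(int i, atomic_t *v)
        {
                int prev, next;

                do {
                        prev = v->counter;      /* movli.l @%2, %0 */
                        next = prev + i;        /* add     %1, %0  */
                } while (__sync_val_compare_and_swap(&v->counter, prev, next) != prev);
                                                /* store failed -> bf 1b */
                return next;
        }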
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 28305c3cbdd..e12570b9339 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
#include <linux/compiler.h>
#include <asm/system.h>
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
+#include <asm/atomic-llsc.h>
#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v += i;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v -= i;
- local_irq_restore(flags);
+#include <asm/atomic-irq.h>
#endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_add_return \n"
-" add %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp += i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_sub_return \n"
-" sub %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
-" synco \n"
- : "=&z" (temp)
- : "r" (i), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- temp = *(long *)v;
- temp -= i;
- *(long *)v = temp;
- local_irq_restore(flags);
-#endif
-
- return temp;
-}
-
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_clear_mask \n"
-" and %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (~mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v &= ~mask;
- local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
- unsigned long tmp;
-
- __asm__ __volatile__ (
-"1: movli.l @%2, %0 ! atomic_set_mask \n"
-" or %1, %0 \n"
-" movco.l %0, @%2 \n"
-" bf 1b \n"
- : "=&z" (tmp)
- : "r" (mask), "r" (&v->counter)
- : "t");
-#else
- unsigned long flags;
-
- local_irq_save(flags);
- *(long *)v |= mask;
- local_irq_restore(flags);
-#endif
-}
-
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
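
Callers see no change from this split: atomic.h keeps the same API and simply pulls in the SH-4A ll/sc variant or the IRQ-masking variant depending on CONFIG_CPU_SH4A. A minimal, hypothetical usage sketch (not from the patch; it assumes the usual ATOMIC_INIT()/atomic_inc() helpers provided by this header):

        /* Hypothetical caller: unaffected by the atomic-llsc/atomic-irq split. */
        #include <asm/atomic.h>

        static atomic_t pending = ATOMIC_INIT(0);

        static void note_event(void)
        {
                atomic_inc(&pending);                   /* atomic_add(1, ...)           */
        }

        static int drain_event(void)
        {
                return atomic_sub_return(1, &pending);  /* ll/sc or irq-masked version  */
        }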
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index 1b4fc52a59e..2f89dd06d0c 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -1,19 +1,54 @@
#ifndef __ASM_SH_BUG_H
#define __ASM_SH_BUG_H
-
#ifdef CONFIG_BUG
-/*
- * Tell the user there is some problem.
- */
-#define BUG() do { \
- printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
- *(volatile int *)0 = 0; \
+
+struct bug_frame {
+ unsigned short opcode;
+ unsigned short line;
+ const char *file;
+ const char *func;
+};
+
+struct pt_regs;
+
+extern void handle_BUG(struct pt_regs *);
+
+#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define BUG() \
+do { \
+ __asm__ __volatile__ ( \
+ ".align 2\n\t" \
+ ".short %O0\n\t" \
+ ".short %O1\n\t" \
+ ".long %O2\n\t" \
+ ".long %O3\n\t" \
+ : \
+ : "n" (TRAPA_BUG_OPCODE), \
+ "i" (__LINE__), "X" (__FILE__), \
+ "X" (__FUNCTION__)); \
+} while (0)
+
+#else
+
+#define BUG() \
+do { \
+ __asm__ __volatile__ ( \
+ ".align 2\n\t" \
+ ".short %O0\n\t" \
+ : \
+ : "n" (TRAPA_BUG_OPCODE)); \
} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
#define HAVE_ARCH_BUG
-#endif
+
+#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
-#endif
+#endif /* __ASM_SH_BUG_H */
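
With CONFIG_DEBUG_BUGVERBOSE, BUG() now plants a struct bug_frame inline at the call site (the trapa #0x3e opcode followed by the line, file and function) and lets the trap handler report it, instead of the old printk-plus-NULL-dereference. A hypothetical sketch of how such a frame could be decoded (report_bug_frame is a made-up name, the real handle_BUG() in arch/sh may differ, and it assumes regs->pc points at the trapa instruction):

        /* Hypothetical sketch, not the actual arch/sh handler: decode the
         * bug_frame that the BUG() macro above emits at the trap site. */
        static void report_bug_frame(struct pt_regs *regs)
        {
                struct bug_frame *bf = (struct bug_frame *)regs->pc;

                if (bf->opcode == TRAPA_BUG_OPCODE)
                        printk(KERN_CRIT "kernel BUG at %s:%d [%s()]!\n",
                               bf->file, bf->line, bf->func);
        }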
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index 795047da5e1..a294997a841 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -16,9 +16,8 @@
static void __init check_bugs(void)
{
- extern char *get_cpu_subtype(void);
extern unsigned long loops_per_jiffy;
- char *p= &init_utsname()->machine[2]; /* "sh" */
+ char *p = &init_utsname()->machine[2]; /* "sh" */
cpu_data->loops_per_jiffy = loops_per_jiffy;
@@ -40,6 +39,15 @@ static void __init check_bugs(void)
*p++ = '4';
*p++ = 'a';
break;
+ case CPU_SH73180 ... CPU_SH7722:
+ *p++ = '4';
+ *p++ = 'a';
+ *p++ = 'l';
+ *p++ = '-';
+ *p++ = 'd';
+ *p++ = 's';
+ *p++ = 'p';
+ break;
default:
*p++ = '?';
*p++ = '!';
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index d44344c88e7..4bc8357e889 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -34,25 +34,26 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
- * passed in an incorrect kernel address to one of these functions.
- *
- * If you use these functions directly please don't forget the
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
* access_ok().
*/
-static __inline__
+static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+ int len, __wsum sum)
{
- return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
-static __inline__
+static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len, __wsum sum, int *err_ptr)
{
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
@@ -62,7 +63,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
* Fold a partial checksum
*/
-static __inline__ __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum sum)
{
unsigned int __dummy;
__asm__("swap.w %0, %1\n\t"
@@ -85,7 +86,7 @@ static __inline__ __sum16 csum_fold(__wsum sum)
* i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
* for linux by * Arnt Gulbrandsen.
*/
-static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum, __dummy0, __dummy1;
@@ -113,10 +114,10 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return csum_fold(sum);
}
-static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
#ifdef __LITTLE_ENDIAN__
unsigned long len_proto = (proto + len) << 8;
@@ -132,6 +133,7 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
: "=r" (sum), "=r" (len_proto)
: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
: "t");
+
return sum;
}
@@ -139,30 +141,28 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
{
- return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-
-static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
{
- return csum_fold (csum_partial(buff, len, 0));
+ return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
-#ifdef CONFIG_IPV6
-static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u32 len, unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
{
unsigned int __dummy;
__asm__("clrt\n\t"
@@ -187,22 +187,21 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
"movt %1\n\t"
"add %1, %0\n"
: "=r" (sum), "=&r" (__dummy)
- : "r" (saddr), "r" (daddr),
+ : "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
: "t");
return csum_fold(sum);
}
-#endif
-/*
+/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user (const void *src,
- void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+static inline __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum,
+ int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic((__force const void *)src,
diff --git a/include/asm-sh/cpu-sh2/cacheflush.h b/include/asm-sh/cpu-sh2/cacheflush.h
index f556fa80ea9..2979efb26de 100644
--- a/include/asm-sh/cpu-sh2/cacheflush.h
+++ b/include/asm-sh/cpu-sh2/cacheflush.h
@@ -15,6 +15,7 @@
*
* - flush_cache_all() flushes entire cache
* - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ * - flush_cache_dup_mm(mm) handles cache flushing when forking
* - flush_cache_page(mm, vmaddr, pfn) flushes a single page
* - flush_cache_range(vma, start, end) flushes a range of pages
*
@@ -27,6 +28,7 @@
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh3/cacheflush.h b/include/asm-sh/cpu-sh3/cacheflush.h
index 03fde97a7fd..f70d8ef76a1 100644
--- a/include/asm-sh/cpu-sh3/cacheflush.h
+++ b/include/asm-sh/cpu-sh3/cacheflush.h
@@ -15,6 +15,7 @@
*
* - flush_cache_all() flushes entire cache
* - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ * - flush_cache_dup_mm(mm) handles cache flushing when forking
* - flush_cache_page(mm, vmaddr, pfn) flushes a single page
* - flush_cache_range(vma, start, end) flushes a range of pages
*
@@ -39,6 +40,7 @@
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
@@ -48,6 +50,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#else
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
diff --git a/include/asm-sh/cpu-sh4/cache.h b/include/asm-sh/cpu-sh4/cache.h
index 6e9c7e6ee8e..f92b20a0983 100644
--- a/include/asm-sh/cpu-sh4/cache.h
+++ b/include/asm-sh/cpu-sh4/cache.h
@@ -22,7 +22,7 @@
#define CCR_CACHE_ICE 0x0100 /* Instruction Cache Enable */
#define CCR_CACHE_ICI 0x0800 /* IC Invalidate */
#define CCR_CACHE_IIX 0x8000 /* IC Index Enable */
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
+#ifndef CONFIG_CPU_SH4A
#define CCR_CACHE_EMODE 0x80000000 /* EMODE Enable */
#endif
diff --git a/include/asm-sh/cpu-sh4/cacheflush.h b/include/asm-sh/cpu-sh4/cacheflush.h
index 515fd574267..b01a10f3122 100644
--- a/include/asm-sh/cpu-sh4/cacheflush.h
+++ b/include/asm-sh/cpu-sh4/cacheflush.h
@@ -18,6 +18,7 @@
*/
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h
index ef2b9b1ae41..602d061ca2d 100644
--- a/include/asm-sh/cpu-sh4/freq.h
+++ b/include/asm-sh/cpu-sh4/freq.h
@@ -10,7 +10,7 @@
#ifndef __ASM_CPU_SH4_FREQ_H
#define __ASM_CPU_SH4_FREQ_H
-#if defined(CONFIG_CPU_SUBTYPE_SH73180)
+#if defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7722)
#define FRQCR 0xa4150000
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
#define FRQCR 0xffc80000
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 37ab0c131a4..8d0867b98e0 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -67,7 +67,7 @@ static inline dma_addr_t dma_map_single(struct device *dev,
if (dev->bus == &pci_bus_type)
return virt_to_bus(ptr);
#endif
- dma_cache_sync(ptr, size, dir);
+ dma_cache_sync(dev, ptr, size, dir);
return virt_to_bus(ptr);
}
@@ -81,7 +81,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
@@ -112,7 +112,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
if (dev->bus == &pci_bus_type)
return;
#endif
- dma_cache_sync(bus_to_virt(dma_handle), size, dir);
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
}
static inline void dma_sync_single_range(struct device *dev,
@@ -124,7 +124,7 @@ static inline void dma_sync_single_range(struct device *dev,
if (dev->bus == &pci_bus_type)
return;
#endif
- dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
+ dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
}
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
@@ -134,7 +134,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
+ dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
sg[i].length, dir);
#endif
sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index fd576088e47..bff965ef4b9 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -37,7 +37,8 @@
# define ONCHIP_NR_IRQS 144
#elif defined(CONFIG_CPU_SUBTYPE_SH7300) || \
defined(CONFIG_CPU_SUBTYPE_SH73180) || \
- defined(CONFIG_CPU_SUBTYPE_SH7343)
+ defined(CONFIG_CPU_SUBTYPE_SH7343) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7722)
# define ONCHIP_NR_IRQS 109
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
# define ONCHIP_NR_IRQS 111
@@ -79,6 +80,8 @@
# define OFFCHIP_NR_IRQS 16
#elif defined(CONFIG_SH_7343_SOLUTION_ENGINE)
# define OFFCHIP_NR_IRQS 12
+#elif defined(CONFIG_SH_7722_SOLUTION_ENGINE)
+# define OFFCHIP_NR_IRQS 14
#elif defined(CONFIG_SH_UNKNOWN)
# define OFFCHIP_NR_IRQS 16 /* Must also be last */
#else
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index c84901dbd8e..036ca284386 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -508,16 +508,50 @@ struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte);
-/* Encode and de-code a swap entry */
/*
+ * Encode and de-code a swap entry
+ *
+ * Constraints:
+ * _PAGE_FILE at bit 0
+ * _PAGE_PRESENT at bit 8
+ * _PAGE_PROTNONE at bit 9
+ *
+ * For the normal case, we encode the swap type into bits 0:7 and the
+ * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
+ * preserved bits in the low 32-bits and use the upper 32 as the swap
+ * offset (along with a 5-bit type), following the same approach as x86
+ * PAE. This keeps the logic quite simple, and allows for a full 32
+ * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with
+ * in the pte_low case.
+ *
+ * As is evident by the Alpha code, if we ever get a 64-bit unsigned
+ * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
+ * much cleaner..
+ *
* NOTE: We should set ZEROs at the position of _PAGE_PRESENT
* and _PAGE_PROTNONE bits
*/
-#define __swp_type(x) ((x).val & 0xff)
-#define __swp_offset(x) ((x).val >> 10)
-#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
+#ifdef CONFIG_X2TLB
+#define __swp_type(x) ((x).val & 0x1f)
+#define __swp_offset(x) ((x).val >> 5)
+#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5})
+#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
+
+/*
+ * Encode and decode a nonlinear file mapping entry
+ */
+#define pte_to_pgoff(pte) ((pte).pte_high)
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+
+#define PTE_FILE_MAX_BITS 32
+#else
+#define __swp_type(x) ((x).val & 0xff)
+#define __swp_offset(x) ((x).val >> 10)
+#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10})
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 })
/*
* Encode and decode a nonlinear file mapping entry
@@ -525,6 +559,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
#define PTE_FILE_MAX_BITS 29
#define pte_to_pgoff(pte) (pte_val(pte) >> 1)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE })
+#endif
typedef pte_t *pte_addr_t;
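
For the non-X2TLB case above, the whole swap entry is shifted left by one when stored in the pte, which keeps _PAGE_FILE (bit 0) clear. A numeric walk-through with hypothetical values, illustrative only and not part of the patch (swp_encoding_example is a made-up name, and the usual swp_entry_t and pte_val() definitions from <linux/swap.h> and <asm/page.h> are assumed):

        /* Illustrative only: encode swap type 3, offset 0x1234, and back. */
        static inline int swp_encoding_example(void)
        {
                swp_entry_t e = __swp_entry(3, 0x1234);  /* e.val      = 0x0048d003 */
                pte_t p       = __swp_entry_to_pte(e);   /* pte_val(p) = 0x0091a006 */
                swp_entry_t b = __pte_to_swp_entry(p);   /* b.val      = 0x0048d003 */

                /* With these values the _PAGE_FILE (bit 0), _PAGE_PRESENT (bit 8)
                 * and _PAGE_PROTNONE (bit 9) positions of the pte stay zero, as
                 * the NOTE above requires. */
                return __swp_type(b) == 3 && __swp_offset(b) == 0x1234;  /* true */
        }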
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h
index 6f1dd7ca1b1..e29f2abb92d 100644
--- a/include/asm-sh/processor.h
+++ b/include/asm-sh/processor.h
@@ -27,6 +27,8 @@
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
+const char *get_cpu_subtype(void);
+
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
*
@@ -52,8 +54,10 @@ enum cpu_type {
CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501,
/* SH-4A types */
- CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781,
- CPU_SH7785,
+ CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785,
+
+ /* SH4AL-DSP types */
+ CPU_SH73180, CPU_SH7343, CPU_SH7722,
/* Unknown subtype */
CPU_SH_NONE
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h
index dfc6bad567f..4903f9e52dd 100644
--- a/include/asm-sh/push-switch.h
+++ b/include/asm-sh/push-switch.h
@@ -4,6 +4,7 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
+#include <linux/platform_device.h>
struct push_switch {
/* switch state */
@@ -12,6 +13,8 @@ struct push_switch {
struct timer_list debounce;
/* workqueue */
struct work_struct work;
+ /* platform device, for workqueue handler */
+ struct platform_device *pdev;
};
struct push_switch_platform_info {
diff --git a/include/asm-sh/termbits.h b/include/asm-sh/termbits.h
index 4f9822a8e7b..f1b7b46f4e9 100644
--- a/include/asm-sh/termbits.h
+++ b/include/asm-sh/termbits.h
@@ -17,6 +17,17 @@ struct termios {
cc_t c_cc[NCCS]; /* control characters */
};
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
/* c_cc characters */
#define VINTR 0
#define VQUIT 1
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 0c01dc55081..879f741105d 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
+#define TIF_FREEZE 19
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -114,6 +115,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x000000FE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x000000FF /* work to do on any return to u-space */