path: root/include/asm-sh64
Diffstat (limited to 'include/asm-sh64')
-rw-r--r--  include/asm-sh64/bitops.h       |  6
-rw-r--r--  include/asm-sh64/dma-mapping.h  | 18
-rw-r--r--  include/asm-sh64/gpio.h         |  8
-rw-r--r--  include/asm-sh64/ide.h          |  1
-rw-r--r--  include/asm-sh64/io.h           | 55
-rw-r--r--  include/asm-sh64/ipc.h          |  1
-rw-r--r--  include/asm-sh64/scatterlist.h  |  5
-rw-r--r--  include/asm-sh64/semaphore.h    |  1
-rw-r--r--  include/asm-sh64/system.h       |  3
-rw-r--r--  include/asm-sh64/tlbflush.h     |  4
-rw-r--r--  include/asm-sh64/types.h        |  6
11 files changed, 39 insertions, 69 deletions
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
index f3bdcdb5d04..600c59efb4c 100644
--- a/include/asm-sh64/bitops.h
+++ b/include/asm-sh64/bitops.h
@@ -13,6 +13,11 @@
*/
#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
#include <linux/compiler.h>
#include <asm/system.h>
/* For __swab32 */
@@ -136,6 +141,7 @@ static __inline__ unsigned long ffz(unsigned long word)
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
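
For context, a minimal sketch of what this hunk means for users of the header: bit operations must now be reached through <linux/bitops.h> (including <asm/bitops.h> directly trips the new #error), and the <asm-generic/bitops/lock.h> pull-in makes the locked bit primitives available on sh64. The my_* names below are hypothetical, not part of this patch.

#include <linux/bitops.h>	/* the only sanctioned way in after this patch */

#define MY_BUSY_BIT	0		/* hypothetical flag bit */
static unsigned long my_flags;		/* hypothetical driver state word */

static int my_try_claim(void)
{
	/* test_and_set_bit_lock() comes from asm-generic/bitops/lock.h;
	 * returns non-zero if the bit was already set (claim failed). */
	return test_and_set_bit_lock(MY_BUSY_BIT, &my_flags);
}

static void my_release(void)
{
	clear_bit_unlock(MY_BUSY_BIT, &my_flags);	/* release with unlock semantics */
}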
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index de430996020..1438b763a5e 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -2,7 +2,7 @@
#define __ASM_SH_DMA_MAPPING_H
#include <linux/mm.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include <asm/io.h>
struct pci_dev;
@@ -42,7 +42,11 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
- dma_cache_wback_inv((unsigned long)vaddr, size);
+ unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
+ unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;
+
+ for (; s <= e; s += L1_CACHE_BYTES)
+ asm volatile ("ocbp %0, 0" : : "r" (s));
}
static inline dma_addr_t dma_map_single(struct device *dev,
@@ -67,10 +71,9 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
- sg[i].length, dir);
+ dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
- sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+ sg[i].dma_address = sg_phys(&sg[i]);
}
return nents;
@@ -120,10 +123,9 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
- dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
- sg[i].length, dir);
+ dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
- sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+ sg[i].dma_address = sg_phys(&sg[i]);
}
}
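
A hedged caller-side sketch of why the sg_virt()/sg_phys() switch works: the accessors recover the virtual and physical addresses of an entry without touching the page pointer directly, which is what allows the page_link layout change further down. The my_map() helper and the DMA direction are illustrative assumptions.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_map(struct device *dev, struct scatterlist *sg, int nents)
{
	/* hypothetical driver path exercising the converted arch code above */
	int count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);

	if (count == 0)
		return -ENOMEM;
	/* sg_virt(&sg[0]) is page_address(sg_page(&sg[0])) + sg[0].offset */
	return count;
}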
diff --git a/include/asm-sh64/gpio.h b/include/asm-sh64/gpio.h
new file mode 100644
index 00000000000..6bc5a13d841
--- /dev/null
+++ b/include/asm-sh64/gpio.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_SH64_GPIO_H
+#define __ASM_SH64_GPIO_H
+
+/*
+ * This is just a stub, so that every arch using sh-sci has a gpio.h
+ */
+
+#endif /* __ASM_SH64_GPIO_H */
diff --git a/include/asm-sh64/ide.h b/include/asm-sh64/ide.h
index c9d84d5f772..b6e31e8b941 100644
--- a/include/asm-sh64/ide.h
+++ b/include/asm-sh64/ide.h
@@ -19,7 +19,6 @@
/* Without this, the initialisation of PCI IDE cards end up calling
* ide_init_hwif_ports, which won't work. */
#ifdef CONFIG_BLK_DEV_IDEPCI
-#define IDE_ARCH_OBSOLETE_INIT 1
#define ide_default_io_ctl(base) (0)
#endif
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
index 1f37b693192..7bd7314d38c 100644
--- a/include/asm-sh64/io.h
+++ b/include/asm-sh64/io.h
@@ -119,6 +119,13 @@ void insw(unsigned long port, void *addr, unsigned long count);
void outsl(unsigned long port, const void *addr, unsigned long count);
void insl(unsigned long port, void *addr, unsigned long count);
+#define inb_p(addr) inb(addr)
+#define inw_p(addr) inw(addr)
+#define inl_p(addr) inl(addr)
+#define outb_p(x,addr) outb(x,addr)
+#define outw_p(x,addr) outw(x,addr)
+#define outl_p(x,addr) outl(x,addr)
+
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
@@ -175,54 +182,6 @@ unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* n
extern void onchip_unmap(unsigned long vaddr);
/*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software. There are three types of operations that
- * can be applied to dma buffers.
- *
- * - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
- * writing the content of the caches back to memory, if necessary.
- * The function also invalidates the affected part of the caches as
- * necessary before DMA transfers from outside to memory.
- * - dma_cache_inv(start, size) invalidates the affected parts of the
- * caches. Dirty lines of the caches may be written back or simply
- * be discarded. This operation is necessary before dma operations
- * to the memory.
- * - dma_cache_wback(start, size) writes back any dirty lines but does
- * not invalidate the cache. This can be used before DMA reads from
- * memory,
- */
-
-static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
-{
- unsigned long s = start & L1_CACHE_ALIGN_MASK;
- unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
- for (; s <= e; s += L1_CACHE_BYTES)
- asm volatile ("ocbp %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
-{
- // Note that caller has to be careful with overzealous
- // invalidation should there be partial cache lines at the extremities
- // of the specified range
- unsigned long s = start & L1_CACHE_ALIGN_MASK;
- unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
- for (; s <= e; s += L1_CACHE_BYTES)
- asm volatile ("ocbi %0, 0" : : "r" (s));
-}
-
-static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
-{
- unsigned long s = start & L1_CACHE_ALIGN_MASK;
- unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
- for (; s <= e; s += L1_CACHE_BYTES)
- asm volatile ("ocbwb %0, 0" : : "r" (s));
-}
-
-/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
*/
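
A minimal before/after sketch, assuming a driver that flushed a descriptor ring by hand: with the dma_cache_* helpers removed from io.h, the remaining interface is dma_cache_sync() from dma-mapping.h. The my_flush_ring() name and the DMA_BIDIRECTIONAL choice are assumptions for illustration.

#include <linux/dma-mapping.h>

static void my_flush_ring(struct device *dev, void *ring, size_t size)
{
	/* was: dma_cache_wback_inv((unsigned long) ring, size); */
	dma_cache_sync(dev, ring, size, DMA_BIDIRECTIONAL);
}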
diff --git a/include/asm-sh64/ipc.h b/include/asm-sh64/ipc.h
deleted file mode 100644
index a46e3d9c2a3..00000000000
--- a/include/asm-sh64/ipc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipc.h>
diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h
index 1c723f2d7a9..5109251970e 100644
--- a/include/asm-sh64/scatterlist.h
+++ b/include/asm-sh64/scatterlist.h
@@ -14,7 +14,10 @@
#include <asm/types.h>
struct scatterlist {
- struct page * page; /* Location for highmem page, if any */
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
unsigned int offset;/* for highmem, page offset */
dma_addr_t dma_address;
unsigned int length;
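
A short sketch, under the assumption that callers build their lists with the generic helpers, of how the new page_link field gets populated: sg_init_table() clears the entries and marks the end of the list, and sg_set_buf() encodes the backing page into page_link, so the sg_virt()/sg_phys() calls in the hunks above keep working. my_fill_sg() is a hypothetical helper.

#include <linux/scatterlist.h>

static void my_fill_sg(struct scatterlist *sg, void *buf, unsigned int len)
{
	sg_init_table(sg, 1);		/* zero the entry, set the end mark */
	sg_set_buf(&sg[0], buf, len);	/* stores the page in sg->page_link */
}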
diff --git a/include/asm-sh64/semaphore.h b/include/asm-sh64/semaphore.h
index 46952645914..f027cc14b55 100644
--- a/include/asm-sh64/semaphore.h
+++ b/include/asm-sh64/semaphore.h
@@ -44,7 +44,6 @@ struct semaphore {
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
static inline void sema_init (struct semaphore *sem, int val)
{
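
For code that relied on the removed DECLARE_MUTEX_LOCKED(), one plausible replacement sketch (the my_* names are assumptions): declare the semaphore normally and initialise it locked at runtime with sema_init(), or convert the user to a struct completion.

#include <linux/init.h>
#include <asm/semaphore.h>

static struct semaphore my_sem;

static void __init my_setup(void)
{
	sema_init(&my_sem, 0);	/* starts out "locked", as the old macro did */
}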
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
index 5ff94644e8c..be2a15ffcc5 100644
--- a/include/asm-sh64/system.h
+++ b/include/asm-sh64/system.h
@@ -62,8 +62,7 @@ extern void __xchg_called_with_bad_pointer(void);
#define smp_read_barrier_depends() do { } while (0)
#endif /* CONFIG_SMP */
-#define set_rmb(var, value) do { (void)xchg(&var, value); } while (0)
-#define set_mb(var, value) set_rmb(var, value)
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
/* Interrupt Control */
#ifndef HARD_CLI
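
A tiny usage sketch of the surviving macro, with hypothetical names: set_mb() stores the value and doubles as a full memory barrier via xchg(); the dropped set_rmb() was only ever an alias for the same sequence.

static int my_ready;	/* hypothetical flag read by another CPU */

static void my_publish(void)
{
	/* store 1 to my_ready and order it against later accesses */
	set_mb(my_ready, 1);
}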
diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h
index e45beadc29e..16a164a2375 100644
--- a/include/asm-sh64/tlbflush.h
+++ b/include/asm-sh64/tlbflush.h
@@ -20,10 +20,6 @@ extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
diff --git a/include/asm-sh64/types.h b/include/asm-sh64/types.h
index 8d41db2153b..2c7ad73b388 100644
--- a/include/asm-sh64/types.h
+++ b/include/asm-sh64/types.h
@@ -30,9 +30,9 @@ typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
+#if defined(__GNUC__)
+__extension__ typedef __signed__ long long __s64;
+__extension__ typedef unsigned long long __u64;
#endif
#endif /* __ASSEMBLY__ */
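
A small illustration, outside the patch itself, of what __extension__ buys here: under -pedantic or -Wlong-long a plain 'long long' typedef warns in C89 mode, while the __extension__-marked form compiles quietly, so the __STRICT_ANSI__ guard is no longer needed. The my_u64 name is hypothetical.

__extension__ typedef unsigned long long my_u64;	/* no -Wlong-long warning */

static my_u64 my_counter;	/* usable even in strictly conforming units */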