Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/div64.h                 7
-rw-r--r--  include/asm-generic/dma-mapping-broken.h    4
-rw-r--r--  include/asm-generic/gpio.h                 25
-rw-r--r--  include/asm-generic/memory_model.h          2
-rw-r--r--  include/asm-generic/mman.h                  1
-rw-r--r--  include/asm-generic/page.h                 38
-rw-r--r--  include/asm-generic/pgtable.h              26
7 files changed, 64 insertions, 39 deletions
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index 8f4e3193342..a4a49370793 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -30,6 +30,11 @@
__rem; \
})
+static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+	return dividend / divisor;
+}
+
#elif BITS_PER_LONG == 32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
@@ -49,6 +54,8 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
__rem; \
})
+extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
+
#else /* BITS_PER_LONG == ?? */
# error do_div() does not yet support the C64
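
To make the new helper's role concrete, here is a minimal sketch of both calling conventions; the values are hypothetical, and do_div() is the existing macro from this header that divides a 64-bit dividend by a 32-bit divisor in place:

/* Hypothetical caller, illustrating the two calling conventions. */
uint64_t bytes = 10000000000ULL;
uint32_t remainder;

/* do_div() divides in place: 'bytes' becomes the quotient and the
 * 32-bit remainder is returned. */
remainder = do_div(bytes, 512);

/* div64_64() is a plain function for the full 64/64-bit case: the
 * quotient is returned and neither argument is modified. */
uint64_t q = div64_64(10000000000ULL, 3000000000ULL);	/* q == 3 */
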
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index a7f1a55ce6b..29413d3d460 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -3,7 +3,6 @@
/* This is used for archs that do not support DMA */
-
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
@@ -19,4 +18,7 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
BUG();
}
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
#endif /* _ASM_GENERIC_DMA_MAPPING_H */
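
The effect of the two new aliases is worth spelling out: on an architecture that uses this header, a driver written against the noncoherent API now compiles, but any actual call still lands in BUG(). A hypothetical driver fragment:

/* Hypothetical fragment: dma_alloc_noncoherent() expands to
 * dma_alloc_coherent(), which unconditionally calls BUG() here,
 * so this builds on DMA-less architectures but must never run. */
dma_addr_t handle;
void *buf = dma_alloc_noncoherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
dma_free_noncoherent(dev, PAGE_SIZE, buf, handle);
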
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
new file mode 100644
index 00000000000..2d0aab1d861
--- /dev/null
+++ b/include/asm-generic/gpio.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_GENERIC_GPIO_H
+#define _ASM_GENERIC_GPIO_H
+
+/* platforms that don't directly support access to GPIOs through I2C, SPI,
+ * or other blocking infrastructure can use these wrappers.
+ */
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+	return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+	might_sleep();
+	return gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+	might_sleep();
+	gpio_set_value(gpio, value);
+}
+
+#endif /* _ASM_GENERIC_GPIO_H */
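
A sketch of how a driver might use these wrappers; the GPIO number is a made-up example, and on the platforms covered by this header gpio_cansleep() always returns 0, so the _cansleep variants reduce to the plain accessors plus a might_sleep() check:

/* Hypothetical task-context caller. */
unsigned gpio = 42;		/* example GPIO number */

if (gpio_cansleep(gpio))
	/* accessor may block (e.g. a GPIO expander behind I2C):
	 * only legal in task context */
	gpio_set_value_cansleep(gpio, 1);
else
	/* memory-mapped GPIO: safe even from atomic context */
	gpio_set_value(gpio, 1);
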
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 8078cbd2c01..30d8d33491d 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -54,7 +54,7 @@
#define __page_to_pfn(pg) \
({	struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
-	__pg - __section_mem_map_addr(__nr_to_section(__sec));	\
+	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})
#define __pfn_to_page(pfn) \
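
The cast is needed because subtracting two struct page pointers yields a signed ptrdiff_t, while a PFN is an unsigned long; without it, callers comparing page_to_pfn() against unsigned limits get sign-conversion surprises. A self-contained user-space sketch of the type behaviour (the struct layout is invented for the demo):

#include <stdio.h>

struct page { unsigned long flags; };	/* stand-in for the demo */

int main(void)
{
	struct page mem_map[8];
	struct page *pg = &mem_map[5];

	/* Raw pointer difference has the signed type ptrdiff_t... */
	printf("signed diff:  %td\n", pg - mem_map);
	/* ...the patch casts it to the unsigned long a PFN should be. */
	printf("unsigned pfn: %lu\n", (unsigned long)(pg - mem_map));
	return 0;
}
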
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
index 3b41d2bb70d..5e3dde2ee5a 100644
--- a/include/asm-generic/mman.h
+++ b/include/asm-generic/mman.h
@@ -36,7 +36,6 @@
#define MADV_DOFORK 11 /* do inherit across fork */
/* compatibility flags */
-#define MAP_ANON MAP_ANONYMOUS
#define MAP_FILE 0
#endif
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index b55052ce233..a96b5d986b6 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -4,51 +4,21 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#include <linux/log2.h>
+#include <linux/compiler.h>
-/*
- * non-const pure 2^n version of get_order
- * - the arch may override these in asm/bitops.h if they can be implemented
- * more efficiently than using the arch log2 routines
- * - we use the non-const log2() instead if the arch has defined one suitable
- */
-#ifndef ARCH_HAS_GET_ORDER
-static inline __attribute__((const))
-int __get_order(unsigned long size, int page_shift)
+/* Pure 2^n version of get_order */
+static __inline__ __attribute_const__ int get_order(unsigned long size)
{
-#if BITS_PER_LONG == 32 && defined(ARCH_HAS_ILOG2_U32)
- int order = __ilog2_u32(size) - page_shift;
- return order >= 0 ? order : 0;
-#elif BITS_PER_LONG == 64 && defined(ARCH_HAS_ILOG2_U64)
- int order = __ilog2_u64(size) - page_shift;
- return order >= 0 ? order : 0;
-#else
	int order;
-	size = (size - 1) >> (page_shift - 1);
+	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
-#endif
}
-#endif
-
-/**
- * get_order - calculate log2(pages) to hold a block of the specified size
- * @n - size
- *
- * calculate allocation order based on the current page size
- * - this can be used to initialise global variables from constant data
- */
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? \
- ((n < (1UL << PAGE_SHIFT)) ? 0 : ilog2(n) - PAGE_SHIFT) : \
- __get_order(n, PAGE_SHIFT) \
- )
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
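
The loop computes the allocation order: the base-2 logarithm of the number of pages needed to hold 'size' bytes, rounded up. A self-contained user-space sketch, assuming 4 KiB pages purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages for this demo */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same algorithm as the patched get_order() above. */
static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	printf("%d\n", get_order(PAGE_SIZE));		/* 0: one page    */
	printf("%d\n", get_order(PAGE_SIZE + 1));	/* 1: two pages   */
	printf("%d\n", get_order(8 * PAGE_SIZE));	/* 3: eight pages */
	return 0;
}
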
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9d774d07d95..dc8f99ee305 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address)
#define pte_same(A,B) (pte_val(A) == pte_val(B))
#endif
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
-#define page_test_and_clear_dirty(page) (0)
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#define page_test_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
+#define page_clear_dirty(page) do { } while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte) pte_dirty(pte)
#else
#define pte_maybe_dirty(pte) (1)
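
Splitting the old test-and-clear primitive into page_test_dirty() and page_clear_dirty() lets a caller act between the test and the clear. A hedged sketch of the intended consumption pattern (the caller is hypothetical; set_page_dirty() is the usual way to propagate dirtiness to the struct page):

/* Hypothetical caller of the split interface: inspect the
 * architecture's dirty state first, and clear it only after
 * deciding to record the dirtiness in the struct page. */
if (page_test_dirty(page)) {
	page_clear_dirty(page);
	set_page_dirty(page);
}
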
@@ -180,6 +187,21 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address)
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() do {} while (0)
+#define arch_leave_lazy_cpu_mode() do {} while (0)
+#define arch_flush_lazy_cpu_mode() do {} while (0)
#endif
/*
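
To make the pairing contract above concrete, here is a sketch of the shape a paravirtualized context-switch path might take; the call sites are illustrative, not taken from this patch:

/* Hypothetical context-switch fragment honouring the contract:
 * enter once, never nest, leave exactly once. */
arch_enter_lazy_cpu_mode();
/* Privileged state updates issued here (TLS reloads, stack pointer
 * changes, and so on) may be batched by a paravirt backend instead
 * of trapping to the hypervisor one by one. */
load_TLS(next, cpu);
arch_leave_lazy_cpu_mode();	/* the whole batch is flushed here */
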