Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bitops/sched.h   21
-rw-r--r--  include/asm-generic/pgtable.h        17
-rw-r--r--  include/asm-generic/vmlinux.lds.h    10
3 files changed, 27 insertions, 21 deletions
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
index 815bb014806..604fab7031a 100644
--- a/include/asm-generic/bitops/sched.h
+++ b/include/asm-generic/bitops/sched.h
@@ -6,28 +6,23 @@
/*
* Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is set.
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#if BITS_PER_LONG == 64
- if (unlikely(b[0]))
+ if (b[0])
return __ffs(b[0]);
- if (likely(b[1]))
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
+ return __ffs(b[1]) + 64;
#elif BITS_PER_LONG == 32
- if (unlikely(b[0]))
+ if (b[0])
return __ffs(b[0]);
- if (unlikely(b[1]))
+ if (b[1])
return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
+ if (b[2])
return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
+ return __ffs(b[3]) + 96;
#else
#error BITS_PER_LONG not defined
#endif
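
With the 140-bit priority array gone, the map is only 100 bits wide: two longs on a 64-bit build, four on a 32-bit build, and the guarantee that at least one bit is set lets the final word skip its zero check. Below is a minimal user-space sketch of the same scan order (a hypothetical harness, not kernel code; __ffs() is modeled with GCC's __builtin_ctzl(), which likewise returns the index of the least significant set bit):

#include <stdio.h>

#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

static inline int __my_ffs(unsigned long word)
{
	return __builtin_ctzl(word);	/* word must be non-zero */
}

/* Same shape as the patched sched_find_first_bit(): scan word by word;
 * the caller guarantees a set bit, so the last word is returned
 * unconditionally. */
static int find_first(const unsigned long *b)
{
	if (BITS_PER_LONG == 64) {
		if (b[0])
			return __my_ffs(b[0]);
		return __my_ffs(b[1]) + 64;
	}
	if (b[0])
		return __my_ffs(b[0]);
	if (b[1])
		return __my_ffs(b[1]) + 32;
	if (b[2])
		return __my_ffs(b[2]) + 64;
	return __my_ffs(b[3]) + 96;
}

int main(void)
{
	unsigned long map[4] = { 0 };

	/* set bit 70, as if a task sat at priority 70 */
	map[70 / BITS_PER_LONG] |= 1UL << (70 % BITS_PER_LONG);
	printf("first set bit: %d\n", find_first(map));	/* prints 70 */
	return 0;
}
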
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index dc8f99ee305..7d7bcf990e9 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -27,13 +27,20 @@ do { \
* Largely same as above, but only sets the access flags (dirty,
* accessed, and writable). Furthermore, we know it always gets set
* to a "more permissive" setting, which allows most architectures
- * to optimize this.
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update_mmu_cache.
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c, so we changed this macro slightly.
*/
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do { \
- set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
- flush_tlb_page(__vma, __address); \
-} while (0)
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __changed; \
+})
#endif
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
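
Because the macro body is now a GCC statement expression rather than a do/while block, it evaluates to a value: whether the PTE actually changed. A hedged sketch of the caller-side pattern this enables (update_mmu_cache() is the real kernel hook, but this is not the exact fault-handler code):

	if (ptep_set_access_flags(vma, address, ptep, entry, dirty)) {
		/* The PTE really changed: the macro already flushed the
		 * stale TLB entry, and software-managed MMUs (such as
		 * sparc's sun4c) are told about the new translation here. */
		update_mmu_cache(vma, address, entry);
	}
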
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8307b1bb337..84155eb67f1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -14,8 +14,8 @@
*(.data) \
*(.data.init.refok)
-#define RODATA \
- . = ALIGN(4096); \
+#define RO_DATA(align) \
+ . = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
@@ -135,7 +135,11 @@
VMLINUX_SYMBOL(__end_rodata) = .; \
} \
\
- . = ALIGN(4096);
+ . = ALIGN((align));
+
+/* RODATA provided for backward compatibility.
+ * All archs are supposed to use RO_DATA() */
+#define RODATA RO_DATA(4096)
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
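
A hedged sketch of how a port's vmlinux.lds.S could adopt the parameterized macro; the load address and the 8 KiB alignment below are illustrative, not taken from any real architecture:

/* hypothetical arch/.../vmlinux.lds.S fragment */
#include <asm-generic/vmlinux.lds.h>

SECTIONS
{
	. = 0x100000;		/* hypothetical kernel base */

	.text : { *(.text) }

	RO_DATA(8192)		/* was RODATA, hard-wired to ALIGN(4096) */

	.data : { *(.data) }
}

Existing ports need no change, since RODATA now simply expands to RO_DATA(4096).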