author     Dmitry Torokhov <dtor_core@ameritech.net>    2005-09-09 20:14:47 -0500
committer  Dmitry Torokhov <dtor_core@ameritech.net>    2005-09-09 20:14:47 -0500
commit     d344c5e0856ad03278d8700b503762dbc8b86e12 (patch)
tree       a6d893a643470a3c2580a58f3228a55fa1fd1d82 /include/asm-um
parent     010988e888a0abbe7118635c1b33d049caae6b29 (diff)
parent     87fc767b832ef5a681a0ff9d203c3289bc3be2bf (diff)
Manual merge with Linus
Diffstat (limited to 'include/asm-um')
-rw-r--r--  include/asm-um/auxvec.h          |  4
-rw-r--r--  include/asm-um/futex.h           | 53
-rw-r--r--  include/asm-um/hdreg.h           |  6
-rw-r--r--  include/asm-um/mmu_context.h     | 10
-rw-r--r--  include/asm-um/page.h            | 16
-rw-r--r--  include/asm-um/pgalloc.h         | 12
-rw-r--r--  include/asm-um/pgtable-2level.h  | 27
-rw-r--r--  include/asm-um/pgtable-3level.h  | 45
-rw-r--r--  include/asm-um/pgtable.h         | 54
9 files changed, 129 insertions(+), 98 deletions(-)
diff --git a/include/asm-um/auxvec.h b/include/asm-um/auxvec.h
new file mode 100644
index 00000000000..1e5e1c2fc9b
--- /dev/null
+++ b/include/asm-um/auxvec.h
@@ -0,0 +1,4 @@
+#ifndef __UM_AUXVEC_H
+#define __UM_AUXVEC_H
+
+#endif
diff --git a/include/asm-um/futex.h b/include/asm-um/futex.h
new file mode 100644
index 00000000000..2cac5ecd9d0
--- /dev/null
+++ b/include/asm-um/futex.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret, tem;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ case FUTEX_OP_ADD:
+ case FUTEX_OP_OR:
+ case FUTEX_OP_ANDN:
+ case FUTEX_OP_XOR:
+ default:
+ ret = -ENOSYS;
+ }
+
+ dec_preempt_count();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+#endif
+#endif
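
For illustration, a minimal standalone sketch of how the encoded_op word is unpacked by the new futex_atomic_op_inuser() above; the FUTEX_OP() packing macro and the op/cmp values used here are assumptions based on the usual <linux/futex.h> layout and are not part of this commit:

/*
 * Illustration only -- not part of the commit.  Mirrors the shifts used in
 * futex_atomic_op_inuser() above; the FUTEX_OP() packing is an assumption
 * based on the usual <linux/futex.h> layout.
 */
#include <stdio.h>

#define FUTEX_OP_ADD	1	/* assumed values, for the demo only */
#define FUTEX_OP_CMP_EQ	0

#define FUTEX_OP(op, oparg, cmp, cmparg) \
	((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24) | \
	 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

int main(void)
{
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0);

	int op     = (encoded_op >> 28) & 7;	/* operation (ADD)           */
	int cmp    = (encoded_op >> 24) & 15;	/* comparison (CMP_EQ)       */
	int oparg  = (encoded_op << 8) >> 20;	/* 12-bit arg, sign-extended */
	int cmparg = (encoded_op << 20) >> 20;	/* 12-bit arg, sign-extended */

	/* bit 31 (FUTEX_OP_OPARG_SHIFT) would turn oparg into 1 << oparg */
	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
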
diff --git a/include/asm-um/hdreg.h b/include/asm-um/hdreg.h
deleted file mode 100644
index cf6363abcab..00000000000
--- a/include/asm-um/hdreg.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_HDREG_H
-#define __UM_HDREG_H
-
-#include "asm/arch/hdreg.h"
-
-#endif
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index 095bb627b96..2edb4f1f789 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -20,7 +20,15 @@ extern void force_flush_all(void);
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
- if (old != new)
+ /*
+ * This is called by fs/exec.c and fs/aio.c. In the first case, for an
+ * exec, we don't need to do anything as we're called from userspace
+ * and thus going to use a new host PID. In the second, we're called
+ * from a kernel thread, and thus need to go doing the mmap's on the
+ * host. Since they're very expensive, we want to avoid that as far as
+ * possible.
+ */
+ if (old != new && (current->flags & PF_BORROWED_MM))
force_flush_all();
}
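
For illustration, a small userspace toy of the predicate added above: the host remapping is only forced when a kernel thread has borrowed a user mm (the fs/aio.c case the comment mentions), and skipped on exec. The flag value and the struct/function names below are assumptions made for the demo, not the real UML or aio code:

/*
 * Illustration only -- not part of the commit.
 */
#include <stdio.h>

#define PF_BORROWED_MM 0x1	/* stand-in value, illustration only */

struct mm_struct   { int id; };
struct task_struct { unsigned long flags; };

static void force_flush_all(void)
{
	puts("  force_flush_all(): redo the host mmaps (expensive)");
}

static void activate_mm(struct task_struct *tsk,
			struct mm_struct *old, struct mm_struct *new)
{
	if (old != new && (tsk->flags & PF_BORROWED_MM))
		force_flush_all();
	else
		puts("  nothing to do");
}

int main(void)
{
	struct mm_struct a = { 1 }, b = { 2 };
	struct task_struct exec_caller = { .flags = 0 };
	struct task_struct aio_kthread = { .flags = PF_BORROWED_MM };

	puts("exec (userspace caller, no PF_BORROWED_MM):");
	activate_mm(&exec_caller, &a, &b);

	puts("aio (kernel thread borrowing the mm):");
	activate_mm(&aio_kthread, &a, &b);
	return 0;
}
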
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index f58aedadeb4..bd850a24918 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -116,24 +116,12 @@ extern void *to_virt(unsigned long phys);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
-/* Pure 2^n version of get_order */
-static __inline__ int get_order(unsigned long size)
-{
- int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
-}
-
extern struct page *arch_validate(struct page *page, int mask, int order);
#define HAVE_ARCH_VALIDATE
extern void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
+#include <asm-generic/page.h>
+
#endif
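
For illustration, the get_order() loop removed above (now picked up from <asm-generic/page.h>) computes the smallest order such that (1 << order) pages cover the requested size; a standalone sketch of the same loop, assuming PAGE_SHIFT = 12 purely for the demo:

/*
 * Illustration only -- not part of the commit.
 */
#include <stdio.h>

#define PAGE_SHIFT 12		/* assumed for the demo */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* 1 page -> order 0, just over 1 page -> order 1, 4 pages -> order 2 */
	printf("%d %d %d\n", get_order(PAGE_SIZE),
	       get_order(PAGE_SIZE + 1), get_order(4 * PAGE_SIZE));
	return 0;
}
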
diff --git a/include/asm-um/pgalloc.h b/include/asm-um/pgalloc.h
index 8fcb2fc0a89..ea49411236d 100644
--- a/include/asm-um/pgalloc.h
+++ b/include/asm-um/pgalloc.h
@@ -42,11 +42,13 @@ static inline void pte_free(struct page *pte)
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#ifdef CONFIG_3_LEVEL_PGTABLES
-/*
- * In the 3-level case we free the pmds as part of the pgd.
- */
-#define pmd_free(x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
+
+extern __inline__ void pmd_free(pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#endif
#define check_pgt_cache() do { } while (0)
diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h
index 9b3abc01d60..ffe017f6b64 100644
--- a/include/asm-um/pgtable-2level.h
+++ b/include/asm-um/pgtable-2level.h
@@ -35,35 +35,8 @@
static inline int pgd_newpage(pgd_t pgd) { return 0; }
static inline void pgd_mkuptodate(pgd_t pgd) { }
-#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-static inline pte_t pte_mknewprot(pte_t pte)
-{
- pte_val(pte) |= _PAGE_NEWPROT;
- return(pte);
-}
-
-static inline pte_t pte_mknewpage(pte_t pte)
-{
- pte_val(pte) |= _PAGE_NEWPAGE;
- return(pte);
-}
-
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
-{
- /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- * mapped pages.
- */
- *pteptr = pte_mknewpage(pteval);
- if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_none(x) !(pte_val(x) & ~_PAGE_NEWPAGE)
#define pte_pfn(x) phys_to_pfn(pte_val(x))
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h
index 65e8bfc55fc..786c2572728 100644
--- a/include/asm-um/pgtable-3level.h
+++ b/include/asm-um/pgtable-3level.h
@@ -57,35 +57,6 @@ static inline int pgd_newpage(pgd_t pgd)
static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
-
-#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-static inline pte_t pte_mknewprot(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPROT);
- return(pte);
-}
-
-static inline pte_t pte_mknewpage(pte_t pte)
-{
- pte_set_bits(pte, _PAGE_NEWPAGE);
- return(pte);
-}
-
-static inline void set_pte(pte_t *pteptr, pte_t pteval)
-{
- pte_copy(*pteptr, pteval);
-
- /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
- * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
- * mapped pages.
- */
-
- *pteptr = pte_mknewpage(*pteptr);
- if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
#define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -98,14 +69,11 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
return pmd;
}
-static inline void pmd_free(pmd_t *pmd){
- free_page((unsigned long) pmd);
+extern inline void pud_clear (pud_t *pud)
+{
+ set_pud(pud, __pud(0));
}
-#define __pmd_free_tlb(tlb,x) do { } while (0)
-
-static inline void pud_clear (pud_t * pud) { }
-
#define pud_page(pud) \
((struct page *) __va(pud_val(pud) & PAGE_MASK))
@@ -113,13 +81,6 @@ static inline void pud_clear (pud_t * pud) { }
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
pmd_index(address))
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-static inline int pte_none(pte_t pte)
-{
- return pte_is_zero(pte);
-}
-
static inline unsigned long pte_pfn(pte_t pte)
{
return phys_to_pfn(pte_val(pte));
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index a8804092031..b48e0966ecd 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -16,13 +16,15 @@
#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
-#define _PAGE_NEWPROT 0x004
-#define _PAGE_FILE 0x008 /* set:pagecache unset:swap */
-#define _PAGE_PROTNONE 0x010 /* If not present */
+#define _PAGE_NEWPROT 0x004
#define _PAGE_RW 0x020
#define _PAGE_USER 0x040
#define _PAGE_ACCESSED 0x080
#define _PAGE_DIRTY 0x100
+/* If _PAGE_PRESENT is clear, we use these: */
+#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
+ pte_present gives true */
#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
@@ -151,10 +153,24 @@ extern unsigned long pg0[1024];
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
+#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
#define phys_addr(p) ((p) & ~REGION_MASK)
+#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
+
+/*
+ * =================================
+ * Flags checking section.
+ * =================================
+ */
+
+static inline int pte_none(pte_t pte)
+{
+ return pte_is_zero(pte);
+}
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -210,6 +226,18 @@ static inline int pte_newprot(pte_t pte)
return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}
+/*
+ * =================================
+ * Flags setting section.
+ * =================================
+ */
+
+static inline pte_t pte_mknewprot(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_NEWPROT);
+ return(pte);
+}
+
static inline pte_t pte_rdprotect(pte_t pte)
{
pte_clear_bits(pte, _PAGE_USER);
@@ -278,6 +306,26 @@ static inline pte_t pte_mkuptodate(pte_t pte)
return(pte);
}
+static inline pte_t pte_mknewpage(pte_t pte)
+{
+ pte_set_bits(pte, _PAGE_NEWPAGE);
+ return(pte);
+}
+
+static inline void set_pte(pte_t *pteptr, pte_t pteval)
+{
+ pte_copy(*pteptr, pteval);
+
+ /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
+ * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
+ * mapped pages.
+ */
+
+ *pteptr = pte_mknewpage(*pteptr);
+ if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
+}
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
extern phys_t page_to_phys(struct page *page);
/*