author | Jeff Garzik <jgarzik@pobox.com> | 2005-10-28 12:31:34 -0400 |
committer | Jeff Garzik <jgarzik@pobox.com> | 2005-10-28 12:31:34 -0400 |
commit | 96b88fb850cc419171f926ad04650ec509e9f543 (patch) |
tree | 5bf9537bde467534608b3acdbfa5f7726ede8c3f /mm/hugetlb.c |
parent | e78a57de94480226f7fc90d0b4837bfc6c99a9e0 (diff) |
parent | 5fadd053d9bb4345ec6f405d24db4e7eb49cf81e (diff) |
Merge branch 'master'
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 22 |
1 file changed, 22 insertions, 0 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459..61d38067803 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully.  Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
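For context, the new hugetlb_fault() only takes effect once the generic fault path dispatches hugetlb VMAs to it rather than returning VM_FAULT_SIGBUS for them outright; that caller-side change lives in mm/memory.c and so does not appear in a diffstat limited to mm/hugetlb.c. Below is a minimal sketch of that dispatch, assuming the 2.6.14-era handle_mm_fault() signature; the placement and surrounding lines are an assumption for illustration, not part of this diff.

/*
 * Sketch of the assumed caller side in mm/memory.c: hugetlb VMAs have no
 * ordinary ptes to walk, so the fault entry point hands them straight to
 * hugetlb_fault() with the same (mm, vma, address, write_access) arguments.
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		    unsigned long address, int write_access)
{
	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, write_access);

	/* ... the usual pgd/pud/pmd/pte allocation and walk follows ... */
	return VM_FAULT_MINOR;	/* placeholder for the elided normal path */
}

On the handler side, the hunk above then only has to distinguish a still-present huge pte (stale TLB entry, report VM_FAULT_MINOR) from a missing one (mapping truncated since mmap, report VM_FAULT_SIGBUS), which is why a single huge_pte_offset() lookup under page_table_lock is sufficient.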