-rw-r--r--  arch/x86/include/asm/pgtable.h   5
-rw-r--r--  arch/x86/mm/pat.c               30
-rw-r--r--  include/linux/mm.h               3
-rw-r--r--  mm/memory.c                     43
4 files changed, 11 insertions(+), 70 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 579f8ceee94..2aa792bbd7e 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -230,11 +230,6 @@ static inline unsigned long pte_pfn(pte_t pte)
 	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
-static inline u64 pte_pa(pte_t pte)
-{
-	return pte_val(pte) & PTE_PFN_MASK;
-}
-
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
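
Note: pte_pa() can go because it duplicates information already available through the generic pte_pfn() accessor kept just above it. A one-line equivalent, as a sketch (not part of the patch):

	/* equivalent of the removed pte_pa(pte), via the generic helper */
	u64 paddr = (u64)pte_pfn(pte) << PAGE_SHIFT;
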
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index d5254bae84f..541bcc944a5 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -685,8 +685,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	int retval = 0;
 	unsigned long i, j;
 	u64 paddr;
-	pgprot_t prot;
-	pte_t pte;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -696,26 +695,22 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 
 	if (is_linear_pfn_mapping(vma)) {
 		/*
-		 * reserve the whole chunk starting from vm_pgoff,
-		 * But, we have to get the protection from pte.
+		 * reserve the whole chunk covered by vma. We need the
+		 * starting address and protection from pte.
 		 */
-		if (follow_pfnmap_pte(vma, vma_start, &pte)) {
+		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
-			return -1;
+			return -EINVAL;
 		}
-		prot = pte_pgprot(pte);
-		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
 	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
-		prot = pte_pgprot(pte);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
 		if (retval)
 			goto cleanup_ret;
 	}
@@ -724,10 +719,9 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 cleanup_ret:
 	/* Reserve error: Cleanup partial reservation and return error */
 	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_pfnmap_pte(vma, vma_start + j, &pte))
+		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
 			continue;
 
-		paddr = pte_pa(pte);
 		free_pfn_range(paddr, PAGE_SIZE);
 	}
 
@@ -797,6 +791,7 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 {
 	unsigned long i;
 	u64 paddr;
+	unsigned long prot;
 	unsigned long vma_start = vma->vm_start;
 	unsigned long vma_end = vma->vm_end;
 	unsigned long vma_size = vma_end - vma_start;
@@ -821,12 +816,9 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	} else {
 		/* free entire vma, page by page, using the pfn from pte */
 		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			pte_t pte;
-
-			if (follow_pfnmap_pte(vma, vma_start + i, &pte))
+			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
 				continue;
 
-			paddr = pte_pa(pte);
 			free_pfn_range(paddr, PAGE_SIZE);
 		}
 	}
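
For reference, follow_phys() returns 0 on success and fills in the protection bits and the physical address, so each call site above collapses the old follow_pfnmap_pte() + pte_pa() + pte_pgprot() triple into a single call. A minimal caller sketch (variable names illustrative, not from the patch):

	unsigned long prot;
	u64 paddr;

	/* flags == 0: plain lookup; fails if no pte is present */
	if (follow_phys(vma, addr, 0, &prot, &paddr))
		return -EINVAL;
	/* paddr/prot now describe the mapping at addr */
	return reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
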
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2f6e2f886d4..36f9b3fa5e1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1239,9 +1239,6 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
 
-int follow_pfnmap_pte(struct vm_area_struct *vma,
-			unsigned long address, pte_t *ret_ptep);
-
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
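
With the follow_pfnmap_pte() prototype gone, pat.c relies on follow_phys(), which an earlier patch in this series left declared in this header; its signature at this point looks roughly like the following (quoted from memory, worth double-checking against the tree):

	int follow_phys(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags, unsigned long *prot,
			resource_size_t *phys);
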
diff --git a/mm/memory.c b/mm/memory.c
index 79f28e35d4f..6b29f39a5a3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1168,49 +1168,6 @@ no_page_table:
 	return page;
 }
 
-int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
-	pte_t *ret_ptep)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-	spinlock_t *ptl;
-	struct page *page;
-	struct mm_struct *mm = vma->vm_mm;
-
-	if (!is_pfn_mapping(vma))
-		goto err;
-
-	page = NULL;
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto err;
-
-	pud = pud_offset(pgd, address);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto err;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto err;
-
-	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-	pte = *ptep;
-	if (!pte_present(pte))
-		goto err_unlock;
-
-	*ret_ptep = pte;
-	pte_unmap_unlock(ptep, ptl);
-	return 0;
-
-err_unlock:
-	pte_unmap_unlock(ptep, ptl);
-err:
-	return -EINVAL;
-}
-
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
 {
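
Why the removal is safe: follow_phys() performs essentially the same page-table walk as the deleted helper, but converts the pte to a (prot, phys) pair before dropping the pte lock, so no caller ever has to hold or interpret a raw pte. A simplified sketch of that logic, assuming the 2.6.28-era four-level walk (not the kernel's verbatim code):

	static int follow_phys_sketch(struct vm_area_struct *vma,
			unsigned long address, unsigned long *prot, u64 *phys)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep, pte;
		spinlock_t *ptl;
		struct mm_struct *mm = vma->vm_mm;

		/* only meaningful for VM_IO/VM_PFNMAP style mappings */
		if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
			return -EINVAL;

		pgd = pgd_offset(mm, address);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			return -EINVAL;

		pud = pud_offset(pgd, address);
		if (pud_none(*pud) || unlikely(pud_bad(*pud)))
			return -EINVAL;

		pmd = pmd_offset(pud, address);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			return -EINVAL;

		ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
		pte = *ptep;
		if (!pte_present(pte)) {
			pte_unmap_unlock(ptep, ptl);
			return -EINVAL;
		}

		/* convert the pte to (prot, phys) while still under the lock */
		*prot = pgprot_val(pte_pgprot(pte));
		*phys = (u64)pte_pfn(pte) << PAGE_SHIFT;
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}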