Skip to content

Commit

Permalink
ioremap: rework pXd_free_pYd_page() API
Browse files Browse the repository at this point in the history
The recently merged API for ensuring break-before-make on page-table
entries when installing huge mappings in the vmalloc/ioremap region is
fairly counter-intuitive, resulting in the arch freeing functions (e.g.
pmd_free_pte_page()) being called even on entries that aren't present.
This resulted in a minor bug in the arm64 implementation, giving rise to
spurious VM_WARN messages.

This patch moves the pXd_present() checks out into the core code,
refactoring the callsites at the same time so that we avoid the complex
conjunctions when determining whether or not we can put down a huge
mapping.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
Reviewed-by: Toshi Kani <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: Chintan Pandya <[email protected]>
Cc: Toshi Kani <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Sean Christopherson <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
wildea01 authored and torvalds committed Dec 28, 2018
1 parent c16eb00 commit d239865
Showing 1 changed file with 42 additions and 14 deletions.
56 changes: 42 additions & 14 deletions lib/ioremap.c
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,25 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
return 0;
}

/*
 * Try to install a huge (PMD-sized) mapping for [addr, end).
 *
 * Returns nonzero if a huge mapping was installed, 0 if the caller must
 * fall back to mapping the range with PTEs.  A huge mapping is only
 * attempted when the arch supports it, the range is exactly one PMD in
 * size, and the physical address is suitably aligned.  If an entry is
 * already present, the arch must first tear down the old PTE table
 * (break-before-make); refusing that also forces the PTE fallback.
 */
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled() ||
	    (end - addr) != PMD_SIZE ||
	    !IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	/* Only call the arch free hook for live entries. */
	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
Expand All @@ -89,20 +108,34 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
do {
next = pmd_addr_end(addr, end);

if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr + addr, prot))
continue;

if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}

/*
 * Try to install a huge (PUD-sized) mapping for [addr, end).
 *
 * Returns nonzero if a huge mapping was installed, 0 if the caller must
 * fall back to mapping the range with PMDs.  A huge mapping is only
 * attempted when the arch supports it, the range is exactly one PUD in
 * size, and the physical address is suitably aligned.  If an entry is
 * already present, the arch must first tear down the old PMD table
 * (break-before-make); refusing that also forces the PMD fallback.
 */
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled() ||
	    (end - addr) != PUD_SIZE ||
	    !IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	/* Only call the arch free hook for live entries. */
	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
Expand All @@ -116,13 +149,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
do {
next = pud_addr_end(addr, end);

if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
if (ioremap_try_huge_pud(pud, addr, next, phys_addr + addr, prot))
continue;

if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
return -ENOMEM;
Expand Down

0 comments on commit d239865

Please sign in to comment.