commit d05f0cd (parent b43ae21)
Author: Hugh Dickins
Committer: Linus Torvalds (torvalds)

mm: fix crashes from mbind() merging vmas

In v2.6.34 commit 9d8cebd ("mm: fix mbind vma merge problem") introduced
vma merging to mbind(), but it should have also changed the convention of
passing start vma from queue_pages_range() (formerly check_range()) to
new_vma_page(): vma merging may have already freed that structure,
resulting in BUG at mm/mempolicy.c:1738 and probably worse crashes.

Fixes: 9d8cebd ("mm: fix mbind vma merge problem")
Reported-by: Naoya Horiguchi <[email protected]>
Tested-by: Naoya Horiguchi <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: <[email protected]> [2.6.34+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
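The crash is a classic stale-pointer pattern: the old code captured a vma pointer before mbind_range() could merge (and free) vmas, then handed that pointer on to the migration callback. Below is a minimal userspace sketch of the pattern, not kernel code: toy_vma, toy_mm, toy_find_vma and toy_merge are hypothetical stand-ins for vm_area_struct, mm_struct, find_vma() and vma_merge(), and the addresses are made up. It shows why re-deriving the vma from the start address after merging, as this patch does, is safe where the cached pointer is not.

/* Toy model of the bug: caching a pointer to a node that a later
 * "merge" frees, versus re-looking it up by address afterwards.
 * Error checks omitted for brevity. */
#include <stdio.h>
#include <stdlib.h>

struct toy_vma {
	unsigned long vm_start, vm_end;
	struct toy_vma *vm_next;
};

struct toy_mm {
	struct toy_vma *mmap;	/* sorted singly-linked list, like mm->mmap */
};

/* Stand-in for find_vma(): first vma with vm_end > addr. */
static struct toy_vma *toy_find_vma(struct toy_mm *mm, unsigned long addr)
{
	struct toy_vma *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (addr < vma->vm_end)
			return vma;
	return NULL;
}

/* Stand-in for vma_merge() absorbing vma->vm_next into vma:
 * the second struct is freed, so any cached pointer to it dangles. */
static void toy_merge(struct toy_vma *vma)
{
	struct toy_vma *next = vma->vm_next;

	vma->vm_end = next->vm_end;
	vma->vm_next = next->vm_next;
	free(next);
}

int main(void)
{
	struct toy_vma *b = malloc(sizeof(*b));
	struct toy_vma *a = malloc(sizeof(*a));
	struct toy_mm mm = { a };

	*b = (struct toy_vma){ .vm_start = 0x2000, .vm_end = 0x3000 };
	*a = (struct toy_vma){ .vm_start = 0x1000, .vm_end = 0x2000,
			       .vm_next = b };

	/* Old convention: cache the vma pointer up front... */
	struct toy_vma *cached = toy_find_vma(&mm, 0x2800);	/* == b */

	toy_merge(a);		/* frees b: 'cached' now dangles */
	(void)cached;		/* dereferencing it here is the crash */

	/* New convention: carry only the address, re-lookup after merging. */
	struct toy_vma *vma = toy_find_vma(&mm, 0x2800);
	printf("re-lookup: [0x%lx, 0x%lx)\n", vma->vm_start, vma->vm_end);

	free(a);
	return 0;
}

The patch applies exactly this change of convention: migrate_pages() now carries the start address, and new_page() re-derives the vma with find_vma(current->mm, start).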

1 file changed, 20 insertions(+), 26 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static struct vm_area_struct *
+static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
-
+	int err = 0;
+	struct vm_area_struct *vma, *prev;
 
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
 	prev = NULL;
-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
 		unsigned long endvma = vma->vm_end;
 
 		if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 			if (!vma->vm_next && vma->vm_end < end)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 			if (prev && prev->vm_end < vma->vm_start)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 		}
 
 		if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 			err = queue_pages_pgd_range(vma, start, endvma, nodes,
 						flags, private);
-			if (err) {
-				first = ERR_PTR(err);
+			if (err)
 				break;
-			}
 		}
 next:
 		prev = vma;
 	}
-	return first;
+	return err;
 }
 
 /*
@@ -1156,16 +1153,17 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
 		     unsigned short mode, unsigned short mode_flags,
 		     nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1271,21 +1268,18 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = queue_pages_range(mm, start, end, nmask,
+	err = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-
-	err = PTR_ERR(vma);	/* maybe ... */
-	if (!IS_ERR(vma))
+	if (!err)
 		err = mbind_range(mm, start, end, new);
 
 	if (!err) {
 		int nr_failed = 0;
 
 		if (!list_empty(&pagelist)) {
 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-					NULL, (unsigned long)vma,
-					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+			nr_failed = migrate_pages(&pagelist, new_page, NULL,
+				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 			if (nr_failed)
 				putback_movable_pages(&pagelist);
 		}
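A smaller cleanup rides along: queue_pages_range() used to return a struct vm_area_struct * with errors encoded in the pointer, forcing do_mbind() into the awkward "err = PTR_ERR(vma); /* maybe ... */" dance; returning a plain int removes that, so callers can simply test "if (!err)". For reference, a simplified sketch of the kernel's pointer-error idiom; the real, slightly more elaborate definitions live in include/linux/err.h:

/* Simplified from include/linux/err.h: kernel errno values (1..4095)
 * are folded into the top page of the address space, a range no valid
 * pointer can occupy, so a single return value carries both cases. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}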
