@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static struct vm_area_struct *
+static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		const nodemask_t *nodes, unsigned long flags, void *private)
 {
-	int err;
-	struct vm_area_struct *first, *vma, *prev;
-
+	int err = 0;
+	struct vm_area_struct *vma, *prev;
 
-	first = find_vma(mm, start);
-	if (!first)
-		return ERR_PTR(-EFAULT);
+	vma = find_vma(mm, start);
+	if (!vma)
+		return -EFAULT;
 	prev = NULL;
-	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
 		unsigned long endvma = vma->vm_end;
 
 		if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 			if (!vma->vm_next && vma->vm_end < end)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 			if (prev && prev->vm_end < vma->vm_start)
-				return ERR_PTR(-EFAULT);
+				return -EFAULT;
 		}
 
 		if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
 			err = queue_pages_pgd_range(vma, start, endvma, nodes,
 						flags, private);
-			if (err) {
-				first = ERR_PTR(err);
+			if (err)
 				break;
-			}
 		}
 next:
 		prev = vma;
 	}
-	return first;
+	return err;
 }
 
 /*
@@ -1156,16 +1153,17 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
 		 unsigned short mode, unsigned short mode_flags,
 		 nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1271,21 +1268,18 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = queue_pages_range(mm, start, end, nmask,
+	err = queue_pages_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-
-	err = PTR_ERR(vma);	/* maybe ... */
-	if (!IS_ERR(vma))
+	if (!err)
 		err = mbind_range(mm, start, end, new);
 
 	if (!err) {
 		int nr_failed = 0;
 
 		if (!list_empty(&pagelist)) {
 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-					NULL, (unsigned long)vma,
-					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+			nr_failed = migrate_pages(&pagelist, new_page, NULL,
+				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 			if (nr_failed)
 				putback_movable_pages(&pagelist);
 		}
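Note on the rationale, read from the diff itself rather than any commit message: queue_pages_range() used to return the first vma so do_mbind() could hand that pointer to new_vma_page() through migrate_pages(). But mbind_range() runs in between, and it may merge or split vmas, so the saved pointer can be stale by the time the allocation callback dereferences it. The patch therefore makes queue_pages_range() return a plain 0/-EFAULT status and passes the start address to migrate_pages(), letting new_page() re-resolve a live vma via find_vma(current->mm, start). Below is a minimal sketch of the patched control flow, condensed from the hunks above; flag checks, locking, and error paths are elided, so it is not standalone-compilable kernel code:

	/*
	 * Core of do_mbind() after the patch, simplified from the diff.
	 * Identifiers are the kernel's own; setup and teardown are elided.
	 */
	err = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);
	if (!err)
		err = mbind_range(mm, start, end, new);	/* may merge vmas */

	if (!err && !list_empty(&pagelist)) {
		/*
		 * Pass the address, not a vma pointer: a vma captured
		 * before mbind_range() may no longer exist, whereas
		 * new_page() re-resolves one via find_vma(current->mm, start).
		 */
		nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
		if (nr_failed)
			putback_movable_pages(&pagelist);
	}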