mm, vmalloc: constify allocation mask
tmp_mask in the __vmalloc_area_node() iteration never changes so it can
be moved into function scope and marked with const.  This causes the
movl and orl to only be done once per call rather than area->nr_pages
times.

nested_gfp can also be marked const.

Signed-off-by: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
rientjes authored and torvalds committed Aug 7, 2014
1 parent 660654f commit 930f036
Showing 1 changed file with 4 additions and 4 deletions.
mm/vmalloc.c (4 additions, 4 deletions)

@@ -1566,7 +1566,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	const int order = 0;
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
-	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
 
 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
@@ -1589,12 +1590,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
-		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
 		if (node == NUMA_NO_NODE)
-			page = alloc_page(tmp_mask);
+			page = alloc_page(alloc_mask);
 		else
-			page = alloc_pages_node(node, tmp_mask, order);
+			page = alloc_pages_node(node, alloc_mask, order);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
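For readers outside the kernel tree, here is a minimal user-space sketch of the pattern the commit message describes: hoisting a loop-invariant mask computation into a const local so the OR is evaluated once per call rather than once per loop pass. The FLAG_* values and the fill() helper are invented for illustration only; just the shape of the change mirrors __vmalloc_area_node().

/*
 * Illustrative user-space sketch, not kernel code.  FLAG_NOWARN stands
 * in for __GFP_NOWARN, and the const local plays the role of alloc_mask.
 */
#include <stdio.h>

#define FLAG_BASE   0x01u
#define FLAG_NOWARN 0x80u

static void fill(unsigned int base_mask, unsigned int nr_pages)
{
	/*
	 * Computed once per call; before the patch the equivalent OR sat
	 * inside the loop body and was redone on every iteration.
	 */
	const unsigned int alloc_mask = base_mask | FLAG_NOWARN;

	for (unsigned int i = 0; i < nr_pages; i++)
		printf("page %u: mask 0x%x\n", i, alloc_mask);
}

int main(void)
{
	fill(FLAG_BASE, 4);
	return 0;
}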
