[PATCH] ia64 uncached alloc
This patch contains the ia64 uncached page allocator and the generic
allocator (genalloc).  The uncached allocator was formerly part of the SN2
mspec driver, but since it has several other users it has been split off
from the driver.

The generic allocator can be used by device drivers to manage special memory,
e.g. on-board device memory.  It is based on the allocator from the
sym53c8xx_2 driver.

Various users on ia64 need uncached memory.  The SGI SN architecture requires
it for inter-partition communication within a large NUMA cluster; the specific
user for this is the XPC code.  Another use is large MPI-style applications,
which use it for synchronization.  On SN this can be done with special
'fetchop' operations, but regular uncached memory serves the same purpose on
non-SN hardware.  The performance gain of doing this through uncached rather
than cached memory is substantial.  This is handled by the mspec driver, which
I will push out in a separate patch.
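
For illustration, here is a minimal sketch of how such a client might use the
interface this patch adds (prototypes are in include/asm-ia64/uncached.h
below); everything other than uncached_alloc_page() and uncached_free_page()
is hypothetical:

#include <linux/errno.h>
#include <asm/uncached.h>       /* uncached_alloc_page(), uncached_free_page() */

/* Hypothetical client: obtain one uncached page on node 'nid' for a
 * shared synchronization word, touch it, and give it back. */
static int example_sync_word(int nid)
{
        unsigned long uc_addr;

        uc_addr = uncached_alloc_page(nid);     /* 0 means allocation failed */
        if (!uc_addr)
                return -ENOMEM;

        /* uc_addr is a kernel virtual address in the uncached identity
         * mapping, so stores to it bypass the processor caches. */
        *(volatile unsigned long *)uc_addr = 0;

        uncached_free_page(uc_addr);
        return 0;
}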

Rather than creating an allocator specific to uncached memory, I came up with
genalloc, a general-purpose allocator that device drivers and other subsystems
can use as they please, for instance to handle on-board device memory.  It was
derived from the sym53c8xx_2 driver's allocator, which is also an example of a
potential user (I am refraining from modifying sym2 right now as it seems to
have been under fairly heavy development recently).
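
As a sketch of the genalloc interface introduced here (declarations in
include/linux/genalloc.h below) applied to that on-board memory case: the pool
name, the sizes and the no-growth callback are assumptions for illustration,
while the calling pattern mirrors how uncached.c creates, seeds and draws from
its per-node pools.

#include <linux/errno.h>
#include <linux/genalloc.h>

/* Hypothetical: the device window is fixed, so when the pool runs dry we
 * report "no new chunk" by returning 0, just as uncached_get_new_chunk()
 * does once it reaches its granule limit. */
static unsigned long card_no_new_chunk(struct gen_pool *poolp)
{
        return 0;
}

static struct gen_pool *card_pool;

/* Seed the pool with an already-mapped on-board memory window of 'size'
 * bytes at kernel virtual address 'vaddr', in 4 KiB pieces, the same way
 * uncached_build_memmap() seeds the uncached pool with spill pages. */
static int __init card_pool_init(unsigned long vaddr, unsigned long size)
{
        unsigned long p;

        card_pool = gen_pool_create(0, 12 /* max chunk = 4 KiB */,
                                    &card_no_new_chunk, 0);
        if (!card_pool)
                return -ENOMEM;

        for (p = vaddr; p < vaddr + size; p += 4096)
                gen_pool_free(card_pool, p, 4096);

        return 0;
}

/* Consumers then carve pieces out of the window and hand them back: */
static unsigned long card_buf_get(int size)
{
        return gen_pool_alloc(card_pool, size);
}

static void card_buf_put(unsigned long addr, int size)
{
        gen_pool_free(card_pool, addr, size);
}

The uncached allocator below uses the same calls but supplies a real
get_new_chunk callback, so its pools can grow a granule at a time.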

On ia64, memory attributes are managed per granule, i.e. it isn't safe to
access memory as uncached within a granule that is also being accessed in
cached mode.  The regular system therefore doesn't utilize memory in the lower
granules, which is mixed in with PAL code and other firmware areas.  The
uncached driver walks the EFI memmap, pulls out these spill uncached pages and
sticks them into the uncached pool.  Only after these chunks have been used up
will it start converting regular cached memory into uncached memory.  Hence
the EFI-related code additions.
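
The cached and uncached views of the same physical memory are simply two
identity mappings at different offsets.  A one-line sketch of the conversion
relied on by uncached_get_new_chunk() and uncached_build_memmap() below (the
helper name is made up; the headers are the ones uncached.c already includes):

#include <asm/page.h>           /* PAGE_OFFSET */
#include <asm/system.h>         /* __IA64_UNCACHED_OFFSET (as in uncached.c) */

/* Hypothetical helper: turn a cached identity-mapped kernel virtual address
 * into its uncached alias, the same arithmetic the patch uses after
 * converting a granule. */
static inline unsigned long cached_to_uncached(void *cached_vaddr)
{
        return (unsigned long)cached_vaddr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
}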

Signed-off-by: Jes Sorensen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Jes Sorensen authored and Linus Torvalds committed Jun 22, 2005
1 parent 2caaad4 commit f14f75b
Showing 9 changed files with 530 additions and 0 deletions.
4 changes: 4 additions & 0 deletions arch/ia64/Kconfig
@@ -50,6 +50,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
        bool
        default y

config IA64_UNCACHED_ALLOCATOR
        bool
        select GENERIC_ALLOCATOR

choice
        prompt "System type"
        default IA64_GENERIC
1 change: 1 addition & 0 deletions arch/ia64/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
mca_recovery-y += mca_drv.o mca_drv_asm.o

# The gate DSO image is built using a special linker script.
32 changes: 32 additions & 0 deletions arch/ia64/kernel/efi.c
@@ -409,6 +409,38 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
}
}

/*
 * Walk the EFI memory map to pull out leftover pages in the lower
 * memory regions which do not end up in the regular memory map and
 * stick them into the uncached allocator
 *
 * The regular walk function is significantly more complex than the
 * uncached walk which means it really doesn't make sense to try and
 * merge the two.
 */
void __init
efi_memmap_walk_uc (efi_freemem_callback_t callback)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size, start, end;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (md->attribute == EFI_MEMORY_UC) {
                        start = PAGE_ALIGN(md->phys_addr);
                        end = PAGE_ALIGN((md->phys_addr+(md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
                        if ((*callback)(start, end, NULL) < 0)
                                return;
                }
        }
}


/*
* Look for the PAL_CODE region reported by EFI and maps it using an
* ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
246 changes: 246 additions & 0 deletions arch/ia64/kernel/uncached.c
@@ -0,0 +1,246 @@
/*
 * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones
 * at a granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>

#define DEBUG 0

#if DEBUG
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif

void __init efi_memmap_walk_uc (efi_freemem_callback_t callback);

#define MAX_UNCACHED_GRANULES 5
static int allocated_granules;

struct gen_pool *uncached_pool[MAX_NUMNODES];


static void uncached_ipi_visibility(void *data)
{
        int status;

        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if ((status != PAL_VISIBILITY_OK) &&
            (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
                printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
                       "CPU %i\n", status, get_cpu());
}


static void uncached_ipi_mc_drain(void *data)
{
        int status;
        status = ia64_pal_mc_drain();
        if (status)
                printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
                       "CPU %i\n", status, get_cpu());
}


static unsigned long
uncached_get_new_chunk(struct gen_pool *poolp)
{
        struct page *page;
        void *tmp;
        int status, i;
        unsigned long addr, node;

        if (allocated_granules >= MAX_UNCACHED_GRANULES)
                return 0;

        node = poolp->private;
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
                                IA64_GRANULE_SHIFT-PAGE_SHIFT);

        dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
                page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);

        /*
         * Do magic if no mem on local node! XXX
         */
        if (!page)
                return 0;
        tmp = page_address(page);

        /*
         * There's a small race here where it's possible for someone to
         * access the page through /dev/mem halfway through the conversion
         * to uncached - not sure it's really worth bothering about
         */
        for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
                SetPageUncached(&page[i]);

        flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);

        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);

        dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
                status, get_cpu());

        if (!status) {
                status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
                if (status)
                        printk(KERN_WARNING "smp_call_function failed for "
                               "uncached_ipi_visibility! (%i)\n", status);
        }

        if (ia64_platform_is("sn2"))
                sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
        else
                flush_icache_range((unsigned long)tmp,
                                   (unsigned long)tmp+IA64_GRANULE_SIZE);

        ia64_pal_mc_drain();
        status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
        if (status)
                printk(KERN_WARNING "smp_call_function failed for "
                       "uncached_ipi_mc_drain! (%i)\n", status);

        addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

        allocated_granules++;
        return addr;
}


/*
 * uncached_alloc_page
 *
 * Allocate 1 uncached page. Allocates on the requested node. If no
 * uncached pages are available on the requested node, round-robin starting
 * with higher nodes.
 */
unsigned long
uncached_alloc_page(int nid)
{
        unsigned long maddr;

        maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);

        dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
                maddr, nid);

        /*
         * If no memory is available on our local node, try the
         * remaining nodes in the system.
         */
        if (!maddr) {
                int i;

                for (i = MAX_NUMNODES - 1; i >= 0; i--) {
                        if (i == nid || !node_online(i))
                                continue;
                        maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
                        dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
                                "returns %lx on node %i\n", maddr, i);
                        if (maddr) {
                                break;
                        }
                }
        }

        return maddr;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * Free a single uncached page.
 */
void
uncached_free_page(unsigned long maddr)
{
        int node;

        node = nasid_to_cnodeid(NASID_GET(maddr));

        dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);

        if ((maddr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
                panic("uncached_free_page invalid address %lx\n", maddr);

        gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap,
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init
uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
{
        long length;
        unsigned long vstart, vend;
        int node;

        length = end - start;
        vstart = start + __IA64_UNCACHED_OFFSET;
        vend = end + __IA64_UNCACHED_OFFSET;

        dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);

        memset((char *)vstart, 0, length);

        node = nasid_to_cnodeid(NASID_GET(start));

        for (; vstart < vend ; vstart += PAGE_SIZE) {
                dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart);
                gen_pool_free(uncached_pool[node], vstart, PAGE_SIZE);
        }

        return 0;
}


static int __init uncached_init(void) {
        int i;

        for (i = 0; i < MAX_NUMNODES; i++) {
                if (!node_online(i))
                        continue;
                uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
                                                   &uncached_get_new_chunk, i);
        }

        efi_memmap_walk_uc(uncached_build_memmap);

        return 0;
}

__initcall(uncached_init);
12 changes: 12 additions & 0 deletions include/asm-ia64/uncached.h
@@ -0,0 +1,12 @@
/*
 * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Prototypes for the uncached page allocator
 */

extern unsigned long uncached_alloc_page(int nid);
extern void uncached_free_page(unsigned long);
40 changes: 40 additions & 0 deletions include/linux/genalloc.h
@@ -0,0 +1,40 @@
/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory
 * etc.
 *
 * This code is based on the buddy allocator found in the sym53c8xx_2
 * driver, adapted for general purpose use.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/spinlock.h>

#define ALLOC_MIN_SHIFT 5 /* 32 bytes minimum */
/*
 * Link between free memory chunks of a given size.
 */
struct gen_pool_link {
        struct gen_pool_link *next;
};

/*
 * Memory pool descriptor.
 */
struct gen_pool {
        spinlock_t lock;
        unsigned long (*get_new_chunk)(struct gen_pool *);
        struct gen_pool *next;
        struct gen_pool_link *h;
        unsigned long private;
        int max_chunk_shift;
};

unsigned long gen_pool_alloc(struct gen_pool *poolp, int size);
void gen_pool_free(struct gen_pool *mp, unsigned long ptr, int size);
struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
                                 unsigned long (*fp)(struct gen_pool *),
                                 unsigned long data);
6 changes: 6 additions & 0 deletions lib/Kconfig
@@ -39,6 +39,12 @@ config ZLIB_INFLATE
config ZLIB_DEFLATE
        tristate

#
# Generic allocator support is selected if needed
#
config GENERIC_ALLOCATOR
        boolean

#
# reed solomon support is select'ed if needed
#
1 change: 1 addition & 0 deletions lib/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o

obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
lib/genalloc.c (diff not rendered on this page)
