From 7aa6ba41362a7f888ad11fdcfe51ca8d92226cd3 Mon Sep 17 00:00:00 2001
From: Jes Sorensen
Date: Fri, 17 Feb 2006 05:18:43 -0500
Subject: [IA64-SGI] SN2-XP reduce kmalloc wrapper inlining

Take advantage of kzalloc() as well as reduce the size of code generated
for the error returns in xpc_setup_infrastructure().

Signed-off-by: Jes Sorensen
Acked-by: Dean Nelson
Signed-off-by: Tony Luck
---
 arch/ia64/sn/kernel/xpc_partition.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 88a730e6cfdb..94211429fd0c 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -80,6 +80,31 @@ char ____cacheline_aligned
 	xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES];
 
 
+/*
+ * Guarantee that the kmalloc'd memory is cacheline aligned.
+ */
+static void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+	/* see if kmalloc will give us cacheline aligned memory by default */
+	*base = kmalloc(size, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+		return *base;
+	}
+	kfree(*base);
+
+	/* nope, we'll have to do it ourselves */
+	*base = kmalloc(size + L1_CACHE_BYTES, flags);
+	if (*base == NULL) {
+		return NULL;
+	}
+	return (void *) L1_CACHE_ALIGN((u64) *base);
+}
+
+
 /*
  * Given a nasid, get the physical address of the partition's reserved page
  * for that nasid. This function returns 0 on any error.
@@ -1038,13 +1063,12 @@ xpc_discovery(void)
 
 	remote_vars = (struct xpc_vars *) remote_rp;
 
-	discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
+	discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words,
 							GFP_KERNEL);
 	if (discovered_nasids == NULL) {
 		kfree(remote_rp_base);
 		return;
 	}
-	memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);
 
 	rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
 
-- 
cgit v1.2.1
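
A note on the wrapper added above: the caller works with the aligned pointer the
function returns, but the raw pointer handed back through *base is what must
eventually be passed to kfree(), since the aligned pointer may sit up to
L1_CACHE_BYTES - 1 bytes into the allocation. Likewise, the kzalloc() change in
xpc_discovery() works because kzalloc(n, flags) is kmalloc(n, flags) followed by
zeroing, which is why the separate memset() can go. The userspace sketch below
mirrors the same alignment technique with malloc(); it is a minimal illustrative
analogue, not part of the patch, and the 128-byte line size and the
malloc_cacheline_aligned() name are assumptions made for the demo.

	/* Userspace analogue of xpc_kmalloc_cacheline_aligned(): over-allocate
	 * by one cache line when the allocator's default alignment is not
	 * enough, and report the raw pointer separately so it can be freed. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	#define L1_CACHE_BYTES	128UL	/* assumed line size, illustrative only */
	#define L1_CACHE_ALIGN(x) \
		(((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

	static void *
	malloc_cacheline_aligned(size_t size, void **base)
	{
		/* see if malloc gives us cacheline aligned memory by default */
		*base = malloc(size);
		if (*base == NULL)
			return NULL;
		if ((uintptr_t) *base == L1_CACHE_ALIGN((uintptr_t) *base))
			return *base;
		free(*base);

		/* nope, over-allocate and align the result by hand */
		*base = malloc(size + L1_CACHE_BYTES);
		if (*base == NULL)
			return NULL;
		return (void *) L1_CACHE_ALIGN((uintptr_t) *base);
	}

	int main(void)
	{
		void *base;
		char *buf = malloc_cacheline_aligned(1024, &base);

		if (buf == NULL)
			return 1;
		printf("aligned: %p  raw: %p\n", (void *) buf, base);
		free(base);	/* free the raw pointer, never the aligned one */
		return 0;
	}

The kernel function follows the same caller-frees-base contract; keeping it as a
plain static function in xpc_partition.c rather than inlining it at every call
site is what reduces the generated code, per the subject line.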