X86_64: Fold percpu area into the cpu area. Use boot_cpu_alloc() to allocate a cpu area chunk large enough to hold the statically declared per-cpu data, then point the per_cpu_offset pointers into the cpu area instead. Signed-off-by: Christoph Lameter --- arch/x86/kernel/setup64.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) Index: linux-2.6/arch/x86/kernel/setup64.c =================================================================== --- linux-2.6.orig/arch/x86/kernel/setup64.c 2007-11-16 07:03:05.262416451 -0800 +++ linux-2.6/arch/x86/kernel/setup64.c 2007-11-16 14:51:11.133680928 -0800 @@ -87,35 +87,29 @@ __setup("noexec32=", nonx32_setup); void __init setup_per_cpu_areas(void) { int i; - unsigned long size; + char *base; #ifdef CONFIG_HOTPLUG_CPU prefill_possible_map(); #endif /* Copy section for each CPU (we discard the original) */ - size = PERCPU_ENOUGH_ROOM; + base = boot_cpu_alloc(PERCPU_ENOUGH_ROOM); + if (!base) + panic("Cannot allocate cpu data\n"); - printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size); + printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", + PERCPU_ENOUGH_ROOM); for_each_cpu_mask (i, cpu_possible_map) { - char *ptr; + cpu_pda(i)->data_offset = CPU_PTR(base, i) - __per_cpu_start; - if (!NODE_DATA(cpu_to_node(i))) { - printk("cpu with no node %d, num_online_nodes %d\n", - i, num_online_nodes()); - ptr = alloc_bootmem_pages(size); - } else { - ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size); - } - if (!ptr) - panic("Cannot allocate cpu data for CPU %d\n", i); - cpu_pda(i)->data_offset = ptr - __per_cpu_start; - memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + memcpy(CPU_PTR(base, i), __per_cpu_start, __per_cpu_end - __per_cpu_start); } -} + count_vm_events(CPU_BYTES, PERCPU_ENOUGH_ROOM); +} void pda_init(int cpu) -{ +{ struct x8664_pda *pda = cpu_pda(cpu); /* Setup up data that may be needed in __get_free_pages early */