
Merge branch 'slub/lockless' into for-linus
Conflicts:
	include/linux/slub_def.h
penberg committed Mar 20, 2011
2 parents c53badd + a24c5a0 commit e8c500c
Showing 25 changed files with 505 additions and 48 deletions.
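
Every architecture hunk below makes the same mechanical change: the PERCPU and PERCPU_VADDR linker-script macros gain a leading cacheline-size argument (L1_CACHE_BYTES, SMP_CACHE_BYTES, an explicit 32, and so on), which the generic per-cpu output section uses to align its cacheline-sensitive subsections so that read-mostly per-cpu data does not share a cache line with frequently written per-cpu data. The generic definition lives in include/asm-generic/vmlinux.lds.h, one of the changed files not rendered on this page; the following is only a hedged sketch of the shape of the two-argument form, not the exact merged text:

/* Hedged sketch of PERCPU(cacheline, align) as merged via the percpu tree. */
#define PERCPU(cacheline, align)					\
	. = ALIGN(align);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data..percpu..first)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.data..percpu..page_aligned)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu..readmostly)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	}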
5 changes: 3 additions & 2 deletions arch/alpha/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm/page.h>
 
 OUTPUT_FORMAT("elf64-alpha")
@@ -38,15 +39,15 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
 	__init_end = .;
 	/* Freed after init ends here */
 
 	_data = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 
 	.got : {
 		*(.got)
2 changes: 1 addition & 1 deletion arch/arm/kernel/vmlinux.lds.S
@@ -78,7 +78,7 @@ SECTIONS
 #endif
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 
 #ifndef CONFIG_XIP_KERNEL
 	. = ALIGN(PAGE_SIZE);
2 changes: 1 addition & 1 deletion arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	INIT_DATA_SECTION(16)
-	PERCPU(4)
+	PERCPU(32, 4)
 
 	.exit.data :
 	{
2 changes: 1 addition & 1 deletion arch/cris/kernel/vmlinux.lds.S
@@ -102,7 +102,7 @@ SECTIONS
 #endif
 	__vmlinux_end = .;	/* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 
 	.init.ramfs : {
 		INIT_RAM_FS
2 changes: 1 addition & 1 deletion arch/frv/kernel/vmlinux.lds.S
@@ -37,7 +37,7 @@ SECTIONS
 	_einittext = .;
 
 	INIT_DATA_SECTION(8)
-	PERCPU(4096)
+	PERCPU(L1_CACHE_BYTES, 4096)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
2 changes: 1 addition & 1 deletion arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
 
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
-	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
 	__phys_per_cpu_start = __per_cpu_load;
 	/*
 	 * ensure percpu data fits
2 changes: 1 addition & 1 deletion arch/m32r/kernel/vmlinux.lds.S
@@ -53,7 +53,7 @@ SECTIONS
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
2 changes: 1 addition & 1 deletion arch/mips/kernel/vmlinux.lds.S
@@ -115,7 +115,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
2 changes: 1 addition & 1 deletion arch/mn10300/kernel/vmlinux.lds.S
@@ -70,7 +70,7 @@ SECTIONS
 	.exit.text : { EXIT_TEXT; }
 	.exit.data : { EXIT_DATA; }
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(32, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
2 changes: 1 addition & 1 deletion arch/parisc/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/vmlinux.lds.S
@@ -160,7 +160,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
2 changes: 1 addition & 1 deletion arch/s390/kernel/vmlinux.lds.S
@@ -77,7 +77,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(0x100, PAGE_SIZE)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;		/* freed after init ends here */
 
2 changes: 1 addition & 1 deletion arch/sh/kernel/vmlinux.lds.S
@@ -66,7 +66,7 @@ SECTIONS
 		__machvec_end = .;
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
 	/*
 	 * .exit.text is discarded at runtime, not link time, to deal with
2 changes: 1 addition & 1 deletion arch/sparc/kernel/vmlinux.lds.S
@@ -108,7 +108,7 @@ SECTIONS
 		__sun4v_2insn_patch_end = .;
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
2 changes: 1 addition & 1 deletion arch/tile/kernel/vmlinux.lds.S
@@ -63,7 +63,7 @@ SECTIONS
     *(.init.page)
   } :data =0
   INIT_DATA_SECTION(16)
-  PERCPU(PAGE_SIZE)
+  PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_einitdata) = .;
 
2 changes: 1 addition & 1 deletion arch/um/include/asm/common.lds.S
@@ -42,7 +42,7 @@
 	INIT_SETUP(0)
   }
 
-  PERCPU(32)
+  PERCPU(32, 32)
 
   .initcall.init : {
 	INIT_CALLS
48 changes: 48 additions & 0 deletions arch/x86/include/asm/percpu.h
@@ -451,6 +451,26 @@ do {									\
 #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy = n2;					\
+	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
+		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
+		    : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -480,6 +500,34 @@ do {									\
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction. The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software. The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy;						\
+	alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4,	\
+		       "cmpxchg16b %%gs:(%%rsi)\n\tsetz %0\n\t",	\
+		       X86_FEATURE_CX16,				\
+		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
+		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
+		       "a"(__o1), "d"(__o2));				\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+
+#endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
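
The _double operations above exist for the lockless SLUB fastpath merged from the slub/lockless parent: the allocator keeps a per-cpu (freelist, tid) pair and replaces both words in one shot, so a transaction id that changed underneath signals that an interrupt handler or a migration to another cpu won the race. Below is a hedged C sketch of that pattern; the identifiers (cpu_slab, freelist, tid, get_freepointer, next_tid) are modeled on mm/slub.c from this series, which is not rendered on this page, so treat it as illustrative rather than the merged code:

/* Sketch of a lockless allocation fastpath: no lock, no irq disabling. */
static void *slab_alloc_fastpath_sketch(struct kmem_cache *s)
{
	void *object;
	unsigned long tid;

redo:
	tid = this_cpu_read(s->cpu_slab->tid);	/* bumped on every update */
	object = this_cpu_read(s->cpu_slab->freelist);
	if (unlikely(!object))
		return NULL;			/* real code falls back to a slow path */

	/* Commit (next object, next tid) only if nobody touched the pair. */
	if (unlikely(!irqsafe_cpu_cmpxchg_double(
			s->cpu_slab->freelist, s->cpu_slab->tid,
			object, tid,
			get_freepointer(s, object), next_tid(tid))))
		goto redo;			/* lost the race: retry from the top */

	return object;
}

The point of the design is that the cmpxchg either commits both words or fails and the code retries, which is what lets the fastpath drop the irq disable/enable pair of the older implementation.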
4 changes: 2 additions & 2 deletions arch/x86/kernel/vmlinux.lds.S
@@ -230,7 +230,7 @@ SECTIONS
 	 * output PHDR, so the next output section - .init.text - should
 	 * start another segment - init.
 	 */
-	PERCPU_VADDR(0, :percpu)
+	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 #endif
 
 	INIT_TEXT_SECTION(PAGE_SIZE)
@@ -305,7 +305,7 @@ SECTIONS
 	}
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-	PERCPU(THREAD_SIZE)
+	PERCPU(INTERNODE_CACHE_BYTES, THREAD_SIZE)
 #endif
 
 	. = ALIGN(PAGE_SIZE);
1 change: 1 addition & 0 deletions arch/x86/lib/Makefile
@@ -42,4 +42,5 @@ else
         lib-y += memmove_64.o memset_64.o
         lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
         lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o
+        lib-y += cmpxchg16b_emu.o
 endif
59 changes: 59 additions & 0 deletions arch/x86/lib/cmpxchg16b_emu.S
@@ -0,0 +1,59 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+.text
+
+/*
+ * Inputs:
+ * %rsi : memory location to compare
+ * %rax : low 64 bits of old value
+ * %rdx : high 64 bits of old value
+ * %rbx : low 64 bits of new value
+ * %rcx : high 64 bits of new value
+ * %al  : Operation successful
+ */
+ENTRY(this_cpu_cmpxchg16b_emu)
+CFI_STARTPROC
+
+#
+# Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
+# via the ZF. Caller will access %al to get result.
+#
+# Note that this is only useful for a cpuops operation. Meaning that we
+# do *not* have a fully atomic operation but just an operation that is
+# *atomic* on a single cpu (as provided by the this_cpu_xx class of
+# macros).
+#
+this_cpu_cmpxchg16b_emu:
+	pushf
+	cli
+
+	cmpq %gs:(%rsi), %rax
+	jne not_same
+	cmpq %gs:8(%rsi), %rdx
+	jne not_same
+
+	movq %rbx, %gs:(%rsi)
+	movq %rcx, %gs:8(%rsi)
+
+	popf
+	mov $1, %al
+	ret
+
+not_same:
+	popf
+	xor %al,%al
+	ret
+
+CFI_ENDPROC
+
+ENDPROC(this_cpu_cmpxchg16b_emu)
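
For reference, the routine above behaves roughly like the following C sketch: with interrupts disabled on the local cpu, compare both halves of the 16-byte per-cpu location and store the new pair only if both match. local_irq_save/restore stand in for pushf/cli/popf, the %gs-relative per-cpu addressing is elided, and, as the file's own comment says, the result is atomic only against code running on the same cpu:

/* Hedged C sketch of this_cpu_cmpxchg16b_emu; not kernel source. */
static int cmpxchg16b_emu_sketch(unsigned long *pcp,
				 unsigned long o_lo, unsigned long o_hi,
				 unsigned long n_lo, unsigned long n_hi)
{
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);			/* pushf; cli */
	if (pcp[0] == o_lo && pcp[1] == o_hi) {	/* the two cmpq/jne pairs */
		pcp[0] = n_lo;			/* movq %rbx, %gs:(%rsi) */
		pcp[1] = n_hi;			/* movq %rcx, %gs:8(%rsi) */
		ret = 1;			/* mov $1, %al */
	}
	local_irq_restore(flags);		/* popf */
	return ret;				/* not_same path: xor %al,%al */
}

The alternative_io() user in arch/x86/include/asm/percpu.h patches in the real cmpxchg16b instruction when X86_FEATURE_CX16 is set, so this emulation is only reached on early 64-bit processors that lack the instruction.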
2 changes: 1 addition & 1 deletion arch/xtensa/kernel/vmlinux.lds.S
@@ -155,7 +155,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
-	PERCPU(PAGE_SIZE)
+	PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
 
 	/* We need this dummy segment here */
 
(The remaining changed files in this commit, including include/linux/slub_def.h from the conflict note above, are not rendered on this page.)
