Merge branch 'core/percpu' into x86/core
Ingo Molnar committed Feb 13, 2009
2 parents f8a6b2b + 58105ef commit ab639f3
Showing 51 changed files with 949 additions and 612 deletions.
3 changes: 2 additions & 1 deletion Makefile
@@ -532,8 +532,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
endif

# Force gcc to behave correctly even for buggy distributions
# Arch Makefiles may override this setting
ifndef CONFIG_CC_STACKPROTECTOR
KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
endif

ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
4 changes: 2 additions & 2 deletions arch/ia64/include/asm/percpu.h
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void);

#else /* ! SMP */

#define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu")))

#define per_cpu_init() (__phys_per_cpu_start)

#endif /* SMP */

#define PER_CPU_BASE_SECTION ".data.percpu"

/*
* Be extremely careful when taking the address of this variable! Due to virtual
* remapping, it is different from the canonical address returned by __get_cpu_var(var)!
13 changes: 13 additions & 0 deletions arch/ia64/include/asm/uv/uv.h
@@ -0,0 +1,13 @@
#ifndef _ASM_IA64_UV_UV_H
#define _ASM_IA64_UV_UV_H

#include <asm/system.h>
#include <asm/sn/simulator.h>

static inline int is_uv_system(void)
{
/* temporary support for running on hardware simulator */
return IS_MEDUSA() || ia64_platform_is("uv");
}

#endif /* _ASM_IA64_UV_UV_H */
5 changes: 4 additions & 1 deletion arch/x86/Kconfig
@@ -194,6 +194,10 @@ config X86_TRAMPOLINE
depends on SMP || (64BIT && ACPI_SLEEP)
default y

config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR

config KTIME_SCALAR
def_bool X86_32
source "init/Kconfig"
@@ -1339,7 +1343,6 @@ config CC_STACKPROTECTOR_ALL

config CC_STACKPROTECTOR
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
depends on X86_64
select CC_STACKPROTECTOR_ALL
---help---
This option turns on the -fstack-protector GCC feature. This
17 changes: 10 additions & 7 deletions arch/x86/Makefile
@@ -70,14 +70,17 @@ else
# this works around some issues with generating unwind tables in older gccs
# newer gccs do it by default
KBUILD_CFLAGS += -maccumulate-outgoing-args
endif

stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
"$(CC)" "-fstack-protector -DGCC_HAS_SP" )
stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
"$(CC)" -fstack-protector-all )

KBUILD_CFLAGS += $(stackp-y)
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
stackp-y := -fstack-protector
stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
KBUILD_CFLAGS += $(stackp-y)
else
$(warning stack protector enabled but no compiler support)
endif
endif

# The stack pointer is addressed differently for 32-bit and 64-bit x86
2 changes: 1 addition & 1 deletion arch/x86/include/asm/a.out-core.h
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
savesegment(gs, dump->regs.gs);
dump->regs.gs = get_user_gs(regs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
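The switch from savesegment() to get_user_gs() above reflects the new lazy-%gs split: under CONFIG_X86_32_LAZY_GS the user's %gs is still live in the register, while with stack protector enabled it has been saved into pt_regs on kernel entry. A rough sketch of the accessor pair this series adds to asm/system.h (illustrative; details approximate):

/* sketch only -- the real definitions live in arch/x86/include/asm/system.h */
#ifdef CONFIG_X86_32_LAZY_GS
/* %gs still holds the user value; access the register directly */
#define get_user_gs(regs) \
	(u16)({ unsigned long v; savesegment(gs, v); v; })
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#else
/* %gs was reloaded on kernel entry; the user value lives in pt_regs */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#endif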
15 changes: 13 additions & 2 deletions arch/x86/include/asm/elf.h
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
* now struct_user_regs, they are different)
*/

#define ELF_CORE_COPY_REGS(pr_reg, regs) \
#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
do { \
pr_reg[0] = regs->bx; \
pr_reg[1] = regs->cx; \
@@ -124,7 +124,6 @@ do { \
pr_reg[7] = regs->ds & 0xffff; \
pr_reg[8] = regs->es & 0xffff; \
pr_reg[9] = regs->fs & 0xffff; \
savesegment(gs, pr_reg[10]); \
pr_reg[11] = regs->orig_ax; \
pr_reg[12] = regs->ip; \
pr_reg[13] = regs->cs & 0xffff; \
@@ -133,6 +132,18 @@ do { \
pr_reg[16] = regs->ss & 0xffff; \
} while (0);

#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
pr_reg[10] = get_user_gs(regs); \
} while (0);

#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
do { \
ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
savesegment(gs, pr_reg[10]); \
} while (0);

#define ELF_PLATFORM (utsname()->machine)
#define set_personality_64bit() do { } while (0)

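The two macro variants exist because a user core dump must report the task's userland %gs, which get_user_gs() knows how to find, while ELF_CORE_COPY_KERNEL_REGS (used when dumping kernel state) wants whatever %gs the CPU holds right now. The savesegment() read it keeps is a single register move, roughly (sketch of the macro from asm/system.h):

/* read a segment register without modifying it (approximate definition) */
#define savesegment(seg, value) \
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")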
2 changes: 1 addition & 1 deletion arch/x86/include/asm/mmu_context.h
@@ -79,7 +79,7 @@ do { \
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm) \
do { \
loadsegment(gs, 0); \
lazy_load_gs(0); \
} while (0)
#else
#define deactivate_mm(tsk, mm) \
22 changes: 22 additions & 0 deletions arch/x86/include/asm/percpu.h
@@ -34,6 +34,12 @@
#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var) per_cpu__##var
#endif

#else /* ...!ASSEMBLY */

#include <linux/stringify.h>
@@ -45,6 +51,22 @@
#define __percpu_arg(x) "%" #x
#endif

/*
* Build-time-initialized pointers to per-cpu variables needed by the
* boot processor must use these macros to get an address that is
* properly offset from __per_cpu_load on SMP.
*
* A matching entry must also be present in vmlinux_64.lds.S.
*/
#define DECLARE_INIT_PER_CPU(var) \
extern typeof(per_cpu_var(var)) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var) init_per_cpu__##var
#else
#define init_per_cpu_var(var) per_cpu_var(var)
#endif

/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
extern void __bad_percpu_size(void);
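The init_per_cpu machinery is needed because x86_64 per-cpu symbols become zero-based in this series: until the real per-cpu areas are allocated, the boot CPU must address the load-time copy at per_cpu__var + __per_cpu_load. A hypothetical usage sketch (the linker alias described in the comment is an assumption about what vmlinux_64.lds.S provides):

/* declares: extern ... init_per_cpu__irq_stack_union */
DECLARE_INIT_PER_CPU(irq_stack_union);

static inline void *boot_irq_stack_union(void)
{
	/*
	 * On x86_64 SMP this resolves to init_per_cpu__irq_stack_union,
	 * which the linker script must define as
	 * per_cpu__irq_stack_union + __per_cpu_load; in all other
	 * configs it is simply per_cpu__irq_stack_union itself.
	 */
	return &init_per_cpu_var(irq_stack_union);
}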
6 changes: 6 additions & 0 deletions arch/x86/include/asm/processor.h
@@ -393,8 +393,14 @@ union irq_stack_union {
};

DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
#else /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, stack_canary);
#endif
#endif /* X86_64 */

extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;
4 changes: 2 additions & 2 deletions arch/x86/include/asm/ptrace.h
@@ -28,7 +28,7 @@ struct pt_regs {
int xds;
int xes;
int xfs;
/* int gs; */
int xgs;
long orig_eax;
long eip;
int xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
unsigned long ds;
unsigned long es;
unsigned long fs;
/* int gs; */
unsigned long gs;
unsigned long orig_ax;
unsigned long ip;
unsigned long cs;
9 changes: 8 additions & 1 deletion arch/x86/include/asm/segment.h
@@ -61,7 +61,7 @@
*
* 26 - ESPFIX small SS
* 27 - per-cpu [ offset to per-cpu data area ]
* 28 - unused
* 28 - stack_canary-20 [ for stack protector ]
* 29 - unused
* 30 - unused
* 31 - TSS for double fault handler
@@ -95,6 +95,13 @@
#define __KERNEL_PERCPU 0
#endif

#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16)
#ifdef CONFIG_CC_STACKPROTECTOR
#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8)
#else
#define __KERNEL_STACK_CANARY 0
#endif

#define GDT_ENTRY_DOUBLEFAULT_TSS 31

/*
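The selector arithmetic is easy to verify by hand: assuming GDT_ENTRY_KERNEL_BASE is 12 (the current x86_32 value), the canary descriptor lands in GDT slot 28, matching the table above, and the selector is the index shifted left by three with TI and RPL left at zero. A standalone check, with the constants copied from this diff:

#include <assert.h>

#define GDT_ENTRY_KERNEL_BASE	12	/* assumption: the x86_32 value */
#define GDT_ENTRY_STACK_CANARY	(GDT_ENTRY_KERNEL_BASE + 16)
#define __KERNEL_STACK_CANARY	(GDT_ENTRY_STACK_CANARY * 8)

int main(void)
{
	assert(GDT_ENTRY_STACK_CANARY == 28);	/* "28 - stack_canary-20" above */
	assert(__KERNEL_STACK_CANARY == 0xe0);	/* index << 3, TI = 0 (GDT), RPL = 0 */
	return 0;
}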
96 changes: 91 additions & 5 deletions arch/x86/include/asm/stackprotector.h
@@ -1,8 +1,54 @@
/*
* GCC stack protector support.
*
* Stack protector works by putting a predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function.  The pattern is called the stack canary,
* and unfortunately gcc requires it to be at a fixed offset from %gs.
* On x86_64 the offset is 40 bytes and on x86_32 it is 20 bytes.  The
* two architectures use segment registers differently and thus handle
* this requirement differently.
*
* On x86_64, %gs is shared by the percpu area and the stack canary.
* All percpu symbols are zero-based and %gs points to the base of the
* percpu area.  The first occupant of the percpu area is always
* irq_stack_union, which contains stack_canary at offset 40.  Userland
* %gs is always saved and restored on kernel entry and exit using
* swapgs, so stack protector doesn't add any complexity there.
*
* On x86_32, it's slightly more complicated.  As on x86_64, %gs is
* used for userland TLS.  Unfortunately, some processors are much
* slower at loading a segment register with a different value when
* entering and leaving the kernel, so the kernel uses %fs for the
* percpu area and manages %gs lazily, switching %gs only when
* necessary, usually during task switch.
*
* As gcc requires the stack canary at %gs:20, %gs can't be managed
* lazily if stack protector is enabled, so the kernel saves and
* restores userland %gs on kernel entry and exit.  This behavior is
* controlled by CONFIG_X86_32_LAZY_GS, and accessors are defined in
* system.h to hide the details.
*/

#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H 1

#ifdef CONFIG_CC_STACKPROTECTOR

#include <asm/tsc.h>
#include <asm/processor.h>
#include <asm/percpu.h>
#include <asm/system.h>
#include <asm/desc.h>
#include <linux/random.h>

/*
* 24-byte read-only segment initializer for the stack canary.  The
* linker can't handle the address bit shifting, so the base address
* is set in head_32.S for the boot CPU and by setup_per_cpu_areas()
* for the others.
*/
#define GDT_STACK_CANARY_INIT \
[GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },

/*
* Initialize the stackprotector canary value.
@@ -15,12 +61,9 @@ static __always_inline void boot_init_stack_canary(void)
u64 canary;
u64 tsc;

/*
* Build time only check to make sure the stack_canary is at
* offset 40 in the pda; this is a gcc ABI requirement
*/
#ifdef CONFIG_X86_64
BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);

#endif
/*
* We use both the random pool and the current TSC as a source
* of randomness. The TSC only matters for very early init,
@@ -32,7 +75,50 @@ static __always_inline void boot_init_stack_canary(void)
canary += tsc + (tsc << 32UL);

current->stack_canary = canary;
#ifdef CONFIG_X86_64
percpu_write(irq_stack_union.stack_canary, canary);
#else
percpu_write(stack_canary, canary);
#endif
}

static inline void setup_stack_canary_segment(int cpu)
{
#ifdef CONFIG_X86_32
unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
struct desc_struct desc;

desc = gdt_table[GDT_ENTRY_STACK_CANARY];
desc.base0 = canary & 0xffff;
desc.base1 = (canary >> 16) & 0xff;
desc.base2 = (canary >> 24) & 0xff;
write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
#endif
}

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
#endif
}

#else /* CC_STACKPROTECTOR */

#define GDT_STACK_CANARY_INIT

/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */

static inline void setup_stack_canary_segment(int cpu)
{ }

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
asm volatile ("mov %0, %%gs" : : "r" (0));
#endif
}

#endif /* CC_STACKPROTECTOR */
#endif /* _ASM_STACKPROTECTOR_H */
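
For readers unfamiliar with the mechanism, what -fstack-protector emits around a protected function can be rendered conceptually in C as below. This is illustration only: gcc inlines the sequence, loading the canary with movl %gs:20, reg on x86_32 and movq %gs:40, reg on x86_64, which is exactly why the segment bases set up in this file matter.

#include <string.h>

extern void __stack_chk_fail(void);	/* gcc's failure hook; never returns */
extern unsigned long current_canary(void);	/* stands in for the %gs:20 load */

int demo(const char *src)
{
	unsigned long canary = current_canary();	/* prologue: copy canary into the frame */
	char buf[64];

	strcpy(buf, src);	/* an overflow tramples the canary before the return address */

	if (canary != current_canary())	/* epilogue: re-check before returning */
		__stack_chk_fail();
	return buf[0];
}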
20 changes: 10 additions & 10 deletions arch/x86/include/asm/syscalls.h
@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
/* X86_32 only */
#ifdef CONFIG_X86_32
/* kernel/process_32.c */
asmlinkage int sys_fork(struct pt_regs);
asmlinkage int sys_clone(struct pt_regs);
asmlinkage int sys_vfork(struct pt_regs);
asmlinkage int sys_execve(struct pt_regs);
int sys_fork(struct pt_regs *);
int sys_clone(struct pt_regs *);
int sys_vfork(struct pt_regs *);
int sys_execve(struct pt_regs *);

/* kernel/signal_32.c */
asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
struct old_sigaction __user *);
asmlinkage int sys_sigaltstack(unsigned long);
asmlinkage unsigned long sys_sigreturn(unsigned long);
asmlinkage int sys_rt_sigreturn(unsigned long);
int sys_sigaltstack(struct pt_regs *);
unsigned long sys_sigreturn(struct pt_regs *);
long sys_rt_sigreturn(struct pt_regs *);

/* kernel/ioport.c */
asmlinkage long sys_iopl(unsigned long);
long sys_iopl(struct pt_regs *);

/* kernel/sys_i386_32.c */
asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
@@ -59,8 +59,8 @@ struct oldold_utsname;
asmlinkage int sys_olduname(struct oldold_utsname __user *);

/* kernel/vm86_32.c */
asmlinkage int sys_vm86old(struct pt_regs);
asmlinkage int sys_vm86(struct pt_regs);
int sys_vm86old(struct pt_regs *);
int sys_vm86(struct pt_regs *);

#else /* CONFIG_X86_32 */

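These prototype changes drop the old x86_32 trick of declaring syscalls asmlinkage and passing struct pt_regs by value, which only worked because the saved registers happened to sit on the stack exactly where the callee expected its argument. Converted syscalls now receive an explicit pointer from a small assembly stub. A sketch of a converted body, modeled on sys_vfork in kernel/process_32.c (approximate; do_fork(), CLONE_* and struct pt_regs come from kernel headers):

int sys_vfork(struct pt_regs *regs)
{
	/* regs is an explicit argument set up by a ptregs_* asm stub,
	 * not a by-value copy of the stack frame */
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
		       regs, 0, NULL, NULL);
}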
[diff truncated: the remaining 37 of the 51 changed files are not shown]
