Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 updates from Catalin Marinas:
 "As I mentioned in the last pull request, there's a second batch of
  security updates for arm64 with mitigations for Spectre/v1 and an
  improved one for Spectre/v2 (via a newly defined firmware interface
  API).

  Spectre v1 mitigation:

   - back-end version of array_index_mask_nospec()

   - masking of the syscall number to restrict speculation through the
     syscall table

   - masking of __user pointers prior to dereference in uaccess routines

  Spectre v2 mitigation update:

   - using the new firmware SMC calling convention specification update

   - removing the current PSCI GET_VERSION firmware call mitigation as
     vendors are deploying new SMCCC-capable firmware

   - additional branch predictor hardening for synchronous exceptions
     and interrupts while in user mode

  Meltdown v3 mitigation update:

    - Cavium Thunder X is unaffected but a hardware erratum gets in the
      way. The kernel now starts with the page tables mapped as global
      and switches to non-global if kpti needs to be enabled.

  Other:

   - Theoretical trylock bug fixed"
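
For readers less familiar with Spectre variant 1, the three mitigation bullets
above all follow the same pattern: replace a bounds check that the CPU can
speculate past with a data dependency that clamps the index (or pointer)
itself. Below is a minimal, purely illustrative sketch of the problem and of
the masked fix; the gadget is hypothetical, and only array_index_nospec() (the
generic helper from include/linux/nospec.h in this kernel) is a real interface.

#include <linux/nospec.h>

/*
 * Illustrative Spectre-v1 gadget: the CPU may speculate past the bounds
 * check and use an attacker-controlled idx to index the array, leaking
 * the out-of-bounds value through a cache side channel.
 */
int load_element(const int *array, unsigned long idx, unsigned long nr)
{
	if (idx < nr)
		return array[idx];	/* may run speculatively with idx >= nr */
	return 0;
}

/*
 * Mitigated version: clamp the index with a data dependency so that an
 * out-of-bounds value is forced to zero even under speculation. On arm64
 * this ends up using the array_index_mask_nospec() added further down in
 * this diff.
 */
int load_element_nospec(const int *array, unsigned long idx, unsigned long nr)
{
	if (idx < nr) {
		idx = array_index_nospec(idx, nr);
		return array[idx];
	}
	return 0;
}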

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (38 commits)
  arm64: Kill PSCI_GET_VERSION as a variant-2 workaround
  arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support
  arm/arm64: smccc: Implement SMCCC v1.1 inline primitive
  arm/arm64: smccc: Make function identifiers an unsigned quantity
  firmware/psci: Expose SMCCC version through psci_ops
  firmware/psci: Expose PSCI conduit
  arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
  arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
  arm/arm64: KVM: Turn kvm_psci_version into a static inline
  arm/arm64: KVM: Advertise SMCCC v1.1
  arm/arm64: KVM: Implement PSCI 1.0 support
  arm/arm64: KVM: Add smccc accessors to PSCI code
  arm/arm64: KVM: Add PSCI_VERSION helper
  arm/arm64: KVM: Consolidate the PSCI include files
  arm64: KVM: Increment PC after handling an SMC trap
  arm: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
  arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
  arm64: entry: Apply BP hardening for suspicious interrupts from EL0
  arm64: entry: Apply BP hardening for high-priority synchronous exceptions
  arm64: futex: Mask __user pointers prior to dereference
  ...
torvalds committed Feb 8, 2018
2 parents 846ade7 + 3a0a397 commit c013632
Showing 38 changed files with 1,032 additions and 292 deletions.
7 changes: 7 additions & 0 deletions arch/arm/include/asm/kvm_host.h
@@ -306,4 +306,11 @@ static inline void kvm_fpsimd_flush_cpu_state(void) {}

static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}

static inline bool kvm_arm_harden_branch_predictor(void)
{
/* No way to detect it yet, pretend it is not there. */
return false;
}

#endif /* __ARM_KVM_HOST_H__ */
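
The 32-bit stub above always answers false because, as its comment notes,
there is no way for a 32-bit host to detect branch-predictor hardening yet;
the arm64 version later in this diff keys off the ARM64_HARDEN_BRANCH_PREDICTOR
capability instead. The helper exists so that the shared PSCI/SMCCC code can
tell guests whether ARM_SMCCC_ARCH_WORKAROUND_1 is worth calling. A simplified
sketch of that use follows (the exact handling lives in the PSCI/SMCCC code
updated by this series and is not shown in this excerpt):

#include <linux/arm-smccc.h>
#include <asm/kvm_host.h>

/*
 * Simplified sketch: what a guest probing ARM_SMCCC_ARCH_FEATURES for
 * ARM_SMCCC_ARCH_WORKAROUND_1 should get back from the hypervisor.
 */
static long arch_workaround_1_supported(void)
{
	if (kvm_arm_harden_branch_predictor())
		return SMCCC_RET_SUCCESS;	/* host applies BP hardening */
	return SMCCC_RET_NOT_SUPPORTED;		/* guest should not bother */
}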
27 changes: 0 additions & 27 deletions arch/arm/include/asm/kvm_psci.h

This file was deleted.

17 changes: 13 additions & 4 deletions arch/arm/kvm/handle_exit.c
@@ -21,7 +21,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <kvm/arm_psci.h>
#include <trace/events/kvm.h>

#include "trace.h"
@@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;

ret = kvm_psci_call(vcpu);
ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
kvm_inject_undefined(vcpu);
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}

@@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
/*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
* Trap exception, not a Secure Monitor Call exception [...]"
*
* We need to advance the PC after the trap, as it would
* otherwise return to the same address...
*/
vcpu_set_reg(vcpu, 0, ~0UL);
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}

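Two behavioural points in the hunks above are worth spelling out: an SMC
trapped from a guest now has its PC advanced past the instruction (otherwise
the guest would re-execute the same SMC forever), and both unimplemented SMC
and HVC calls now return ~0UL (i.e. -1, SMCCC "not supported") in r0 instead
of injecting an undefined-instruction exception. That is what lets a guest
probe for the Spectre-v2 workaround safely. Here is a sketch of such a
guest-side probe using the SMCCC v1.1 inline primitive added by this series;
the wrapper function itself is illustrative and assumes the hypervisor has
already advertised SMCCC v1.1:

#include <linux/arm-smccc.h>
#include <linux/types.h>

/*
 * Illustrative guest-side probe. With the fixes above, an older
 * hypervisor that does not know this function ID simply returns -1,
 * so the probe degrades gracefully instead of killing the guest.
 */
static bool have_smccc_arch_workaround_1(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	return (int)res.a0 >= 0;
}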
43 changes: 42 additions & 1 deletion arch/arm64/include/asm/assembler.h
@@ -115,6 +115,24 @@
hint #16
.endm

/*
* Value prediction barrier
*/
.macro csdb
hint #20
.endm

/*
* Sanitise a 64-bit bounded index wrt speculation, returning zero if out
* of bounds.
*/
.macro mask_nospec64, idx, limit, tmp
sub \tmp, \idx, \limit
bic \tmp, \tmp, \idx
and \idx, \idx, \tmp, asr #63
csdb
.endm

/*
* NOP sequence
*/
@@ -514,7 +532,7 @@ alternative_endif
* phys: physical address, preserved
* ttbr: returns the TTBR value
*/
.macro phys_to_ttbr, phys, ttbr
.macro phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
orr \ttbr, \phys, \phys, lsr #46
and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
@@ -523,6 +541,29 @@ alternative_endif
#endif
.endm

.macro phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
/*
* We assume \phys is 64K aligned and this is guaranteed by only
* supporting this configuration with 64K pages.
*/
orr \pte, \phys, \phys, lsr #36
and \pte, \pte, #PTE_ADDR_MASK
#else
mov \pte, \phys
#endif
.endm

.macro pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
ubfiz \phys, \pte, #(48 - 16 - 12), #16
bfxil \phys, \pte, #16, #32
lsl \phys, \phys, #16
#else
and \phys, \pte, #PTE_ADDR_MASK
#endif
.endm

/**
* Errata workaround prior to disable MMU. Insert an ISB immediately prior
* to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
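
The mask_nospec64 macro added above is the assembly-level counterpart of the
bounds clamping described in the pull message: the syscall entry path uses it
to keep a speculated, out-of-range syscall number from indexing past the end
of the syscall table. Because it is pure data processing there is no branch
for the CPU to mispredict. A rough C restatement of the three instructions
(illustrative only; it assumes idx and limit are below 2^63, which holds for
syscall numbers and table sizes):

/*
 * Rough C equivalent of mask_nospec64 (illustrative, not kernel code).
 * Result: idx when 0 <= idx < limit, 0 otherwise, with no conditional
 * branch involved.
 */
static inline unsigned long mask_nospec64_c(unsigned long idx,
					    unsigned long limit)
{
	unsigned long tmp;

	tmp = idx - limit;	/* bit 63 set iff idx < limit...            */
	tmp &= ~idx;		/* ...and iff idx itself has bit 63 clear   */
	return idx & (unsigned long)((long)tmp >> 63);	/* all-ones or zero */
}

The trailing csdb in the real macro is the new barrier (hint #20) that stops
the CPU from running ahead with a predicted value of the mask; the C sketch
has no equivalent for it.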
22 changes: 22 additions & 0 deletions arch/arm64/include/asm/barrier.h
@@ -32,6 +32,7 @@
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")

#define psb_csync() asm volatile("hint #17" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")

#define mb() dsb(sy)
#define rmb() dsb(ld)
@@ -40,6 +41,27 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)

/*
* Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
* and 0 otherwise.
*/
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
unsigned long sz)
{
unsigned long mask;

asm volatile(
" cmp %1, %2\n"
" sbc %0, xzr, xzr\n"
: "=r" (mask)
: "r" (idx), "Ir" (sz)
: "cc");

csdb();
return mask;
}

#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
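
The C-level mask generator above works with the condition flags rather than a
branch: cmp %1, %2 subtracts sz from idx and sets the carry flag only when
idx >= sz, and sbc %0, xzr, xzr then computes 0 - 0 - (1 - C), which is 0 for
idx >= sz and ~0UL for idx < sz; csdb() again keeps the mask itself from being
speculated. The generic consumer looks roughly like this (simplified from
include/linux/nospec.h in this kernel; the real macro adds type and sanity
checks):

/*
 * Simplified shape of the generic wrapper that consumes
 * array_index_mask_nospec(): the mask is ANDed into the index so an
 * out-of-bounds index becomes 0 even on a speculative path.
 */
#define array_index_nospec(index, size)					\
({									\
	typeof(index) _i = (index);					\
	typeof(size) _s = (size);					\
	unsigned long _mask = array_index_mask_nospec(_i, _s);		\
									\
	(typeof(_i))(_i & _mask);					\
})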
9 changes: 6 additions & 3 deletions arch/arm64/include/asm/futex.h
@@ -48,9 +48,10 @@ do { \
} while (0)

static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
int oldval = 0, ret, tmp;
u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);

pagefault_disable();

@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
u32 val, tmp;
u32 __user *uaddr;

if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
return -EFAULT;

uaddr = __uaccess_mask_ptr(_uaddr);
uaccess_enable();
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
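
The futex changes are one instance of the "masking of __user pointers prior to
dereference" item: access_ok() is an ordinary branch and can be speculated
past, so the user pointer is additionally clamped against the thread's address
limit before the uaccess assembly runs. This is also why KERNEL_DS and USER_DS
are defined as all-ones style limits (UL(-1) and TASK_SIZE_64 - 1) in the
processor.h hunk further down: a pointer is in range exactly when it sets no
bits above the limit. The in-tree __uaccess_mask_ptr() is inline assembly
(bics/csel followed by a csdb barrier, so no branch is involved); the
following is only a C sketch of the logic it implements, under that
assumption:

#include <linux/thread_info.h>

/*
 * C sketch of the pointer clamp (illustrative; the real helper is
 * branch-free inline asm). Any pointer with address bits above the
 * current limit collapses to NULL, so a speculative user access can
 * never be steered at a kernel address.
 */
static inline void __user *mask_user_ptr_sketch(void __user *ptr)
{
	unsigned long limit = current_thread_info()->addr_limit;
	unsigned long addr = (unsigned long)ptr;

	return (addr & ~limit) ? NULL : ptr;
}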
12 changes: 2 additions & 10 deletions arch/arm64/include/asm/kernel-pgtable.h
@@ -123,16 +123,8 @@
/*
* Initial memory map attributes.
*/
#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG)
#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
#else
#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS
#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS
#endif
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
6 changes: 6 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
@@ -415,4 +415,10 @@ static inline void kvm_arm_vhe_guest_exit(void)
{
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

static inline bool kvm_arm_harden_branch_predictor(void)
{
return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

#endif /* __ARM64_KVM_HOST_H__ */
27 changes: 0 additions & 27 deletions arch/arm64/include/asm/kvm_psci.h

This file was deleted.

30 changes: 14 additions & 16 deletions arch/arm64/include/asm/pgtable-prot.h
@@ -37,13 +37,11 @@
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG)
#else
#define PROT_DEFAULT _PROT_DEFAULT
#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)

#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)

#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -55,22 +53,22 @@
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG)
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT

#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)

#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
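
The kernel-pgtable.h and pgtable-prot.h hunks implement the "Meltdown v3
mitigation update" item: the nG (non-global) bit used to be compiled into the
kernel's page-table attributes whenever CONFIG_UNMAP_KERNEL_AT_EL0 was set,
but a Cavium ThunderX erratum requires the kernel to be able to boot with
global mappings and only move to non-global ones when kpti is actually needed,
hence the runtime PTE_MAYBE_NG/PMD_MAYBE_NG above and the unconditional
(global) SWAPPER_*_FLAGS earlier in the diff. The predicate they rely on looks
roughly like this (reproduced from asm/mmu.h from memory, so treat it as
approximate):

/*
 * Approximate definition: the capability is set only when kpti is in
 * use, so PTE_MAYBE_NG/PMD_MAYBE_NG evaluate to 0 on systems that keep
 * the kernel mapped while running at EL0.
 */
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}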
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/processor.h
@@ -21,6 +21,9 @@

#define TASK_SIZE_64 (UL(1) << VA_BITS)

#define KERNEL_DS UL(-1)
#define USER_DS (TASK_SIZE_64 - 1)

#ifndef __ASSEMBLY__

/*
4 changes: 2 additions & 2 deletions arch/arm64/include/asm/spinlock.h
@@ -87,8 +87,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
" cbnz %w1, 1f\n"
" add %w1, %w0, %3\n"
" casa %w0, %w1, %2\n"
" and %w1, %w1, #0xffff\n"
" eor %w1, %w1, %w0, lsr #16\n"
" sub %w1, %w1, %3\n"
" eor %w1, %w1, %w0\n"
"1:")
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
: "I" (1 << TICKET_SHIFT)
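
The spinlock.h hunk is the "theoretical trylock bug fixed" item from the pull
message. With LSE atomics, casa writes the value it actually observed in
memory back into %w0. The old success test compared the owner field of the
expected lock value with the next field of whatever casa observed, which could
in principle report success even though the compare-and-swap itself had
failed; the new test subtracts the ticket increment back off and checks that
casa really saw the expected unlocked value. A C11 model of the corrected
logic (purely illustrative; the field layout and names are simplifications of
the arm64 ticket lock):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TICKET_SHIFT	16	/* "next" ticket lives in the top halfword */

/*
 * Model of the fixed trylock: succeed only if the CAS actually
 * installed our incremented ticket, i.e. the value observed in memory
 * equals the unlocked value we started from.
 */
static bool ticket_trylock_model(_Atomic uint32_t *lock)
{
	uint32_t lockval = atomic_load_explicit(lock, memory_order_relaxed);
	uint32_t newval = lockval + (1u << TICKET_SHIFT);
	uint32_t expected = lockval;

	/* already held: next != owner */
	if ((uint16_t)(lockval >> TICKET_SHIFT) != (uint16_t)lockval)
		return false;

	/* like casa: on failure, 'expected' receives the observed value */
	atomic_compare_exchange_strong_explicit(lock, &expected, newval,
						memory_order_acquire,
						memory_order_relaxed);

	/* fixed check: (newval - increment) == value the CAS observed */
	return (newval - (1u << TICKET_SHIFT)) == expected;
}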