From 63fb30e4b4415455d47b3da5a19d79c12f4f2d1f Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Wed, 8 Mar 2023 13:00:40 -0600
Subject: [PATCH] Merge pull request from GHSA-ff4p-7xrq-q5r8

* x64: Remove incorrect `amode_add` lowering rules

This commit removes two incorrect rules that were part of the x64
backend's computation of addressing modes. These two rules folded a
zero-extended 32-bit computation into the address mode operand, but
this isn't correct: the 32-bit computation should be truncated to
32 bits, yet when folded into the address mode computation it is
performed with 64-bit operands, so the truncation doesn't happen.

* Add release notes

---
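Note (not part of the commit message): below is a minimal Rust sketch of the
truncation issue described above. The function and constant names are invented
for illustration and none of this is Cranelift code; it only demonstrates why
the 32-bit shift must wrap before zero-extension rather than being performed
with 64-bit operands once folded into a 64-bit addressing mode.

// Illustration of the bug: `base + uextend(i32.shl(index, 3))` must wrap the
// shift to 32 bits *before* zero-extending to 64 bits. Folding the shift into
// a 64-bit addressing mode performs it with 64-bit operands, so bits that
// should have been discarded survive into the effective address.

/// Correct semantics: 32-bit shift (wrapping), then zero-extension.
fn shifted_index_correct(index: u32) -> u64 {
    index.wrapping_shl(3) as u64
}

/// What the removed `amode_add` rules effectively computed: zero-extend
/// first, then shift with 64-bit operands, with no 32-bit truncation.
fn shifted_index_folded(index: u32) -> u64 {
    (index as u64) << 3
}

fn main() {
    // Any index with one of its top three bits set makes the two diverge,
    // because the shift pushes those bits out of the 32-bit range.
    let index = 0x2000_0001_u32;
    assert_eq!(shifted_index_correct(index), 0x0000_0008);
    assert_eq!(shifted_index_folded(index), 0x1_0000_0008);
    println!(
        "correct = {:#x}, folded = {:#x}",
        shifted_index_correct(index),
        shifted_index_folded(index)
    );
}

In the removed rules the value standing in for `index` is guest-controlled,
which is why the advisory classifies this as a guest-controlled out-of-bounds
read/write: the surviving high bits become part of the effective address.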
 RELEASES.md                             | 42 +++++++++++++++++++
 cranelift/codegen/src/isa/x64/inst.isle | 14 -------
 .../filetests/isa/x64/amode-opt.clif    | 16 ++++---
 3 files changed, 52 insertions(+), 20 deletions(-)

diff --git a/RELEASES.md b/RELEASES.md
index 1a7b151fb1b6..7b557ccfa5a9 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -20,6 +20,20 @@ Unreleased.
 
 --------------------------------------------------------------------------------
 
+## 6.0.1
+
+Released 2023-03-08.
+
+### Fixed
+
+* Guest-controlled out-of-bounds read/write on x86\_64
+  [GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
+
+* Miscompilation of `i8x16.select` with the same inputs on x86\_64
+  [GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
+
+--------------------------------------------------------------------------------
+
 ## 6.0.0
 
 Released 2023-02-20
@@ -74,6 +88,20 @@ Released 2023-02-20
 
 --------------------------------------------------------------------------------
 
+## 5.0.1
+
+Released 2023-03-08.
+
+### Fixed
+
+* Guest-controlled out-of-bounds read/write on x86\_64
+  [GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
+
+* Miscompilation of `i8x16.select` with the same inputs on x86\_64
+  [GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
+
+--------------------------------------------------------------------------------
+
 ## 5.0.0
 
 Released 2023-01-20
@@ -123,6 +151,20 @@ Released 2023-01-20
 
 --------------------------------------------------------------------------------
 
+## 4.0.1
+
+Released 2023-03-08.
+
+### Fixed
+
+* Guest-controlled out-of-bounds read/write on x86\_64
+  [GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
+
+* Miscompilation of `i8x16.select` with the same inputs on x86\_64
+  [GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
+
+--------------------------------------------------------------------------------
+
 ## 4.0.0
 
 Released 2022-12-20
diff --git a/cranelift/codegen/src/isa/x64/inst.isle b/cranelift/codegen/src/isa/x64/inst.isle
index 993b4984d7f6..5293d7071a33 100644
--- a/cranelift/codegen/src/isa/x64/inst.isle
+++ b/cranelift/codegen/src/isa/x64/inst.isle
@@ -1063,20 +1063,6 @@
 (rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (ishl index (iconst (uimm8 shift))))
       (if (u32_lteq (u8_as_u32 shift) 3))
       (Amode.ImmRegRegShift off base index shift flags))
-(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (uextend (ishl index (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base (extend_to_gpr index $I64 (ExtendKind.Zero)) shift flags))
-
-;; Same, but with a uextend of a shift of a 32-bit add. This is valid
-;; because we know our lowering of a narrower-than-64-bit `iadd` will
-;; always write the full register width, so we can effectively ignore
-;; the `uextend` and look through it to the `ishl`.
-;;
-;; Priority 3 to avoid conflict with the previous rule.
-(rule 3 (amode_add (Amode.ImmReg off (valid_reg base) flags)
-                   (uextend (ishl index @ (iadd _ _) (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base index shift flags))
 
 ;; -- Case 4 (absorbing constant offsets).
 ;;
diff --git a/cranelift/filetests/filetests/isa/x64/amode-opt.clif b/cranelift/filetests/filetests/isa/x64/amode-opt.clif
index 2de94832630f..eef1d2893313 100644
--- a/cranelift/filetests/filetests/isa/x64/amode-opt.clif
+++ b/cranelift/filetests/filetests/isa/x64/amode-opt.clif
@@ -209,8 +209,9 @@ block0(v0: i64, v1: i32):
 ; pushq %rbp
 ; movq %rsp, %rbp
 ; block0:
-; movl %esi, %ecx
-; movq -1(%rdi,%rcx,8), %rax
+; movq %rsi, %rdx
+; shll $3, %edx, %edx
+; movq -1(%rdi,%rdx,1), %rax
 ; movq %rbp, %rsp
 ; popq %rbp
 ; ret
@@ -220,8 +221,9 @@ block0(v0: i64, v1: i32):
 ; pushq %rbp
 ; movq %rsp, %rbp
 ; block1: ; offset 0x4
-; movl %esi, %ecx
-; movq -1(%rdi, %rcx, 8), %rax ; trap: heap_oob
+; movq %rsi, %rdx
+; shll $3, %edx
+; movq -1(%rdi, %rdx), %rax ; trap: heap_oob
 ; movq %rbp, %rsp
 ; popq %rbp
 ; retq
@@ -244,7 +246,8 @@ block0(v0: i64, v1: i32, v2: i32):
 ; block0:
 ; movq %rsi, %r8
 ; addl %r8d, %edx, %r8d
-; movq -1(%rdi,%r8,4), %rax
+; shll $2, %r8d, %r8d
+; movq -1(%rdi,%r8,1), %rax
 ; movq %rbp, %rsp
 ; popq %rbp
 ; ret
@@ -256,7 +259,8 @@ block0(v0: i64, v1: i32, v2: i32):
 ; block1: ; offset 0x4
 ; movq %rsi, %r8
 ; addl %edx, %r8d
-; movq -1(%rdi, %r8, 4), %rax ; trap: heap_oob
+; shll $2, %r8d
+; movq -1(%rdi, %r8), %rax ; trap: heap_oob
 ; movq %rbp, %rsp
 ; popq %rbp
 ; retq