diff --git a/compiler/rustc_codegen_gcc/.github/workflows/ci.yml b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml index 8ebdabe82610e..d2b7724a2215f 100644 --- a/compiler/rustc_codegen_gcc/.github/workflows/ci.yml +++ b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml @@ -4,36 +4,72 @@ on: - push - pull_request +permissions: + contents: read + +env: + # Enable backtraces for easier debugging + RUST_BACKTRACE: 1 + jobs: build: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: - libgccjit_version: ["libgccjit.so", "libgccjit_without_int128.so", "libgccjit12.so"] + libgccjit_version: + - { gcc: "libgccjit.so", extra: "", env_extra: "", artifacts_branch: "master" } + - { gcc: "libgccjit_without_int128.so", extra: "", env_extra: "", artifacts_branch: "master-without-128bit-integers" } + - { gcc: "libgccjit12.so", extra: "--no-default-features", env_extra: "TEST_FLAGS='-Cpanic=abort -Zpanic-abort-tests'", artifacts_branch: "gcc12" } + commands: [ + "--mini-tests", + "--std-tests", + # FIXME: re-enable asm tests when GCC can emit in the right syntax. + # "--asm-tests", + "--test-libcore", + "--extended-rand-tests", + "--extended-regex-example-tests", + "--extended-regex-tests", + "--test-successful-rustc --nb-parts 2 --current-part 0", + "--test-successful-rustc --nb-parts 2 --current-part 1", + "--test-failing-rustc", + ] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: llvm/llvm-project path: llvm - name: Install packages - run: sudo apt-get install ninja-build ripgrep + # `llvm-14-tools` is needed to install the `FileCheck` binary which is used for asm tests. + run: sudo apt-get install ninja-build ripgrep llvm-14-tools + + - name: Install libgccjit12 + if: matrix.libgccjit_version.gcc == 'libgccjit12.so' + run: sudo apt-get install libgccjit-12-dev - name: Download artifact + if: matrix.libgccjit_version.gcc != 'libgccjit12.so' uses: dawidd6/action-download-artifact@v2 with: workflow: main.yml - name: ${{ matrix.libgccjit_version }} + name: ${{ matrix.libgccjit_version.gcc }} path: gcc-build repo: antoyo/gcc + branch: ${{ matrix.libgccjit_version.artifacts_branch }} + event: push search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts. - name: Setup path to libgccjit + if: matrix.libgccjit_version.gcc == 'libgccjit12.so' + run: echo /usr/lib/gcc/x86_64-linux-gnu/12 > gcc_path + + - name: Setup path to libgccjit + if: matrix.libgccjit_version.gcc != 'libgccjit12.so' run: | echo $(readlink -f gcc-build) > gcc_path # NOTE: the filename is still libgccjit.so even when the artifact name is different. @@ -48,49 +84,44 @@ jobs: - name: Set RUST_COMPILER_RT_ROOT run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV - # https://github.com/actions/cache/issues/133 - - name: Fixup owner of ~/.cargo/ - # Don't remove the trailing /. It is necessary to follow the symlink. 
- run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/ - - name: Cache cargo installed crates - uses: actions/cache@v1.1.2 + uses: actions/cache@v3 with: path: ~/.cargo/bin key: cargo-installed-crates2-ubuntu-latest - name: Cache cargo registry - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: ~/.cargo/registry key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }} - name: Cache cargo index - uses: actions/cache@v1 + uses: actions/cache@v3 with: path: ~/.cargo/git key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} - name: Cache cargo target dir - uses: actions/cache@v1.1.2 + uses: actions/cache@v3 with: path: target key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }} - - name: Build - if: matrix.libgccjit_version != 'libgccjit12.so' - run: | - ./prepare_build.sh - ./build.sh - cargo test - ./clean_all.sh + #- name: Cache rust repository + ## We only clone the rust repository for rustc tests + #if: ${{ contains(matrix.commands, 'rustc') }} + #uses: actions/cache@v3 + #id: cache-rust-repository + #with: + #path: rust + #key: ${{ runner.os }}-packages-${{ hashFiles('rust/.git/HEAD') }} - name: Build - if: matrix.libgccjit_version == 'libgccjit12.so' run: | ./prepare_build.sh - ./build.sh --no-default-features - cargo test --no-default-features + ${{ matrix.libgccjit_version.env_extra }} ./build.sh ${{ matrix.libgccjit_version.extra }} + ${{ matrix.libgccjit_version.env_extra }} cargo test ${{ matrix.libgccjit_version.extra }} ./clean_all.sh - name: Prepare dependencies @@ -106,26 +137,16 @@ jobs: command: build args: --release - - name: Test - if: matrix.libgccjit_version != 'libgccjit12.so' - run: | - # Enable backtraces for easier debugging - export RUST_BACKTRACE=1 - - # Reduce amount of benchmark runs as they are slow - export COMPILE_RUNS=2 - export RUN_RUNS=2 + - name: Add more failing tests for GCC 12 + if: ${{ matrix.libgccjit_version.gcc == 'libgccjit12.so' }} + run: cat failing-ui-tests12.txt >> failing-ui-tests.txt - ./test.sh --release - - - name: Test - if: matrix.libgccjit_version == 'libgccjit12.so' + - name: Run tests run: | - # Enable backtraces for easier debugging - export RUST_BACKTRACE=1 - - # Reduce amount of benchmark runs as they are slow - export COMPILE_RUNS=2 - export RUN_RUNS=2 + ${{ matrix.libgccjit_version.env_extra }} ./test.sh --release --clean --build-sysroot ${{ matrix.commands }} ${{ matrix.libgccjit_version.extra }} - ./test.sh --release --no-default-features + duplicates: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: python tools/check_intrinsics_duplicates.py diff --git a/compiler/rustc_codegen_gcc/.github/workflows/release.yml b/compiler/rustc_codegen_gcc/.github/workflows/release.yml new file mode 100644 index 0000000000000..c4e99469bc20b --- /dev/null +++ b/compiler/rustc_codegen_gcc/.github/workflows/release.yml @@ -0,0 +1,111 @@ +name: CI with sysroot compiled in release mode + +on: + - push + - pull_request + +permissions: + contents: read + +env: + # Enable backtraces for easier debugging + RUST_BACKTRACE: 1 + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + libgccjit_version: + - { gcc: "libgccjit.so", artifacts_branch: "master" } + commands: [ + "--test-successful-rustc --nb-parts 2 --current-part 0", + "--test-successful-rustc --nb-parts 2 --current-part 1", + ] + + steps: + - uses: actions/checkout@v3 + + - uses: actions/checkout@v3 + with: + repository: llvm/llvm-project + path: llvm + + - name: Install packages + 
run: sudo apt-get install ninja-build ripgrep + + - name: Download artifact + uses: dawidd6/action-download-artifact@v2 + with: + workflow: main.yml + name: ${{ matrix.libgccjit_version.gcc }} + path: gcc-build + repo: antoyo/gcc + branch: ${{ matrix.libgccjit_version.artifacts_branch }} + event: push + search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts. + + - name: Setup path to libgccjit + run: | + echo $(readlink -f gcc-build) > gcc_path + # NOTE: the filename is still libgccjit.so even when the artifact name is different. + ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0 + + - name: Set env + run: | + echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV + + - name: Set RUST_COMPILER_RT_ROOT + run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV + + - name: Cache cargo installed crates + uses: actions/cache@v3 + with: + path: ~/.cargo/bin + key: cargo-installed-crates2-ubuntu-latest + + - name: Cache cargo registry + uses: actions/cache@v3 + with: + path: ~/.cargo/registry + key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo index + uses: actions/cache@v3 + with: + path: ~/.cargo/git + key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo target dir + uses: actions/cache@v3 + with: + path: target + key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }} + + - name: Build + run: | + ./prepare_build.sh + ./build.sh --release --release-sysroot + cargo test + ./clean_all.sh + + - name: Prepare dependencies + run: | + git config --global user.email "user@example.com" + git config --global user.name "User" + ./prepare.sh + + # Compile is a separate step, as the actions-rs/cargo action supports error annotations + - name: Compile + uses: actions-rs/cargo@v1.0.3 + with: + command: build + args: --release + + - name: Run tests + run: | + ./test.sh --release --clean --release-sysroot --build-sysroot ${{ matrix.commands }} diff --git a/compiler/rustc_codegen_gcc/.github/workflows/stdarch.yml b/compiler/rustc_codegen_gcc/.github/workflows/stdarch.yml new file mode 100644 index 0000000000000..42fb35e738ffd --- /dev/null +++ b/compiler/rustc_codegen_gcc/.github/workflows/stdarch.yml @@ -0,0 +1,116 @@ +name: stdarch tests with sysroot compiled in release mode + +on: + - push + - pull_request + +permissions: + contents: read + +env: + # Enable backtraces for easier debugging + RUST_BACKTRACE: 1 + +jobs: + build: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + libgccjit_version: + - { gcc: "libgccjit.so", artifacts_branch: "master" } + commands: [ + "--test-successful-rustc --nb-parts 2 --current-part 0", + "--test-successful-rustc --nb-parts 2 --current-part 1", + ] + + steps: + - uses: actions/checkout@v3 + + - uses: actions/checkout@v3 + with: + repository: llvm/llvm-project + path: llvm + + - name: Install packages + run: sudo apt-get install ninja-build ripgrep + + - name: Download artifact + uses: dawidd6/action-download-artifact@v2 + with: + workflow: main.yml + name: ${{ matrix.libgccjit_version.gcc }} + path: gcc-build + repo: antoyo/gcc + branch: ${{ matrix.libgccjit_version.artifacts_branch }} + event: push + search_artifacts: true # Because, instead, the action only check the last job ran and that won't work since we want multiple artifacts. 
+ + - name: Setup path to libgccjit + run: | + echo $(readlink -f gcc-build) > gcc_path + # NOTE: the filename is still libgccjit.so even when the artifact name is different. + ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0 + + - name: Set env + run: | + echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV + echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV + + - name: Set RUST_COMPILER_RT_ROOT + run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV + + - name: Cache cargo installed crates + uses: actions/cache@v3 + with: + path: ~/.cargo/bin + key: cargo-installed-crates2-ubuntu-latest + + - name: Cache cargo registry + uses: actions/cache@v3 + with: + path: ~/.cargo/registry + key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo index + uses: actions/cache@v3 + with: + path: ~/.cargo/git + key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} + + - name: Cache cargo target dir + uses: actions/cache@v3 + with: + path: target + key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }} + + - name: Build + run: | + ./prepare_build.sh + ./build.sh --release --release-sysroot + cargo test + ./clean_all.sh + + - name: Prepare dependencies + run: | + git config --global user.email "user@example.com" + git config --global user.name "User" + ./prepare.sh + + # Compile is a separate step, as the actions-rs/cargo action supports error annotations + - name: Compile + uses: actions-rs/cargo@v1.0.3 + with: + command: build + args: --release + + - name: Run tests + run: | + ./test.sh --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore + + - name: Run stdarch tests + run: | + cd build_sysroot/sysroot_src/library/stdarch/ + CHANNEL=release TARGET=x86_64-unknown-linux-gnu ../../../../cargo.sh test diff --git a/compiler/rustc_codegen_gcc/Cargo.lock b/compiler/rustc_codegen_gcc/Cargo.lock index 1cb219e12e04d..80e5741894088 100644 --- a/compiler/rustc_codegen_gcc/Cargo.lock +++ b/compiler/rustc_codegen_gcc/Cargo.lock @@ -35,7 +35,7 @@ dependencies = [ [[package]] name = "gccjit" version = "1.0.0" -source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6" +source = "git+https://github.com/antoyo/gccjit.rs#eefb8c662d61477f34b7c32d26bcda5f1ef08432" dependencies = [ "gccjit_sys", ] @@ -43,7 +43,7 @@ dependencies = [ [[package]] name = "gccjit_sys" version = "0.0.1" -source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6" +source = "git+https://github.com/antoyo/gccjit.rs#eefb8c662d61477f34b7c32d26bcda5f1ef08432" dependencies = [ "libc 0.1.12", ] @@ -208,6 +208,7 @@ version = "0.1.0" dependencies = [ "gccjit", "lang_tester", + "smallvec", "tempfile", ] @@ -220,6 +221,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + [[package]] name = "tempfile" version = "3.2.0" diff --git a/compiler/rustc_codegen_gcc/Cargo.toml b/compiler/rustc_codegen_gcc/Cargo.toml index 1f3da2f799b78..81066d9ce1f0a 100644 --- a/compiler/rustc_codegen_gcc/Cargo.toml +++ b/compiler/rustc_codegen_gcc/Cargo.toml @@ -27,6 +27,8 @@ gccjit = { git = "https://github.com/antoyo/gccjit.rs" } # Local copy. 
 #gccjit = { path = "../gccjit.rs" }
 
+smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
+
 [dev-dependencies]
 lang_tester = "0.3.9"
 tempfile = "3.1.0"
diff --git a/compiler/rustc_codegen_gcc/Readme.md b/compiler/rustc_codegen_gcc/Readme.md
index fe23a26769663..bb74194389254 100644
--- a/compiler/rustc_codegen_gcc/Readme.md
+++ b/compiler/rustc_codegen_gcc/Readme.md
@@ -1,5 +1,7 @@
 # WIP libgccjit codegen backend for rust
 
+[![Chat on IRC](https://img.shields.io/badge/irc.libera.chat-%23rustc__codegen__gcc-blue.svg)](https://web.libera.chat/#rustc_codegen_gcc)
+
 This is a GCC codegen for rustc, which means it can be loaded by the existing rustc frontend, but benefits from GCC: more architectures are supported and GCC's optimizations are used.
 
 **Despite its name, libgccjit can be used for ahead-of-time compilation, as is used here.**
@@ -16,21 +18,62 @@ The patches in [this repository](https://github.com/antoyo/libgccjit-patches) ne
 (Those patches should work when applied on master, but in case it doesn't work, they are known to work when applied on 079c23cfe079f203d5df83fea8e92a60c7d7e878.)
 You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.**
 
+To build it (most of these instructions come from [here](https://gcc.gnu.org/onlinedocs/jit/internals/index.html), so don't hesitate to take a look there if you encounter an issue):
+
+```bash
+$ git clone https://github.com/antoyo/gcc
+$ sudo apt install flex libmpfr-dev libgmp-dev libmpc3 libmpc-dev
+$ mkdir gcc-build gcc-install
+$ cd gcc-build
+# --enable-checking=release enables extra checks which help to find bugs.
+$ ../gcc/configure \
+    --enable-host-shared \
+    --enable-languages=jit \
+    --enable-checking=release \
+    --disable-bootstrap \
+    --disable-multilib \
+    --prefix=$(pwd)/../gcc-install
+$ make -j4 # You can replace `4` with another number depending on how many cores you have.
+```
+
+If you want to run the libgccjit tests, you will also need to enable the C++ language in `configure`:
+
+```bash
+--enable-languages=jit,c++
+```
+
+Then, to run the libgccjit tests:
+
+```bash
+$ cd gcc # from the `gcc-build` folder
+$ make check-jit
+# To run one specific test:
+$ make check-jit RUNTESTFLAGS="-v -v -v jit.exp=jit.dg/test-asm.cc"
+```
+
 **Put the path to your custom build of libgccjit in the file `gcc_path`.**
 
 ```bash
-$ git clone https://github.com/rust-lang/rustc_codegen_gcc.git
-$ cd rustc_codegen_gcc
+$ dirname $(readlink -f `find . -name libgccjit.so`) > gcc_path
+```
+
+You also need to set RUST_COMPILER_RT_ROOT:
+
+```bash
 $ git clone https://github.com/llvm/llvm-project llvm --depth 1 --single-branch
 $ export RUST_COMPILER_RT_ROOT="$PWD/llvm/compiler-rt"
-$ ./prepare_build.sh # download and patch sysroot src
-$ ./build.sh --release
 ```
 
-To run the tests:
+Then you can run commands like this:
 
 ```bash
 $ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
+$ LIBRARY_PATH=$(cat gcc_path) LD_LIBRARY_PATH=$(cat gcc_path) ./build.sh --release
+```
+
+To run the tests:
+
+```bash
 $ ./test.sh --release
 ```
 
@@ -120,13 +162,52 @@ To print a debug representation of a tree:
 debug_tree(expr);
 ```
+
+(defined in print-tree.h)
+
+To print a debug representation of a gimple struct:
+
+```c
+debug_gimple_stmt(gimple_struct)
+```
+
 To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`.
+To have the correct file paths in `gdb` instead of `/usr/src/debug/gcc/libstdc++-v3/libsupc++/eh_personality.cc`:
+
+One way may be to call the following at the beginning of the gdb session:
+
+```
+set substitute-path /usr/src/debug/gcc /path/to/gcc-repo/gcc
+```
+
+TODO(antoyo): but that's not what I remember I was doing.
+
 ### How to use a custom-build rustc
 
 * Build the stage2 compiler (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`).
 * Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`.
 
+### How to install a forked git-subtree
+
+Using git-subtree with `rustc` requires a patched git.
+The PR with the needed changes is [here](https://github.com/gitgitgadget/git/pull/493).
+Use the following instructions to install it:
+
+```
+git clone git@github.com:tqc/git.git
+cd git
+git checkout tqc/subtree
+make
+make install
+cd contrib/subtree
+make
+cp git-subtree ~/bin
+```
+
+### How to use [mem-trace](https://github.com/antoyo/mem-trace)
+
+`rustc` needs to be built without `jemalloc` so that `mem-trace` can overload `malloc`: since `jemalloc` is linked statically, an `LD_PRELOAD`-ed library won't have a chance to intercept the calls to `malloc`.
+
 ### How to build a cross-compiling libgccjit
 
 #### Building libgccjit
@@ -142,6 +223,5 @@ To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo b
 * Since rustc doesn't support this architecture yet, set it back to `TARGET_TRIPLE="mips-unknown-linux-gnu"` (or another target having the same attributes). Alternatively, create a [target specification file](https://book.avr-rust.com/005.1-the-target-specification-json-file.html) (note that the `arch` specified in this file must be supported by the rust compiler).
 * Set `linker='-Clinker=m68k-linux-gcc'`.
 * Set the path to the cross-compiling libgccjit in `gcc_path`.
-* Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::();` in `context.rs` (same for u128_type).
 * Comment the line: `context.add_command_line_option("-masm=intel");` in src/base.rs.
 * (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?).
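+
+For illustration, the last steps of such a cross-compiling setup might look like the following sketch (the `gcc-build-cross` directory name is a placeholder, and where exactly `TARGET_TRIPLE` and `linker` are set depends on your local scripts):
+
+```bash
+# Point gcc_path at the cross-compiled libgccjit.
+dirname $(readlink -f `find gcc-build-cross -name libgccjit.so`) > gcc_path
+# Then build against it, as for a native build.
+LIBRARY_PATH=$(cat gcc_path) LD_LIBRARY_PATH=$(cat gcc_path) ./build.sh --release
+```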
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh b/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh index f293192a099de..9d692d599f6be 100755 --- a/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh +++ b/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh @@ -16,7 +16,7 @@ rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true rm -r sysroot/ 2>/dev/null || true # Build libs -export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked -Cpanic=abort" +export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked" if [[ "$1" == "--release" ]]; then sysroot_channel='release' RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release diff --git a/compiler/rustc_codegen_gcc/config.sh b/compiler/rustc_codegen_gcc/config.sh index b25e215fb9ee9..166e83901c4f9 100644 --- a/compiler/rustc_codegen_gcc/config.sh +++ b/compiler/rustc_codegen_gcc/config.sh @@ -38,7 +38,7 @@ if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then fi fi -export RUSTFLAGS="$CG_RUSTFLAGS $linker -Cpanic=abort -Csymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot" +export RUSTFLAGS="$CG_RUSTFLAGS $linker -Csymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot $TEST_FLAGS" # FIXME(antoyo): remove once the atomic shim is gone if [[ `uname` == 'Darwin' ]]; then diff --git a/compiler/rustc_codegen_gcc/example/alloc_example.rs b/compiler/rustc_codegen_gcc/example/alloc_example.rs index 74ea7ec4ede69..c80348ca54970 100644 --- a/compiler/rustc_codegen_gcc/example/alloc_example.rs +++ b/compiler/rustc_codegen_gcc/example/alloc_example.rs @@ -1,4 +1,4 @@ -#![feature(start, box_syntax, core_intrinsics, alloc_error_handler)] +#![feature(start, box_syntax, core_intrinsics, alloc_error_handler, lang_items)] #![no_std] extern crate alloc; @@ -18,16 +18,22 @@ extern "C" { #[panic_handler] fn panic_handler(_: &core::panic::PanicInfo) -> ! { - unsafe { - core::intrinsics::abort(); - } + core::intrinsics::abort(); } #[alloc_error_handler] fn alloc_error_handler(_: alloc::alloc::Layout) -> ! { - unsafe { - core::intrinsics::abort(); - } + core::intrinsics::abort(); +} + +#[lang = "eh_personality"] +fn eh_personality() -> ! 
{ + loop {} +} + +#[no_mangle] +unsafe extern "C" fn _Unwind_Resume() { + core::intrinsics::unreachable(); } #[start] diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs index ddcbb0d9fc7e8..637b8dc53fefd 100644 --- a/compiler/rustc_codegen_gcc/example/mini_core.rs +++ b/compiler/rustc_codegen_gcc/example/mini_core.rs @@ -1,6 +1,6 @@ #![feature( no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types, - untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits, + decl_macro, rustc_attrs, transparent_unions, auto_traits, thread_local )] #![no_core] @@ -17,6 +17,9 @@ pub trait Sized {} #[lang = "destruct"] pub trait Destruct {} +#[lang = "tuple_trait"] +pub trait Tuple {} + #[lang = "unsize"] pub trait Unsize {} @@ -39,14 +42,14 @@ impl<'a, T: ?Sized+Unsize, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut impl, U: ?Sized> DispatchFromDyn<*const U> for *const T {} // *mut T -> *mut U impl, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {} -impl, U: ?Sized> DispatchFromDyn> for Box {} +impl, U: ?Sized> DispatchFromDyn> for Box {} #[lang = "receiver"] pub trait Receiver {} impl Receiver for &T {} impl Receiver for &mut T {} -impl Receiver for Box {} +impl Receiver for Box {} #[lang = "copy"] pub unsafe trait Copy {} @@ -396,7 +399,7 @@ pub struct PhantomData; #[lang = "fn_once"] #[rustc_paren_sugar] -pub trait FnOnce { +pub trait FnOnce { #[lang = "fn_once_output"] type Output; @@ -405,13 +408,21 @@ pub trait FnOnce { #[lang = "fn_mut"] #[rustc_paren_sugar] -pub trait FnMut: FnOnce { +pub trait FnMut: FnOnce { extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; } #[lang = "panic"] #[track_caller] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { + unsafe { + libc::puts("Panicking\n\0" as *const str as *const u8); + intrinsics::abort(); + } +} + +#[lang = "panic_cannot_unwind"] +fn panic_cannot_unwind() -> ! 
{ unsafe { libc::puts("Panicking\n\0" as *const str as *const u8); intrinsics::abort(); @@ -450,17 +461,32 @@ pub trait Deref { pub trait Allocator { } +impl Allocator for () {} + pub struct Global; impl Allocator for Global {} +#[repr(transparent)] +#[rustc_layout_scalar_valid_range_start(1)] +#[rustc_nonnull_optimization_guaranteed] +pub struct NonNull(pub *const T); + +impl CoerceUnsized> for NonNull where T: Unsize {} +impl DispatchFromDyn> for NonNull where T: Unsize {} + +pub struct Unique { + pub pointer: NonNull, + pub _marker: PhantomData, +} + +impl CoerceUnsized> for Unique where T: Unsize {} +impl DispatchFromDyn> for Unique where T: Unsize {} + #[lang = "owned_box"] -pub struct Box< - T: ?Sized, - A: Allocator = Global, ->(*mut T, A); +pub struct Box(Unique, A); -impl, U: ?Sized> CoerceUnsized> for Box {} +impl, U: ?Sized, A: Allocator> CoerceUnsized> for Box {} impl Drop for Box { fn drop(&mut self) { @@ -468,7 +494,7 @@ impl Drop for Box { } } -impl Deref for Box { +impl Deref for Box { type Target = T; fn deref(&self) -> &Self::Target { @@ -482,8 +508,8 @@ unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { } #[lang = "box_free"] -unsafe fn box_free(ptr: *mut T, alloc: A) { - libc::free(ptr as *mut u8); +unsafe fn box_free(ptr: Unique, _alloc: ()) { + libc::free(ptr.pointer.0 as *mut u8); } #[lang = "drop"] @@ -505,17 +531,25 @@ pub union MaybeUninit { } pub mod intrinsics { + use crate::Sized; + extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; + #[rustc_safe_intrinsic] pub fn size_of() -> usize; - pub fn size_of_val(val: *const T) -> usize; + pub fn size_of_val(val: *const T) -> usize; + #[rustc_safe_intrinsic] pub fn min_align_of() -> usize; - pub fn min_align_of_val(val: *const T) -> usize; + pub fn min_align_of_val(val: *const T) -> usize; pub fn copy(src: *const T, dst: *mut T, count: usize); pub fn transmute(e: T) -> U; pub fn ctlz_nonzero(x: T) -> T; - pub fn needs_drop() -> bool; + #[rustc_safe_intrinsic] + pub fn needs_drop() -> bool; + #[rustc_safe_intrinsic] pub fn bitreverse(x: T) -> T; + #[rustc_safe_intrinsic] pub fn bswap(x: T) -> T; pub fn write_bytes(dst: *mut T, val: u8, count: usize); pub fn unreachable() -> !; diff --git a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs index 14fd9eeffa6ac..993a31e68eabc 100644 --- a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs +++ b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs @@ -85,6 +85,7 @@ fn start( main: fn() -> T, argc: isize, argv: *const *const u8, + _sigpipe: u8, ) -> isize { if argc == 3 { unsafe { puts(*argv); } @@ -228,6 +229,7 @@ fn main() { } as Box; const FUNC_REF: Option = Some(main); + #[allow(unreachable_code)] match FUNC_REF { Some(_) => {}, None => assert!(false), diff --git a/compiler/rustc_codegen_gcc/example/mod_bench.rs b/compiler/rustc_codegen_gcc/example/mod_bench.rs index 2e2b0052dee8b..95bcad2cd173e 100644 --- a/compiler/rustc_codegen_gcc/example/mod_bench.rs +++ b/compiler/rustc_codegen_gcc/example/mod_bench.rs @@ -6,9 +6,7 @@ extern {} #[panic_handler] fn panic_handler(_: &core::panic::PanicInfo) -> ! 
{ - unsafe { - core::intrinsics::abort(); - } + core::intrinsics::abort(); } #[lang="eh_personality"] @@ -32,6 +30,6 @@ fn main(_argc: isize, _argv: *const *const u8) -> isize { #[inline(never)] fn black_box(i: u32) { if i != 1 { - unsafe { core::intrinsics::abort(); } + core::intrinsics::abort(); } } diff --git a/compiler/rustc_codegen_gcc/example/std_example.rs b/compiler/rustc_codegen_gcc/example/std_example.rs index 31069058aea34..5c171c49fd194 100644 --- a/compiler/rustc_codegen_gcc/example/std_example.rs +++ b/compiler/rustc_codegen_gcc/example/std_example.rs @@ -1,5 +1,6 @@ #![feature(core_intrinsics, generators, generator_trait, is_sorted)] +#[cfg(feature="master")] use std::arch::x86_64::*; use std::io::Write; use std::ops::Generator; diff --git a/compiler/rustc_codegen_gcc/failing-ui-tests.txt b/compiler/rustc_codegen_gcc/failing-ui-tests.txt new file mode 100644 index 0000000000000..8539e27ea6a58 --- /dev/null +++ b/compiler/rustc_codegen_gcc/failing-ui-tests.txt @@ -0,0 +1,68 @@ +tests/ui/allocator/custom-in-block.rs +tests/ui/allocator/custom-in-submodule.rs +tests/ui/allocator/custom.rs +tests/ui/allocator/hygiene.rs +tests/ui/allocator/no_std-alloc-error-handler-custom.rs +tests/ui/allocator/no_std-alloc-error-handler-default.rs +tests/ui/allocator/xcrate-use.rs +tests/ui/allocator/xcrate-use2.rs +tests/ui/asm/may_unwind.rs +tests/ui/asm/x86_64/multiple-clobber-abi.rs +tests/ui/debuginfo/debuginfo-emit-llvm-ir-and-split-debuginfo.rs +tests/ui/functions-closures/parallel-codegen-closures.rs +tests/ui/linkage-attr/linkage1.rs +tests/ui/lto/dylib-works.rs +tests/ui/numbers-arithmetic/saturating-float-casts.rs +tests/ui/polymorphization/promoted-function.rs +tests/ui/process/nofile-limit.rs +tests/ui/sepcomp/sepcomp-cci.rs +tests/ui/sepcomp/sepcomp-extern.rs +tests/ui/sepcomp/sepcomp-fns-backwards.rs +tests/ui/sepcomp/sepcomp-fns.rs +tests/ui/sepcomp/sepcomp-statics.rs +tests/ui/simd/intrinsic/generic-arithmetic-pass.rs +tests/ui/sse2.rs +tests/ui/target-feature/missing-plusminus.rs +tests/ui/asm/x86_64/may_unwind.rs +tests/ui/backtrace.rs +tests/ui/catch-unwind-bang.rs +tests/ui/cfg/cfg-panic-abort.rs +tests/ui/drop/dynamic-drop-async.rs +tests/ui/drop/repeat-drop.rs +tests/ui/fmt/format-args-capture.rs +tests/ui/generator/panic-drops-resume.rs +tests/ui/generator/panic-drops.rs +tests/ui/intrinsics/panic-uninitialized-zeroed.rs +tests/ui/iterators/iter-sum-overflow-debug.rs +tests/ui/iterators/iter-sum-overflow-overflow-checks.rs +tests/ui/mir/mir_calls_to_shims.rs +tests/ui/mir/mir_drop_order.rs +tests/ui/mir/mir_let_chains_drop_order.rs +tests/ui/oom_unwind.rs +tests/ui/panic-runtime/abort-link-to-unwinding-crates.rs +tests/ui/panic-runtime/abort.rs +tests/ui/panic-runtime/link-to-abort.rs +tests/ui/unwind-no-uwtable.rs +tests/ui/parser/unclosed-delimiter-in-dep.rs +tests/ui/runtime/rt-explody-panic-payloads.rs +tests/ui/simd/intrinsic/ptr-cast.rs +tests/ui/binding/fn-arg-incomplete-pattern-drop-order.rs +tests/ui/consts/missing_span_in_backtrace.rs +tests/ui/drop/dynamic-drop.rs +tests/ui/dyn-star/box.rs +tests/ui/issues/issue-40883.rs +tests/ui/issues/issue-43853.rs +tests/ui/issues/issue-47364.rs +tests/ui/macros/rfc-2011-nicer-assert-messages/assert-without-captures-does-not-create-unnecessary-code.rs +tests/ui/rfc-2091-track-caller/std-panic-locations.rs +tests/ui/rfcs/rfc1857-drop-order.rs +tests/ui/simd/issue-17170.rs +tests/ui/simd/issue-39720.rs +tests/ui/simd/issue-89193.rs +tests/ui/statics/issue-91050-1.rs +tests/ui/statics/issue-91050-2.rs 
+tests/ui/alloc-error/default-alloc-error-hook.rs +tests/ui/generator/panic-safe.rs +tests/ui/issues/issue-14875.rs +tests/ui/issues/issue-29948.rs +tests/ui/panic-while-printing.rs diff --git a/compiler/rustc_codegen_gcc/failing-ui-tests12.txt b/compiler/rustc_codegen_gcc/failing-ui-tests12.txt new file mode 100644 index 0000000000000..8c27bd8b8ca89 --- /dev/null +++ b/compiler/rustc_codegen_gcc/failing-ui-tests12.txt @@ -0,0 +1,39 @@ +tests/ui/asm/x86_64/issue-96797.rs +tests/ui/intrinsics/const-eval-select-x86_64.rs +tests/ui/packed/packed-struct-drop-aligned.rs +tests/ui/packed/packed-struct-generic-layout.rs +tests/ui/packed/packed-struct-layout.rs +tests/ui/packed/packed-struct-optimized-enum.rs +tests/ui/packed/packed-struct-size.rs +tests/ui/packed/packed-struct-vec.rs +tests/ui/packed/packed-tuple-struct-layout.rs +tests/ui/simd/array-type.rs +tests/ui/simd/intrinsic/float-minmax-pass.rs +tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs +tests/ui/simd/intrinsic/generic-as.rs +tests/ui/simd/intrinsic/generic-cast-pass.rs +tests/ui/simd/intrinsic/generic-cast-pointer-width.rs +tests/ui/simd/intrinsic/generic-comparison-pass.rs +tests/ui/simd/intrinsic/generic-elements-pass.rs +tests/ui/simd/intrinsic/generic-reduction-pass.rs +tests/ui/simd/intrinsic/generic-select-pass.rs +tests/ui/simd/intrinsic/inlining-issue67557-ice.rs +tests/ui/simd/intrinsic/inlining-issue67557.rs +tests/ui/simd/monomorphize-shuffle-index.rs +tests/ui/simd/shuffle.rs +tests/ui/simd/simd-bitmask.rs +tests/ui/generator/resume-after-return.rs +tests/ui/iterators/iter-step-overflow-debug.rs +tests/ui/macros/rfc-2011-nicer-assert-messages/all-expr-kinds.rs +tests/ui/numbers-arithmetic/next-power-of-two-overflow-debug.rs +tests/ui/privacy/reachable-unnameable-items.rs +tests/ui/rfc-1937-termination-trait/termination-trait-in-test.rs +tests/ui/async-await/async-fn-size-moved-locals.rs +tests/ui/async-await/async-fn-size-uninit-locals.rs +tests/ui/cfg/cfg-panic.rs +tests/ui/generator/size-moved-locals.rs +tests/ui/macros/rfc-2011-nicer-assert-messages/all-not-available-cases.rs +tests/ui/simd/intrinsic/generic-gather-pass.rs +tests/ui/simd/issue-85915-simd-ptrs.rs +tests/ui/issues/issue-68010-large-zst-consts.rs +tests/ui/rust-2018/proc-macro-crate-in-paths.rs diff --git a/compiler/rustc_codegen_gcc/locales/en-US.ftl b/compiler/rustc_codegen_gcc/locales/en-US.ftl index 6101b28ab0cdd..0a94a08f8dca8 100644 --- a/compiler/rustc_codegen_gcc/locales/en-US.ftl +++ b/compiler/rustc_codegen_gcc/locales/en-US.ftl @@ -60,3 +60,9 @@ codegen_gcc_invalid_monomorphization_unsupported_cast = codegen_gcc_invalid_monomorphization_unsupported_operation = invalid monomorphization of `{$name}` intrinsic: unsupported operation on `{$in_ty}` with element `{$in_elem}` + +codegen_gcc_invalid_minimum_alignment = + invalid minimum global alignment: {$err} + +codegen_gcc_tied_target_features = the target features {$features} must all be either enabled or disabled together + .help = add the missing features in a `target_feature` attribute diff --git a/compiler/rustc_codegen_gcc/patches/0001-Add-stdarch-Cargo.toml-for-testing.patch b/compiler/rustc_codegen_gcc/patches/0001-Add-stdarch-Cargo.toml-for-testing.patch new file mode 100644 index 0000000000000..93c63b5dcacfd --- /dev/null +++ b/compiler/rustc_codegen_gcc/patches/0001-Add-stdarch-Cargo.toml-for-testing.patch @@ -0,0 +1,39 @@ +From c3821e02fbd6cb5ad6e06d759fccdc9073712375 Mon Sep 17 00:00:00 2001 +From: Antoni Boucher +Date: Tue, 7 Jun 2022 21:40:13 -0400 +Subject: [PATCH] 
Add stdarch Cargo.toml for testing + +--- + library/stdarch/Cargo.toml | 20 ++++++++++++++++++++ + 1 file changed, 20 insertions(+) + create mode 100644 library/stdarch/Cargo.toml + +diff --git a/library/stdarch/Cargo.toml b/library/stdarch/Cargo.toml +new file mode 100644 +index 0000000..fbe0a95 +--- /dev/null ++++ b/library/stdarch/Cargo.toml +@@ -0,0 +1,20 @@ ++[workspace] ++members = [ ++ "crates/core_arch", ++ "crates/std_detect", ++ "crates/stdarch-gen", ++ "examples/" ++] ++exclude = [ ++ "crates/wasm-assert-instr-tests" ++] ++ ++[profile.release] ++debug = true ++opt-level = 3 ++incremental = true ++ ++[profile.bench] ++debug = 1 ++opt-level = 3 ++incremental = true +-- +2.26.2.7.g19db9cfb68.dirty + diff --git a/compiler/rustc_codegen_gcc/patches/0001-Disable-examples.patch b/compiler/rustc_codegen_gcc/patches/0001-Disable-examples.patch new file mode 100644 index 0000000000000..1b71df1ca8df2 --- /dev/null +++ b/compiler/rustc_codegen_gcc/patches/0001-Disable-examples.patch @@ -0,0 +1,25 @@ +From a2d53a324a02c04b76c0e9d39dc15cd443a3b8b2 Mon Sep 17 00:00:00 2001 +From: Antoni Boucher +Date: Fri, 25 Nov 2022 11:18:11 -0500 +Subject: [PATCH] Disable examples + +--- + library/stdarch/Cargo.toml | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/library/stdarch/Cargo.toml b/library/stdarch/Cargo.toml +index fbe0a95..748d72d 100644 +--- a/library/stdarch/Cargo.toml ++++ b/library/stdarch/Cargo.toml +@@ -3,7 +3,7 @@ members = [ + "crates/core_arch", + "crates/std_detect", + "crates/stdarch-gen", +- "examples/" ++ #"examples/" + ] + exclude = [ + "crates/wasm-assert-instr-tests" +-- +2.26.2.7.g19db9cfb68.dirty + diff --git a/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch index 301b3f9bde4dd..4db56fa3bd2c7 100644 --- a/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch +++ b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch @@ -18,7 +18,7 @@ new file mode 100644 index 0000000..46fd999 --- /dev/null +++ b/library/core/tests/Cargo.toml -@@ -0,0 +1,8 @@ +@@ -0,0 +1,12 @@ +[package] +name = "core" +version = "0.0.0" @@ -27,37 +27,18 @@ index 0000000..46fd999 +[lib] +name = "coretests" +path = "lib.rs" -diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs -index a35897e..f0bf645 100644 ---- a/library/core/tests/num/flt2dec/mod.rs -+++ b/library/core/tests/num/flt2dec/mod.rs -@@ -13,7 +13,6 @@ mod strategy { - mod dragon; - mod grisu; - } --mod random; - - pub fn decode_finite(v: T) -> Decoded { - match decode(v).1 { -diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs -index 6609bc3..241b497 100644 ---- a/library/core/tests/slice.rs -+++ b/library/core/tests/slice.rs -@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() { - } - } - -+/* - #[test] - #[cfg(not(target_arch = "wasm32"))] - fn sort_unstable() { -@@ -1394,6 +1395,7 @@ fn partition_at_index() { - v.select_nth_unstable(0); - assert!(v == [0xDEADBEEF]); - } -+*/ - - #[test] - #[should_panic(expected = "index 0 greater than length of slice")] ++ ++[dependencies] ++rand = { version = "0.8.5", default-features = false } ++rand_xorshift = { version = "0.3.0", default-features = false } +diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs +index 42a26ae..5ac1042 100644 +--- a/library/core/tests/lib.rs ++++ b/library/core/tests/lib.rs +@@ -1,3 +1,4 @@ ++#![cfg(test)] + 
#![feature(alloc_layout_extra)] + #![feature(array_chunks)] + #![feature(array_methods)] -- 2.21.0 (Apple Git-122) diff --git a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch deleted file mode 100644 index c59a40df03988..0000000000000 --- a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch +++ /dev/null @@ -1,28 +0,0 @@ -From b1ae000f6da1abd3b8e9b80c40bc11c89b8ae93c Mon Sep 17 00:00:00 2001 -From: bjorn3 -Date: Thu, 30 Dec 2021 16:54:40 +0100 -Subject: [PATCH] [core] Disable portable-simd test - ---- - library/core/tests/lib.rs | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs -index 06c7be0..359e2e7 100644 ---- a/library/core/tests/lib.rs -+++ b/library/core/tests/lib.rs -@@ -75,7 +75,6 @@ - #![feature(never_type)] - #![feature(unwrap_infallible)] --#![feature(portable_simd)] - #![feature(ptr_metadata)] - #![feature(once_cell)] - #![feature(option_result_contains)] -@@ -127,7 +126,6 @@ mod pin; - mod pin_macro; - mod ptr; - mod result; --mod simd; - mod slice; - mod str; - mod str_lossy; diff --git a/compiler/rustc_codegen_gcc/rust-toolchain b/compiler/rustc_codegen_gcc/rust-toolchain index b20aeb979ad78..933ecd45baadb 100644 --- a/compiler/rustc_codegen_gcc/rust-toolchain +++ b/compiler/rustc_codegen_gcc/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2022-06-06" +channel = "nightly-2023-03-02" components = ["rust-src", "rustc-dev", "llvm-tools-preview"] diff --git a/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch b/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch deleted file mode 100644 index 59143eac37b3f..0000000000000 --- a/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git a/src/tools/compiletest/src/header.rs b/src/tools/compiletest/src/header.rs -index 887d27fd6dca4..2c2239f2b83d1 100644 ---- a/src/tools/compiletest/src/header.rs -+++ b/src/tools/compiletest/src/header.rs -@@ -806,8 +806,8 @@ pub fn make_test_description( - cfg: Option<&str>, - ) -> test::TestDesc { - let mut ignore = false; - #[cfg(not(bootstrap))] -- let ignore_message: Option = None; -+ let ignore_message: Option<&str> = None; - let mut should_fail = false; - - let rustc_has_profiler_support = env::var_os("RUSTC_PROFILER_SUPPORT").is_some(); diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs index e2c9ffe9c1c30..4bad33ee879ee 100644 --- a/compiler/rustc_codegen_gcc/src/allocator.rs +++ b/compiler/rustc_codegen_gcc/src/allocator.rs @@ -1,3 +1,5 @@ +#[cfg(feature="master")] +use gccjit::FnAttribute; use gccjit::{FunctionType, GlobalKind, ToRValue}; use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS}; use rustc_middle::bug; @@ -50,7 +52,8 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false); if tcx.sess.target.options.default_hidden_visibility { - // TODO(antoyo): set visibility. + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); } if tcx.sess.must_emit_unwind_tables() { // TODO(antoyo): emit unwind tables. 
@@ -61,7 +64,8 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false); - // TODO(antoyo): set visibility. + #[cfg(feature="master")] + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); let block = func.new_block("entry"); @@ -90,12 +94,18 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam .collect(); let func = context.new_function(None, FunctionType::Exported, void, &args, name, false); + if tcx.sess.target.default_hidden_visibility { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + let callee = alloc_error_handler_kind.fn_name(sym::oom); let args: Vec<_> = types.iter().enumerate() .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index))) .collect(); let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false); - //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden); + #[cfg(feature="master")] + callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); let block = func.new_block("entry"); diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs index c346dbd63cca7..41e9d61a10e50 100644 --- a/compiler/rustc_codegen_gcc/src/asm.rs +++ b/compiler/rustc_codegen_gcc/src/asm.rs @@ -157,7 +157,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { use ConstraintOrRegister::*; let (constraint, ty) = match (reg_to_gcc(reg), place) { - (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx, false)), + (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx)), // When `reg` is a class and not an explicit register but the out place is not specified, // we need to create an unused output variable to assign the output to. This var // needs to be of a type that's "compatible" with the register class, but specific type @@ -226,7 +226,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { // This decision is also backed by the fact that LLVM needs in and out // values to be of *exactly the same type*, not just "compatible". // I'm not sure if GCC is so picky too, but better safe than sorry. 
- let ty = in_value.layout.gcc_type(self.cx, false); + let ty = in_value.layout.gcc_type(self.cx); let tmp_var = self.current_func().new_local(None, ty, "output_register"); // If the out_place is None (i.e `inout(reg) _` syntax was used), we translate @@ -286,7 +286,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { continue }; - let ty = out_place.layout.gcc_type(self.cx, false); + let ty = out_place.layout.gcc_type(self.cx); let tmp_var = self.current_func().new_local(None, ty, "output_register"); tmp_var.set_register_name(reg_name); @@ -306,7 +306,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { // `in("explicit register") var` InlineAsmOperandRef::In { reg, value } => { if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { - let ty = value.layout.gcc_type(self.cx, false); + let ty = value.layout.gcc_type(self.cx); let reg_var = self.current_func().new_local(None, ty, "input_register"); reg_var.set_register_name(reg_name); self.llbb().add_assignment(None, reg_var, value.immediate()); @@ -325,7 +325,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => { if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) { // See explanation in the first pass. - let ty = in_value.layout.gcc_type(self.cx, false); + let ty = in_value.layout.gcc_type(self.cx); let tmp_var = self.current_func().new_local(None, ty, "output_register"); tmp_var.set_register_name(reg_name); @@ -353,8 +353,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { inputs.push(AsmInOperand { constraint: "X".into(), rust_idx, - val: self.cx.rvalue_as_function(get_fn(self.cx, instance)) - .get_address(None), + val: get_fn(self.cx, instance).get_address(None), }); } @@ -382,15 +381,19 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { for piece in template { match *piece { InlineAsmTemplatePiece::String(ref string) => { - // TODO(@Commeownist): switch to `Iterator::intersperse` once it's stable - let mut iter = string.split('%'); - if let Some(s) = iter.next() { - template_str.push_str(s); - } - - for s in iter { - template_str.push_str("%%"); - template_str.push_str(s); + for char in string.chars() { + // TODO(antoyo): might also need to escape | if rustc doesn't do it. 
+ let escaped_char = + match char { + '%' => "%%", + '{' => "%{", + '}' => "%}", + _ => { + template_str.push(char); + continue; + }, + }; + template_str.push_str(escaped_char); } } InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => { @@ -565,39 +568,52 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { _ => unimplemented!(), } }, + // They can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html InlineAsmRegOrRegClass::RegClass(reg) => match reg { - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(), - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(), - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r", + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w", + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x", + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { + unreachable!("clobber-only") + } + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r", InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(), - InlineAsmRegClass::Avr(_) => unimplemented!(), - InlineAsmRegClass::Bpf(_) => unimplemented!(), - InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(), - InlineAsmRegClass::Msp430(_) => unimplemented!(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(), + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w", + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e", + InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w", + InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => 
"d", // more specific than "r" + InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r", + // https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for + // "define_constraint". + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h", + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r", + InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l", + + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f", InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { unreachable!("clobber-only") }, - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { + unreachable!("clobber-only") + } InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r", InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q", InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q", @@ -605,16 +621,18 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x", InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v", InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk", - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => unimplemented!(), - InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), InlineAsmRegClass::X86( - X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg, + X86InlineAsmRegClass::kreg0 + | X86InlineAsmRegClass::x87_reg + | X86InlineAsmRegClass::mmx_reg + | X86InlineAsmRegClass::tmm_reg, ) => unreachable!("clobber-only"), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("GCC backend does not support SPIR-V") } - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(), + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f", InlineAsmRegClass::Err => unreachable!(), } }; @@ -692,21 +710,23 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { && options.contains(InlineAsmOptions::ATT_SYNTAX); // Build the template string - let mut template_str = String::new(); + let mut template_str = ".pushsection .text\n".to_owned(); + if att_dialect { + template_str.push_str(".att_syntax\n"); + } for piece in template { match *piece { InlineAsmTemplatePiece::String(ref string) => { - for line in string.lines() { + let mut index = 0; + while index < string.len() { // NOTE: gcc does not allow inline comment, so remove them. 
- let line = - if let Some(index) = line.rfind("//") { - &line[..index] - } - else { - line - }; - template_str.push_str(line); - template_str.push('\n'); + let comment_index = string[index..].find("//") + .map(|comment_index| comment_index + index) + .unwrap_or(string.len()); + template_str.push_str(&string[index..comment_index]); + index = string[comment_index..].find('\n') + .map(|index| index + comment_index) + .unwrap_or(string.len()); } }, InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => { @@ -719,6 +739,8 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } GlobalAsmOperandRef::SymFn { instance } => { + let function = get_fn(self, instance); + self.add_used_function(function); // TODO(@Amanieu): Additional mangling is needed on // some targets to add a leading underscore (Mach-O) // or byte count suffixes (x86 Windows). @@ -727,6 +749,7 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } GlobalAsmOperandRef::SymStatic { def_id } => { + // TODO(antoyo): set the global variable as used. // TODO(@Amanieu): Additional mangling is needed on // some targets to add a leading underscore (Mach-O). let instance = Instance::mono(self.tcx, def_id); @@ -738,48 +761,51 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } } - let template_str = - if att_dialect { - format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str) - } - else { - template_str - }; + if att_dialect { + template_str.push_str("\n\t.intel_syntax noprefix"); + } // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually. - let template_str = format!(".pushsection .text\n{}\n.popsection", template_str); + template_str.push_str("\n.popsection"); self.context.add_top_level_asm(None, &template_str); } } fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option) -> Option { + // The modifiers can be retrieved from + // https://gcc.gnu.org/onlinedocs/gcc/Modifiers.html#Modifiers match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier, - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier, InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { - unimplemented!() + if modifier == Some('v') { None } else { modifier } + } + InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { + unreachable!("clobber-only") } - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(), + InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None, InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(), + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None, InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(), + | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'), InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => { - unimplemented!() + if modifier.is_none() { + Some('q') + } else { + modifier + } } - InlineAsmRegClass::Avr(_) => unimplemented!(), - InlineAsmRegClass::Bpf(_) => unimplemented!(), - InlineAsmRegClass::Hexagon(_) => unimplemented!(), - InlineAsmRegClass::Mips(_) => unimplemented!(), - 
InlineAsmRegClass::Msp430(_) => unimplemented!(), - InlineAsmRegClass::Nvptx(_) => unimplemented!(), - InlineAsmRegClass::PowerPC(_) => unimplemented!(), + InlineAsmRegClass::Hexagon(_) => None, + InlineAsmRegClass::Mips(_) => None, + InlineAsmRegClass::Nvptx(_) => None, + InlineAsmRegClass::PowerPC(_) => None, InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) - | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(), + | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None, + InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { + unreachable!("clobber-only") + } InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier { None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') }, @@ -803,16 +829,29 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option _ => unreachable!(), }, InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None, - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => None, - InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg) => { + InlineAsmRegClass::X86( + X86InlineAsmRegClass::x87_reg + | X86InlineAsmRegClass::mmx_reg + | X86InlineAsmRegClass::kreg0 + | X86InlineAsmRegClass::tmm_reg, + ) => { unreachable!("clobber-only") } - InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(), + InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None, + InlineAsmRegClass::Bpf(_) => None, + InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) + | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) + | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier { + Some('h') => Some('B'), + Some('l') => Some('A'), + _ => None, + }, + InlineAsmRegClass::Avr(_) => None, + InlineAsmRegClass::S390x(_) => None, + InlineAsmRegClass::Msp430(_) => None, InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") - }, - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(), - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(), + } InlineAsmRegClass::Err => unreachable!(), } } diff --git a/compiler/rustc_codegen_gcc/src/attributes.rs b/compiler/rustc_codegen_gcc/src/attributes.rs new file mode 100644 index 0000000000000..db841b1b52408 --- /dev/null +++ b/compiler/rustc_codegen_gcc/src/attributes.rs @@ -0,0 +1,113 @@ +#[cfg(feature="master")] +use gccjit::FnAttribute; +use gccjit::Function; +use rustc_attr::InstructionSetAttr; +use rustc_codegen_ssa::target_features::tied_target_features; +use rustc_data_structures::fx::FxHashMap; +use rustc_middle::ty; +use rustc_session::Session; +use rustc_span::symbol::sym; +use smallvec::{smallvec, SmallVec}; + +use crate::{context::CodegenCx, errors::TiedTargetFeatures}; + +// Given a map from target_features to whether they are enabled or disabled, +// ensure only valid combinations are allowed. 
+pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) -> Option<&'static [&'static str]> { + for tied in tied_target_features(sess) { + // Tied features must be set to the same value, or not set at all + let mut tied_iter = tied.iter(); + let enabled = features.get(tied_iter.next().unwrap()); + if tied_iter.any(|feature| enabled != features.get(feature)) { + return Some(tied); + } + } + None +} + +// TODO(antoyo): maybe move to a new module gcc_util. +// To find a list of GCC's names, check https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html +fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> { + let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch }; + match (arch, s) { + ("x86", "sse4.2") => smallvec!["sse4.2", "crc32"], + ("x86", "pclmulqdq") => smallvec!["pclmul"], + ("x86", "rdrand") => smallvec!["rdrnd"], + ("x86", "bmi1") => smallvec!["bmi"], + ("x86", "cmpxchg16b") => smallvec!["cx16"], + ("x86", "avx512vaes") => smallvec!["vaes"], + ("x86", "avx512gfni") => smallvec!["gfni"], + ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"], + // NOTE: seems like GCC requires 'avx512bw' for 'avx512vbmi2'. + ("x86", "avx512vbmi2") => smallvec!["avx512vbmi2", "avx512bw"], + // NOTE: seems like GCC requires 'avx512bw' for 'avx512bitalg'. + ("x86", "avx512bitalg") => smallvec!["avx512bitalg", "avx512bw"], + ("aarch64", "rcpc2") => smallvec!["rcpc-immo"], + ("aarch64", "dpb") => smallvec!["ccpp"], + ("aarch64", "dpb2") => smallvec!["ccdp"], + ("aarch64", "frintts") => smallvec!["fptoint"], + ("aarch64", "fcma") => smallvec!["complxnum"], + ("aarch64", "pmuv3") => smallvec!["perfmon"], + ("aarch64", "paca") => smallvec!["pauth"], + ("aarch64", "pacg") => smallvec!["pauth"], + // Rust ties fp and neon together. In LLVM neon implicitly enables fp, + // but we manually enable neon when a feature only implicitly enables fp + ("aarch64", "f32mm") => smallvec!["f32mm", "neon"], + ("aarch64", "f64mm") => smallvec!["f64mm", "neon"], + ("aarch64", "fhm") => smallvec!["fp16fml", "neon"], + ("aarch64", "fp16") => smallvec!["fullfp16", "neon"], + ("aarch64", "jsconv") => smallvec!["jsconv", "neon"], + ("aarch64", "sve") => smallvec!["sve", "neon"], + ("aarch64", "sve2") => smallvec!["sve2", "neon"], + ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"], + ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"], + ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"], + ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"], + (_, s) => smallvec![s], + } +} + +/// Composite function which sets GCC attributes for function depending on its AST (`#[attribute]`) +/// attributes. 
+pub fn from_fn_attrs<'gcc, 'tcx>( + cx: &CodegenCx<'gcc, 'tcx>, + #[cfg_attr(not(feature="master"), allow(unused_variables))] + func: Function<'gcc>, + instance: ty::Instance<'tcx>, +) { + let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); + + let function_features = + codegen_fn_attrs.target_features.iter().map(|features| features.as_str()).collect::>(); + + if let Some(features) = check_tied_features(cx.tcx.sess, &function_features.iter().map(|features| (*features, true)).collect()) { + let span = cx.tcx + .get_attr(instance.def_id(), sym::target_feature) + .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span); + cx.tcx.sess.create_err(TiedTargetFeatures { + features: features.join(", "), + span, + }) + .emit(); + return; + } + + let mut function_features = function_features + .iter() + .flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter()) + .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x { + InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature. + InstructionSetAttr::ArmT32 => "thumb-mode", + })) + .collect::>(); + + // TODO(antoyo): check if we really need global backend features. (Maybe they could be applied + // globally?) + let mut global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str()); + function_features.extend(&mut global_features); + let target_features = function_features.join(","); + if !target_features.is_empty() { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Target(&target_features)); + } +} diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs index efcf18d31eb09..5f54ac4ebc69a 100644 --- a/compiler/rustc_codegen_gcc/src/back/write.rs +++ b/compiler/rustc_codegen_gcc/src/back/write.rs @@ -57,6 +57,7 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext, _diag_han if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") { let _ = fs::create_dir("/tmp/gccjit_dumps"); let path = &format!("/tmp/gccjit_dumps/{}.c", module.name); + context.set_debug_info(true); context.dump_to_file(path, true); } context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str")); diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs index d464bd3d12a07..dcd560b3dcd95 100644 --- a/compiler/rustc_codegen_gcc/src/base.rs +++ b/compiler/rustc_codegen_gcc/src/base.rs @@ -8,6 +8,8 @@ use gccjit::{ }; use rustc_middle::dep_graph; use rustc_middle::ty::TyCtxt; +#[cfg(feature="master")] +use rustc_middle::mir::mono::Visibility; use rustc_middle::mir::mono::Linkage; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; @@ -20,6 +22,15 @@ use crate::GccContext; use crate::builder::Builder; use crate::context::CodegenCx; +#[cfg(feature="master")] +pub fn visibility_to_gcc(linkage: Visibility) -> gccjit::Visibility { + match linkage { + Visibility::Default => gccjit::Visibility::Default, + Visibility::Hidden => gccjit::Visibility::Hidden, + Visibility::Protected => gccjit::Visibility::Protected, + } +} + pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind { match linkage { Linkage::External => GlobalKind::Imported, @@ -76,16 +87,34 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i // Instantiate monomorphizations without filling out definitions yet... 
//let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str()); let context = Context::default(); + + context.add_command_line_option("-fexceptions"); + context.add_driver_option("-fexceptions"); + // TODO(antoyo): only set on x86 platforms. context.add_command_line_option("-masm=intel"); // TODO(antoyo): only add the following cli argument if the feature is supported. context.add_command_line_option("-msse2"); context.add_command_line_option("-mavx2"); - context.add_command_line_option("-msha"); - context.add_command_line_option("-mpclmul"); // FIXME(antoyo): the following causes an illegal instruction on vmovdqu64 in std_example on my CPU. // Only add if the CPU supports it. - //context.add_command_line_option("-mavx512f"); + context.add_command_line_option("-msha"); + context.add_command_line_option("-mpclmul"); + context.add_command_line_option("-mfma"); + context.add_command_line_option("-mfma4"); + context.add_command_line_option("-m64"); + context.add_command_line_option("-mbmi"); + context.add_command_line_option("-mgfni"); + //context.add_command_line_option("-mavxvnni"); // The CI doesn't support this option. + context.add_command_line_option("-mf16c"); + context.add_command_line_option("-maes"); + context.add_command_line_option("-mxsavec"); + context.add_command_line_option("-mbmi2"); + context.add_command_line_option("-mrtm"); + context.add_command_line_option("-mvaes"); + context.add_command_line_option("-mvpclmulqdq"); + context.add_command_line_option("-mavx"); + for arg in &tcx.sess.opts.cg.llvm_args { context.add_command_line_option(arg); } @@ -95,12 +124,20 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i context.add_command_line_option("-fno-semantic-interposition"); // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292). context.add_command_line_option("-fno-strict-aliasing"); + // NOTE: Rust relies on LLVM doing wrapping on overflow. + context.add_command_line_option("-fwrapv"); if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) { context.add_command_line_option("-ffunction-sections"); context.add_command_line_option("-fdata-sections"); } + if env::var("CG_GCCJIT_DUMP_RTL").as_deref() == Ok("1") { + context.add_command_line_option("-fdump-rtl-vregs"); + } + if env::var("CG_GCCJIT_DUMP_TREE_ALL").as_deref() == Ok("1") { + context.add_command_line_option("-fdump-tree-all"); + } if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") { context.set_dump_code_on_compile(true); } @@ -115,7 +152,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, supports_128bit_i context.set_keep_intermediates(true); } - // TODO(bjorn3): Remove once unwinding is properly implemented + // NOTE: The codegen generates unrechable blocks. 
context.set_allow_unreachable_blocks(true); { diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index e88c12716ecd3..a3c8142bea2db 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -217,7 +217,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { let actual_ty = actual_val.get_type(); if expected_ty != actual_ty { - if !actual_ty.is_vector() && !expected_ty.is_vector() && actual_ty.is_integral() && expected_ty.is_integral() && actual_ty.get_size() != expected_ty.get_size() { + if !actual_ty.is_vector() && !expected_ty.is_vector() && (actual_ty.is_integral() && expected_ty.is_integral()) || (actual_ty.get_pointee().is_some() && expected_ty.get_pointee().is_some()) { self.context.new_cast(None, actual_val, expected_ty) } else if on_stack_param_indices.contains(&index) { @@ -226,6 +226,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { else { assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index); // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. + // TODO: remove bitcast now that vector types can be compared? self.bitcast(actual_val, expected_ty) } } @@ -279,21 +280,30 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> { - let args = self.check_ptr_call("call", func_ptr, args); + let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr"); + let func_name = format!("{:?}", func_ptr); + let previous_arg_count = args.len(); + let orig_args = args; + let args = { + let function_address_names = self.function_address_names.borrow(); + let original_function_name = function_address_names.get(&func_ptr); + llvm::adjust_intrinsic_arguments(&self, gcc_func, args.into(), &func_name, original_function_name) + }; + let args_adjusted = args.len() != previous_arg_count; + let args = self.check_ptr_call("call", func_ptr, &*args); // gccjit requires to use the result of functions, even when it's not used. // That's why we assign the result to a local or call add_eval(). 
- let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr"); let return_type = gcc_func.get_return_type(); let void_type = self.context.new_type::<()>(); let current_func = self.block.get_function(); if return_type != void_type { unsafe { RETURN_VALUE_COUNT += 1 }; - let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT })); - let func_name = format!("{:?}", func_ptr); - let args = llvm::adjust_intrinsic_arguments(&self, gcc_func, args, &func_name); - self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args)); + let return_value = self.cx.context.new_call_through_ptr(None, func_ptr, &args); + let return_value = llvm::adjust_intrinsic_return_value(&self, return_value, &func_name, &args, args_adjusted, orig_args); + let result = current_func.new_local(None, return_value.get_type(), &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT })); + self.block.add_assignment(None, result, return_value); result.to_rvalue() } else { @@ -366,10 +376,10 @@ impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { } } -impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> { +impl<'a, 'gcc, 'tcx> Deref for Builder<'a, 'gcc, 'tcx> { type Target = CodegenCx<'gcc, 'tcx>; - fn deref(&self) -> &Self::Target { + fn deref<'b>(&'b self) -> &'a Self::Target { self.cx } } @@ -387,7 +397,7 @@ impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> { } impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { - fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self { + fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> { Builder::with_cx(cx, block) } @@ -444,17 +454,36 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { self.block.end_with_switch(None, value, default_block, &gcc_cases); } - fn invoke( - &mut self, - typ: Type<'gcc>, - fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, - func: RValue<'gcc>, - args: &[RValue<'gcc>], - then: Block<'gcc>, - catch: Block<'gcc>, - _funclet: Option<&Funclet>, - ) -> RValue<'gcc> { - // TODO(bjorn3): Properly implement unwinding. + #[cfg(feature="master")] + fn invoke(&mut self, typ: Type<'gcc>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { + let try_block = self.current_func().new_block("try"); + + let current_block = self.block.clone(); + self.block = try_block; + let call = self.call(typ, None, func, args, None); // TODO(antoyo): use funclet here? 
+ self.block = current_block; + + let return_value = self.current_func() + .new_local(None, call.get_type(), "invokeResult"); + + try_block.add_assignment(None, return_value, call); + + try_block.end_with_jump(None, then); + + if self.cleanup_blocks.borrow().contains(&catch) { + self.block.add_try_finally(None, try_block, catch); + } + else { + self.block.add_try_catch(None, try_block, catch); + } + + self.block.end_with_jump(None, then); + + return_value.to_rvalue() + } + + #[cfg(not(feature="master"))] + fn invoke(&mut self, typ: Type<'gcc>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> { let call_site = self.call(typ, None, func, args, None); let condition = self.context.new_rvalue_from_int(self.bool_type, 1); self.llbb().end_with_conditional(None, condition, then, catch); @@ -542,6 +571,31 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + // TODO(antoyo): add check in libgccjit since using the binary operator % causes the following error: + // during RTL pass: expand + // libgccjit.so: error: in expmed_mode_index, at expmed.h:240 + // 0x7f0101d58dc6 expmed_mode_index + // ../../../gcc/gcc/expmed.h:240 + // 0x7f0101d58e35 expmed_op_cost_ptr + // ../../../gcc/gcc/expmed.h:262 + // 0x7f0101d594a1 sdiv_cost_ptr + // ../../../gcc/gcc/expmed.h:531 + // 0x7f0101d594f3 sdiv_cost + // ../../../gcc/gcc/expmed.h:549 + // 0x7f0101d6af7e expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int, optab_methods) + // ../../../gcc/gcc/expmed.cc:4356 + // 0x7f0101d94f9e expand_expr_divmod + // ../../../gcc/gcc/expr.cc:8929 + // 0x7f0101d97a26 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier) + // ../../../gcc/gcc/expr.cc:9566 + // 0x7f0101bef6ef expand_gimple_stmt_1 + // ../../../gcc/gcc/cfgexpand.cc:3967 + // 0x7f0101bef910 expand_gimple_stmt + // ../../../gcc/gcc/cfgexpand.cc:4028 + // 0x7f0101bf6ee7 expand_gimple_basic_block + // ../../../gcc/gcc/cfgexpand.cc:6069 + // 0x7f0101bf9194 execute + // ../../../gcc/gcc/cfgexpand.cc:6795 if a.get_type().is_compatible_with(self.cx.float_type) { let fmodf = self.context.get_builtin_function("fmodf"); // FIXME(antoyo): this seems to produce the wrong result. @@ -616,24 +670,29 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { a * b } - fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + lhs + rhs } - fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + lhs - rhs } - fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. 
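// The `%` operator on Rust floats already has fmod semantics (remainder of a truncated
// division, sign taken from the dividend), which is what frem above reaches for through
// the fmod family of builtins (fmodf is the f32 case visible above). The values below
// are exactly representable, so the comparisons are exact.
#[test]
fn float_rem_has_fmod_semantics() {
    assert_eq!(7.5f64 % 2.0, 1.5);
    assert_eq!(-7.5f64 % 2.0, -1.5); // sign follows the dividend, unlike a Euclidean remainder
    assert_eq!(7.5f32 % 2.0, 1.5);   // the f32 path corresponds to fmodf
}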
+ lhs * rhs } - fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + lhs / rhs } - fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> { + // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC. + self.frem(lhs, rhs) } fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) { @@ -722,7 +781,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } else if place.layout.is_gcc_immediate() { let load = self.load( - place.layout.gcc_type(self, false), + place.layout.gcc_type(self), place.llval, place.align, ); @@ -733,7 +792,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { let b_offset = a.size(self).align_to(b.align(self).abi); - let pair_type = place.layout.gcc_type(self, false); + let pair_type = place.layout.gcc_type(self); let mut load = |i, scalar: &abi::Scalar, align| { let llptr = self.struct_gep(pair_type, place.llval, i as u64); @@ -833,26 +892,31 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { - let mut result = ptr; + let ptr_type = ptr.get_type(); + let mut pointee_type = ptr.get_type(); + // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is + // always considered in bounds in GCC (TODO(antoyo): to be verified). + // So, we have to cast to a number. + let mut result = self.context.new_bitcast(None, ptr, self.sizet_type); + // FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would + // require dereferencing the pointer. for index in indices { - result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue(); + pointee_type = pointee_type.get_pointee().expect("pointee type"); + let pointee_size = self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32); + result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type); } - result + self.context.new_bitcast(None, result, ptr_type) } fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { - // FIXME(antoyo): would be safer if doing the same thing (loop) as gep. - // TODO(antoyo): specify inbounds somehow. - match indices.len() { - 1 => { - self.context.new_array_access(None, ptr, indices[0]).get_address(None) - }, - 2 => { - let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0? - self.context.new_array_access(None, array, indices[1]).get_address(None) - }, - _ => unimplemented!(), + // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified). 
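// Plain-Rust sketch of the byte-offset arithmetic gep above now performs: view the
// pointer as an integer, add index * size_of(pointee), and convert back. For a single
// index this is the same as ordinary pointer offsetting; gep_one_index is a
// hypothetical helper used only for this illustration.
fn gep_one_index<T>(ptr: *const T, index: usize) -> *const T {
    ((ptr as usize) + index * std::mem::size_of::<T>()) as *const T
}

#[test]
fn matches_ordinary_pointer_offsetting() {
    let xs = [10u32, 20, 30, 40];
    let p = xs.as_ptr();
    assert_eq!(gep_one_index(p, 2), p.wrapping_add(2));
    assert_eq!(unsafe { *gep_one_index(p, 2) }, 30);
}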
+ let mut indices = indices.into_iter(); + let index = indices.next().expect("first index in inbounds_gep"); + let mut result = self.context.new_array_access(None, ptr, *index); + for index in indices { + result = self.context.new_array_access(None, result, *index); } + result.get_address(None) } fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> { @@ -1034,8 +1098,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> { - unimplemented!(); + #[cfg(feature="master")] + fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> { + self.context.new_vector_access(None, vec, idx).to_rvalue() + } + + #[cfg(not(feature="master"))] + fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = vec.get_type().unqualified().dyncast_vector().expect("Called extract_element on a non-vector type"); + let element_type = vector_type.get_element_type(); + let vec_num_units = vector_type.get_num_units(); + let array_type = self.context.new_array_type(None, element_type, vec_num_units as u64); + let array = self.context.new_bitcast(None, vec, array_type).to_rvalue(); + self.context.new_array_access(None, array, idx).to_rvalue() } fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> { @@ -1116,22 +1191,52 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { - // TODO(antoyo) + #[cfg(feature="master")] + { + let personality = self.rvalue_as_function(_personality); + self.current_func().set_personality_function(personality); + } + } + + #[cfg(feature="master")] + fn cleanup_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { + self.set_personality_fn(pers_fn); + + // NOTE: insert the current block in a variable so that a later call to invoke knows to + // generate a try/finally instead of a try/catch for this block. + self.cleanup_blocks.borrow_mut().insert(self.block); + + let eh_pointer_builtin = self.cx.context.get_target_builtin_function("__builtin_eh_pointer"); + let zero = self.cx.context.new_rvalue_zero(self.int_type); + let ptr = self.cx.context.new_call(None, eh_pointer_builtin, &[zero]); + + let value1_type = self.u8_type.make_pointer(); + let ptr = self.cx.context.new_cast(None, ptr, value1_type); + let value1 = ptr; + let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?). + + (value1, value2) } + #[cfg(not(feature="master"))] fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) { - ( - self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0") - .to_rvalue(), - self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue(), - ) - // TODO(antoyo): Properly implement unwinding. - // the above is just to make the compilation work as it seems - // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort. 
+ let value1 = self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0") + .to_rvalue(); + let value2 = self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue(); + (value1, value2) + } + + #[cfg(feature="master")] + fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { + let exn_type = exn0.get_type(); + let exn = self.context.new_cast(None, exn0, exn_type); + let unwind_resume = self.context.get_target_builtin_function("__builtin_unwind_resume"); + self.llbb().add_eval(None, self.context.new_call(None, unwind_resume, &[exn])); + self.unreachable(); } + #[cfg(not(feature="master"))] fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) { - // TODO(bjorn3): Properly implement unwinding. self.unreachable(); } @@ -1160,6 +1265,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { let expected = self.current_func().new_local(None, cmp.get_type(), "expected"); self.llbb().add_assignment(None, expected, cmp); + // NOTE: gcc doesn't support a failure memory model that is stronger than the success + // memory model. + let order = + if failure_order as i32 > order as i32 { + failure_order + } + else { + order + }; let success = self.compare_exchange(dst, expected, src, order, failure_order, weak); let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false); @@ -1469,7 +1583,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { #[cfg(feature="master")] pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> { - let struct_type = mask.get_type().is_struct().expect("mask of struct type"); + let struct_type = mask.get_type().is_struct().expect("mask should be of struct type"); // TODO(antoyo): use a recursive unqualified() here. let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type"); @@ -1501,22 +1615,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { vector_elements.push(self.context.new_rvalue_zero(mask_element_type)); } - let array_type = self.context.new_array_type(None, element_type, vec_num_units as i32); let result_type = self.context.new_vector_type(element_type, mask_num_units as u64); let (v1, v2) = if vec_num_units < mask_num_units { // NOTE: the mask needs to be the same length as the input vectors, so join the 2 // vectors and create a dummy second vector. - // TODO(antoyo): switch to using new_vector_access. - let array = self.context.new_bitcast(None, v1, array_type); let mut elements = vec![]; for i in 0..vec_num_units { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + elements.push(self.context.new_vector_access(None, v1, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); } - // TODO(antoyo): switch to using new_vector_access. 
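// Sketch of the ordering promotion performed in atomic_cmpxchg above. Like the code
// above, it assumes a larger discriminant means a stronger ordering; MemOrdering is a
// hypothetical stand-in for rustc's AtomicOrdering, not the real type.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum MemOrdering { Relaxed, Acquire, Release, AcqRel, SeqCst }

fn promote_success_order(success: MemOrdering, failure: MemOrdering) -> MemOrdering {
    // GCC does not accept a failure ordering stronger than the success ordering, so
    // strengthen the success side instead of silently weakening the failure side.
    if failure > success { failure } else { success }
}

#[test]
fn failure_never_exceeds_success() {
    assert_eq!(promote_success_order(MemOrdering::Relaxed, MemOrdering::SeqCst), MemOrdering::SeqCst);
    assert_eq!(promote_success_order(MemOrdering::AcqRel, MemOrdering::Acquire), MemOrdering::AcqRel);
}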
- let array = self.context.new_bitcast(None, v2, array_type); for i in 0..(mask_num_units - vec_num_units) { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + elements.push(self.context.new_vector_access(None, v2, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); } let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements); let zero = self.context.new_rvalue_zero(element_type); @@ -1536,10 +1645,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { // NOTE: if padding was added, only select the number of elements of the masks to // remove that padding in the result. let mut elements = vec![]; - // TODO(antoyo): switch to using new_vector_access. - let array = self.context.new_bitcast(None, result, array_type); for i in 0..mask_num_units { - elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); + elements.push(self.context.new_vector_access(None, result, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue()); } self.context.new_rvalue_from_vector(None, result_type, &elements) } @@ -1558,18 +1665,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> { let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_type = vector_type.get_element_type(); + let mask_element_type = self.type_ix(element_type.get_size() as u64 * 8); let element_count = vector_type.get_num_units(); let mut vector_elements = vec![]; for i in 0..element_count { vector_elements.push(i); } - let mask_type = self.context.new_vector_type(self.int_type, element_count as u64); + let mask_type = self.context.new_vector_type(mask_element_type, element_count as u64); let mut shift = 1; let mut res = src; while shift < element_count { let vector_elements: Vec<_> = vector_elements.iter() - .map(|i| self.context.new_rvalue_from_int(self.int_type, ((i + shift) % element_count) as i32)) + .map(|i| self.context.new_rvalue_from_int(mask_element_type, ((i + shift) % element_count) as i32)) .collect(); let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements); let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask); @@ -1581,7 +1690,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } #[cfg(not(feature="master"))] - pub fn vector_reduce(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc> + pub fn vector_reduce(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc> where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc> { unimplemented!(); @@ -1595,15 +1704,47 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { unimplemented!(); } + #[cfg(feature="master")] + pub fn vector_reduce_fadd(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + (0..element_count).into_iter() + .map(|i| self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue()) + .fold(acc, |x, i| x + i) + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fadd(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { unimplemented!(); } + 
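// Scalar model of the rotate-and-combine loop in vector_reduce above: rotate the lanes
// by a power-of-two step, combine element-wise, and double the step until every lane
// holds the full reduction (lane counts are assumed to be powers of two, as vector
// widths are). reduce_by_rotation exists only for this sketch.
fn reduce_by_rotation(mut lanes: Vec<i32>, op: impl Fn(i32, i32) -> i32) -> i32 {
    let n = lanes.len();
    let mut shift = 1;
    while shift < n {
        // One new_rvalue_vector_perm + binary-op step: lane i combines with lane (i + shift) % n.
        let combined: Vec<i32> = (0..n).map(|i| op(lanes[i], lanes[(i + shift) % n])).collect();
        lanes = combined;
        shift *= 2;
    }
    lanes[0]
}

#[test]
fn every_step_doubles_the_reach() {
    assert_eq!(reduce_by_rotation(vec![1, 2, 3, 4], |a, b| a + b), 10);
    assert_eq!(reduce_by_rotation(vec![1, 2, 3, 4, 5, 6, 7, 8], i32::max), 8);
}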
#[cfg(feature="master")] + pub fn vector_reduce_fmul(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + (0..element_count).into_iter() + .map(|i| self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue()) + .fold(acc, |x, i| x * i) + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fmul(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!() + } + // Inspired by Hacker's Delight min implementation. pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { self.vector_reduce(src, |a, b, context| { let differences_or_zeros = difference_or_zero(a, b, context); - context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros) + context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros) }) } @@ -1611,38 +1752,148 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { self.vector_reduce(src, |a, b, context| { let differences_or_zeros = difference_or_zero(a, b, context); - context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros) + context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros) }) } + fn vector_extremum(&mut self, a: RValue<'gcc>, b: RValue<'gcc>, direction: ExtremumOperation) -> RValue<'gcc> { + let vector_type = a.get_type(); + + // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and + // b get compared & spliced together, we get the numeric values instead of NaNs. + let b_nan_mask = self.context.new_comparison(None, ComparisonOp::NotEquals, b, b); + let mask_type = b_nan_mask.get_type(); + let b_nan_mask_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, mask_type, b_nan_mask); + let a_cast = self.context.new_bitcast(None, a, mask_type); + let b_cast = self.context.new_bitcast(None, b, mask_type); + let res = (b_nan_mask & a_cast) | (b_nan_mask_inverted & b_cast); + let b = self.context.new_bitcast(None, res, vector_type); + + // now do the actual comparison + let comparison_op = match direction { + ExtremumOperation::Min => ComparisonOp::LessThan, + ExtremumOperation::Max => ComparisonOp::GreaterThan, + }; + let cmp = self.context.new_comparison(None, comparison_op, a, b); + let cmp_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, cmp.get_type(), cmp); + let res = (cmp & a_cast) | (cmp_inverted & res); + self.context.new_bitcast(None, res, vector_type) + } + + pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.vector_extremum(a, b, ExtremumOperation::Min) + } + + #[cfg(feature="master")] + pub fn vector_reduce_fmin(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue(); + for i in 1..element_count { + let elem = self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue(); + let cmp = self.context.new_comparison(None, ComparisonOp::LessThan, acc, elem); + acc = self.select(cmp, acc, elem); + } + acc + } + + #[cfg(not(feature="master"))] + pub fn 
vector_reduce_fmin(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + + pub fn vector_fmax(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { + self.vector_extremum(a, b, ExtremumOperation::Max) + } + + #[cfg(feature="master")] + pub fn vector_reduce_fmax(&mut self, src: RValue<'gcc>) -> RValue<'gcc> { + let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type"); + let element_count = vector_type.get_num_units(); + let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue(); + for i in 1..element_count { + let elem = self.context + .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _)) + .to_rvalue(); + let cmp = self.context.new_comparison(None, ComparisonOp::GreaterThan, acc, elem); + acc = self.select(cmp, acc, elem); + } + acc + } + + #[cfg(not(feature="master"))] + pub fn vector_reduce_fmax(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> { + unimplemented!(); + } + pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> { // cond is a vector of integers, not of bools. - let cond_type = cond.get_type(); - let vector_type = cond_type.unqualified().dyncast_vector().expect("vector type"); + let vector_type = cond.get_type().unqualified().dyncast_vector().expect("vector type"); let num_units = vector_type.get_num_units(); let element_type = vector_type.get_element_type(); + + #[cfg(feature="master")] + let (cond, element_type) = { + let then_val_vector_type = then_val.get_type().dyncast_vector().expect("vector type"); + let then_val_element_type = then_val_vector_type.get_element_type(); + let then_val_element_size = then_val_element_type.get_size(); + + // NOTE: the mask needs to be of the same size as the other arguments in order for the & + // operation to work. + if then_val_element_size != element_type.get_size() { + let new_element_type = self.type_ix(then_val_element_size as u64 * 8); + let new_vector_type = self.context.new_vector_type(new_element_type, num_units as u64); + let cond = self.context.convert_vector(None, cond, new_vector_type); + (cond, new_element_type) + } + else { + (cond, element_type) + } + }; + + let cond_type = cond.get_type(); + let zeros = vec![self.context.new_rvalue_zero(element_type); num_units]; let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros); + let result_type = then_val.get_type(); + let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros); + // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make + // the & operation work. + let then_val = self.bitcast_if_needed(then_val, masks.get_type()); let then_vals = masks & then_val; - let ones = vec![self.context.new_rvalue_one(element_type); num_units]; - let ones = self.context.new_rvalue_from_vector(None, cond_type, &ones); - let inverted_masks = masks + ones; + let minus_ones = vec![self.context.new_rvalue_from_int(element_type, -1); num_units]; + let minus_ones = self.context.new_rvalue_from_vector(None, cond_type, &minus_ones); + let inverted_masks = masks ^ minus_ones; // NOTE: sometimes, the type of else_val can be different than the type of then_val in // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND // operation to work. + // TODO: remove bitcast now that vector types can be compared? 
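// Scalar sketch of the mask arithmetic used by the vector helpers here (vector_reduce_min,
// vector_reduce_max, vector_select and difference_or_zero): comparisons yield
// all-ones/all-zero lane masks, so min, max and select become pure bitwise and wrapping
// integer operations (wrapping matches the mod-2^n arithmetic of the lanes). i32 stands
// in for a single lane.
fn difference_or_zero(a: i32, b: i32) -> i32 {
    let mask = if b >= a { -1 } else { 0 }; // lane-wise GreaterThanEquals result
    a.wrapping_sub(b) & mask
}

fn lane_min(a: i32, b: i32) -> i32 {
    // b >= a  =>  b + (a - b) = a; otherwise b + 0 = b
    b.wrapping_add(difference_or_zero(a, b))
}

fn lane_max(a: i32, b: i32) -> i32 {
    // b >= a  =>  a - (a - b) = b; otherwise a - 0 = a
    a.wrapping_sub(difference_or_zero(a, b))
}

fn lane_select(cond: i32, then_val: i32, else_val: i32) -> i32 {
    let mask = if cond != 0 { -1 } else { 0 }; // NotEquals against the zero vector
    let inverted = mask ^ -1;                  // same as XORing with a vector of -1s
    (mask & then_val) | (inverted & else_val)
}

#[test]
fn mask_arithmetic_behaves_like_branches() {
    assert_eq!(lane_min(3, 9), 3);
    assert_eq!(lane_max(-4, 2), 2);
    assert_eq!(lane_select(1, 10, 20), 10);
    assert_eq!(lane_select(0, 10, 20), 20);
}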
let else_val = self.context.new_bitcast(None, else_val, then_val.get_type()); let else_vals = inverted_masks & else_val; - then_vals | else_vals + let res = then_vals | else_vals; + self.bitcast_if_needed(res, result_type) } } fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> { let difference = a - b; let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a); + // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make + // the & operation work. + let a_type = a.get_type(); + let masks = + if masks.get_type() != a_type { + context.new_bitcast(None, masks, a_type) + } + else { + masks + }; difference & masks } diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs index 9e3a22ee05d7c..ba1e86562089e 100644 --- a/compiler/rustc_codegen_gcc/src/callee.rs +++ b/compiler/rustc_codegen_gcc/src/callee.rs @@ -1,9 +1,10 @@ -use gccjit::{FunctionType, RValue}; -use rustc_codegen_ssa::traits::BaseTypeMethods; +#[cfg(feature="master")] +use gccjit::{FnAttribute, Visibility}; +use gccjit::{FunctionType, Function}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; -use crate::abi::FnAbiGccExt; +use crate::attributes; use crate::context::CodegenCx; /// Codegens a reference to a fn/method item, monomorphizing and @@ -13,22 +14,26 @@ use crate::context::CodegenCx; /// /// - `cx`: the crate context /// - `instance`: the instance to be instantiated -pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> { +pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> Function<'gcc> { let tcx = cx.tcx(); assert!(!instance.substs.needs_infer()); assert!(!instance.substs.has_escaping_bound_vars()); + let sym = tcx.symbol_name(instance).name; + if let Some(&func) = cx.function_instances.borrow().get(&instance) { return func; } - let sym = tcx.symbol_name(instance).name; - let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); let func = - if let Some(func) = cx.get_declared_value(&sym) { + if let Some(_func) = cx.get_declared_value(&sym) { + // FIXME(antoyo): we never reach this because get_declared_value only returns global variables + // and here we try to get a function. + unreachable!(); + /* // Create a fn pointer with the new signature. let ptrty = fn_abi.ptr_to_gcc_type(cx); @@ -61,13 +66,105 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) } else { func - } + }*/ } else { cx.linkage.set(FunctionType::Extern); let func = cx.declare_fn(&sym, &fn_abi); + attributes::from_fn_attrs(cx, func, instance); + + let instance_def_id = instance.def_id(); + // TODO(antoyo): set linkage and attributes. + + // Apply an appropriate linkage/visibility value to our item that we + // just declared. + // + // This is sort of subtle. Inside our codegen unit we started off + // compilation by predefining all our own `MonoItem` instances. That + // is, everything we're codegenning ourselves is already defined. That + // means that anything we're actually codegenning in this codegen unit + // will have hit the above branch in `get_declared_value`. As a result, + // we're guaranteed here that we're declaring a symbol that won't get + // defined, or in other words we're referencing a value from another + // codegen unit or even another crate. 
+ // + // So because this is a foreign value we blanket apply an external + // linkage directive because it's coming from a different object file. + // The visibility here is where it gets tricky. This symbol could be + // referencing some foreign crate or foreign library (an `extern` + // block) in which case we want to leave the default visibility. We may + // also, though, have multiple codegen units. It could be a + // monomorphization, in which case its expected visibility depends on + // whether we are sharing generics or not. The important thing here is + // that the visibility we apply to the declaration is the same one that + // has been applied to the definition (wherever that definition may be). + let is_generic = instance.substs.non_erasable_generics().next().is_some(); + + if is_generic { + // This is a monomorphization. Its expected visibility depends + // on whether we are in share-generics mode. + + if cx.tcx.sess.opts.share_generics() { + // We are in share_generics mode. + + if let Some(instance_def_id) = instance_def_id.as_local() { + // This is a definition from the current crate. If the + // definition is unreachable for downstream crates or + // the current crate does not re-export generics, the + // definition of the instance will have been declared + // as `hidden`. + if cx.tcx.is_unreachable_local_definition(instance_def_id) + || !cx.tcx.local_crate_exports_generics() + { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } else { + // This is a monomorphization of a generic function + // defined in an upstream crate. + if instance.upstream_monomorphization(tcx).is_some() { + // This is instantiated in another crate. It cannot + // be `hidden`. + } else { + // This is a local instantiation of an upstream definition. + // If the current crate does not re-export it + // (because it is a C library or an executable), it + // will have been declared `hidden`. + if !cx.tcx.local_crate_exports_generics() { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } + } + } else { + // When not sharing generics, all instances are in the same + // crate and have hidden visibility + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } else { + // This is a non-generic function + if cx.tcx.is_codegened_item(instance_def_id) { + // This is a function that is instantiated in the local crate + + if instance_def_id.is_local() { + // This is function that is defined in the local crate. + // If it is not reachable, it is hidden. + if !cx.tcx.is_reachable_non_generic(instance_def_id) { + #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } else { + // This is a function from an upstream crate that has + // been instantiated here. These are always hidden. 
+ #[cfg(feature="master")] + func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); + } + } + } + func }; diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index c939da9cec3c2..76fc7bd222e1e 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -36,7 +36,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> { let context = &cx.context; let byte_type = context.new_type::(); - let typ = context.new_array_type(None, byte_type, bytes.len() as i32); + let typ = context.new_array_type(None, byte_type, bytes.len() as u64); let elements: Vec<_> = bytes.iter() .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)) @@ -115,8 +115,8 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { self.const_uint(self.usize_type, i) } - fn const_u8(&self, _i: u8) -> RValue<'gcc> { - unimplemented!(); + fn const_u8(&self, i: u8) -> RValue<'gcc> { + self.const_uint(self.type_u8(), i as u64) } fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> { @@ -133,7 +133,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { .1; let len = s.len(); let cs = self.const_ptrcast(str_global.get_address(None), - self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)), + self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self)), ); (cs, self.const_usize(len as u64)) } @@ -174,8 +174,18 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } let value = self.const_uint_big(self.type_ix(bitsize), data); - // TODO(bjorn3): assert size is correct - self.const_bitcast(value, ty) + let bytesize = layout.size(self).bytes(); + if bitsize > 1 && ty.is_integral() && bytesize as u32 == ty.get_size() { + // NOTE: since the intrinsic _xabort is called with a bitcast, which + // is non-const, but expects a constant, do a normal cast instead of a bitcast. + // FIXME(antoyo): fix bitcast to work in constant contexts. + // TODO(antoyo): perhaps only use bitcast for pointers? 
+ self.context.new_cast(None, value, ty) + } + else { + // TODO(bjorn3): assert size is correct + self.const_bitcast(value, ty) + } } Scalar::Ptr(ptr, _size) => { let (alloc_id, offset) = ptr.into_parts(); @@ -227,11 +237,11 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> { assert_eq!(alloc.inner().align, layout.align.abi); - let ty = self.type_ptr_to(layout.gcc_type(self, true)); + let ty = self.type_ptr_to(layout.gcc_type(self)); let value = if layout.size == Size::ZERO { let value = self.const_usize(alloc.inner().align.bytes()); - self.context.new_cast(None, value, ty) + self.const_bitcast(value, ty) } else { let init = const_alloc_to_gcc(self, alloc); diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index dc41cb761b59c..792ab8f890d8f 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -1,8 +1,8 @@ -use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type}; +#[cfg(feature = "master")] +use gccjit::FnAttribute; +use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue, Type}; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods}; -use rustc_hir as hir; -use rustc_hir::Node; -use rustc_middle::{bug, span_bug}; +use rustc_middle::span_bug; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::{self, Instance, Ty}; @@ -13,6 +13,7 @@ use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRan use crate::base; use crate::context::CodegenCx; +use crate::errors::InvalidMinimumAlignment; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { @@ -30,6 +31,21 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } +fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc>, mut align: Align) { + // The target may require greater alignment for globals than the type does. + // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, + // which can force it to be smaller. Rust doesn't support this yet. + if let Some(min) = cx.sess().target.min_global_align { + match Align::from_bits(min) { + Ok(min) => align = align.max(min), + Err(err) => { + cx.sess().emit_err(InvalidMinimumAlignment { err }); + } + } + } + gv.set_alignment(align.bytes() as i32); +} + impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { // TODO(antoyo): implement a proper rvalue comparison in libgccjit instead of doing the @@ -79,9 +95,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); - let gcc_type = self.layout_of(ty).gcc_type(self, true); + let gcc_type = self.layout_of(ty).gcc_type(self); - // TODO(antoyo): set alignment. 
+ set_global_alignment(self, global, self.align_of(ty)); let value = self.bitcast_if_needed(value, gcc_type); global.global_set_initializer_rvalue(value); @@ -158,12 +174,19 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { // TODO(antoyo) } - fn add_compiler_used_global(&self, _global: RValue<'gcc>) { - // TODO(antoyo) + fn add_compiler_used_global(&self, global: RValue<'gcc>) { + // NOTE: seems like GCC does not make the distinction between compiler.used and used. + self.add_used_global(global); } } impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { + #[cfg_attr(not(feature="master"), allow(unused_variables))] + pub fn add_used_function(&self, function: Function<'gcc>) { + #[cfg(feature = "master")] + function.add_attribute(FnAttribute::Used); + } + pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { let global = match kind { @@ -208,82 +231,59 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let sym = self.tcx.symbol_name(instance).name; let global = - if let Some(def_id) = def_id.as_local() { - let id = self.tcx.hir().local_def_id_to_hir_id(def_id); - let llty = self.layout_of(ty).gcc_type(self, true); - // FIXME: refactor this to work without accessing the HIR - let global = match self.tcx.hir().get(id) { - Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => { - if let Some(global) = self.get_declared_value(&sym) { - if self.val_ty(global) != self.type_ptr_to(llty) { - span_bug!(span, "Conflicting types for static"); - } - } - - let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); - let global = self.declare_global( - &sym, - llty, - GlobalKind::Exported, - is_tls, - fn_attrs.link_section, - ); - - if !self.tcx.is_reachable_non_generic(def_id) { - // TODO(antoyo): set visibility. - } - - global - } - - Node::ForeignItem(&hir::ForeignItem { - span: _, - kind: hir::ForeignItemKind::Static(..), - .. - }) => { - let fn_attrs = self.tcx.codegen_fn_attrs(def_id); - check_and_apply_linkage(&self, &fn_attrs, ty, sym) - } - - item => bug!("get_static: expected static, found {:?}", item), - }; + if def_id.is_local() && !self.tcx.is_foreign_item(def_id) { + let llty = self.layout_of(ty).gcc_type(self); + if let Some(global) = self.get_declared_value(sym) { + if self.val_ty(global) != self.type_ptr_to(llty) { + span_bug!(self.tcx.def_span(def_id), "Conflicting types for static"); + } + } - global + let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); + let global = self.declare_global( + &sym, + llty, + GlobalKind::Exported, + is_tls, + fn_attrs.link_section, + ); + + if !self.tcx.is_reachable_non_generic(def_id) { + // TODO(antoyo): set visibility. } - else { - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? - //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); - - let attrs = self.tcx.codegen_fn_attrs(def_id); - let global = check_and_apply_linkage(&self, &attrs, ty, sym); - - let needs_dll_storage_attr = false; // TODO(antoyo) - - // If this assertion triggers, there's something wrong with commandline - // argument validation. - debug_assert!( - !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() - && self.tcx.sess.target.options.is_like_msvc - && self.tcx.sess.opts.cg.prefer_dynamic) - ); - - if needs_dll_storage_attr { - // This item is external but not foreign, i.e., it originates from an external Rust - // crate. 
Since we don't know whether this crate will be linked dynamically or
-            // statically in the final application, we always mark such symbols as 'dllimport'.
-            // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
-            // to make things work.
-            //
-            // However, in some scenarios we defer emission of statics to downstream
-            // crates, so there are cases where a static with an upstream DefId
-            // is actually present in the current crate. We can find out via the
-            // is_codegened_item query.
-            if !self.tcx.is_codegened_item(def_id) {
-                unimplemented!();
-            }
+
+                global
+            } else {
+                check_and_apply_linkage(&self, &fn_attrs, ty, sym)
+            };
+
+        if !def_id.is_local() {
+            let needs_dll_storage_attr = false; // TODO(antoyo)
+
+            // If this assertion triggers, there's something wrong with commandline
+            // argument validation.
+            debug_assert!(
+                !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+                    && self.tcx.sess.target.options.is_like_msvc
+                    && self.tcx.sess.opts.cg.prefer_dynamic)
+            );
+
+            if needs_dll_storage_attr {
+                // This item is external but not foreign, i.e., it originates from an external Rust
+                // crate. Since we don't know whether this crate will be linked dynamically or
+                // statically in the final application, we always mark such symbols as 'dllimport'.
+                // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+                // to make things work.
+                //
+                // However, in some scenarios we defer emission of statics to downstream
+                // crates, so there are cases where a static with an upstream DefId
+                // is actually present in the current crate. We can find out via the
+                // is_codegened_item query.
+                if !self.tcx.is_codegened_item(def_id) {
+                    unimplemented!();
                 }
-                global
-            };
+            }
+        }
 
         // TODO(antoyo): set dll storage class.
@@ -357,7 +357,7 @@ pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id
 fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str) -> LValue<'gcc> {
     let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
-    let llty = cx.layout_of(ty).gcc_type(cx, true);
+    let gcc_type = cx.layout_of(ty).gcc_type(cx);
     if let Some(linkage) = attrs.import_linkage {
         // Declare a symbol `foo` with the desired linkage.
         let global1 = cx.declare_global_with_linkage(&sym, cx.type_i8(), base::global_linkage_to_gcc(linkage));
@@ -370,9 +370,10 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
         // zero.
         let mut real_name = "_rust_extern_with_linkage_".to_string();
         real_name.push_str(&sym);
-        let global2 = cx.define_global(&real_name, llty, is_tls, attrs.link_section);
+        let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section);
         // TODO(antoyo): set linkage.
-        global2.global_set_initializer_rvalue(global1.get_address(None));
+        let value = cx.const_ptrcast(global1.get_address(None), gcc_type);
+        global2.global_set_initializer_rvalue(value);
         // TODO(antoyo): use global_set_initializer() when it will work.
         global2
     }
@@ -386,6 +387,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
         // don't do this then linker errors can be generated where the linker
         // complains that one object files has a thread local version of the
         // symbol and another one doesn't.
-        cx.declare_global(&sym, llty, GlobalKind::Imported, is_tls, attrs.link_section)
+        cx.declare_global(&sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section)
     }
 }
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 457006319afb8..661681bdb50f2 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -1,9 +1,10 @@
 use std::cell::{Cell, RefCell};
 
-use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type};
+use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Type};
 use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::traits::{
     BackendTypes,
+    BaseTypeMethods,
     MiscMethods,
 };
 use rustc_data_structures::base_n;
@@ -11,7 +12,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_middle::span_bug;
 use rustc_middle::mir::mono::CodegenUnit;
 use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
-use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
 use rustc_session::Session;
 use rustc_span::{Span, source_map::respan};
 use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
@@ -33,6 +34,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
     // TODO(bjorn3): Can this field be removed?
     pub current_func: RefCell>>,
     pub normal_function_addresses: RefCell>>,
+    pub function_address_names: RefCell, String>>,
 
     pub functions: RefCell>>,
     pub intrinsics: RefCell>>,
@@ -78,12 +80,10 @@ pub struct CodegenCx<'gcc, 'tcx> {
     pub struct_types: RefCell>, Type<'gcc>>>,
 
-    pub types_with_fields_to_set: RefCell, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
-
     /// Cache instances of monomorphic and polymorphic items
     pub instances: RefCell, LValue<'gcc>>>,
     /// Cache function instances of monomorphic and polymorphic items
-    pub function_instances: RefCell, RValue<'gcc>>>,
+    pub function_instances: RefCell, Function<'gcc>>>,
     /// Cache generated vtables
     pub vtables: RefCell, Option>), RValue<'gcc>>>,
 
@@ -110,6 +110,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
     local_gen_sym_counter: Cell,
     eh_personality: Cell>>,
+    pub rust_try_fn: Cell, Function<'gcc>)>>,
 
     pub pointee_infos: RefCell, Size), Option>>,
 
@@ -119,6 +120,8 @@ pub struct CodegenCx<'gcc, 'tcx> {
     /// they can be dereferenced later.
     /// FIXME(antoyo): fix the rustc API to avoid having this hack.
     pub structs_as_pointer: RefCell>>,
+
+    pub cleanup_blocks: RefCell>>,
 }
 
 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
@@ -194,6 +197,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             context,
             current_func: RefCell::new(None),
             normal_function_addresses: Default::default(),
+            function_address_names: Default::default(),
 
             functions: RefCell::new(functions),
             intrinsics: RefCell::new(FxHashMap::default()),
@@ -243,11 +247,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             types: Default::default(),
             tcx,
             struct_types: Default::default(),
-            types_with_fields_to_set: Default::default(),
 
             local_gen_sym_counter: Cell::new(0),
             eh_personality: Cell::new(None),
+            rust_try_fn: Cell::new(None),
 
             pointee_infos: Default::default(),
             structs_as_pointer: Default::default(),
+            cleanup_blocks: Default::default(),
         }
     }
@@ -327,8 +332,9 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
         let func = get_fn(self, instance);
-        *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
-        func
+        *self.current_func.borrow_mut() = Some(func);
+        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+        unsafe { std::mem::transmute(func) }
     }
 
     fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
@@ -339,8 +345,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
                 self.intrinsics.borrow()[func_name].clone()
             }
            else {
-                let func = get_fn(self, instance);
-                self.rvalue_as_function(func)
+                get_fn(self, instance)
            };
 
         let ptr = func.get_address(None);
@@ -348,6 +353,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
 
         self.normal_function_addresses.borrow_mut().insert(ptr);
+        self.function_address_names.borrow_mut().insert(ptr, func_name.to_string());
 
         ptr
     }
@@ -377,31 +383,40 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             return llpersonality;
         }
         let tcx = self.tcx;
-        let llfn = match tcx.lang_items().eh_personality() {
-            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
-                ty::Instance::resolve(
-                    tcx,
-                    ty::ParamEnv::reveal_all(),
-                    def_id,
-                    ty::List::empty(),
-                )
-                .unwrap().unwrap(),
-            ),
-            _ => {
-                let _name = if wants_msvc_seh(self.sess()) {
-                    "__CxxFrameHandler3"
-                } else {
-                    "rust_eh_personality"
-                };
-                //let func = self.declare_func(name, self.type_i32(), &[], true);
-                // FIXME(antoyo): this hack should not be needed. That will probably be removed when
-                // unwinding support is added.
-                self.context.new_rvalue_from_int(self.int_type, 0)
-            }
-        };
+        let func =
+            match tcx.lang_items().eh_personality() {
+                Some(def_id) if !wants_msvc_seh(self.sess()) => {
+                    let instance =
+                        ty::Instance::resolve(
+                            tcx,
+                            ty::ParamEnv::reveal_all(),
+                            def_id,
+                            ty::List::empty(),
+                        )
+                        .unwrap().unwrap();
+
+                    let symbol_name = tcx.symbol_name(instance).name;
+                    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+                    self.linkage.set(FunctionType::Extern);
+                    let func = self.declare_fn(symbol_name, &fn_abi);
+                    let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+                    func
+                },
+                _ => {
+                    let name =
+                        if wants_msvc_seh(self.sess()) {
+                            "__CxxFrameHandler3"
+                        }
+                        else {
+                            "rust_eh_personality"
+                        };
+                    let func = self.declare_func(name, self.type_i32(), &[], true);
+                    unsafe { std::mem::transmute(func) }
+                }
+            };
         // TODO(antoyo): apply target cpu attributes.
-        self.eh_personality.set(Some(llfn));
-        llfn
+        self.eh_personality.set(Some(func));
+        func
     }
 
     fn sess(&self) -> &Session {
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
index eae77508c973a..4748e7e4be2a3 100644
--- a/compiler/rustc_codegen_gcc/src/declare.rs
+++ b/compiler/rustc_codegen_gcc/src/declare.rs
@@ -38,12 +38,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         global
     }
 
-    /*pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
-        self.linkage.set(FunctionType::Exported);
-        let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
-        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
-        unsafe { std::mem::transmute(func) }
-    }*/
+    pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
+        self.linkage.set(FunctionType::Extern);
+        declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic)
+    }
 
     pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option) -> LValue<'gcc> {
         let global = self.context.new_global(None, global_kind, ty, name);
@@ -79,12 +77,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         unsafe { std::mem::transmute(func) }
     }
 
-    pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
+    pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Function<'gcc> {
         let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self);
         let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
         self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
-        // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
-        unsafe { std::mem::transmute(func) }
+        func
     }
 
     pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option) -> LValue<'gcc> {
diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs
index d0ba7e2479111..9305bd1e043d5 100644
--- a/compiler/rustc_codegen_gcc/src/errors.rs
+++ b/compiler/rustc_codegen_gcc/src/errors.rs
@@ -221,3 +221,18 @@ pub(crate) struct UnwindingInlineAsm {
     #[primary_span]
     pub span: Span,
 }
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_invalid_minimum_alignment)]
+pub(crate) struct InvalidMinimumAlignment {
+    pub err: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_gcc_tied_target_features)]
+#[help]
+pub(crate) struct TiedTargetFeatures {
+    #[primary_span]
+    pub span: Span,
+    pub features: String,
+}
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
index 0c5dab0046684..0cf1204791d33 100644
--- a/compiler/rustc_codegen_gcc/src/int.rs
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -389,18 +389,22 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             };
             self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
         }
+        else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() {
+            // NOTE: gcc cannot compare pointers to different objects, but rustc does that, so cast them to usize.
+            lhs = self.context.new_bitcast(None, lhs, self.usize_type);
+            rhs = self.context.new_bitcast(None, rhs, self.usize_type);
+            self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+        }
         else {
-            let left_type = lhs.get_type();
-            let right_type = rhs.get_type();
-            if left_type != right_type {
+            if a_type != b_type {
                 // NOTE: because libgccjit cannot compare function pointers.
-                if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
+                if a_type.dyncast_function_ptr_type().is_some() && b_type.dyncast_function_ptr_type().is_some() {
                     lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
                     rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
                 }
                 // NOTE: hack because we try to cast a vector type to the same vector type.
-                else if format!("{:?}", left_type) != format!("{:?}", right_type) {
-                    rhs = self.context.new_cast(None, rhs, left_type);
+                else if format!("{:?}", a_type) != format!("{:?}", b_type) {
+                    rhs = self.context.new_cast(None, rhs, a_type);
                 }
             }
             self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
index fb6c38fa07265..8a4559355ea67 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -34,6 +34,7 @@ match name {
     "llvm.aarch64.dmb" => "__builtin_arm_dmb",
     "llvm.aarch64.dsb" => "__builtin_arm_dsb",
     "llvm.aarch64.isb" => "__builtin_arm_isb",
+    "llvm.aarch64.prefetch" => "__builtin_arm_prefetch",
     "llvm.aarch64.sve.aesd" => "__builtin_sve_svaesd_u8",
     "llvm.aarch64.sve.aese" => "__builtin_sve_svaese_u8",
    "llvm.aarch64.sve.aesimc" => "__builtin_sve_svaesimc_u8",
@@ -58,13 +59,22 @@ match name {
     "llvm.amdgcn.cubema" => "__builtin_amdgcn_cubema",
     "llvm.amdgcn.cubesc" => "__builtin_amdgcn_cubesc",
     "llvm.amdgcn.cubetc" => "__builtin_amdgcn_cubetc",
+    "llvm.amdgcn.cvt.f32.bf8" => "__builtin_amdgcn_cvt_f32_bf8",
+    "llvm.amdgcn.cvt.f32.fp8" => "__builtin_amdgcn_cvt_f32_fp8",
+    "llvm.amdgcn.cvt.pk.bf8.f32" => "__builtin_amdgcn_cvt_pk_bf8_f32",
+    "llvm.amdgcn.cvt.pk.f32.bf8" => "__builtin_amdgcn_cvt_pk_f32_bf8",
+    "llvm.amdgcn.cvt.pk.f32.fp8" => "__builtin_amdgcn_cvt_pk_f32_fp8",
+    "llvm.amdgcn.cvt.pk.fp8.f32" => "__builtin_amdgcn_cvt_pk_fp8_f32",
     "llvm.amdgcn.cvt.pk.i16" => "__builtin_amdgcn_cvt_pk_i16",
     "llvm.amdgcn.cvt.pk.u16" => "__builtin_amdgcn_cvt_pk_u16",
     "llvm.amdgcn.cvt.pk.u8.f32" => "__builtin_amdgcn_cvt_pk_u8_f32",
     "llvm.amdgcn.cvt.pknorm.i16" => "__builtin_amdgcn_cvt_pknorm_i16",
     "llvm.amdgcn.cvt.pknorm.u16" => "__builtin_amdgcn_cvt_pknorm_u16",
     "llvm.amdgcn.cvt.pkrtz" => "__builtin_amdgcn_cvt_pkrtz",
+    "llvm.amdgcn.cvt.sr.bf8.f32" => "__builtin_amdgcn_cvt_sr_bf8_f32",
+    "llvm.amdgcn.cvt.sr.fp8.f32" => "__builtin_amdgcn_cvt_sr_fp8_f32",
     "llvm.amdgcn.dispatch.id" => "__builtin_amdgcn_dispatch_id",
+    "llvm.amdgcn.ds.add.gs.reg.rtn" => "__builtin_amdgcn_ds_add_gs_reg_rtn",
     "llvm.amdgcn.ds.bpermute" => "__builtin_amdgcn_ds_bpermute",
     "llvm.amdgcn.ds.fadd.v2bf16" => "__builtin_amdgcn_ds_atomic_fadd_v2bf16",
     "llvm.amdgcn.ds.gws.barrier" => "__builtin_amdgcn_ds_gws_barrier",
@@ -74,12 +84,16 @@ match name {
     "llvm.amdgcn.ds.gws.sema.release.all" => "__builtin_amdgcn_ds_gws_sema_release_all",
     "llvm.amdgcn.ds.gws.sema.v" => "__builtin_amdgcn_ds_gws_sema_v",
     "llvm.amdgcn.ds.permute" => "__builtin_amdgcn_ds_permute",
+    "llvm.amdgcn.ds.sub.gs.reg.rtn" => "__builtin_amdgcn_ds_sub_gs_reg_rtn",
"llvm.amdgcn.ds.swizzle" => "__builtin_amdgcn_ds_swizzle", "llvm.amdgcn.endpgm" => "__builtin_amdgcn_endpgm", "llvm.amdgcn.fdot2" => "__builtin_amdgcn_fdot2", - "llvm.amdgcn.fmed3" => "__builtin_amdgcn_fmed3", + "llvm.amdgcn.fdot2.bf16.bf16" => "__builtin_amdgcn_fdot2_bf16_bf16", + "llvm.amdgcn.fdot2.f16.f16" => "__builtin_amdgcn_fdot2_f16_f16", + "llvm.amdgcn.fdot2.f32.bf16" => "__builtin_amdgcn_fdot2_f32_bf16", "llvm.amdgcn.fmul.legacy" => "__builtin_amdgcn_fmul_legacy", "llvm.amdgcn.groupstaticsize" => "__builtin_amdgcn_groupstaticsize", + "llvm.amdgcn.iglp.opt" => "__builtin_amdgcn_iglp_opt", "llvm.amdgcn.implicit.buffer.ptr" => "__builtin_amdgcn_implicit_buffer_ptr", "llvm.amdgcn.implicitarg.ptr" => "__builtin_amdgcn_implicitarg_ptr", "llvm.amdgcn.interp.mov" => "__builtin_amdgcn_interp_mov", @@ -93,11 +107,51 @@ match name { "llvm.amdgcn.lerp" => "__builtin_amdgcn_lerp", "llvm.amdgcn.mbcnt.hi" => "__builtin_amdgcn_mbcnt_hi", "llvm.amdgcn.mbcnt.lo" => "__builtin_amdgcn_mbcnt_lo", + "llvm.amdgcn.mfma.f32.16x16x16bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x16bf16_1k", + "llvm.amdgcn.mfma.f32.16x16x16f16" => "__builtin_amdgcn_mfma_f32_16x16x16f16", + "llvm.amdgcn.mfma.f32.16x16x1f32" => "__builtin_amdgcn_mfma_f32_16x16x1f32", + "llvm.amdgcn.mfma.f32.16x16x2bf16" => "__builtin_amdgcn_mfma_f32_16x16x2bf16", + "llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_bf8", + "llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_bf8_fp8", + "llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_bf8", + "llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8" => "__builtin_amdgcn_mfma_f32_16x16x32_fp8_fp8", + "llvm.amdgcn.mfma.f32.16x16x4bf16.1k" => "__builtin_amdgcn_mfma_f32_16x16x4bf16_1k", + "llvm.amdgcn.mfma.f32.16x16x4f16" => "__builtin_amdgcn_mfma_f32_16x16x4f16", + "llvm.amdgcn.mfma.f32.16x16x4f32" => "__builtin_amdgcn_mfma_f32_16x16x4f32", + "llvm.amdgcn.mfma.f32.16x16x8.xf32" => "__builtin_amdgcn_mfma_f32_16x16x8_xf32", + "llvm.amdgcn.mfma.f32.16x16x8bf16" => "__builtin_amdgcn_mfma_f32_16x16x8bf16", + "llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_bf8", + "llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_bf8_fp8", + "llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_bf8", + "llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8" => "__builtin_amdgcn_mfma_f32_32x32x16_fp8_fp8", + "llvm.amdgcn.mfma.f32.32x32x1f32" => "__builtin_amdgcn_mfma_f32_32x32x1f32", + "llvm.amdgcn.mfma.f32.32x32x2bf16" => "__builtin_amdgcn_mfma_f32_32x32x2bf16", + "llvm.amdgcn.mfma.f32.32x32x2f32" => "__builtin_amdgcn_mfma_f32_32x32x2f32", + "llvm.amdgcn.mfma.f32.32x32x4.xf32" => "__builtin_amdgcn_mfma_f32_32x32x4_xf32", + "llvm.amdgcn.mfma.f32.32x32x4bf16" => "__builtin_amdgcn_mfma_f32_32x32x4bf16", + "llvm.amdgcn.mfma.f32.32x32x4bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x4bf16_1k", + "llvm.amdgcn.mfma.f32.32x32x4f16" => "__builtin_amdgcn_mfma_f32_32x32x4f16", + "llvm.amdgcn.mfma.f32.32x32x8bf16.1k" => "__builtin_amdgcn_mfma_f32_32x32x8bf16_1k", + "llvm.amdgcn.mfma.f32.32x32x8f16" => "__builtin_amdgcn_mfma_f32_32x32x8f16", + "llvm.amdgcn.mfma.f32.4x4x1f32" => "__builtin_amdgcn_mfma_f32_4x4x1f32", + "llvm.amdgcn.mfma.f32.4x4x2bf16" => "__builtin_amdgcn_mfma_f32_4x4x2bf16", + "llvm.amdgcn.mfma.f32.4x4x4bf16.1k" => "__builtin_amdgcn_mfma_f32_4x4x4bf16_1k", + "llvm.amdgcn.mfma.f32.4x4x4f16" => "__builtin_amdgcn_mfma_f32_4x4x4f16", + "llvm.amdgcn.mfma.f64.16x16x4f64" => 
"__builtin_amdgcn_mfma_f64_16x16x4f64", + "llvm.amdgcn.mfma.f64.4x4x4f64" => "__builtin_amdgcn_mfma_f64_4x4x4f64", + "llvm.amdgcn.mfma.i32.16x16x16i8" => "__builtin_amdgcn_mfma_i32_16x16x16i8", + "llvm.amdgcn.mfma.i32.16x16x32.i8" => "__builtin_amdgcn_mfma_i32_16x16x32_i8", + "llvm.amdgcn.mfma.i32.16x16x4i8" => "__builtin_amdgcn_mfma_i32_16x16x4i8", + "llvm.amdgcn.mfma.i32.32x32x16.i8" => "__builtin_amdgcn_mfma_i32_32x32x16_i8", + "llvm.amdgcn.mfma.i32.32x32x4i8" => "__builtin_amdgcn_mfma_i32_32x32x4i8", + "llvm.amdgcn.mfma.i32.32x32x8i8" => "__builtin_amdgcn_mfma_i32_32x32x8i8", + "llvm.amdgcn.mfma.i32.4x4x4i8" => "__builtin_amdgcn_mfma_i32_4x4x4i8", "llvm.amdgcn.mqsad.pk.u16.u8" => "__builtin_amdgcn_mqsad_pk_u16_u8", "llvm.amdgcn.mqsad.u32.u8" => "__builtin_amdgcn_mqsad_u32_u8", "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8", "llvm.amdgcn.perm" => "__builtin_amdgcn_perm", "llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16", + "llvm.amdgcn.permlane64" => "__builtin_amdgcn_permlane64", "llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16", "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8", "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr", @@ -122,19 +176,40 @@ match name { "llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio", "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg", "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep", + "llvm.amdgcn.s.wait.event.export.ready" => "__builtin_amdgcn_s_wait_event_export_ready", "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt", "llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8", "llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16", "llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8", "llvm.amdgcn.sched.barrier" => "__builtin_amdgcn_sched_barrier", + "llvm.amdgcn.sched.group.barrier" => "__builtin_amdgcn_sched_group_barrier", "llvm.amdgcn.sdot2" => "__builtin_amdgcn_sdot2", "llvm.amdgcn.sdot4" => "__builtin_amdgcn_sdot4", "llvm.amdgcn.sdot8" => "__builtin_amdgcn_sdot8", + "llvm.amdgcn.smfmac.f32.16x16x32.bf16" => "__builtin_amdgcn_smfmac_f32_16x16x32_bf16", + "llvm.amdgcn.smfmac.f32.16x16x32.f16" => "__builtin_amdgcn_smfmac_f32_16x16x32_f16", + "llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_bf8", + "llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_bf8_fp8", + "llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_bf8", + "llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_16x16x64_fp8_fp8", + "llvm.amdgcn.smfmac.f32.32x32x16.bf16" => "__builtin_amdgcn_smfmac_f32_32x32x16_bf16", + "llvm.amdgcn.smfmac.f32.32x32x16.f16" => "__builtin_amdgcn_smfmac_f32_32x32x16_f16", + "llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_bf8", + "llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_bf8_fp8", + "llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_bf8", + "llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8" => "__builtin_amdgcn_smfmac_f32_32x32x32_fp8_fp8", + "llvm.amdgcn.smfmac.i32.16x16x64.i8" => "__builtin_amdgcn_smfmac_i32_16x16x64_i8", + "llvm.amdgcn.smfmac.i32.32x32x32.i8" => "__builtin_amdgcn_smfmac_i32_32x32x32_i8", + "llvm.amdgcn.sudot4" => "__builtin_amdgcn_sudot4", + "llvm.amdgcn.sudot8" => "__builtin_amdgcn_sudot8", "llvm.amdgcn.udot2" => "__builtin_amdgcn_udot2", "llvm.amdgcn.udot4" => "__builtin_amdgcn_udot4", "llvm.amdgcn.udot8" => "__builtin_amdgcn_udot8", 
"llvm.amdgcn.wave.barrier" => "__builtin_amdgcn_wave_barrier", "llvm.amdgcn.wavefrontsize" => "__builtin_amdgcn_wavefrontsize", + "llvm.amdgcn.workgroup.id.x" => "__builtin_amdgcn_workgroup_id_x", + "llvm.amdgcn.workgroup.id.y" => "__builtin_amdgcn_workgroup_id_y", + "llvm.amdgcn.workgroup.id.z" => "__builtin_amdgcn_workgroup_id_z", "llvm.amdgcn.writelane" => "__builtin_amdgcn_writelane", // arm "llvm.arm.cdp" => "__builtin_arm_cdp", @@ -249,6 +324,8 @@ match name { "llvm.bpf.pseudo" => "__builtin_bpf_pseudo", // cuda "llvm.cuda.syncthreads" => "__syncthreads", + // dx + "llvm.dx.create.handle" => "__builtin_hlsl_create_handle", // hexagon "llvm.hexagon.A2.abs" => "__builtin_HEXAGON_A2_abs", "llvm.hexagon.A2.absp" => "__builtin_HEXAGON_A2_absp", @@ -459,6 +536,11 @@ match name { "llvm.hexagon.A4.vrminuw" => "__builtin_HEXAGON_A4_vrminuw", "llvm.hexagon.A4.vrminw" => "__builtin_HEXAGON_A4_vrminw", "llvm.hexagon.A5.vaddhubs" => "__builtin_HEXAGON_A5_vaddhubs", + "llvm.hexagon.A6.vcmpbeq.notany" => "__builtin_HEXAGON_A6_vcmpbeq_notany", + "llvm.hexagon.A7.clip" => "__builtin_HEXAGON_A7_clip", + "llvm.hexagon.A7.croundd.ri" => "__builtin_HEXAGON_A7_croundd_ri", + "llvm.hexagon.A7.croundd.rr" => "__builtin_HEXAGON_A7_croundd_rr", + "llvm.hexagon.A7.vclip" => "__builtin_HEXAGON_A7_vclip", "llvm.hexagon.C2.all8" => "__builtin_HEXAGON_C2_all8", "llvm.hexagon.C2.and" => "__builtin_HEXAGON_C2_and", "llvm.hexagon.C2.andn" => "__builtin_HEXAGON_C2_andn", @@ -557,6 +639,10 @@ match name { "llvm.hexagon.F2.dfmax" => "__builtin_HEXAGON_F2_dfmax", "llvm.hexagon.F2.dfmin" => "__builtin_HEXAGON_F2_dfmin", "llvm.hexagon.F2.dfmpy" => "__builtin_HEXAGON_F2_dfmpy", + "llvm.hexagon.F2.dfmpyfix" => "__builtin_HEXAGON_F2_dfmpyfix", + "llvm.hexagon.F2.dfmpyhh" => "__builtin_HEXAGON_F2_dfmpyhh", + "llvm.hexagon.F2.dfmpylh" => "__builtin_HEXAGON_F2_dfmpylh", + "llvm.hexagon.F2.dfmpyll" => "__builtin_HEXAGON_F2_dfmpyll", "llvm.hexagon.F2.dfsub" => "__builtin_HEXAGON_F2_dfsub", "llvm.hexagon.F2.sfadd" => "__builtin_HEXAGON_F2_sfadd", "llvm.hexagon.F2.sfclass" => "__builtin_HEXAGON_F2_sfclass", @@ -578,6 +664,8 @@ match name { "llvm.hexagon.F2.sfmin" => "__builtin_HEXAGON_F2_sfmin", "llvm.hexagon.F2.sfmpy" => "__builtin_HEXAGON_F2_sfmpy", "llvm.hexagon.F2.sfsub" => "__builtin_HEXAGON_F2_sfsub", + "llvm.hexagon.L2.loadw.locked" => "__builtin_HEXAGON_L2_loadw_locked", + "llvm.hexagon.L4.loadd.locked" => "__builtin__HEXAGON_L4_loadd_locked", "llvm.hexagon.M2.acci" => "__builtin_HEXAGON_M2_acci", "llvm.hexagon.M2.accii" => "__builtin_HEXAGON_M2_accii", "llvm.hexagon.M2.cmaci.s0" => "__builtin_HEXAGON_M2_cmaci_s0", @@ -646,6 +734,7 @@ match name { "llvm.hexagon.M2.mmpyul.rs1" => "__builtin_HEXAGON_M2_mmpyul_rs1", "llvm.hexagon.M2.mmpyul.s0" => "__builtin_HEXAGON_M2_mmpyul_s0", "llvm.hexagon.M2.mmpyul.s1" => "__builtin_HEXAGON_M2_mmpyul_s1", + "llvm.hexagon.M2.mnaci" => "__builtin_HEXAGON_M2_mnaci", "llvm.hexagon.M2.mpy.acc.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_hh_s0", "llvm.hexagon.M2.mpy.acc.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_hh_s1", "llvm.hexagon.M2.mpy.acc.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_hl_s0", @@ -894,6 +983,24 @@ match name { "llvm.hexagon.M5.vrmpybuu" => "__builtin_HEXAGON_M5_vrmpybuu", "llvm.hexagon.M6.vabsdiffb" => "__builtin_HEXAGON_M6_vabsdiffb", "llvm.hexagon.M6.vabsdiffub" => "__builtin_HEXAGON_M6_vabsdiffub", + "llvm.hexagon.M7.dcmpyiw" => "__builtin_HEXAGON_M7_dcmpyiw", + "llvm.hexagon.M7.dcmpyiw.acc" => "__builtin_HEXAGON_M7_dcmpyiw_acc", + "llvm.hexagon.M7.dcmpyiwc" => 
"__builtin_HEXAGON_M7_dcmpyiwc", + "llvm.hexagon.M7.dcmpyiwc.acc" => "__builtin_HEXAGON_M7_dcmpyiwc_acc", + "llvm.hexagon.M7.dcmpyrw" => "__builtin_HEXAGON_M7_dcmpyrw", + "llvm.hexagon.M7.dcmpyrw.acc" => "__builtin_HEXAGON_M7_dcmpyrw_acc", + "llvm.hexagon.M7.dcmpyrwc" => "__builtin_HEXAGON_M7_dcmpyrwc", + "llvm.hexagon.M7.dcmpyrwc.acc" => "__builtin_HEXAGON_M7_dcmpyrwc_acc", + "llvm.hexagon.M7.vdmpy" => "__builtin_HEXAGON_M7_vdmpy", + "llvm.hexagon.M7.vdmpy.acc" => "__builtin_HEXAGON_M7_vdmpy_acc", + "llvm.hexagon.M7.wcmpyiw" => "__builtin_HEXAGON_M7_wcmpyiw", + "llvm.hexagon.M7.wcmpyiw.rnd" => "__builtin_HEXAGON_M7_wcmpyiw_rnd", + "llvm.hexagon.M7.wcmpyiwc" => "__builtin_HEXAGON_M7_wcmpyiwc", + "llvm.hexagon.M7.wcmpyiwc.rnd" => "__builtin_HEXAGON_M7_wcmpyiwc_rnd", + "llvm.hexagon.M7.wcmpyrw" => "__builtin_HEXAGON_M7_wcmpyrw", + "llvm.hexagon.M7.wcmpyrw.rnd" => "__builtin_HEXAGON_M7_wcmpyrw_rnd", + "llvm.hexagon.M7.wcmpyrwc" => "__builtin_HEXAGON_M7_wcmpyrwc", + "llvm.hexagon.M7.wcmpyrwc.rnd" => "__builtin_HEXAGON_M7_wcmpyrwc_rnd", "llvm.hexagon.S2.addasl.rrri" => "__builtin_HEXAGON_S2_addasl_rrri", "llvm.hexagon.S2.asl.i.p" => "__builtin_HEXAGON_S2_asl_i_p", "llvm.hexagon.S2.asl.i.p.acc" => "__builtin_HEXAGON_S2_asl_i_p_acc", @@ -1023,6 +1130,7 @@ match name { "llvm.hexagon.S2.lsr.r.r.or" => "__builtin_HEXAGON_S2_lsr_r_r_or", "llvm.hexagon.S2.lsr.r.vh" => "__builtin_HEXAGON_S2_lsr_r_vh", "llvm.hexagon.S2.lsr.r.vw" => "__builtin_HEXAGON_S2_lsr_r_vw", + "llvm.hexagon.S2.mask" => "__builtin_HEXAGON_S2_mask", "llvm.hexagon.S2.packhl" => "__builtin_HEXAGON_S2_packhl", "llvm.hexagon.S2.parityp" => "__builtin_HEXAGON_S2_parityp", "llvm.hexagon.S2.setbit.i" => "__builtin_HEXAGON_S2_setbit_i", @@ -1031,6 +1139,12 @@ match name { "llvm.hexagon.S2.shuffeh" => "__builtin_HEXAGON_S2_shuffeh", "llvm.hexagon.S2.shuffob" => "__builtin_HEXAGON_S2_shuffob", "llvm.hexagon.S2.shuffoh" => "__builtin_HEXAGON_S2_shuffoh", + "llvm.hexagon.S2.storerb.pbr" => "__builtin_brev_stb", + "llvm.hexagon.S2.storerd.pbr" => "__builtin_brev_std", + "llvm.hexagon.S2.storerf.pbr" => "__builtin_brev_sthhi", + "llvm.hexagon.S2.storerh.pbr" => "__builtin_brev_sth", + "llvm.hexagon.S2.storeri.pbr" => "__builtin_brev_stw", + "llvm.hexagon.S2.storew.locked" => "__builtin_HEXAGON_S2_storew_locked", "llvm.hexagon.S2.svsathb" => "__builtin_HEXAGON_S2_svsathb", "llvm.hexagon.S2.svsathub" => "__builtin_HEXAGON_S2_svsathub", "llvm.hexagon.S2.tableidxb.goodsyntax" => "__builtin_HEXAGON_S2_tableidxb_goodsyntax", @@ -1089,6 +1203,7 @@ match name { "llvm.hexagon.S4.ori.asl.ri" => "__builtin_HEXAGON_S4_ori_asl_ri", "llvm.hexagon.S4.ori.lsr.ri" => "__builtin_HEXAGON_S4_ori_lsr_ri", "llvm.hexagon.S4.parity" => "__builtin_HEXAGON_S4_parity", + "llvm.hexagon.S4.stored.locked" => "__builtin_HEXAGON_S4_stored_locked", "llvm.hexagon.S4.subaddi" => "__builtin_HEXAGON_S4_subaddi", "llvm.hexagon.S4.subi.asl.ri" => "__builtin_HEXAGON_S4_subi_asl_ri", "llvm.hexagon.S4.subi.lsr.ri" => "__builtin_HEXAGON_S4_subi_lsr_ri", @@ -1126,8 +1241,56 @@ match name { "llvm.hexagon.V6.hi.128B" => "__builtin_HEXAGON_V6_hi_128B", "llvm.hexagon.V6.lo" => "__builtin_HEXAGON_V6_lo", "llvm.hexagon.V6.lo.128B" => "__builtin_HEXAGON_V6_lo_128B", + "llvm.hexagon.V6.lvsplatb" => "__builtin_HEXAGON_V6_lvsplatb", + "llvm.hexagon.V6.lvsplatb.128B" => "__builtin_HEXAGON_V6_lvsplatb_128B", + "llvm.hexagon.V6.lvsplath" => "__builtin_HEXAGON_V6_lvsplath", + "llvm.hexagon.V6.lvsplath.128B" => "__builtin_HEXAGON_V6_lvsplath_128B", "llvm.hexagon.V6.lvsplatw" => 
"__builtin_HEXAGON_V6_lvsplatw", "llvm.hexagon.V6.lvsplatw.128B" => "__builtin_HEXAGON_V6_lvsplatw_128B", + "llvm.hexagon.V6.pred.and" => "__builtin_HEXAGON_V6_pred_and", + "llvm.hexagon.V6.pred.and.128B" => "__builtin_HEXAGON_V6_pred_and_128B", + "llvm.hexagon.V6.pred.and.n" => "__builtin_HEXAGON_V6_pred_and_n", + "llvm.hexagon.V6.pred.and.n.128B" => "__builtin_HEXAGON_V6_pred_and_n_128B", + "llvm.hexagon.V6.pred.not" => "__builtin_HEXAGON_V6_pred_not", + "llvm.hexagon.V6.pred.not.128B" => "__builtin_HEXAGON_V6_pred_not_128B", + "llvm.hexagon.V6.pred.or" => "__builtin_HEXAGON_V6_pred_or", + "llvm.hexagon.V6.pred.or.128B" => "__builtin_HEXAGON_V6_pred_or_128B", + "llvm.hexagon.V6.pred.or.n" => "__builtin_HEXAGON_V6_pred_or_n", + "llvm.hexagon.V6.pred.or.n.128B" => "__builtin_HEXAGON_V6_pred_or_n_128B", + "llvm.hexagon.V6.pred.scalar2" => "__builtin_HEXAGON_V6_pred_scalar2", + "llvm.hexagon.V6.pred.scalar2.128B" => "__builtin_HEXAGON_V6_pred_scalar2_128B", + "llvm.hexagon.V6.pred.scalar2v2" => "__builtin_HEXAGON_V6_pred_scalar2v2", + "llvm.hexagon.V6.pred.scalar2v2.128B" => "__builtin_HEXAGON_V6_pred_scalar2v2_128B", + "llvm.hexagon.V6.pred.xor" => "__builtin_HEXAGON_V6_pred_xor", + "llvm.hexagon.V6.pred.xor.128B" => "__builtin_HEXAGON_V6_pred_xor_128B", + "llvm.hexagon.V6.shuffeqh" => "__builtin_HEXAGON_V6_shuffeqh", + "llvm.hexagon.V6.shuffeqh.128B" => "__builtin_HEXAGON_V6_shuffeqh_128B", + "llvm.hexagon.V6.shuffeqw" => "__builtin_HEXAGON_V6_shuffeqw", + "llvm.hexagon.V6.shuffeqw.128B" => "__builtin_HEXAGON_V6_shuffeqw_128B", + "llvm.hexagon.V6.v6mpyhubs10" => "__builtin_HEXAGON_V6_v6mpyhubs10", + "llvm.hexagon.V6.v6mpyhubs10.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_128B", + "llvm.hexagon.V6.v6mpyhubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx", + "llvm.hexagon.V6.v6mpyhubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B", + "llvm.hexagon.V6.v6mpyvubs10" => "__builtin_HEXAGON_V6_v6mpyvubs10", + "llvm.hexagon.V6.v6mpyvubs10.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_128B", + "llvm.hexagon.V6.v6mpyvubs10.vxx" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx", + "llvm.hexagon.V6.v6mpyvubs10.vxx.128B" => "__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B", + "llvm.hexagon.V6.vS32b.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai", + "llvm.hexagon.V6.vS32b.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nqpred_ai_128B", + "llvm.hexagon.V6.vS32b.nt.nqpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai", + "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B", + "llvm.hexagon.V6.vS32b.nt.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai", + "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B", + "llvm.hexagon.V6.vS32b.qpred.ai" => "__builtin_HEXAGON_V6_vS32b_qpred_ai", + "llvm.hexagon.V6.vS32b.qpred.ai.128B" => "__builtin_HEXAGON_V6_vS32b_qpred_ai_128B", + "llvm.hexagon.V6.vabs.hf" => "__builtin_HEXAGON_V6_vabs_hf", + "llvm.hexagon.V6.vabs.hf.128B" => "__builtin_HEXAGON_V6_vabs_hf_128B", + "llvm.hexagon.V6.vabs.sf" => "__builtin_HEXAGON_V6_vabs_sf", + "llvm.hexagon.V6.vabs.sf.128B" => "__builtin_HEXAGON_V6_vabs_sf_128B", + "llvm.hexagon.V6.vabsb" => "__builtin_HEXAGON_V6_vabsb", + "llvm.hexagon.V6.vabsb.128B" => "__builtin_HEXAGON_V6_vabsb_128B", + "llvm.hexagon.V6.vabsb.sat" => "__builtin_HEXAGON_V6_vabsb_sat", + "llvm.hexagon.V6.vabsb.sat.128B" => "__builtin_HEXAGON_V6_vabsb_sat_128B", "llvm.hexagon.V6.vabsdiffh" => "__builtin_HEXAGON_V6_vabsdiffh", "llvm.hexagon.V6.vabsdiffh.128B" => 
"__builtin_HEXAGON_V6_vabsdiffh_128B", "llvm.hexagon.V6.vabsdiffub" => "__builtin_HEXAGON_V6_vabsdiffub", @@ -1144,36 +1307,90 @@ match name { "llvm.hexagon.V6.vabsw.128B" => "__builtin_HEXAGON_V6_vabsw_128B", "llvm.hexagon.V6.vabsw.sat" => "__builtin_HEXAGON_V6_vabsw_sat", "llvm.hexagon.V6.vabsw.sat.128B" => "__builtin_HEXAGON_V6_vabsw_sat_128B", + "llvm.hexagon.V6.vadd.hf" => "__builtin_HEXAGON_V6_vadd_hf", + "llvm.hexagon.V6.vadd.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_128B", + "llvm.hexagon.V6.vadd.hf.hf" => "__builtin_HEXAGON_V6_vadd_hf_hf", + "llvm.hexagon.V6.vadd.hf.hf.128B" => "__builtin_HEXAGON_V6_vadd_hf_hf_128B", + "llvm.hexagon.V6.vadd.qf16" => "__builtin_HEXAGON_V6_vadd_qf16", + "llvm.hexagon.V6.vadd.qf16.128B" => "__builtin_HEXAGON_V6_vadd_qf16_128B", + "llvm.hexagon.V6.vadd.qf16.mix" => "__builtin_HEXAGON_V6_vadd_qf16_mix", + "llvm.hexagon.V6.vadd.qf16.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf16_mix_128B", + "llvm.hexagon.V6.vadd.qf32" => "__builtin_HEXAGON_V6_vadd_qf32", + "llvm.hexagon.V6.vadd.qf32.128B" => "__builtin_HEXAGON_V6_vadd_qf32_128B", + "llvm.hexagon.V6.vadd.qf32.mix" => "__builtin_HEXAGON_V6_vadd_qf32_mix", + "llvm.hexagon.V6.vadd.qf32.mix.128B" => "__builtin_HEXAGON_V6_vadd_qf32_mix_128B", + "llvm.hexagon.V6.vadd.sf" => "__builtin_HEXAGON_V6_vadd_sf", + "llvm.hexagon.V6.vadd.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_128B", + "llvm.hexagon.V6.vadd.sf.bf" => "__builtin_HEXAGON_V6_vadd_sf_bf", + "llvm.hexagon.V6.vadd.sf.bf.128B" => "__builtin_HEXAGON_V6_vadd_sf_bf_128B", + "llvm.hexagon.V6.vadd.sf.hf" => "__builtin_HEXAGON_V6_vadd_sf_hf", + "llvm.hexagon.V6.vadd.sf.hf.128B" => "__builtin_HEXAGON_V6_vadd_sf_hf_128B", + "llvm.hexagon.V6.vadd.sf.sf" => "__builtin_HEXAGON_V6_vadd_sf_sf", + "llvm.hexagon.V6.vadd.sf.sf.128B" => "__builtin_HEXAGON_V6_vadd_sf_sf_128B", "llvm.hexagon.V6.vaddb" => "__builtin_HEXAGON_V6_vaddb", "llvm.hexagon.V6.vaddb.128B" => "__builtin_HEXAGON_V6_vaddb_128B", "llvm.hexagon.V6.vaddb.dv" => "__builtin_HEXAGON_V6_vaddb_dv", "llvm.hexagon.V6.vaddb.dv.128B" => "__builtin_HEXAGON_V6_vaddb_dv_128B", + "llvm.hexagon.V6.vaddbnq" => "__builtin_HEXAGON_V6_vaddbnq", + "llvm.hexagon.V6.vaddbnq.128B" => "__builtin_HEXAGON_V6_vaddbnq_128B", + "llvm.hexagon.V6.vaddbq" => "__builtin_HEXAGON_V6_vaddbq", + "llvm.hexagon.V6.vaddbq.128B" => "__builtin_HEXAGON_V6_vaddbq_128B", + "llvm.hexagon.V6.vaddbsat" => "__builtin_HEXAGON_V6_vaddbsat", + "llvm.hexagon.V6.vaddbsat.128B" => "__builtin_HEXAGON_V6_vaddbsat_128B", + "llvm.hexagon.V6.vaddbsat.dv" => "__builtin_HEXAGON_V6_vaddbsat_dv", + "llvm.hexagon.V6.vaddbsat.dv.128B" => "__builtin_HEXAGON_V6_vaddbsat_dv_128B", + "llvm.hexagon.V6.vaddcarrysat" => "__builtin_HEXAGON_V6_vaddcarrysat", + "llvm.hexagon.V6.vaddcarrysat.128B" => "__builtin_HEXAGON_V6_vaddcarrysat_128B", + "llvm.hexagon.V6.vaddclbh" => "__builtin_HEXAGON_V6_vaddclbh", + "llvm.hexagon.V6.vaddclbh.128B" => "__builtin_HEXAGON_V6_vaddclbh_128B", + "llvm.hexagon.V6.vaddclbw" => "__builtin_HEXAGON_V6_vaddclbw", + "llvm.hexagon.V6.vaddclbw.128B" => "__builtin_HEXAGON_V6_vaddclbw_128B", "llvm.hexagon.V6.vaddh" => "__builtin_HEXAGON_V6_vaddh", "llvm.hexagon.V6.vaddh.128B" => "__builtin_HEXAGON_V6_vaddh_128B", "llvm.hexagon.V6.vaddh.dv" => "__builtin_HEXAGON_V6_vaddh_dv", "llvm.hexagon.V6.vaddh.dv.128B" => "__builtin_HEXAGON_V6_vaddh_dv_128B", + "llvm.hexagon.V6.vaddhnq" => "__builtin_HEXAGON_V6_vaddhnq", + "llvm.hexagon.V6.vaddhnq.128B" => "__builtin_HEXAGON_V6_vaddhnq_128B", + "llvm.hexagon.V6.vaddhq" => "__builtin_HEXAGON_V6_vaddhq", + 
"llvm.hexagon.V6.vaddhq.128B" => "__builtin_HEXAGON_V6_vaddhq_128B", "llvm.hexagon.V6.vaddhsat" => "__builtin_HEXAGON_V6_vaddhsat", "llvm.hexagon.V6.vaddhsat.128B" => "__builtin_HEXAGON_V6_vaddhsat_128B", "llvm.hexagon.V6.vaddhsat.dv" => "__builtin_HEXAGON_V6_vaddhsat_dv", "llvm.hexagon.V6.vaddhsat.dv.128B" => "__builtin_HEXAGON_V6_vaddhsat_dv_128B", "llvm.hexagon.V6.vaddhw" => "__builtin_HEXAGON_V6_vaddhw", "llvm.hexagon.V6.vaddhw.128B" => "__builtin_HEXAGON_V6_vaddhw_128B", + "llvm.hexagon.V6.vaddhw.acc" => "__builtin_HEXAGON_V6_vaddhw_acc", + "llvm.hexagon.V6.vaddhw.acc.128B" => "__builtin_HEXAGON_V6_vaddhw_acc_128B", "llvm.hexagon.V6.vaddubh" => "__builtin_HEXAGON_V6_vaddubh", "llvm.hexagon.V6.vaddubh.128B" => "__builtin_HEXAGON_V6_vaddubh_128B", + "llvm.hexagon.V6.vaddubh.acc" => "__builtin_HEXAGON_V6_vaddubh_acc", + "llvm.hexagon.V6.vaddubh.acc.128B" => "__builtin_HEXAGON_V6_vaddubh_acc_128B", "llvm.hexagon.V6.vaddubsat" => "__builtin_HEXAGON_V6_vaddubsat", "llvm.hexagon.V6.vaddubsat.128B" => "__builtin_HEXAGON_V6_vaddubsat_128B", "llvm.hexagon.V6.vaddubsat.dv" => "__builtin_HEXAGON_V6_vaddubsat_dv", "llvm.hexagon.V6.vaddubsat.dv.128B" => "__builtin_HEXAGON_V6_vaddubsat_dv_128B", + "llvm.hexagon.V6.vaddububb.sat" => "__builtin_HEXAGON_V6_vaddububb_sat", + "llvm.hexagon.V6.vaddububb.sat.128B" => "__builtin_HEXAGON_V6_vaddububb_sat_128B", "llvm.hexagon.V6.vadduhsat" => "__builtin_HEXAGON_V6_vadduhsat", "llvm.hexagon.V6.vadduhsat.128B" => "__builtin_HEXAGON_V6_vadduhsat_128B", "llvm.hexagon.V6.vadduhsat.dv" => "__builtin_HEXAGON_V6_vadduhsat_dv", "llvm.hexagon.V6.vadduhsat.dv.128B" => "__builtin_HEXAGON_V6_vadduhsat_dv_128B", "llvm.hexagon.V6.vadduhw" => "__builtin_HEXAGON_V6_vadduhw", "llvm.hexagon.V6.vadduhw.128B" => "__builtin_HEXAGON_V6_vadduhw_128B", + "llvm.hexagon.V6.vadduhw.acc" => "__builtin_HEXAGON_V6_vadduhw_acc", + "llvm.hexagon.V6.vadduhw.acc.128B" => "__builtin_HEXAGON_V6_vadduhw_acc_128B", + "llvm.hexagon.V6.vadduwsat" => "__builtin_HEXAGON_V6_vadduwsat", + "llvm.hexagon.V6.vadduwsat.128B" => "__builtin_HEXAGON_V6_vadduwsat_128B", + "llvm.hexagon.V6.vadduwsat.dv" => "__builtin_HEXAGON_V6_vadduwsat_dv", + "llvm.hexagon.V6.vadduwsat.dv.128B" => "__builtin_HEXAGON_V6_vadduwsat_dv_128B", "llvm.hexagon.V6.vaddw" => "__builtin_HEXAGON_V6_vaddw", "llvm.hexagon.V6.vaddw.128B" => "__builtin_HEXAGON_V6_vaddw_128B", "llvm.hexagon.V6.vaddw.dv" => "__builtin_HEXAGON_V6_vaddw_dv", "llvm.hexagon.V6.vaddw.dv.128B" => "__builtin_HEXAGON_V6_vaddw_dv_128B", + "llvm.hexagon.V6.vaddwnq" => "__builtin_HEXAGON_V6_vaddwnq", + "llvm.hexagon.V6.vaddwnq.128B" => "__builtin_HEXAGON_V6_vaddwnq_128B", + "llvm.hexagon.V6.vaddwq" => "__builtin_HEXAGON_V6_vaddwq", + "llvm.hexagon.V6.vaddwq.128B" => "__builtin_HEXAGON_V6_vaddwq_128B", "llvm.hexagon.V6.vaddwsat" => "__builtin_HEXAGON_V6_vaddwsat", "llvm.hexagon.V6.vaddwsat.128B" => "__builtin_HEXAGON_V6_vaddwsat_128B", "llvm.hexagon.V6.vaddwsat.dv" => "__builtin_HEXAGON_V6_vaddwsat_dv", @@ -1184,8 +1401,26 @@ match name { "llvm.hexagon.V6.valignbi.128B" => "__builtin_HEXAGON_V6_valignbi_128B", "llvm.hexagon.V6.vand" => "__builtin_HEXAGON_V6_vand", "llvm.hexagon.V6.vand.128B" => "__builtin_HEXAGON_V6_vand_128B", + "llvm.hexagon.V6.vandnqrt" => "__builtin_HEXAGON_V6_vandnqrt", + "llvm.hexagon.V6.vandnqrt.128B" => "__builtin_HEXAGON_V6_vandnqrt_128B", + "llvm.hexagon.V6.vandnqrt.acc" => "__builtin_HEXAGON_V6_vandnqrt_acc", + "llvm.hexagon.V6.vandnqrt.acc.128B" => "__builtin_HEXAGON_V6_vandnqrt_acc_128B", + "llvm.hexagon.V6.vandqrt" => 
"__builtin_HEXAGON_V6_vandqrt", + "llvm.hexagon.V6.vandqrt.128B" => "__builtin_HEXAGON_V6_vandqrt_128B", + "llvm.hexagon.V6.vandqrt.acc" => "__builtin_HEXAGON_V6_vandqrt_acc", + "llvm.hexagon.V6.vandqrt.acc.128B" => "__builtin_HEXAGON_V6_vandqrt_acc_128B", + "llvm.hexagon.V6.vandvnqv" => "__builtin_HEXAGON_V6_vandvnqv", + "llvm.hexagon.V6.vandvnqv.128B" => "__builtin_HEXAGON_V6_vandvnqv_128B", + "llvm.hexagon.V6.vandvqv" => "__builtin_HEXAGON_V6_vandvqv", + "llvm.hexagon.V6.vandvqv.128B" => "__builtin_HEXAGON_V6_vandvqv_128B", + "llvm.hexagon.V6.vandvrt" => "__builtin_HEXAGON_V6_vandvrt", + "llvm.hexagon.V6.vandvrt.128B" => "__builtin_HEXAGON_V6_vandvrt_128B", + "llvm.hexagon.V6.vandvrt.acc" => "__builtin_HEXAGON_V6_vandvrt_acc", + "llvm.hexagon.V6.vandvrt.acc.128B" => "__builtin_HEXAGON_V6_vandvrt_acc_128B", "llvm.hexagon.V6.vaslh" => "__builtin_HEXAGON_V6_vaslh", "llvm.hexagon.V6.vaslh.128B" => "__builtin_HEXAGON_V6_vaslh_128B", + "llvm.hexagon.V6.vaslh.acc" => "__builtin_HEXAGON_V6_vaslh_acc", + "llvm.hexagon.V6.vaslh.acc.128B" => "__builtin_HEXAGON_V6_vaslh_acc_128B", "llvm.hexagon.V6.vaslhv" => "__builtin_HEXAGON_V6_vaslhv", "llvm.hexagon.V6.vaslhv.128B" => "__builtin_HEXAGON_V6_vaslhv_128B", "llvm.hexagon.V6.vaslw" => "__builtin_HEXAGON_V6_vaslw", @@ -1194,16 +1429,38 @@ match name { "llvm.hexagon.V6.vaslw.acc.128B" => "__builtin_HEXAGON_V6_vaslw_acc_128B", "llvm.hexagon.V6.vaslwv" => "__builtin_HEXAGON_V6_vaslwv", "llvm.hexagon.V6.vaslwv.128B" => "__builtin_HEXAGON_V6_vaslwv_128B", + "llvm.hexagon.V6.vasr.into" => "__builtin_HEXAGON_V6_vasr_into", + "llvm.hexagon.V6.vasr.into.128B" => "__builtin_HEXAGON_V6_vasr_into_128B", "llvm.hexagon.V6.vasrh" => "__builtin_HEXAGON_V6_vasrh", "llvm.hexagon.V6.vasrh.128B" => "__builtin_HEXAGON_V6_vasrh_128B", + "llvm.hexagon.V6.vasrh.acc" => "__builtin_HEXAGON_V6_vasrh_acc", + "llvm.hexagon.V6.vasrh.acc.128B" => "__builtin_HEXAGON_V6_vasrh_acc_128B", "llvm.hexagon.V6.vasrhbrndsat" => "__builtin_HEXAGON_V6_vasrhbrndsat", "llvm.hexagon.V6.vasrhbrndsat.128B" => "__builtin_HEXAGON_V6_vasrhbrndsat_128B", + "llvm.hexagon.V6.vasrhbsat" => "__builtin_HEXAGON_V6_vasrhbsat", + "llvm.hexagon.V6.vasrhbsat.128B" => "__builtin_HEXAGON_V6_vasrhbsat_128B", "llvm.hexagon.V6.vasrhubrndsat" => "__builtin_HEXAGON_V6_vasrhubrndsat", "llvm.hexagon.V6.vasrhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrhubrndsat_128B", "llvm.hexagon.V6.vasrhubsat" => "__builtin_HEXAGON_V6_vasrhubsat", "llvm.hexagon.V6.vasrhubsat.128B" => "__builtin_HEXAGON_V6_vasrhubsat_128B", "llvm.hexagon.V6.vasrhv" => "__builtin_HEXAGON_V6_vasrhv", "llvm.hexagon.V6.vasrhv.128B" => "__builtin_HEXAGON_V6_vasrhv_128B", + "llvm.hexagon.V6.vasruhubrndsat" => "__builtin_HEXAGON_V6_vasruhubrndsat", + "llvm.hexagon.V6.vasruhubrndsat.128B" => "__builtin_HEXAGON_V6_vasruhubrndsat_128B", + "llvm.hexagon.V6.vasruhubsat" => "__builtin_HEXAGON_V6_vasruhubsat", + "llvm.hexagon.V6.vasruhubsat.128B" => "__builtin_HEXAGON_V6_vasruhubsat_128B", + "llvm.hexagon.V6.vasruwuhrndsat" => "__builtin_HEXAGON_V6_vasruwuhrndsat", + "llvm.hexagon.V6.vasruwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasruwuhrndsat_128B", + "llvm.hexagon.V6.vasruwuhsat" => "__builtin_HEXAGON_V6_vasruwuhsat", + "llvm.hexagon.V6.vasruwuhsat.128B" => "__builtin_HEXAGON_V6_vasruwuhsat_128B", + "llvm.hexagon.V6.vasrvuhubrndsat" => "__builtin_HEXAGON_V6_vasrvuhubrndsat", + "llvm.hexagon.V6.vasrvuhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubrndsat_128B", + "llvm.hexagon.V6.vasrvuhubsat" => "__builtin_HEXAGON_V6_vasrvuhubsat", + 
"llvm.hexagon.V6.vasrvuhubsat.128B" => "__builtin_HEXAGON_V6_vasrvuhubsat_128B", + "llvm.hexagon.V6.vasrvwuhrndsat" => "__builtin_HEXAGON_V6_vasrvwuhrndsat", + "llvm.hexagon.V6.vasrvwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhrndsat_128B", + "llvm.hexagon.V6.vasrvwuhsat" => "__builtin_HEXAGON_V6_vasrvwuhsat", + "llvm.hexagon.V6.vasrvwuhsat.128B" => "__builtin_HEXAGON_V6_vasrvwuhsat_128B", "llvm.hexagon.V6.vasrw" => "__builtin_HEXAGON_V6_vasrw", "llvm.hexagon.V6.vasrw.128B" => "__builtin_HEXAGON_V6_vasrw_128B", "llvm.hexagon.V6.vasrw.acc" => "__builtin_HEXAGON_V6_vasrw_acc", @@ -1214,14 +1471,22 @@ match name { "llvm.hexagon.V6.vasrwhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwhrndsat_128B", "llvm.hexagon.V6.vasrwhsat" => "__builtin_HEXAGON_V6_vasrwhsat", "llvm.hexagon.V6.vasrwhsat.128B" => "__builtin_HEXAGON_V6_vasrwhsat_128B", + "llvm.hexagon.V6.vasrwuhrndsat" => "__builtin_HEXAGON_V6_vasrwuhrndsat", + "llvm.hexagon.V6.vasrwuhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwuhrndsat_128B", "llvm.hexagon.V6.vasrwuhsat" => "__builtin_HEXAGON_V6_vasrwuhsat", "llvm.hexagon.V6.vasrwuhsat.128B" => "__builtin_HEXAGON_V6_vasrwuhsat_128B", "llvm.hexagon.V6.vasrwv" => "__builtin_HEXAGON_V6_vasrwv", "llvm.hexagon.V6.vasrwv.128B" => "__builtin_HEXAGON_V6_vasrwv_128B", "llvm.hexagon.V6.vassign" => "__builtin_HEXAGON_V6_vassign", "llvm.hexagon.V6.vassign.128B" => "__builtin_HEXAGON_V6_vassign_128B", + "llvm.hexagon.V6.vassign.fp" => "__builtin_HEXAGON_V6_vassign_fp", + "llvm.hexagon.V6.vassign.fp.128B" => "__builtin_HEXAGON_V6_vassign_fp_128B", "llvm.hexagon.V6.vassignp" => "__builtin_HEXAGON_V6_vassignp", "llvm.hexagon.V6.vassignp.128B" => "__builtin_HEXAGON_V6_vassignp_128B", + "llvm.hexagon.V6.vavgb" => "__builtin_HEXAGON_V6_vavgb", + "llvm.hexagon.V6.vavgb.128B" => "__builtin_HEXAGON_V6_vavgb_128B", + "llvm.hexagon.V6.vavgbrnd" => "__builtin_HEXAGON_V6_vavgbrnd", + "llvm.hexagon.V6.vavgbrnd.128B" => "__builtin_HEXAGON_V6_vavgbrnd_128B", "llvm.hexagon.V6.vavgh" => "__builtin_HEXAGON_V6_vavgh", "llvm.hexagon.V6.vavgh.128B" => "__builtin_HEXAGON_V6_vavgh_128B", "llvm.hexagon.V6.vavghrnd" => "__builtin_HEXAGON_V6_vavghrnd", @@ -1234,6 +1499,10 @@ match name { "llvm.hexagon.V6.vavguh.128B" => "__builtin_HEXAGON_V6_vavguh_128B", "llvm.hexagon.V6.vavguhrnd" => "__builtin_HEXAGON_V6_vavguhrnd", "llvm.hexagon.V6.vavguhrnd.128B" => "__builtin_HEXAGON_V6_vavguhrnd_128B", + "llvm.hexagon.V6.vavguw" => "__builtin_HEXAGON_V6_vavguw", + "llvm.hexagon.V6.vavguw.128B" => "__builtin_HEXAGON_V6_vavguw_128B", + "llvm.hexagon.V6.vavguwrnd" => "__builtin_HEXAGON_V6_vavguwrnd", + "llvm.hexagon.V6.vavguwrnd.128B" => "__builtin_HEXAGON_V6_vavguwrnd_128B", "llvm.hexagon.V6.vavgw" => "__builtin_HEXAGON_V6_vavgw", "llvm.hexagon.V6.vavgw.128B" => "__builtin_HEXAGON_V6_vavgw_128B", "llvm.hexagon.V6.vavgwrnd" => "__builtin_HEXAGON_V6_vavgwrnd", @@ -1244,8 +1513,46 @@ match name { "llvm.hexagon.V6.vcl0w.128B" => "__builtin_HEXAGON_V6_vcl0w_128B", "llvm.hexagon.V6.vcombine" => "__builtin_HEXAGON_V6_vcombine", "llvm.hexagon.V6.vcombine.128B" => "__builtin_HEXAGON_V6_vcombine_128B", + "llvm.hexagon.V6.vconv.h.hf" => "__builtin_HEXAGON_V6_vconv_h_hf", + "llvm.hexagon.V6.vconv.h.hf.128B" => "__builtin_HEXAGON_V6_vconv_h_hf_128B", + "llvm.hexagon.V6.vconv.hf.h" => "__builtin_HEXAGON_V6_vconv_hf_h", + "llvm.hexagon.V6.vconv.hf.h.128B" => "__builtin_HEXAGON_V6_vconv_hf_h_128B", + "llvm.hexagon.V6.vconv.hf.qf16" => "__builtin_HEXAGON_V6_vconv_hf_qf16", + "llvm.hexagon.V6.vconv.hf.qf16.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf16_128B", + 
"llvm.hexagon.V6.vconv.hf.qf32" => "__builtin_HEXAGON_V6_vconv_hf_qf32", + "llvm.hexagon.V6.vconv.hf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_hf_qf32_128B", + "llvm.hexagon.V6.vconv.sf.qf32" => "__builtin_HEXAGON_V6_vconv_sf_qf32", + "llvm.hexagon.V6.vconv.sf.qf32.128B" => "__builtin_HEXAGON_V6_vconv_sf_qf32_128B", + "llvm.hexagon.V6.vconv.sf.w" => "__builtin_HEXAGON_V6_vconv_sf_w", + "llvm.hexagon.V6.vconv.sf.w.128B" => "__builtin_HEXAGON_V6_vconv_sf_w_128B", + "llvm.hexagon.V6.vconv.w.sf" => "__builtin_HEXAGON_V6_vconv_w_sf", + "llvm.hexagon.V6.vconv.w.sf.128B" => "__builtin_HEXAGON_V6_vconv_w_sf_128B", + "llvm.hexagon.V6.vcvt.b.hf" => "__builtin_HEXAGON_V6_vcvt_b_hf", + "llvm.hexagon.V6.vcvt.b.hf.128B" => "__builtin_HEXAGON_V6_vcvt_b_hf_128B", + "llvm.hexagon.V6.vcvt.bf.sf" => "__builtin_HEXAGON_V6_vcvt_bf_sf", + "llvm.hexagon.V6.vcvt.bf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_bf_sf_128B", + "llvm.hexagon.V6.vcvt.h.hf" => "__builtin_HEXAGON_V6_vcvt_h_hf", + "llvm.hexagon.V6.vcvt.h.hf.128B" => "__builtin_HEXAGON_V6_vcvt_h_hf_128B", + "llvm.hexagon.V6.vcvt.hf.b" => "__builtin_HEXAGON_V6_vcvt_hf_b", + "llvm.hexagon.V6.vcvt.hf.b.128B" => "__builtin_HEXAGON_V6_vcvt_hf_b_128B", + "llvm.hexagon.V6.vcvt.hf.h" => "__builtin_HEXAGON_V6_vcvt_hf_h", + "llvm.hexagon.V6.vcvt.hf.h.128B" => "__builtin_HEXAGON_V6_vcvt_hf_h_128B", + "llvm.hexagon.V6.vcvt.hf.sf" => "__builtin_HEXAGON_V6_vcvt_hf_sf", + "llvm.hexagon.V6.vcvt.hf.sf.128B" => "__builtin_HEXAGON_V6_vcvt_hf_sf_128B", + "llvm.hexagon.V6.vcvt.hf.ub" => "__builtin_HEXAGON_V6_vcvt_hf_ub", + "llvm.hexagon.V6.vcvt.hf.ub.128B" => "__builtin_HEXAGON_V6_vcvt_hf_ub_128B", + "llvm.hexagon.V6.vcvt.hf.uh" => "__builtin_HEXAGON_V6_vcvt_hf_uh", + "llvm.hexagon.V6.vcvt.hf.uh.128B" => "__builtin_HEXAGON_V6_vcvt_hf_uh_128B", + "llvm.hexagon.V6.vcvt.sf.hf" => "__builtin_HEXAGON_V6_vcvt_sf_hf", + "llvm.hexagon.V6.vcvt.sf.hf.128B" => "__builtin_HEXAGON_V6_vcvt_sf_hf_128B", + "llvm.hexagon.V6.vcvt.ub.hf" => "__builtin_HEXAGON_V6_vcvt_ub_hf", + "llvm.hexagon.V6.vcvt.ub.hf.128B" => "__builtin_HEXAGON_V6_vcvt_ub_hf_128B", + "llvm.hexagon.V6.vcvt.uh.hf" => "__builtin_HEXAGON_V6_vcvt_uh_hf", + "llvm.hexagon.V6.vcvt.uh.hf.128B" => "__builtin_HEXAGON_V6_vcvt_uh_hf_128B", "llvm.hexagon.V6.vd0" => "__builtin_HEXAGON_V6_vd0", "llvm.hexagon.V6.vd0.128B" => "__builtin_HEXAGON_V6_vd0_128B", + "llvm.hexagon.V6.vdd0" => "__builtin_HEXAGON_V6_vdd0", + "llvm.hexagon.V6.vdd0.128B" => "__builtin_HEXAGON_V6_vdd0_128B", "llvm.hexagon.V6.vdealb" => "__builtin_HEXAGON_V6_vdealb", "llvm.hexagon.V6.vdealb.128B" => "__builtin_HEXAGON_V6_vdealb_128B", "llvm.hexagon.V6.vdealb4w" => "__builtin_HEXAGON_V6_vdealb4w", @@ -1256,6 +1563,10 @@ match name { "llvm.hexagon.V6.vdealvdd.128B" => "__builtin_HEXAGON_V6_vdealvdd_128B", "llvm.hexagon.V6.vdelta" => "__builtin_HEXAGON_V6_vdelta", "llvm.hexagon.V6.vdelta.128B" => "__builtin_HEXAGON_V6_vdelta_128B", + "llvm.hexagon.V6.vdmpy.sf.hf" => "__builtin_HEXAGON_V6_vdmpy_sf_hf", + "llvm.hexagon.V6.vdmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_128B", + "llvm.hexagon.V6.vdmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc", + "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vdmpy_sf_hf_acc_128B", "llvm.hexagon.V6.vdmpybus" => "__builtin_HEXAGON_V6_vdmpybus", "llvm.hexagon.V6.vdmpybus.128B" => "__builtin_HEXAGON_V6_vdmpybus_128B", "llvm.hexagon.V6.vdmpybus.acc" => "__builtin_HEXAGON_V6_vdmpybus_acc", @@ -1296,12 +1607,134 @@ match name { "llvm.hexagon.V6.vdsaduh.128B" => "__builtin_HEXAGON_V6_vdsaduh_128B", 
"llvm.hexagon.V6.vdsaduh.acc" => "__builtin_HEXAGON_V6_vdsaduh_acc", "llvm.hexagon.V6.vdsaduh.acc.128B" => "__builtin_HEXAGON_V6_vdsaduh_acc_128B", + "llvm.hexagon.V6.veqb" => "__builtin_HEXAGON_V6_veqb", + "llvm.hexagon.V6.veqb.128B" => "__builtin_HEXAGON_V6_veqb_128B", + "llvm.hexagon.V6.veqb.and" => "__builtin_HEXAGON_V6_veqb_and", + "llvm.hexagon.V6.veqb.and.128B" => "__builtin_HEXAGON_V6_veqb_and_128B", + "llvm.hexagon.V6.veqb.or" => "__builtin_HEXAGON_V6_veqb_or", + "llvm.hexagon.V6.veqb.or.128B" => "__builtin_HEXAGON_V6_veqb_or_128B", + "llvm.hexagon.V6.veqb.xor" => "__builtin_HEXAGON_V6_veqb_xor", + "llvm.hexagon.V6.veqb.xor.128B" => "__builtin_HEXAGON_V6_veqb_xor_128B", + "llvm.hexagon.V6.veqh" => "__builtin_HEXAGON_V6_veqh", + "llvm.hexagon.V6.veqh.128B" => "__builtin_HEXAGON_V6_veqh_128B", + "llvm.hexagon.V6.veqh.and" => "__builtin_HEXAGON_V6_veqh_and", + "llvm.hexagon.V6.veqh.and.128B" => "__builtin_HEXAGON_V6_veqh_and_128B", + "llvm.hexagon.V6.veqh.or" => "__builtin_HEXAGON_V6_veqh_or", + "llvm.hexagon.V6.veqh.or.128B" => "__builtin_HEXAGON_V6_veqh_or_128B", + "llvm.hexagon.V6.veqh.xor" => "__builtin_HEXAGON_V6_veqh_xor", + "llvm.hexagon.V6.veqh.xor.128B" => "__builtin_HEXAGON_V6_veqh_xor_128B", + "llvm.hexagon.V6.veqw" => "__builtin_HEXAGON_V6_veqw", + "llvm.hexagon.V6.veqw.128B" => "__builtin_HEXAGON_V6_veqw_128B", + "llvm.hexagon.V6.veqw.and" => "__builtin_HEXAGON_V6_veqw_and", + "llvm.hexagon.V6.veqw.and.128B" => "__builtin_HEXAGON_V6_veqw_and_128B", + "llvm.hexagon.V6.veqw.or" => "__builtin_HEXAGON_V6_veqw_or", + "llvm.hexagon.V6.veqw.or.128B" => "__builtin_HEXAGON_V6_veqw_or_128B", + "llvm.hexagon.V6.veqw.xor" => "__builtin_HEXAGON_V6_veqw_xor", + "llvm.hexagon.V6.veqw.xor.128B" => "__builtin_HEXAGON_V6_veqw_xor_128B", + "llvm.hexagon.V6.vfmax.hf" => "__builtin_HEXAGON_V6_vfmax_hf", + "llvm.hexagon.V6.vfmax.hf.128B" => "__builtin_HEXAGON_V6_vfmax_hf_128B", + "llvm.hexagon.V6.vfmax.sf" => "__builtin_HEXAGON_V6_vfmax_sf", + "llvm.hexagon.V6.vfmax.sf.128B" => "__builtin_HEXAGON_V6_vfmax_sf_128B", + "llvm.hexagon.V6.vfmin.hf" => "__builtin_HEXAGON_V6_vfmin_hf", + "llvm.hexagon.V6.vfmin.hf.128B" => "__builtin_HEXAGON_V6_vfmin_hf_128B", + "llvm.hexagon.V6.vfmin.sf" => "__builtin_HEXAGON_V6_vfmin_sf", + "llvm.hexagon.V6.vfmin.sf.128B" => "__builtin_HEXAGON_V6_vfmin_sf_128B", + "llvm.hexagon.V6.vfneg.hf" => "__builtin_HEXAGON_V6_vfneg_hf", + "llvm.hexagon.V6.vfneg.hf.128B" => "__builtin_HEXAGON_V6_vfneg_hf_128B", + "llvm.hexagon.V6.vfneg.sf" => "__builtin_HEXAGON_V6_vfneg_sf", + "llvm.hexagon.V6.vfneg.sf.128B" => "__builtin_HEXAGON_V6_vfneg_sf_128B", + "llvm.hexagon.V6.vgathermh" => "__builtin_HEXAGON_V6_vgathermh", + "llvm.hexagon.V6.vgathermh.128B" => "__builtin_HEXAGON_V6_vgathermh_128B", + "llvm.hexagon.V6.vgathermhq" => "__builtin_HEXAGON_V6_vgathermhq", + "llvm.hexagon.V6.vgathermhq.128B" => "__builtin_HEXAGON_V6_vgathermhq_128B", + "llvm.hexagon.V6.vgathermhw" => "__builtin_HEXAGON_V6_vgathermhw", + "llvm.hexagon.V6.vgathermhw.128B" => "__builtin_HEXAGON_V6_vgathermhw_128B", + "llvm.hexagon.V6.vgathermhwq" => "__builtin_HEXAGON_V6_vgathermhwq", + "llvm.hexagon.V6.vgathermhwq.128B" => "__builtin_HEXAGON_V6_vgathermhwq_128B", + "llvm.hexagon.V6.vgathermw" => "__builtin_HEXAGON_V6_vgathermw", + "llvm.hexagon.V6.vgathermw.128B" => "__builtin_HEXAGON_V6_vgathermw_128B", + "llvm.hexagon.V6.vgathermwq" => "__builtin_HEXAGON_V6_vgathermwq", + "llvm.hexagon.V6.vgathermwq.128B" => "__builtin_HEXAGON_V6_vgathermwq_128B", + "llvm.hexagon.V6.vgtb" => "__builtin_HEXAGON_V6_vgtb", + 
"llvm.hexagon.V6.vgtb.128B" => "__builtin_HEXAGON_V6_vgtb_128B", + "llvm.hexagon.V6.vgtb.and" => "__builtin_HEXAGON_V6_vgtb_and", + "llvm.hexagon.V6.vgtb.and.128B" => "__builtin_HEXAGON_V6_vgtb_and_128B", + "llvm.hexagon.V6.vgtb.or" => "__builtin_HEXAGON_V6_vgtb_or", + "llvm.hexagon.V6.vgtb.or.128B" => "__builtin_HEXAGON_V6_vgtb_or_128B", + "llvm.hexagon.V6.vgtb.xor" => "__builtin_HEXAGON_V6_vgtb_xor", + "llvm.hexagon.V6.vgtb.xor.128B" => "__builtin_HEXAGON_V6_vgtb_xor_128B", + "llvm.hexagon.V6.vgtbf" => "__builtin_HEXAGON_V6_vgtbf", + "llvm.hexagon.V6.vgtbf.128B" => "__builtin_HEXAGON_V6_vgtbf_128B", + "llvm.hexagon.V6.vgtbf.and" => "__builtin_HEXAGON_V6_vgtbf_and", + "llvm.hexagon.V6.vgtbf.and.128B" => "__builtin_HEXAGON_V6_vgtbf_and_128B", + "llvm.hexagon.V6.vgtbf.or" => "__builtin_HEXAGON_V6_vgtbf_or", + "llvm.hexagon.V6.vgtbf.or.128B" => "__builtin_HEXAGON_V6_vgtbf_or_128B", + "llvm.hexagon.V6.vgtbf.xor" => "__builtin_HEXAGON_V6_vgtbf_xor", + "llvm.hexagon.V6.vgtbf.xor.128B" => "__builtin_HEXAGON_V6_vgtbf_xor_128B", + "llvm.hexagon.V6.vgth" => "__builtin_HEXAGON_V6_vgth", + "llvm.hexagon.V6.vgth.128B" => "__builtin_HEXAGON_V6_vgth_128B", + "llvm.hexagon.V6.vgth.and" => "__builtin_HEXAGON_V6_vgth_and", + "llvm.hexagon.V6.vgth.and.128B" => "__builtin_HEXAGON_V6_vgth_and_128B", + "llvm.hexagon.V6.vgth.or" => "__builtin_HEXAGON_V6_vgth_or", + "llvm.hexagon.V6.vgth.or.128B" => "__builtin_HEXAGON_V6_vgth_or_128B", + "llvm.hexagon.V6.vgth.xor" => "__builtin_HEXAGON_V6_vgth_xor", + "llvm.hexagon.V6.vgth.xor.128B" => "__builtin_HEXAGON_V6_vgth_xor_128B", + "llvm.hexagon.V6.vgthf" => "__builtin_HEXAGON_V6_vgthf", + "llvm.hexagon.V6.vgthf.128B" => "__builtin_HEXAGON_V6_vgthf_128B", + "llvm.hexagon.V6.vgthf.and" => "__builtin_HEXAGON_V6_vgthf_and", + "llvm.hexagon.V6.vgthf.and.128B" => "__builtin_HEXAGON_V6_vgthf_and_128B", + "llvm.hexagon.V6.vgthf.or" => "__builtin_HEXAGON_V6_vgthf_or", + "llvm.hexagon.V6.vgthf.or.128B" => "__builtin_HEXAGON_V6_vgthf_or_128B", + "llvm.hexagon.V6.vgthf.xor" => "__builtin_HEXAGON_V6_vgthf_xor", + "llvm.hexagon.V6.vgthf.xor.128B" => "__builtin_HEXAGON_V6_vgthf_xor_128B", + "llvm.hexagon.V6.vgtsf" => "__builtin_HEXAGON_V6_vgtsf", + "llvm.hexagon.V6.vgtsf.128B" => "__builtin_HEXAGON_V6_vgtsf_128B", + "llvm.hexagon.V6.vgtsf.and" => "__builtin_HEXAGON_V6_vgtsf_and", + "llvm.hexagon.V6.vgtsf.and.128B" => "__builtin_HEXAGON_V6_vgtsf_and_128B", + "llvm.hexagon.V6.vgtsf.or" => "__builtin_HEXAGON_V6_vgtsf_or", + "llvm.hexagon.V6.vgtsf.or.128B" => "__builtin_HEXAGON_V6_vgtsf_or_128B", + "llvm.hexagon.V6.vgtsf.xor" => "__builtin_HEXAGON_V6_vgtsf_xor", + "llvm.hexagon.V6.vgtsf.xor.128B" => "__builtin_HEXAGON_V6_vgtsf_xor_128B", + "llvm.hexagon.V6.vgtub" => "__builtin_HEXAGON_V6_vgtub", + "llvm.hexagon.V6.vgtub.128B" => "__builtin_HEXAGON_V6_vgtub_128B", + "llvm.hexagon.V6.vgtub.and" => "__builtin_HEXAGON_V6_vgtub_and", + "llvm.hexagon.V6.vgtub.and.128B" => "__builtin_HEXAGON_V6_vgtub_and_128B", + "llvm.hexagon.V6.vgtub.or" => "__builtin_HEXAGON_V6_vgtub_or", + "llvm.hexagon.V6.vgtub.or.128B" => "__builtin_HEXAGON_V6_vgtub_or_128B", + "llvm.hexagon.V6.vgtub.xor" => "__builtin_HEXAGON_V6_vgtub_xor", + "llvm.hexagon.V6.vgtub.xor.128B" => "__builtin_HEXAGON_V6_vgtub_xor_128B", + "llvm.hexagon.V6.vgtuh" => "__builtin_HEXAGON_V6_vgtuh", + "llvm.hexagon.V6.vgtuh.128B" => "__builtin_HEXAGON_V6_vgtuh_128B", + "llvm.hexagon.V6.vgtuh.and" => "__builtin_HEXAGON_V6_vgtuh_and", + "llvm.hexagon.V6.vgtuh.and.128B" => "__builtin_HEXAGON_V6_vgtuh_and_128B", + "llvm.hexagon.V6.vgtuh.or" => 
"__builtin_HEXAGON_V6_vgtuh_or", + "llvm.hexagon.V6.vgtuh.or.128B" => "__builtin_HEXAGON_V6_vgtuh_or_128B", + "llvm.hexagon.V6.vgtuh.xor" => "__builtin_HEXAGON_V6_vgtuh_xor", + "llvm.hexagon.V6.vgtuh.xor.128B" => "__builtin_HEXAGON_V6_vgtuh_xor_128B", + "llvm.hexagon.V6.vgtuw" => "__builtin_HEXAGON_V6_vgtuw", + "llvm.hexagon.V6.vgtuw.128B" => "__builtin_HEXAGON_V6_vgtuw_128B", + "llvm.hexagon.V6.vgtuw.and" => "__builtin_HEXAGON_V6_vgtuw_and", + "llvm.hexagon.V6.vgtuw.and.128B" => "__builtin_HEXAGON_V6_vgtuw_and_128B", + "llvm.hexagon.V6.vgtuw.or" => "__builtin_HEXAGON_V6_vgtuw_or", + "llvm.hexagon.V6.vgtuw.or.128B" => "__builtin_HEXAGON_V6_vgtuw_or_128B", + "llvm.hexagon.V6.vgtuw.xor" => "__builtin_HEXAGON_V6_vgtuw_xor", + "llvm.hexagon.V6.vgtuw.xor.128B" => "__builtin_HEXAGON_V6_vgtuw_xor_128B", + "llvm.hexagon.V6.vgtw" => "__builtin_HEXAGON_V6_vgtw", + "llvm.hexagon.V6.vgtw.128B" => "__builtin_HEXAGON_V6_vgtw_128B", + "llvm.hexagon.V6.vgtw.and" => "__builtin_HEXAGON_V6_vgtw_and", + "llvm.hexagon.V6.vgtw.and.128B" => "__builtin_HEXAGON_V6_vgtw_and_128B", + "llvm.hexagon.V6.vgtw.or" => "__builtin_HEXAGON_V6_vgtw_or", + "llvm.hexagon.V6.vgtw.or.128B" => "__builtin_HEXAGON_V6_vgtw_or_128B", + "llvm.hexagon.V6.vgtw.xor" => "__builtin_HEXAGON_V6_vgtw_xor", + "llvm.hexagon.V6.vgtw.xor.128B" => "__builtin_HEXAGON_V6_vgtw_xor_128B", "llvm.hexagon.V6.vinsertwr" => "__builtin_HEXAGON_V6_vinsertwr", "llvm.hexagon.V6.vinsertwr.128B" => "__builtin_HEXAGON_V6_vinsertwr_128B", "llvm.hexagon.V6.vlalignb" => "__builtin_HEXAGON_V6_vlalignb", "llvm.hexagon.V6.vlalignb.128B" => "__builtin_HEXAGON_V6_vlalignb_128B", "llvm.hexagon.V6.vlalignbi" => "__builtin_HEXAGON_V6_vlalignbi", "llvm.hexagon.V6.vlalignbi.128B" => "__builtin_HEXAGON_V6_vlalignbi_128B", + "llvm.hexagon.V6.vlsrb" => "__builtin_HEXAGON_V6_vlsrb", + "llvm.hexagon.V6.vlsrb.128B" => "__builtin_HEXAGON_V6_vlsrb_128B", "llvm.hexagon.V6.vlsrh" => "__builtin_HEXAGON_V6_vlsrh", "llvm.hexagon.V6.vlsrh.128B" => "__builtin_HEXAGON_V6_vlsrh_128B", "llvm.hexagon.V6.vlsrhv" => "__builtin_HEXAGON_V6_vlsrhv", @@ -1310,6 +1743,8 @@ match name { "llvm.hexagon.V6.vlsrw.128B" => "__builtin_HEXAGON_V6_vlsrw_128B", "llvm.hexagon.V6.vlsrwv" => "__builtin_HEXAGON_V6_vlsrwv", "llvm.hexagon.V6.vlsrwv.128B" => "__builtin_HEXAGON_V6_vlsrwv_128B", + "llvm.hexagon.V6.vlut4" => "__builtin_HEXAGON_V6_vlut4", + "llvm.hexagon.V6.vlut4.128B" => "__builtin_HEXAGON_V6_vlut4_128B", "llvm.hexagon.V6.vlutb" => "__builtin_HEXAGON_V6_vlutb", "llvm.hexagon.V6.vlutb.128B" => "__builtin_HEXAGON_V6_vlutb_128B", "llvm.hexagon.V6.vlutb.acc" => "__builtin_HEXAGON_V6_vlutb_acc", @@ -1320,12 +1755,32 @@ match name { "llvm.hexagon.V6.vlutb.dv.acc.128B" => "__builtin_HEXAGON_V6_vlutb_dv_acc_128B", "llvm.hexagon.V6.vlutvvb" => "__builtin_HEXAGON_V6_vlutvvb", "llvm.hexagon.V6.vlutvvb.128B" => "__builtin_HEXAGON_V6_vlutvvb_128B", + "llvm.hexagon.V6.vlutvvb.nm" => "__builtin_HEXAGON_V6_vlutvvb_nm", + "llvm.hexagon.V6.vlutvvb.nm.128B" => "__builtin_HEXAGON_V6_vlutvvb_nm_128B", "llvm.hexagon.V6.vlutvvb.oracc" => "__builtin_HEXAGON_V6_vlutvvb_oracc", "llvm.hexagon.V6.vlutvvb.oracc.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracc_128B", + "llvm.hexagon.V6.vlutvvb.oracci" => "__builtin_HEXAGON_V6_vlutvvb_oracci", + "llvm.hexagon.V6.vlutvvb.oracci.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracci_128B", + "llvm.hexagon.V6.vlutvvbi" => "__builtin_HEXAGON_V6_vlutvvbi", + "llvm.hexagon.V6.vlutvvbi.128B" => "__builtin_HEXAGON_V6_vlutvvbi_128B", "llvm.hexagon.V6.vlutvwh" => "__builtin_HEXAGON_V6_vlutvwh", 
"llvm.hexagon.V6.vlutvwh.128B" => "__builtin_HEXAGON_V6_vlutvwh_128B", + "llvm.hexagon.V6.vlutvwh.nm" => "__builtin_HEXAGON_V6_vlutvwh_nm", + "llvm.hexagon.V6.vlutvwh.nm.128B" => "__builtin_HEXAGON_V6_vlutvwh_nm_128B", "llvm.hexagon.V6.vlutvwh.oracc" => "__builtin_HEXAGON_V6_vlutvwh_oracc", "llvm.hexagon.V6.vlutvwh.oracc.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracc_128B", + "llvm.hexagon.V6.vlutvwh.oracci" => "__builtin_HEXAGON_V6_vlutvwh_oracci", + "llvm.hexagon.V6.vlutvwh.oracci.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracci_128B", + "llvm.hexagon.V6.vlutvwhi" => "__builtin_HEXAGON_V6_vlutvwhi", + "llvm.hexagon.V6.vlutvwhi.128B" => "__builtin_HEXAGON_V6_vlutvwhi_128B", + "llvm.hexagon.V6.vmax.bf" => "__builtin_HEXAGON_V6_vmax_bf", + "llvm.hexagon.V6.vmax.bf.128B" => "__builtin_HEXAGON_V6_vmax_bf_128B", + "llvm.hexagon.V6.vmax.hf" => "__builtin_HEXAGON_V6_vmax_hf", + "llvm.hexagon.V6.vmax.hf.128B" => "__builtin_HEXAGON_V6_vmax_hf_128B", + "llvm.hexagon.V6.vmax.sf" => "__builtin_HEXAGON_V6_vmax_sf", + "llvm.hexagon.V6.vmax.sf.128B" => "__builtin_HEXAGON_V6_vmax_sf_128B", + "llvm.hexagon.V6.vmaxb" => "__builtin_HEXAGON_V6_vmaxb", + "llvm.hexagon.V6.vmaxb.128B" => "__builtin_HEXAGON_V6_vmaxb_128B", "llvm.hexagon.V6.vmaxh" => "__builtin_HEXAGON_V6_vmaxh", "llvm.hexagon.V6.vmaxh.128B" => "__builtin_HEXAGON_V6_vmaxh_128B", "llvm.hexagon.V6.vmaxub" => "__builtin_HEXAGON_V6_vmaxub", @@ -1334,6 +1789,14 @@ match name { "llvm.hexagon.V6.vmaxuh.128B" => "__builtin_HEXAGON_V6_vmaxuh_128B", "llvm.hexagon.V6.vmaxw" => "__builtin_HEXAGON_V6_vmaxw", "llvm.hexagon.V6.vmaxw.128B" => "__builtin_HEXAGON_V6_vmaxw_128B", + "llvm.hexagon.V6.vmin.bf" => "__builtin_HEXAGON_V6_vmin_bf", + "llvm.hexagon.V6.vmin.bf.128B" => "__builtin_HEXAGON_V6_vmin_bf_128B", + "llvm.hexagon.V6.vmin.hf" => "__builtin_HEXAGON_V6_vmin_hf", + "llvm.hexagon.V6.vmin.hf.128B" => "__builtin_HEXAGON_V6_vmin_hf_128B", + "llvm.hexagon.V6.vmin.sf" => "__builtin_HEXAGON_V6_vmin_sf", + "llvm.hexagon.V6.vmin.sf.128B" => "__builtin_HEXAGON_V6_vmin_sf_128B", + "llvm.hexagon.V6.vminb" => "__builtin_HEXAGON_V6_vminb", + "llvm.hexagon.V6.vminb.128B" => "__builtin_HEXAGON_V6_vminb_128B", "llvm.hexagon.V6.vminh" => "__builtin_HEXAGON_V6_vminh", "llvm.hexagon.V6.vminh.128B" => "__builtin_HEXAGON_V6_vminh_128B", "llvm.hexagon.V6.vminub" => "__builtin_HEXAGON_V6_vminub", @@ -1348,12 +1811,56 @@ match name { "llvm.hexagon.V6.vmpabus.acc.128B" => "__builtin_HEXAGON_V6_vmpabus_acc_128B", "llvm.hexagon.V6.vmpabusv" => "__builtin_HEXAGON_V6_vmpabusv", "llvm.hexagon.V6.vmpabusv.128B" => "__builtin_HEXAGON_V6_vmpabusv_128B", + "llvm.hexagon.V6.vmpabuu" => "__builtin_HEXAGON_V6_vmpabuu", + "llvm.hexagon.V6.vmpabuu.128B" => "__builtin_HEXAGON_V6_vmpabuu_128B", + "llvm.hexagon.V6.vmpabuu.acc" => "__builtin_HEXAGON_V6_vmpabuu_acc", + "llvm.hexagon.V6.vmpabuu.acc.128B" => "__builtin_HEXAGON_V6_vmpabuu_acc_128B", "llvm.hexagon.V6.vmpabuuv" => "__builtin_HEXAGON_V6_vmpabuuv", "llvm.hexagon.V6.vmpabuuv.128B" => "__builtin_HEXAGON_V6_vmpabuuv_128B", "llvm.hexagon.V6.vmpahb" => "__builtin_HEXAGON_V6_vmpahb", "llvm.hexagon.V6.vmpahb.128B" => "__builtin_HEXAGON_V6_vmpahb_128B", "llvm.hexagon.V6.vmpahb.acc" => "__builtin_HEXAGON_V6_vmpahb_acc", "llvm.hexagon.V6.vmpahb.acc.128B" => "__builtin_HEXAGON_V6_vmpahb_acc_128B", + "llvm.hexagon.V6.vmpahhsat" => "__builtin_HEXAGON_V6_vmpahhsat", + "llvm.hexagon.V6.vmpahhsat.128B" => "__builtin_HEXAGON_V6_vmpahhsat_128B", + "llvm.hexagon.V6.vmpauhb" => "__builtin_HEXAGON_V6_vmpauhb", + "llvm.hexagon.V6.vmpauhb.128B" => 
"__builtin_HEXAGON_V6_vmpauhb_128B", + "llvm.hexagon.V6.vmpauhb.acc" => "__builtin_HEXAGON_V6_vmpauhb_acc", + "llvm.hexagon.V6.vmpauhb.acc.128B" => "__builtin_HEXAGON_V6_vmpauhb_acc_128B", + "llvm.hexagon.V6.vmpauhuhsat" => "__builtin_HEXAGON_V6_vmpauhuhsat", + "llvm.hexagon.V6.vmpauhuhsat.128B" => "__builtin_HEXAGON_V6_vmpauhuhsat_128B", + "llvm.hexagon.V6.vmpsuhuhsat" => "__builtin_HEXAGON_V6_vmpsuhuhsat", + "llvm.hexagon.V6.vmpsuhuhsat.128B" => "__builtin_HEXAGON_V6_vmpsuhuhsat_128B", + "llvm.hexagon.V6.vmpy.hf.hf" => "__builtin_HEXAGON_V6_vmpy_hf_hf", + "llvm.hexagon.V6.vmpy.hf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_128B", + "llvm.hexagon.V6.vmpy.hf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc", + "llvm.hexagon.V6.vmpy.hf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_hf_hf_acc_128B", + "llvm.hexagon.V6.vmpy.qf16" => "__builtin_HEXAGON_V6_vmpy_qf16", + "llvm.hexagon.V6.vmpy.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_128B", + "llvm.hexagon.V6.vmpy.qf16.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_hf", + "llvm.hexagon.V6.vmpy.qf16.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_hf_128B", + "llvm.hexagon.V6.vmpy.qf16.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf", + "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf16_mix_hf_128B", + "llvm.hexagon.V6.vmpy.qf32" => "__builtin_HEXAGON_V6_vmpy_qf32", + "llvm.hexagon.V6.vmpy.qf32.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_128B", + "llvm.hexagon.V6.vmpy.qf32.hf" => "__builtin_HEXAGON_V6_vmpy_qf32_hf", + "llvm.hexagon.V6.vmpy.qf32.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_hf_128B", + "llvm.hexagon.V6.vmpy.qf32.mix.hf" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf", + "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_mix_hf_128B", + "llvm.hexagon.V6.vmpy.qf32.qf16" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16", + "llvm.hexagon.V6.vmpy.qf32.qf16.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_qf16_128B", + "llvm.hexagon.V6.vmpy.qf32.sf" => "__builtin_HEXAGON_V6_vmpy_qf32_sf", + "llvm.hexagon.V6.vmpy.qf32.sf.128B" => "__builtin_HEXAGON_V6_vmpy_qf32_sf_128B", + "llvm.hexagon.V6.vmpy.sf.bf" => "__builtin_HEXAGON_V6_vmpy_sf_bf", + "llvm.hexagon.V6.vmpy.sf.bf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_128B", + "llvm.hexagon.V6.vmpy.sf.bf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc", + "llvm.hexagon.V6.vmpy.sf.bf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_bf_acc_128B", + "llvm.hexagon.V6.vmpy.sf.hf" => "__builtin_HEXAGON_V6_vmpy_sf_hf", + "llvm.hexagon.V6.vmpy.sf.hf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_128B", + "llvm.hexagon.V6.vmpy.sf.hf.acc" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc", + "llvm.hexagon.V6.vmpy.sf.hf.acc.128B" => "__builtin_HEXAGON_V6_vmpy_sf_hf_acc_128B", + "llvm.hexagon.V6.vmpy.sf.sf" => "__builtin_HEXAGON_V6_vmpy_sf_sf", + "llvm.hexagon.V6.vmpy.sf.sf.128B" => "__builtin_HEXAGON_V6_vmpy_sf_sf_128B", "llvm.hexagon.V6.vmpybus" => "__builtin_HEXAGON_V6_vmpybus", "llvm.hexagon.V6.vmpybus.128B" => "__builtin_HEXAGON_V6_vmpybus_128B", "llvm.hexagon.V6.vmpybus.acc" => "__builtin_HEXAGON_V6_vmpybus_acc", @@ -1368,8 +1875,12 @@ match name { "llvm.hexagon.V6.vmpybv.acc.128B" => "__builtin_HEXAGON_V6_vmpybv_acc_128B", "llvm.hexagon.V6.vmpyewuh" => "__builtin_HEXAGON_V6_vmpyewuh", "llvm.hexagon.V6.vmpyewuh.128B" => "__builtin_HEXAGON_V6_vmpyewuh_128B", + "llvm.hexagon.V6.vmpyewuh.64" => "__builtin_HEXAGON_V6_vmpyewuh_64", + "llvm.hexagon.V6.vmpyewuh.64.128B" => "__builtin_HEXAGON_V6_vmpyewuh_64_128B", "llvm.hexagon.V6.vmpyh" => "__builtin_HEXAGON_V6_vmpyh", "llvm.hexagon.V6.vmpyh.128B" => 
"__builtin_HEXAGON_V6_vmpyh_128B", + "llvm.hexagon.V6.vmpyh.acc" => "__builtin_HEXAGON_V6_vmpyh_acc", + "llvm.hexagon.V6.vmpyh.acc.128B" => "__builtin_HEXAGON_V6_vmpyh_acc_128B", "llvm.hexagon.V6.vmpyhsat.acc" => "__builtin_HEXAGON_V6_vmpyhsat_acc", "llvm.hexagon.V6.vmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vmpyhsat_acc_128B", "llvm.hexagon.V6.vmpyhsrs" => "__builtin_HEXAGON_V6_vmpyhsrs", @@ -1412,8 +1923,14 @@ match name { "llvm.hexagon.V6.vmpyiwh.128B" => "__builtin_HEXAGON_V6_vmpyiwh_128B", "llvm.hexagon.V6.vmpyiwh.acc" => "__builtin_HEXAGON_V6_vmpyiwh_acc", "llvm.hexagon.V6.vmpyiwh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwh_acc_128B", + "llvm.hexagon.V6.vmpyiwub" => "__builtin_HEXAGON_V6_vmpyiwub", + "llvm.hexagon.V6.vmpyiwub.128B" => "__builtin_HEXAGON_V6_vmpyiwub_128B", + "llvm.hexagon.V6.vmpyiwub.acc" => "__builtin_HEXAGON_V6_vmpyiwub_acc", + "llvm.hexagon.V6.vmpyiwub.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwub_acc_128B", "llvm.hexagon.V6.vmpyowh" => "__builtin_HEXAGON_V6_vmpyowh", "llvm.hexagon.V6.vmpyowh.128B" => "__builtin_HEXAGON_V6_vmpyowh_128B", + "llvm.hexagon.V6.vmpyowh.64.acc" => "__builtin_HEXAGON_V6_vmpyowh_64_acc", + "llvm.hexagon.V6.vmpyowh.64.acc.128B" => "__builtin_HEXAGON_V6_vmpyowh_64_acc_128B", "llvm.hexagon.V6.vmpyowh.rnd" => "__builtin_HEXAGON_V6_vmpyowh_rnd", "llvm.hexagon.V6.vmpyowh.rnd.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_128B", "llvm.hexagon.V6.vmpyowh.rnd.sacc" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc", @@ -1432,10 +1949,20 @@ match name { "llvm.hexagon.V6.vmpyuh.128B" => "__builtin_HEXAGON_V6_vmpyuh_128B", "llvm.hexagon.V6.vmpyuh.acc" => "__builtin_HEXAGON_V6_vmpyuh_acc", "llvm.hexagon.V6.vmpyuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyuh_acc_128B", + "llvm.hexagon.V6.vmpyuhe" => "__builtin_HEXAGON_V6_vmpyuhe", + "llvm.hexagon.V6.vmpyuhe.128B" => "__builtin_HEXAGON_V6_vmpyuhe_128B", + "llvm.hexagon.V6.vmpyuhe.acc" => "__builtin_HEXAGON_V6_vmpyuhe_acc", + "llvm.hexagon.V6.vmpyuhe.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhe_acc_128B", "llvm.hexagon.V6.vmpyuhv" => "__builtin_HEXAGON_V6_vmpyuhv", "llvm.hexagon.V6.vmpyuhv.128B" => "__builtin_HEXAGON_V6_vmpyuhv_128B", "llvm.hexagon.V6.vmpyuhv.acc" => "__builtin_HEXAGON_V6_vmpyuhv_acc", "llvm.hexagon.V6.vmpyuhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhv_acc_128B", + "llvm.hexagon.V6.vmpyuhvs" => "__builtin_HEXAGON_V6_vmpyuhvs", + "llvm.hexagon.V6.vmpyuhvs.128B" => "__builtin_HEXAGON_V6_vmpyuhvs_128B", + "llvm.hexagon.V6.vmux" => "__builtin_HEXAGON_V6_vmux", + "llvm.hexagon.V6.vmux.128B" => "__builtin_HEXAGON_V6_vmux_128B", + "llvm.hexagon.V6.vnavgb" => "__builtin_HEXAGON_V6_vnavgb", + "llvm.hexagon.V6.vnavgb.128B" => "__builtin_HEXAGON_V6_vnavgb_128B", "llvm.hexagon.V6.vnavgh" => "__builtin_HEXAGON_V6_vnavgh", "llvm.hexagon.V6.vnavgh.128B" => "__builtin_HEXAGON_V6_vnavgh_128B", "llvm.hexagon.V6.vnavgub" => "__builtin_HEXAGON_V6_vnavgub", @@ -1468,8 +1995,18 @@ match name { "llvm.hexagon.V6.vpackwuh.sat.128B" => "__builtin_HEXAGON_V6_vpackwuh_sat_128B", "llvm.hexagon.V6.vpopcounth" => "__builtin_HEXAGON_V6_vpopcounth", "llvm.hexagon.V6.vpopcounth.128B" => "__builtin_HEXAGON_V6_vpopcounth_128B", + "llvm.hexagon.V6.vprefixqb" => "__builtin_HEXAGON_V6_vprefixqb", + "llvm.hexagon.V6.vprefixqb.128B" => "__builtin_HEXAGON_V6_vprefixqb_128B", + "llvm.hexagon.V6.vprefixqh" => "__builtin_HEXAGON_V6_vprefixqh", + "llvm.hexagon.V6.vprefixqh.128B" => "__builtin_HEXAGON_V6_vprefixqh_128B", + "llvm.hexagon.V6.vprefixqw" => "__builtin_HEXAGON_V6_vprefixqw", + "llvm.hexagon.V6.vprefixqw.128B" => 
"__builtin_HEXAGON_V6_vprefixqw_128B", "llvm.hexagon.V6.vrdelta" => "__builtin_HEXAGON_V6_vrdelta", "llvm.hexagon.V6.vrdelta.128B" => "__builtin_HEXAGON_V6_vrdelta_128B", + "llvm.hexagon.V6.vrmpybub.rtt" => "__builtin_HEXAGON_V6_vrmpybub_rtt", + "llvm.hexagon.V6.vrmpybub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_128B", + "llvm.hexagon.V6.vrmpybub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc", + "llvm.hexagon.V6.vrmpybub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B", "llvm.hexagon.V6.vrmpybus" => "__builtin_HEXAGON_V6_vrmpybus", "llvm.hexagon.V6.vrmpybus.128B" => "__builtin_HEXAGON_V6_vrmpybus_128B", "llvm.hexagon.V6.vrmpybus.acc" => "__builtin_HEXAGON_V6_vrmpybus_acc", @@ -1490,6 +2027,10 @@ match name { "llvm.hexagon.V6.vrmpyub.128B" => "__builtin_HEXAGON_V6_vrmpyub_128B", "llvm.hexagon.V6.vrmpyub.acc" => "__builtin_HEXAGON_V6_vrmpyub_acc", "llvm.hexagon.V6.vrmpyub.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_acc_128B", + "llvm.hexagon.V6.vrmpyub.rtt" => "__builtin_HEXAGON_V6_vrmpyub_rtt", + "llvm.hexagon.V6.vrmpyub.rtt.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_128B", + "llvm.hexagon.V6.vrmpyub.rtt.acc" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc", + "llvm.hexagon.V6.vrmpyub.rtt.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B", "llvm.hexagon.V6.vrmpyubi" => "__builtin_HEXAGON_V6_vrmpyubi", "llvm.hexagon.V6.vrmpyubi.128B" => "__builtin_HEXAGON_V6_vrmpyubi_128B", "llvm.hexagon.V6.vrmpyubi.acc" => "__builtin_HEXAGON_V6_vrmpyubi_acc", @@ -1500,10 +2041,16 @@ match name { "llvm.hexagon.V6.vrmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubv_acc_128B", "llvm.hexagon.V6.vror" => "__builtin_HEXAGON_V6_vror", "llvm.hexagon.V6.vror.128B" => "__builtin_HEXAGON_V6_vror_128B", + "llvm.hexagon.V6.vrotr" => "__builtin_HEXAGON_V6_vrotr", + "llvm.hexagon.V6.vrotr.128B" => "__builtin_HEXAGON_V6_vrotr_128B", "llvm.hexagon.V6.vroundhb" => "__builtin_HEXAGON_V6_vroundhb", "llvm.hexagon.V6.vroundhb.128B" => "__builtin_HEXAGON_V6_vroundhb_128B", "llvm.hexagon.V6.vroundhub" => "__builtin_HEXAGON_V6_vroundhub", "llvm.hexagon.V6.vroundhub.128B" => "__builtin_HEXAGON_V6_vroundhub_128B", + "llvm.hexagon.V6.vrounduhub" => "__builtin_HEXAGON_V6_vrounduhub", + "llvm.hexagon.V6.vrounduhub.128B" => "__builtin_HEXAGON_V6_vrounduhub_128B", + "llvm.hexagon.V6.vrounduwuh" => "__builtin_HEXAGON_V6_vrounduwuh", + "llvm.hexagon.V6.vrounduwuh.128B" => "__builtin_HEXAGON_V6_vrounduwuh_128B", "llvm.hexagon.V6.vroundwh" => "__builtin_HEXAGON_V6_vroundwh", "llvm.hexagon.V6.vroundwh.128B" => "__builtin_HEXAGON_V6_vroundwh_128B", "llvm.hexagon.V6.vroundwuh" => "__builtin_HEXAGON_V6_vroundwuh", @@ -1512,12 +2059,34 @@ match name { "llvm.hexagon.V6.vrsadubi.128B" => "__builtin_HEXAGON_V6_vrsadubi_128B", "llvm.hexagon.V6.vrsadubi.acc" => "__builtin_HEXAGON_V6_vrsadubi_acc", "llvm.hexagon.V6.vrsadubi.acc.128B" => "__builtin_HEXAGON_V6_vrsadubi_acc_128B", + "llvm.hexagon.V6.vsatdw" => "__builtin_HEXAGON_V6_vsatdw", + "llvm.hexagon.V6.vsatdw.128B" => "__builtin_HEXAGON_V6_vsatdw_128B", "llvm.hexagon.V6.vsathub" => "__builtin_HEXAGON_V6_vsathub", "llvm.hexagon.V6.vsathub.128B" => "__builtin_HEXAGON_V6_vsathub_128B", + "llvm.hexagon.V6.vsatuwuh" => "__builtin_HEXAGON_V6_vsatuwuh", + "llvm.hexagon.V6.vsatuwuh.128B" => "__builtin_HEXAGON_V6_vsatuwuh_128B", "llvm.hexagon.V6.vsatwh" => "__builtin_HEXAGON_V6_vsatwh", "llvm.hexagon.V6.vsatwh.128B" => "__builtin_HEXAGON_V6_vsatwh_128B", "llvm.hexagon.V6.vsb" => "__builtin_HEXAGON_V6_vsb", "llvm.hexagon.V6.vsb.128B" => "__builtin_HEXAGON_V6_vsb_128B", + 
"llvm.hexagon.V6.vscattermh" => "__builtin_HEXAGON_V6_vscattermh", + "llvm.hexagon.V6.vscattermh.128B" => "__builtin_HEXAGON_V6_vscattermh_128B", + "llvm.hexagon.V6.vscattermh.add" => "__builtin_HEXAGON_V6_vscattermh_add", + "llvm.hexagon.V6.vscattermh.add.128B" => "__builtin_HEXAGON_V6_vscattermh_add_128B", + "llvm.hexagon.V6.vscattermhq" => "__builtin_HEXAGON_V6_vscattermhq", + "llvm.hexagon.V6.vscattermhq.128B" => "__builtin_HEXAGON_V6_vscattermhq_128B", + "llvm.hexagon.V6.vscattermhw" => "__builtin_HEXAGON_V6_vscattermhw", + "llvm.hexagon.V6.vscattermhw.128B" => "__builtin_HEXAGON_V6_vscattermhw_128B", + "llvm.hexagon.V6.vscattermhw.add" => "__builtin_HEXAGON_V6_vscattermhw_add", + "llvm.hexagon.V6.vscattermhw.add.128B" => "__builtin_HEXAGON_V6_vscattermhw_add_128B", + "llvm.hexagon.V6.vscattermhwq" => "__builtin_HEXAGON_V6_vscattermhwq", + "llvm.hexagon.V6.vscattermhwq.128B" => "__builtin_HEXAGON_V6_vscattermhwq_128B", + "llvm.hexagon.V6.vscattermw" => "__builtin_HEXAGON_V6_vscattermw", + "llvm.hexagon.V6.vscattermw.128B" => "__builtin_HEXAGON_V6_vscattermw_128B", + "llvm.hexagon.V6.vscattermw.add" => "__builtin_HEXAGON_V6_vscattermw_add", + "llvm.hexagon.V6.vscattermw.add.128B" => "__builtin_HEXAGON_V6_vscattermw_add_128B", + "llvm.hexagon.V6.vscattermwq" => "__builtin_HEXAGON_V6_vscattermwq", + "llvm.hexagon.V6.vscattermwq.128B" => "__builtin_HEXAGON_V6_vscattermwq_128B", "llvm.hexagon.V6.vsh" => "__builtin_HEXAGON_V6_vsh", "llvm.hexagon.V6.vsh.128B" => "__builtin_HEXAGON_V6_vsh_128B", "llvm.hexagon.V6.vshufeh" => "__builtin_HEXAGON_V6_vshufeh", @@ -1538,14 +2107,46 @@ match name { "llvm.hexagon.V6.vshufoeh.128B" => "__builtin_HEXAGON_V6_vshufoeh_128B", "llvm.hexagon.V6.vshufoh" => "__builtin_HEXAGON_V6_vshufoh", "llvm.hexagon.V6.vshufoh.128B" => "__builtin_HEXAGON_V6_vshufoh_128B", + "llvm.hexagon.V6.vsub.hf" => "__builtin_HEXAGON_V6_vsub_hf", + "llvm.hexagon.V6.vsub.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_128B", + "llvm.hexagon.V6.vsub.hf.hf" => "__builtin_HEXAGON_V6_vsub_hf_hf", + "llvm.hexagon.V6.vsub.hf.hf.128B" => "__builtin_HEXAGON_V6_vsub_hf_hf_128B", + "llvm.hexagon.V6.vsub.qf16" => "__builtin_HEXAGON_V6_vsub_qf16", + "llvm.hexagon.V6.vsub.qf16.128B" => "__builtin_HEXAGON_V6_vsub_qf16_128B", + "llvm.hexagon.V6.vsub.qf16.mix" => "__builtin_HEXAGON_V6_vsub_qf16_mix", + "llvm.hexagon.V6.vsub.qf16.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf16_mix_128B", + "llvm.hexagon.V6.vsub.qf32" => "__builtin_HEXAGON_V6_vsub_qf32", + "llvm.hexagon.V6.vsub.qf32.128B" => "__builtin_HEXAGON_V6_vsub_qf32_128B", + "llvm.hexagon.V6.vsub.qf32.mix" => "__builtin_HEXAGON_V6_vsub_qf32_mix", + "llvm.hexagon.V6.vsub.qf32.mix.128B" => "__builtin_HEXAGON_V6_vsub_qf32_mix_128B", + "llvm.hexagon.V6.vsub.sf" => "__builtin_HEXAGON_V6_vsub_sf", + "llvm.hexagon.V6.vsub.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_128B", + "llvm.hexagon.V6.vsub.sf.bf" => "__builtin_HEXAGON_V6_vsub_sf_bf", + "llvm.hexagon.V6.vsub.sf.bf.128B" => "__builtin_HEXAGON_V6_vsub_sf_bf_128B", + "llvm.hexagon.V6.vsub.sf.hf" => "__builtin_HEXAGON_V6_vsub_sf_hf", + "llvm.hexagon.V6.vsub.sf.hf.128B" => "__builtin_HEXAGON_V6_vsub_sf_hf_128B", + "llvm.hexagon.V6.vsub.sf.sf" => "__builtin_HEXAGON_V6_vsub_sf_sf", + "llvm.hexagon.V6.vsub.sf.sf.128B" => "__builtin_HEXAGON_V6_vsub_sf_sf_128B", "llvm.hexagon.V6.vsubb" => "__builtin_HEXAGON_V6_vsubb", "llvm.hexagon.V6.vsubb.128B" => "__builtin_HEXAGON_V6_vsubb_128B", "llvm.hexagon.V6.vsubb.dv" => "__builtin_HEXAGON_V6_vsubb_dv", "llvm.hexagon.V6.vsubb.dv.128B" => "__builtin_HEXAGON_V6_vsubb_dv_128B", + 
"llvm.hexagon.V6.vsubbnq" => "__builtin_HEXAGON_V6_vsubbnq", + "llvm.hexagon.V6.vsubbnq.128B" => "__builtin_HEXAGON_V6_vsubbnq_128B", + "llvm.hexagon.V6.vsubbq" => "__builtin_HEXAGON_V6_vsubbq", + "llvm.hexagon.V6.vsubbq.128B" => "__builtin_HEXAGON_V6_vsubbq_128B", + "llvm.hexagon.V6.vsubbsat" => "__builtin_HEXAGON_V6_vsubbsat", + "llvm.hexagon.V6.vsubbsat.128B" => "__builtin_HEXAGON_V6_vsubbsat_128B", + "llvm.hexagon.V6.vsubbsat.dv" => "__builtin_HEXAGON_V6_vsubbsat_dv", + "llvm.hexagon.V6.vsubbsat.dv.128B" => "__builtin_HEXAGON_V6_vsubbsat_dv_128B", "llvm.hexagon.V6.vsubh" => "__builtin_HEXAGON_V6_vsubh", "llvm.hexagon.V6.vsubh.128B" => "__builtin_HEXAGON_V6_vsubh_128B", "llvm.hexagon.V6.vsubh.dv" => "__builtin_HEXAGON_V6_vsubh_dv", "llvm.hexagon.V6.vsubh.dv.128B" => "__builtin_HEXAGON_V6_vsubh_dv_128B", + "llvm.hexagon.V6.vsubhnq" => "__builtin_HEXAGON_V6_vsubhnq", + "llvm.hexagon.V6.vsubhnq.128B" => "__builtin_HEXAGON_V6_vsubhnq_128B", + "llvm.hexagon.V6.vsubhq" => "__builtin_HEXAGON_V6_vsubhq", + "llvm.hexagon.V6.vsubhq.128B" => "__builtin_HEXAGON_V6_vsubhq_128B", "llvm.hexagon.V6.vsubhsat" => "__builtin_HEXAGON_V6_vsubhsat", "llvm.hexagon.V6.vsubhsat.128B" => "__builtin_HEXAGON_V6_vsubhsat_128B", "llvm.hexagon.V6.vsubhsat.dv" => "__builtin_HEXAGON_V6_vsubhsat_dv", @@ -1558,20 +2159,32 @@ match name { "llvm.hexagon.V6.vsububsat.128B" => "__builtin_HEXAGON_V6_vsububsat_128B", "llvm.hexagon.V6.vsububsat.dv" => "__builtin_HEXAGON_V6_vsububsat_dv", "llvm.hexagon.V6.vsububsat.dv.128B" => "__builtin_HEXAGON_V6_vsububsat_dv_128B", + "llvm.hexagon.V6.vsubububb.sat" => "__builtin_HEXAGON_V6_vsubububb_sat", + "llvm.hexagon.V6.vsubububb.sat.128B" => "__builtin_HEXAGON_V6_vsubububb_sat_128B", "llvm.hexagon.V6.vsubuhsat" => "__builtin_HEXAGON_V6_vsubuhsat", "llvm.hexagon.V6.vsubuhsat.128B" => "__builtin_HEXAGON_V6_vsubuhsat_128B", "llvm.hexagon.V6.vsubuhsat.dv" => "__builtin_HEXAGON_V6_vsubuhsat_dv", "llvm.hexagon.V6.vsubuhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuhsat_dv_128B", "llvm.hexagon.V6.vsubuhw" => "__builtin_HEXAGON_V6_vsubuhw", "llvm.hexagon.V6.vsubuhw.128B" => "__builtin_HEXAGON_V6_vsubuhw_128B", + "llvm.hexagon.V6.vsubuwsat" => "__builtin_HEXAGON_V6_vsubuwsat", + "llvm.hexagon.V6.vsubuwsat.128B" => "__builtin_HEXAGON_V6_vsubuwsat_128B", + "llvm.hexagon.V6.vsubuwsat.dv" => "__builtin_HEXAGON_V6_vsubuwsat_dv", + "llvm.hexagon.V6.vsubuwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuwsat_dv_128B", "llvm.hexagon.V6.vsubw" => "__builtin_HEXAGON_V6_vsubw", "llvm.hexagon.V6.vsubw.128B" => "__builtin_HEXAGON_V6_vsubw_128B", "llvm.hexagon.V6.vsubw.dv" => "__builtin_HEXAGON_V6_vsubw_dv", "llvm.hexagon.V6.vsubw.dv.128B" => "__builtin_HEXAGON_V6_vsubw_dv_128B", + "llvm.hexagon.V6.vsubwnq" => "__builtin_HEXAGON_V6_vsubwnq", + "llvm.hexagon.V6.vsubwnq.128B" => "__builtin_HEXAGON_V6_vsubwnq_128B", + "llvm.hexagon.V6.vsubwq" => "__builtin_HEXAGON_V6_vsubwq", + "llvm.hexagon.V6.vsubwq.128B" => "__builtin_HEXAGON_V6_vsubwq_128B", "llvm.hexagon.V6.vsubwsat" => "__builtin_HEXAGON_V6_vsubwsat", "llvm.hexagon.V6.vsubwsat.128B" => "__builtin_HEXAGON_V6_vsubwsat_128B", "llvm.hexagon.V6.vsubwsat.dv" => "__builtin_HEXAGON_V6_vsubwsat_dv", "llvm.hexagon.V6.vsubwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubwsat_dv_128B", + "llvm.hexagon.V6.vswap" => "__builtin_HEXAGON_V6_vswap", + "llvm.hexagon.V6.vswap.128B" => "__builtin_HEXAGON_V6_vswap_128B", "llvm.hexagon.V6.vtmpyb" => "__builtin_HEXAGON_V6_vtmpyb", "llvm.hexagon.V6.vtmpyb.128B" => "__builtin_HEXAGON_V6_vtmpyb_128B", "llvm.hexagon.V6.vtmpyb.acc" => 
"__builtin_HEXAGON_V6_vtmpyb_acc", @@ -1602,6 +2215,19 @@ match name { "llvm.hexagon.V6.vzb.128B" => "__builtin_HEXAGON_V6_vzb_128B", "llvm.hexagon.V6.vzh" => "__builtin_HEXAGON_V6_vzh", "llvm.hexagon.V6.vzh.128B" => "__builtin_HEXAGON_V6_vzh_128B", + "llvm.hexagon.Y2.dccleana" => "__builtin_HEXAGON_Y2_dccleana", + "llvm.hexagon.Y2.dccleaninva" => "__builtin_HEXAGON_Y2_dccleaninva", + "llvm.hexagon.Y2.dcfetch" => "__builtin_HEXAGON_Y2_dcfetch", + "llvm.hexagon.Y2.dcinva" => "__builtin_HEXAGON_Y2_dcinva", + "llvm.hexagon.Y2.dczeroa" => "__builtin_HEXAGON_Y2_dczeroa", + "llvm.hexagon.Y4.l2fetch" => "__builtin_HEXAGON_Y4_l2fetch", + "llvm.hexagon.Y5.l2fetch" => "__builtin_HEXAGON_Y5_l2fetch", + "llvm.hexagon.Y6.dmlink" => "__builtin_HEXAGON_Y6_dmlink", + "llvm.hexagon.Y6.dmpause" => "__builtin_HEXAGON_Y6_dmpause", + "llvm.hexagon.Y6.dmpoll" => "__builtin_HEXAGON_Y6_dmpoll", + "llvm.hexagon.Y6.dmresume" => "__builtin_HEXAGON_Y6_dmresume", + "llvm.hexagon.Y6.dmstart" => "__builtin_HEXAGON_Y6_dmstart", + "llvm.hexagon.Y6.dmwait" => "__builtin_HEXAGON_Y6_dmwait", "llvm.hexagon.brev.ldb" => "__builtin_brev_ldb", "llvm.hexagon.brev.ldd" => "__builtin_brev_ldd", "llvm.hexagon.brev.ldh" => "__builtin_brev_ldh", @@ -1626,6 +2252,8 @@ match name { "llvm.hexagon.circ.stw" => "__builtin_circ_stw", "llvm.hexagon.mm256i.vaddw" => "__builtin__mm256i_vaddw", "llvm.hexagon.prefetch" => "__builtin_HEXAGON_prefetch", + "llvm.hexagon.vmemcpy" => "__builtin_hexagon_vmemcpy", + "llvm.hexagon.vmemset" => "__builtin_hexagon_vmemset", // mips "llvm.mips.absq.s.ph" => "__builtin_mips_absq_s_ph", "llvm.mips.absq.s.qb" => "__builtin_mips_absq_s_qb", @@ -2299,6 +2927,8 @@ match name { "llvm.mips.xor.v" => "__builtin_msa_xor_v", "llvm.mips.xori.b" => "__builtin_msa_xori_b", // nvvm + "llvm.nvvm.abs.bf16" => "__nvvm_abs_bf16", + "llvm.nvvm.abs.bf16x2" => "__nvvm_abs_bf16x2", "llvm.nvvm.abs.i" => "__nvvm_abs_i", "llvm.nvvm.abs.ll" => "__nvvm_abs_ll", "llvm.nvvm.add.rm.d" => "__nvvm_add_rm_d", @@ -2314,8 +2944,13 @@ match name { "llvm.nvvm.add.rz.f" => "__nvvm_add_rz_f", "llvm.nvvm.add.rz.ftz.f" => "__nvvm_add_rz_ftz_f", "llvm.nvvm.bar.sync" => "__nvvm_bar_sync", - "llvm.nvvm.barrier0" => "__nvvm_bar0", - // [DUPLICATE]: "llvm.nvvm.barrier0" => "__syncthreads", + "llvm.nvvm.bar.warp.sync" => "__nvvm_bar_warp_sync", + "llvm.nvvm.barrier" => "__nvvm_bar", + "llvm.nvvm.barrier.n" => "__nvvm_bar_n", + "llvm.nvvm.barrier.sync" => "__nvvm_barrier_sync", + "llvm.nvvm.barrier.sync.cnt" => "__nvvm_barrier_sync_cnt", + "llvm.nvvm.barrier0" => "__syncthreads", + // [DUPLICATE]: "llvm.nvvm.barrier0" => "__nvvm_bar0", "llvm.nvvm.barrier0.and" => "__nvvm_bar0_and", "llvm.nvvm.barrier0.or" => "__nvvm_bar0_or", "llvm.nvvm.barrier0.popc" => "__nvvm_bar0_popc", @@ -2332,6 +2967,17 @@ match name { "llvm.nvvm.clz.ll" => "__nvvm_clz_ll", "llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f", "llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f", + "llvm.nvvm.cp.async.ca.shared.global.16" => "__nvvm_cp_async_ca_shared_global_16", + "llvm.nvvm.cp.async.ca.shared.global.4" => "__nvvm_cp_async_ca_shared_global_4", + "llvm.nvvm.cp.async.ca.shared.global.8" => "__nvvm_cp_async_ca_shared_global_8", + "llvm.nvvm.cp.async.cg.shared.global.16" => "__nvvm_cp_async_cg_shared_global_16", + "llvm.nvvm.cp.async.commit.group" => "__nvvm_cp_async_commit_group", + "llvm.nvvm.cp.async.mbarrier.arrive" => "__nvvm_cp_async_mbarrier_arrive", + "llvm.nvvm.cp.async.mbarrier.arrive.noinc" => "__nvvm_cp_async_mbarrier_arrive_noinc", + 
"llvm.nvvm.cp.async.mbarrier.arrive.noinc.shared" => "__nvvm_cp_async_mbarrier_arrive_noinc_shared", + "llvm.nvvm.cp.async.mbarrier.arrive.shared" => "__nvvm_cp_async_mbarrier_arrive_shared", + "llvm.nvvm.cp.async.wait.all" => "__nvvm_cp_async_wait_all", + "llvm.nvvm.cp.async.wait.group" => "__nvvm_cp_async_wait_group", "llvm.nvvm.d2f.rm" => "__nvvm_d2f_rm", "llvm.nvvm.d2f.rm.ftz" => "__nvvm_d2f_rm_ftz", "llvm.nvvm.d2f.rn" => "__nvvm_d2f_rn", @@ -2374,7 +3020,13 @@ match name { "llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f", "llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d", "llvm.nvvm.ex2.approx.f" => "__nvvm_ex2_approx_f", + "llvm.nvvm.ex2.approx.f16" => "__nvvm_ex2_approx_f16", + "llvm.nvvm.ex2.approx.f16x2" => "__nvvm_ex2_approx_f16x2", "llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f", + "llvm.nvvm.f2bf16.rn" => "__nvvm_f2bf16_rn", + "llvm.nvvm.f2bf16.rn.relu" => "__nvvm_f2bf16_rn_relu", + "llvm.nvvm.f2bf16.rz" => "__nvvm_f2bf16_rz", + "llvm.nvvm.f2bf16.rz.relu" => "__nvvm_f2bf16_rz_relu", "llvm.nvvm.f2h.rn" => "__nvvm_f2h_rn", "llvm.nvvm.f2h.rn.ftz" => "__nvvm_f2h_rn_ftz", "llvm.nvvm.f2i.rm" => "__nvvm_f2i_rm", @@ -2393,6 +3045,7 @@ match name { "llvm.nvvm.f2ll.rp.ftz" => "__nvvm_f2ll_rp_ftz", "llvm.nvvm.f2ll.rz" => "__nvvm_f2ll_rz", "llvm.nvvm.f2ll.rz.ftz" => "__nvvm_f2ll_rz_ftz", + "llvm.nvvm.f2tf32.rna" => "__nvvm_f2tf32_rna", "llvm.nvvm.f2ui.rm" => "__nvvm_f2ui_rm", "llvm.nvvm.f2ui.rm.ftz" => "__nvvm_f2ui_rm_ftz", "llvm.nvvm.f2ui.rn" => "__nvvm_f2ui_rn", @@ -2412,27 +3065,112 @@ match name { "llvm.nvvm.fabs.d" => "__nvvm_fabs_d", "llvm.nvvm.fabs.f" => "__nvvm_fabs_f", "llvm.nvvm.fabs.ftz.f" => "__nvvm_fabs_ftz_f", + "llvm.nvvm.ff2bf16x2.rn" => "__nvvm_ff2bf16x2_rn", + "llvm.nvvm.ff2bf16x2.rn.relu" => "__nvvm_ff2bf16x2_rn_relu", + "llvm.nvvm.ff2bf16x2.rz" => "__nvvm_ff2bf16x2_rz", + "llvm.nvvm.ff2bf16x2.rz.relu" => "__nvvm_ff2bf16x2_rz_relu", + "llvm.nvvm.ff2f16x2.rn" => "__nvvm_ff2f16x2_rn", + "llvm.nvvm.ff2f16x2.rn.relu" => "__nvvm_ff2f16x2_rn_relu", + "llvm.nvvm.ff2f16x2.rz" => "__nvvm_ff2f16x2_rz", + "llvm.nvvm.ff2f16x2.rz.relu" => "__nvvm_ff2f16x2_rz_relu", "llvm.nvvm.floor.d" => "__nvvm_floor_d", "llvm.nvvm.floor.f" => "__nvvm_floor_f", "llvm.nvvm.floor.ftz.f" => "__nvvm_floor_ftz_f", "llvm.nvvm.fma.rm.d" => "__nvvm_fma_rm_d", "llvm.nvvm.fma.rm.f" => "__nvvm_fma_rm_f", "llvm.nvvm.fma.rm.ftz.f" => "__nvvm_fma_rm_ftz_f", + "llvm.nvvm.fma.rn.bf16" => "__nvvm_fma_rn_bf16", + "llvm.nvvm.fma.rn.bf16x2" => "__nvvm_fma_rn_bf16x2", "llvm.nvvm.fma.rn.d" => "__nvvm_fma_rn_d", "llvm.nvvm.fma.rn.f" => "__nvvm_fma_rn_f", + "llvm.nvvm.fma.rn.f16" => "__nvvm_fma_rn_f16", + "llvm.nvvm.fma.rn.f16x2" => "__nvvm_fma_rn_f16x2", "llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f", + "llvm.nvvm.fma.rn.ftz.f16" => "__nvvm_fma_rn_ftz_f16", + "llvm.nvvm.fma.rn.ftz.f16x2" => "__nvvm_fma_rn_ftz_f16x2", + "llvm.nvvm.fma.rn.ftz.relu.f16" => "__nvvm_fma_rn_ftz_relu_f16", + "llvm.nvvm.fma.rn.ftz.relu.f16x2" => "__nvvm_fma_rn_ftz_relu_f16x2", + "llvm.nvvm.fma.rn.ftz.sat.f16" => "__nvvm_fma_rn_ftz_sat_f16", + "llvm.nvvm.fma.rn.ftz.sat.f16x2" => "__nvvm_fma_rn_ftz_sat_f16x2", + "llvm.nvvm.fma.rn.relu.bf16" => "__nvvm_fma_rn_relu_bf16", + "llvm.nvvm.fma.rn.relu.bf16x2" => "__nvvm_fma_rn_relu_bf16x2", + "llvm.nvvm.fma.rn.relu.f16" => "__nvvm_fma_rn_relu_f16", + "llvm.nvvm.fma.rn.relu.f16x2" => "__nvvm_fma_rn_relu_f16x2", + "llvm.nvvm.fma.rn.sat.f16" => "__nvvm_fma_rn_sat_f16", + "llvm.nvvm.fma.rn.sat.f16x2" => "__nvvm_fma_rn_sat_f16x2", "llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d", "llvm.nvvm.fma.rp.f" => 
"__nvvm_fma_rp_f", "llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f", "llvm.nvvm.fma.rz.d" => "__nvvm_fma_rz_d", "llvm.nvvm.fma.rz.f" => "__nvvm_fma_rz_f", "llvm.nvvm.fma.rz.ftz.f" => "__nvvm_fma_rz_ftz_f", + "llvm.nvvm.fmax.bf16" => "__nvvm_fmax_bf16", + "llvm.nvvm.fmax.bf16x2" => "__nvvm_fmax_bf16x2", "llvm.nvvm.fmax.d" => "__nvvm_fmax_d", "llvm.nvvm.fmax.f" => "__nvvm_fmax_f", + "llvm.nvvm.fmax.f16" => "__nvvm_fmax_f16", + "llvm.nvvm.fmax.f16x2" => "__nvvm_fmax_f16x2", "llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f", + "llvm.nvvm.fmax.ftz.f16" => "__nvvm_fmax_ftz_f16", + "llvm.nvvm.fmax.ftz.f16x2" => "__nvvm_fmax_ftz_f16x2", + "llvm.nvvm.fmax.ftz.nan.f" => "__nvvm_fmax_ftz_nan_f", + "llvm.nvvm.fmax.ftz.nan.f16" => "__nvvm_fmax_ftz_nan_f16", + "llvm.nvvm.fmax.ftz.nan.f16x2" => "__nvvm_fmax_ftz_nan_f16x2", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f" => "__nvvm_fmax_ftz_nan_xorsign_abs_f", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16", + "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmax.ftz.xorsign.abs.f" => "__nvvm_fmax_ftz_xorsign_abs_f", + "llvm.nvvm.fmax.ftz.xorsign.abs.f16" => "__nvvm_fmax_ftz_xorsign_abs_f16", + "llvm.nvvm.fmax.ftz.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_xorsign_abs_f16x2", + "llvm.nvvm.fmax.nan.bf16" => "__nvvm_fmax_nan_bf16", + "llvm.nvvm.fmax.nan.bf16x2" => "__nvvm_fmax_nan_bf16x2", + "llvm.nvvm.fmax.nan.f" => "__nvvm_fmax_nan_f", + "llvm.nvvm.fmax.nan.f16" => "__nvvm_fmax_nan_f16", + "llvm.nvvm.fmax.nan.f16x2" => "__nvvm_fmax_nan_f16x2", + "llvm.nvvm.fmax.nan.xorsign.abs.bf16" => "__nvvm_fmax_nan_xorsign_abs_bf16", + "llvm.nvvm.fmax.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_nan_xorsign_abs_bf16x2", + "llvm.nvvm.fmax.nan.xorsign.abs.f" => "__nvvm_fmax_nan_xorsign_abs_f", + "llvm.nvvm.fmax.nan.xorsign.abs.f16" => "__nvvm_fmax_nan_xorsign_abs_f16", + "llvm.nvvm.fmax.nan.xorsign.abs.f16x2" => "__nvvm_fmax_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmax.xorsign.abs.bf16" => "__nvvm_fmax_xorsign_abs_bf16", + "llvm.nvvm.fmax.xorsign.abs.bf16x2" => "__nvvm_fmax_xorsign_abs_bf16x2", + "llvm.nvvm.fmax.xorsign.abs.f" => "__nvvm_fmax_xorsign_abs_f", + "llvm.nvvm.fmax.xorsign.abs.f16" => "__nvvm_fmax_xorsign_abs_f16", + "llvm.nvvm.fmax.xorsign.abs.f16x2" => "__nvvm_fmax_xorsign_abs_f16x2", + "llvm.nvvm.fmin.bf16" => "__nvvm_fmin_bf16", + "llvm.nvvm.fmin.bf16x2" => "__nvvm_fmin_bf16x2", "llvm.nvvm.fmin.d" => "__nvvm_fmin_d", "llvm.nvvm.fmin.f" => "__nvvm_fmin_f", + "llvm.nvvm.fmin.f16" => "__nvvm_fmin_f16", + "llvm.nvvm.fmin.f16x2" => "__nvvm_fmin_f16x2", "llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f", + "llvm.nvvm.fmin.ftz.f16" => "__nvvm_fmin_ftz_f16", + "llvm.nvvm.fmin.ftz.f16x2" => "__nvvm_fmin_ftz_f16x2", + "llvm.nvvm.fmin.ftz.nan.f" => "__nvvm_fmin_ftz_nan_f", + "llvm.nvvm.fmin.ftz.nan.f16" => "__nvvm_fmin_ftz_nan_f16", + "llvm.nvvm.fmin.ftz.nan.f16x2" => "__nvvm_fmin_ftz_nan_f16x2", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f" => "__nvvm_fmin_ftz_nan_xorsign_abs_f", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16", + "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmin.ftz.xorsign.abs.f" => "__nvvm_fmin_ftz_xorsign_abs_f", + "llvm.nvvm.fmin.ftz.xorsign.abs.f16" => "__nvvm_fmin_ftz_xorsign_abs_f16", + "llvm.nvvm.fmin.ftz.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_xorsign_abs_f16x2", + "llvm.nvvm.fmin.nan.bf16" => "__nvvm_fmin_nan_bf16", + "llvm.nvvm.fmin.nan.bf16x2" => "__nvvm_fmin_nan_bf16x2", + 
"llvm.nvvm.fmin.nan.f" => "__nvvm_fmin_nan_f", + "llvm.nvvm.fmin.nan.f16" => "__nvvm_fmin_nan_f16", + "llvm.nvvm.fmin.nan.f16x2" => "__nvvm_fmin_nan_f16x2", + "llvm.nvvm.fmin.nan.xorsign.abs.bf16" => "__nvvm_fmin_nan_xorsign_abs_bf16", + "llvm.nvvm.fmin.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_nan_xorsign_abs_bf16x2", + "llvm.nvvm.fmin.nan.xorsign.abs.f" => "__nvvm_fmin_nan_xorsign_abs_f", + "llvm.nvvm.fmin.nan.xorsign.abs.f16" => "__nvvm_fmin_nan_xorsign_abs_f16", + "llvm.nvvm.fmin.nan.xorsign.abs.f16x2" => "__nvvm_fmin_nan_xorsign_abs_f16x2", + "llvm.nvvm.fmin.xorsign.abs.bf16" => "__nvvm_fmin_xorsign_abs_bf16", + "llvm.nvvm.fmin.xorsign.abs.bf16x2" => "__nvvm_fmin_xorsign_abs_bf16x2", + "llvm.nvvm.fmin.xorsign.abs.f" => "__nvvm_fmin_xorsign_abs_f", + "llvm.nvvm.fmin.xorsign.abs.f16" => "__nvvm_fmin_xorsign_abs_f16", + "llvm.nvvm.fmin.xorsign.abs.f16x2" => "__nvvm_fmin_xorsign_abs_f16x2", + "llvm.nvvm.fns" => "__nvvm_fns", "llvm.nvvm.h2f" => "__nvvm_h2f", "llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm", "llvm.nvvm.i2d.rn" => "__nvvm_i2d_rn", @@ -2461,10 +3199,27 @@ match name { "llvm.nvvm.ll2f.rp" => "__nvvm_ll2f_rp", "llvm.nvvm.ll2f.rz" => "__nvvm_ll2f_rz", "llvm.nvvm.lohi.i2d" => "__nvvm_lohi_i2d", + "llvm.nvvm.match.any.sync.i32" => "__nvvm_match_any_sync_i32", + "llvm.nvvm.match.any.sync.i64" => "__nvvm_match_any_sync_i64", "llvm.nvvm.max.i" => "__nvvm_max_i", "llvm.nvvm.max.ll" => "__nvvm_max_ll", "llvm.nvvm.max.ui" => "__nvvm_max_ui", "llvm.nvvm.max.ull" => "__nvvm_max_ull", + "llvm.nvvm.mbarrier.arrive" => "__nvvm_mbarrier_arrive", + "llvm.nvvm.mbarrier.arrive.drop" => "__nvvm_mbarrier_arrive_drop", + "llvm.nvvm.mbarrier.arrive.drop.noComplete" => "__nvvm_mbarrier_arrive_drop_noComplete", + "llvm.nvvm.mbarrier.arrive.drop.noComplete.shared" => "__nvvm_mbarrier_arrive_drop_noComplete_shared", + "llvm.nvvm.mbarrier.arrive.drop.shared" => "__nvvm_mbarrier_arrive_drop_shared", + "llvm.nvvm.mbarrier.arrive.noComplete" => "__nvvm_mbarrier_arrive_noComplete", + "llvm.nvvm.mbarrier.arrive.noComplete.shared" => "__nvvm_mbarrier_arrive_noComplete_shared", + "llvm.nvvm.mbarrier.arrive.shared" => "__nvvm_mbarrier_arrive_shared", + "llvm.nvvm.mbarrier.init" => "__nvvm_mbarrier_init", + "llvm.nvvm.mbarrier.init.shared" => "__nvvm_mbarrier_init_shared", + "llvm.nvvm.mbarrier.inval" => "__nvvm_mbarrier_inval", + "llvm.nvvm.mbarrier.inval.shared" => "__nvvm_mbarrier_inval_shared", + "llvm.nvvm.mbarrier.pending.count" => "__nvvm_mbarrier_pending_count", + "llvm.nvvm.mbarrier.test.wait" => "__nvvm_mbarrier_test_wait", + "llvm.nvvm.mbarrier.test.wait.shared" => "__nvvm_mbarrier_test_wait_shared", "llvm.nvvm.membar.cta" => "__nvvm_membar_cta", "llvm.nvvm.membar.gl" => "__nvvm_membar_gl", "llvm.nvvm.membar.sys" => "__nvvm_membar_sys", @@ -2490,10 +3245,13 @@ match name { "llvm.nvvm.mulhi.ll" => "__nvvm_mulhi_ll", "llvm.nvvm.mulhi.ui" => "__nvvm_mulhi_ui", "llvm.nvvm.mulhi.ull" => "__nvvm_mulhi_ull", + "llvm.nvvm.neg.bf16" => "__nvvm_neg_bf16", + "llvm.nvvm.neg.bf16x2" => "__nvvm_neg_bf16x2", "llvm.nvvm.popc.i" => "__nvvm_popc_i", "llvm.nvvm.popc.ll" => "__nvvm_popc_ll", "llvm.nvvm.prmt" => "__nvvm_prmt", "llvm.nvvm.rcp.approx.ftz.d" => "__nvvm_rcp_approx_ftz_d", + "llvm.nvvm.rcp.approx.ftz.f" => "__nvvm_rcp_approx_ftz_f", "llvm.nvvm.rcp.rm.d" => "__nvvm_rcp_rm_d", "llvm.nvvm.rcp.rm.f" => "__nvvm_rcp_rm_f", "llvm.nvvm.rcp.rm.ftz.f" => "__nvvm_rcp_rm_ftz_f", @@ -2506,8 +3264,11 @@ match name { "llvm.nvvm.rcp.rz.d" => "__nvvm_rcp_rz_d", "llvm.nvvm.rcp.rz.f" => "__nvvm_rcp_rz_f", "llvm.nvvm.rcp.rz.ftz.f" => 
"__nvvm_rcp_rz_ftz_f", - "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_clock", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_clock64", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.ctaid.w" => "__nvvm_read_ptx_sreg_ctaid_w", "llvm.nvvm.read.ptx.sreg.ctaid.x" => "__nvvm_read_ptx_sreg_ctaid_x", "llvm.nvvm.read.ptx.sreg.ctaid.y" => "__nvvm_read_ptx_sreg_ctaid_y", "llvm.nvvm.read.ptx.sreg.ctaid.z" => "__nvvm_read_ptx_sreg_ctaid_z", @@ -2543,32 +3304,58 @@ match name { "llvm.nvvm.read.ptx.sreg.envreg7" => "__nvvm_read_ptx_sreg_envreg7", "llvm.nvvm.read.ptx.sreg.envreg8" => "__nvvm_read_ptx_sreg_envreg8", "llvm.nvvm.read.ptx.sreg.envreg9" => "__nvvm_read_ptx_sreg_envreg9", - "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_gridid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_laneid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_lanemask_eq", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_lanemask_ge", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_lanemask_gt", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_lanemask_le", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_lanemask_lt", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.nctaid.w" => "__nvvm_read_ptx_sreg_nctaid_w", "llvm.nvvm.read.ptx.sreg.nctaid.x" => "__nvvm_read_ptx_sreg_nctaid_x", "llvm.nvvm.read.ptx.sreg.nctaid.y" => "__nvvm_read_ptx_sreg_nctaid_y", "llvm.nvvm.read.ptx.sreg.nctaid.z" => "__nvvm_read_ptx_sreg_nctaid_z", - "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_nsmid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.ntid.w" => "__nvvm_read_ptx_sreg_ntid_w", "llvm.nvvm.read.ptx.sreg.ntid.x" => "__nvvm_read_ptx_sreg_ntid_x", "llvm.nvvm.read.ptx.sreg.ntid.y" => "__nvvm_read_ptx_sreg_ntid_y", "llvm.nvvm.read.ptx.sreg.ntid.z" => "__nvvm_read_ptx_sreg_ntid_z", - "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_", - 
"llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_", - "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_nwarpid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_pm0", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_pm1", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_pm2", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_pm3", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_smid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.tid.w" => "__nvvm_read_ptx_sreg_tid_w", "llvm.nvvm.read.ptx.sreg.tid.x" => "__nvvm_read_ptx_sreg_tid_x", "llvm.nvvm.read.ptx.sreg.tid.y" => "__nvvm_read_ptx_sreg_tid_y", "llvm.nvvm.read.ptx.sreg.tid.z" => "__nvvm_read_ptx_sreg_tid_z", - "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_warpid", + // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_", "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_warpsize", // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_", + "llvm.nvvm.redux.sync.add" => "__nvvm_redux_sync_add", + "llvm.nvvm.redux.sync.and" => "__nvvm_redux_sync_and", + "llvm.nvvm.redux.sync.max" => "__nvvm_redux_sync_max", + "llvm.nvvm.redux.sync.min" => "__nvvm_redux_sync_min", + "llvm.nvvm.redux.sync.or" => "__nvvm_redux_sync_or", + "llvm.nvvm.redux.sync.umax" => "__nvvm_redux_sync_umax", + "llvm.nvvm.redux.sync.umin" => "__nvvm_redux_sync_umin", + "llvm.nvvm.redux.sync.xor" => "__nvvm_redux_sync_xor", "llvm.nvvm.rotate.b32" => "__nvvm_rotate_b32", "llvm.nvvm.rotate.b64" => "__nvvm_rotate_b64", "llvm.nvvm.rotate.right.b64" => "__nvvm_rotate_right_b64", @@ -2589,6 +3376,14 @@ match name { "llvm.nvvm.shfl.down.i32" => "__nvvm_shfl_down_i32", "llvm.nvvm.shfl.idx.f32" => "__nvvm_shfl_idx_f32", "llvm.nvvm.shfl.idx.i32" => "__nvvm_shfl_idx_i32", + "llvm.nvvm.shfl.sync.bfly.f32" => "__nvvm_shfl_sync_bfly_f32", + "llvm.nvvm.shfl.sync.bfly.i32" => "__nvvm_shfl_sync_bfly_i32", + "llvm.nvvm.shfl.sync.down.f32" => "__nvvm_shfl_sync_down_f32", + "llvm.nvvm.shfl.sync.down.i32" => "__nvvm_shfl_sync_down_i32", + "llvm.nvvm.shfl.sync.idx.f32" => "__nvvm_shfl_sync_idx_f32", + "llvm.nvvm.shfl.sync.idx.i32" => "__nvvm_shfl_sync_idx_i32", + "llvm.nvvm.shfl.sync.up.f32" => "__nvvm_shfl_sync_up_f32", + "llvm.nvvm.shfl.sync.up.i32" => "__nvvm_shfl_sync_up_i32", "llvm.nvvm.shfl.up.f32" => "__nvvm_shfl_up_f32", "llvm.nvvm.shfl.up.i32" => "__nvvm_shfl_up_i32", "llvm.nvvm.sin.approx.f" => "__nvvm_sin_approx_f", @@ -2852,6 +3647,14 @@ match name { "llvm.nvvm.ull2f.rn" => "__nvvm_ull2f_rn", "llvm.nvvm.ull2f.rp" => "__nvvm_ull2f_rp", "llvm.nvvm.ull2f.rz" => "__nvvm_ull2f_rz", + "llvm.nvvm.vote.all" => "__nvvm_vote_all", + "llvm.nvvm.vote.all.sync" => "__nvvm_vote_all_sync", + "llvm.nvvm.vote.any" => "__nvvm_vote_any", + "llvm.nvvm.vote.any.sync" => "__nvvm_vote_any_sync", + "llvm.nvvm.vote.ballot" => "__nvvm_vote_ballot", + "llvm.nvvm.vote.ballot.sync" => "__nvvm_vote_ballot_sync", + "llvm.nvvm.vote.uni" => 
"__nvvm_vote_uni", + "llvm.nvvm.vote.uni.sync" => "__nvvm_vote_uni_sync", // ppc "llvm.ppc.addex" => "__builtin_ppc_addex", "llvm.ppc.addf128.round.to.odd" => "__builtin_addf128_round_to_odd", @@ -2881,6 +3684,10 @@ match name { "llvm.ppc.altivec.mtvsrhm" => "__builtin_altivec_mtvsrhm", "llvm.ppc.altivec.mtvsrqm" => "__builtin_altivec_mtvsrqm", "llvm.ppc.altivec.mtvsrwm" => "__builtin_altivec_mtvsrwm", + "llvm.ppc.altivec.vabsdub" => "__builtin_altivec_vabsdub", + "llvm.ppc.altivec.vabsduh" => "__builtin_altivec_vabsduh", + "llvm.ppc.altivec.vabsduw" => "__builtin_altivec_vabsduw", + "llvm.ppc.altivec.vaddcuq" => "__builtin_altivec_vaddcuq", "llvm.ppc.altivec.vaddcuw" => "__builtin_altivec_vaddcuw", "llvm.ppc.altivec.vaddecuq" => "__builtin_altivec_vaddecuq", "llvm.ppc.altivec.vaddeuqm" => "__builtin_altivec_vaddeuqm", @@ -2963,6 +3770,12 @@ match name { "llvm.ppc.altivec.vctuxs" => "__builtin_altivec_vctuxs", "llvm.ppc.altivec.vctzdm" => "__builtin_altivec_vctzdm", "llvm.ppc.altivec.vctzlsbb" => "__builtin_altivec_vctzlsbb", + "llvm.ppc.altivec.vdivesd" => "__builtin_altivec_vdivesd", + "llvm.ppc.altivec.vdivesq" => "__builtin_altivec_vdivesq", + "llvm.ppc.altivec.vdivesw" => "__builtin_altivec_vdivesw", + "llvm.ppc.altivec.vdiveud" => "__builtin_altivec_vdiveud", + "llvm.ppc.altivec.vdiveuq" => "__builtin_altivec_vdiveuq", + "llvm.ppc.altivec.vdiveuw" => "__builtin_altivec_vdiveuw", "llvm.ppc.altivec.vexpandbm" => "__builtin_altivec_vexpandbm", "llvm.ppc.altivec.vexpanddm" => "__builtin_altivec_vexpanddm", "llvm.ppc.altivec.vexpandhm" => "__builtin_altivec_vexpandhm", @@ -3036,15 +3849,23 @@ match name { "llvm.ppc.altivec.vmsumuhm" => "__builtin_altivec_vmsumuhm", "llvm.ppc.altivec.vmsumuhs" => "__builtin_altivec_vmsumuhs", "llvm.ppc.altivec.vmulesb" => "__builtin_altivec_vmulesb", + "llvm.ppc.altivec.vmulesd" => "__builtin_altivec_vmulesd", "llvm.ppc.altivec.vmulesh" => "__builtin_altivec_vmulesh", "llvm.ppc.altivec.vmulesw" => "__builtin_altivec_vmulesw", "llvm.ppc.altivec.vmuleub" => "__builtin_altivec_vmuleub", + "llvm.ppc.altivec.vmuleud" => "__builtin_altivec_vmuleud", "llvm.ppc.altivec.vmuleuh" => "__builtin_altivec_vmuleuh", "llvm.ppc.altivec.vmuleuw" => "__builtin_altivec_vmuleuw", + "llvm.ppc.altivec.vmulhsd" => "__builtin_altivec_vmulhsd", + "llvm.ppc.altivec.vmulhsw" => "__builtin_altivec_vmulhsw", + "llvm.ppc.altivec.vmulhud" => "__builtin_altivec_vmulhud", + "llvm.ppc.altivec.vmulhuw" => "__builtin_altivec_vmulhuw", "llvm.ppc.altivec.vmulosb" => "__builtin_altivec_vmulosb", + "llvm.ppc.altivec.vmulosd" => "__builtin_altivec_vmulosd", "llvm.ppc.altivec.vmulosh" => "__builtin_altivec_vmulosh", "llvm.ppc.altivec.vmulosw" => "__builtin_altivec_vmulosw", "llvm.ppc.altivec.vmuloub" => "__builtin_altivec_vmuloub", + "llvm.ppc.altivec.vmuloud" => "__builtin_altivec_vmuloud", "llvm.ppc.altivec.vmulouh" => "__builtin_altivec_vmulouh", "llvm.ppc.altivec.vmulouw" => "__builtin_altivec_vmulouw", "llvm.ppc.altivec.vnmsubfp" => "__builtin_altivec_vnmsubfp", @@ -3071,8 +3892,14 @@ match name { "llvm.ppc.altivec.vrfiz" => "__builtin_altivec_vrfiz", "llvm.ppc.altivec.vrlb" => "__builtin_altivec_vrlb", "llvm.ppc.altivec.vrld" => "__builtin_altivec_vrld", + "llvm.ppc.altivec.vrldmi" => "__builtin_altivec_vrldmi", + "llvm.ppc.altivec.vrldnm" => "__builtin_altivec_vrldnm", "llvm.ppc.altivec.vrlh" => "__builtin_altivec_vrlh", + "llvm.ppc.altivec.vrlqmi" => "__builtin_altivec_vrlqmi", + "llvm.ppc.altivec.vrlqnm" => "__builtin_altivec_vrlqnm", "llvm.ppc.altivec.vrlw" => "__builtin_altivec_vrlw", + 
"llvm.ppc.altivec.vrlwmi" => "__builtin_altivec_vrlwmi", + "llvm.ppc.altivec.vrlwnm" => "__builtin_altivec_vrlwnm", "llvm.ppc.altivec.vrsqrtefp" => "__builtin_altivec_vrsqrtefp", "llvm.ppc.altivec.vsel" => "__builtin_altivec_vsel_4si", "llvm.ppc.altivec.vsl" => "__builtin_altivec_vsl", @@ -3080,6 +3907,7 @@ match name { "llvm.ppc.altivec.vsldbi" => "__builtin_altivec_vsldbi", "llvm.ppc.altivec.vslh" => "__builtin_altivec_vslh", "llvm.ppc.altivec.vslo" => "__builtin_altivec_vslo", + "llvm.ppc.altivec.vslv" => "__builtin_altivec_vslv", "llvm.ppc.altivec.vslw" => "__builtin_altivec_vslw", "llvm.ppc.altivec.vsr" => "__builtin_altivec_vsr", "llvm.ppc.altivec.vsrab" => "__builtin_altivec_vsrab", @@ -3089,6 +3917,7 @@ match name { "llvm.ppc.altivec.vsrdbi" => "__builtin_altivec_vsrdbi", "llvm.ppc.altivec.vsrh" => "__builtin_altivec_vsrh", "llvm.ppc.altivec.vsro" => "__builtin_altivec_vsro", + "llvm.ppc.altivec.vsrv" => "__builtin_altivec_vsrv", "llvm.ppc.altivec.vsrw" => "__builtin_altivec_vsrw", "llvm.ppc.altivec.vstribl" => "__builtin_altivec_vstribl", "llvm.ppc.altivec.vstribl.p" => "__builtin_altivec_vstribl_p", @@ -3098,6 +3927,7 @@ match name { "llvm.ppc.altivec.vstrihl.p" => "__builtin_altivec_vstrihl_p", "llvm.ppc.altivec.vstrihr" => "__builtin_altivec_vstrihr", "llvm.ppc.altivec.vstrihr.p" => "__builtin_altivec_vstrihr_p", + "llvm.ppc.altivec.vsubcuq" => "__builtin_altivec_vsubcuq", "llvm.ppc.altivec.vsubcuw" => "__builtin_altivec_vsubcuw", "llvm.ppc.altivec.vsubecuq" => "__builtin_altivec_vsubecuq", "llvm.ppc.altivec.vsubeuqm" => "__builtin_altivec_vsubeuqm", @@ -3165,6 +3995,8 @@ match name { "llvm.ppc.fmaf128.round.to.odd" => "__builtin_fmaf128_round_to_odd", "llvm.ppc.fmsub" => "__builtin_ppc_fmsub", "llvm.ppc.fmsubs" => "__builtin_ppc_fmsubs", + "llvm.ppc.fnabs" => "__builtin_ppc_fnabs", + "llvm.ppc.fnabss" => "__builtin_ppc_fnabss", "llvm.ppc.fnmadd" => "__builtin_ppc_fnmadd", "llvm.ppc.fnmadds" => "__builtin_ppc_fnmadds", "llvm.ppc.fre" => "__builtin_ppc_fre", @@ -3341,8 +4173,24 @@ match name { "llvm.ppc.vsx.xvcmpgtdp.p" => "__builtin_vsx_xvcmpgtdp_p", "llvm.ppc.vsx.xvcmpgtsp" => "__builtin_vsx_xvcmpgtsp", "llvm.ppc.vsx.xvcmpgtsp.p" => "__builtin_vsx_xvcmpgtsp_p", + "llvm.ppc.vsx.xvcvbf16spn" => "__builtin_vsx_xvcvbf16spn", + "llvm.ppc.vsx.xvcvdpsp" => "__builtin_vsx_xvcvdpsp", + "llvm.ppc.vsx.xvcvdpsxws" => "__builtin_vsx_xvcvdpsxws", + "llvm.ppc.vsx.xvcvdpuxws" => "__builtin_vsx_xvcvdpuxws", + "llvm.ppc.vsx.xvcvhpsp" => "__builtin_vsx_xvcvhpsp", + "llvm.ppc.vsx.xvcvspbf16" => "__builtin_vsx_xvcvspbf16", + "llvm.ppc.vsx.xvcvspdp" => "__builtin_vsx_xvcvspdp", + "llvm.ppc.vsx.xvcvsphp" => "__builtin_vsx_xvcvsphp", + "llvm.ppc.vsx.xvcvspsxds" => "__builtin_vsx_xvcvspsxds", + "llvm.ppc.vsx.xvcvspuxds" => "__builtin_vsx_xvcvspuxds", + "llvm.ppc.vsx.xvcvsxdsp" => "__builtin_vsx_xvcvsxdsp", + "llvm.ppc.vsx.xvcvsxwdp" => "__builtin_vsx_xvcvsxwdp", + "llvm.ppc.vsx.xvcvuxdsp" => "__builtin_vsx_xvcvuxdsp", + "llvm.ppc.vsx.xvcvuxwdp" => "__builtin_vsx_xvcvuxwdp", "llvm.ppc.vsx.xvdivdp" => "__builtin_vsx_xvdivdp", "llvm.ppc.vsx.xvdivsp" => "__builtin_vsx_xvdivsp", + "llvm.ppc.vsx.xviexpdp" => "__builtin_vsx_xviexpdp", + "llvm.ppc.vsx.xviexpsp" => "__builtin_vsx_xviexpsp", "llvm.ppc.vsx.xvmaxdp" => "__builtin_vsx_xvmaxdp", "llvm.ppc.vsx.xvmaxsp" => "__builtin_vsx_xvmaxsp", "llvm.ppc.vsx.xvmindp" => "__builtin_vsx_xvmindp", @@ -3351,10 +4199,28 @@ match name { "llvm.ppc.vsx.xvresp" => "__builtin_vsx_xvresp", "llvm.ppc.vsx.xvrsqrtedp" => "__builtin_vsx_xvrsqrtedp", "llvm.ppc.vsx.xvrsqrtesp" => 
"__builtin_vsx_xvrsqrtesp", + "llvm.ppc.vsx.xvtdivdp" => "__builtin_vsx_xvtdivdp", + "llvm.ppc.vsx.xvtdivsp" => "__builtin_vsx_xvtdivsp", + "llvm.ppc.vsx.xvtlsbb" => "__builtin_vsx_xvtlsbb", + "llvm.ppc.vsx.xvtsqrtdp" => "__builtin_vsx_xvtsqrtdp", + "llvm.ppc.vsx.xvtsqrtsp" => "__builtin_vsx_xvtsqrtsp", + "llvm.ppc.vsx.xvtstdcdp" => "__builtin_vsx_xvtstdcdp", + "llvm.ppc.vsx.xvtstdcsp" => "__builtin_vsx_xvtstdcsp", + "llvm.ppc.vsx.xvxexpdp" => "__builtin_vsx_xvxexpdp", + "llvm.ppc.vsx.xvxexpsp" => "__builtin_vsx_xvxexpsp", + "llvm.ppc.vsx.xvxsigdp" => "__builtin_vsx_xvxsigdp", + "llvm.ppc.vsx.xvxsigsp" => "__builtin_vsx_xvxsigsp", "llvm.ppc.vsx.xxblendvb" => "__builtin_vsx_xxblendvb", "llvm.ppc.vsx.xxblendvd" => "__builtin_vsx_xxblendvd", "llvm.ppc.vsx.xxblendvh" => "__builtin_vsx_xxblendvh", "llvm.ppc.vsx.xxblendvw" => "__builtin_vsx_xxblendvw", + "llvm.ppc.vsx.xxeval" => "__builtin_vsx_xxeval", + "llvm.ppc.vsx.xxextractuw" => "__builtin_vsx_xxextractuw", + "llvm.ppc.vsx.xxgenpcvbm" => "__builtin_vsx_xxgenpcvbm", + "llvm.ppc.vsx.xxgenpcvdm" => "__builtin_vsx_xxgenpcvdm", + "llvm.ppc.vsx.xxgenpcvhm" => "__builtin_vsx_xxgenpcvhm", + "llvm.ppc.vsx.xxgenpcvwm" => "__builtin_vsx_xxgenpcvwm", + "llvm.ppc.vsx.xxinsertw" => "__builtin_vsx_xxinsertw", "llvm.ppc.vsx.xxleqv" => "__builtin_vsx_xxleqv", "llvm.ppc.vsx.xxpermx" => "__builtin_vsx_xxpermx", // ptx @@ -3376,6 +4242,19 @@ match name { "llvm.ptx.read.pm3" => "__builtin_ptx_read_pm3", "llvm.ptx.read.smid" => "__builtin_ptx_read_smid", "llvm.ptx.read.warpid" => "__builtin_ptx_read_warpid", + // r600 + "llvm.r600.group.barrier" => "__builtin_r600_group_barrier", + "llvm.r600.implicitarg.ptr" => "__builtin_r600_implicitarg_ptr", + "llvm.r600.rat.store.typed" => "__builtin_r600_rat_store_typed", + "llvm.r600.read.global.size.x" => "__builtin_r600_read_global_size_x", + "llvm.r600.read.global.size.y" => "__builtin_r600_read_global_size_y", + "llvm.r600.read.global.size.z" => "__builtin_r600_read_global_size_z", + "llvm.r600.read.ngroups.x" => "__builtin_r600_read_ngroups_x", + "llvm.r600.read.ngroups.y" => "__builtin_r600_read_ngroups_y", + "llvm.r600.read.ngroups.z" => "__builtin_r600_read_ngroups_z", + "llvm.r600.read.tgid.x" => "__builtin_r600_read_tgid_x", + "llvm.r600.read.tgid.y" => "__builtin_r600_read_tgid_y", + "llvm.r600.read.tgid.z" => "__builtin_r600_read_tgid_z", // s390 "llvm.s390.efpc" => "__builtin_s390_efpc", "llvm.s390.etnd" => "__builtin_tx_nesting_depth", @@ -3383,29 +4262,1426 @@ match name { "llvm.s390.ppa.txassist" => "__builtin_tx_assist", "llvm.s390.sfpc" => "__builtin_s390_sfpc", "llvm.s390.tend" => "__builtin_tend", + "llvm.s390.vaccb" => "__builtin_s390_vaccb", + "llvm.s390.vacccq" => "__builtin_s390_vacccq", + "llvm.s390.vaccf" => "__builtin_s390_vaccf", + "llvm.s390.vaccg" => "__builtin_s390_vaccg", + "llvm.s390.vacch" => "__builtin_s390_vacch", + "llvm.s390.vaccq" => "__builtin_s390_vaccq", + "llvm.s390.vacq" => "__builtin_s390_vacq", + "llvm.s390.vaq" => "__builtin_s390_vaq", + "llvm.s390.vavgb" => "__builtin_s390_vavgb", + "llvm.s390.vavgf" => "__builtin_s390_vavgf", + "llvm.s390.vavgg" => "__builtin_s390_vavgg", + "llvm.s390.vavgh" => "__builtin_s390_vavgh", + "llvm.s390.vavglb" => "__builtin_s390_vavglb", + "llvm.s390.vavglf" => "__builtin_s390_vavglf", + "llvm.s390.vavglg" => "__builtin_s390_vavglg", + "llvm.s390.vavglh" => "__builtin_s390_vavglh", + "llvm.s390.vbperm" => "__builtin_s390_vbperm", "llvm.s390.vcfn" => "__builtin_s390_vcfn", + "llvm.s390.vcksm" => "__builtin_s390_vcksm", "llvm.s390.vclfnhs" => 
"__builtin_s390_vclfnhs", "llvm.s390.vclfnls" => "__builtin_s390_vclfnls", "llvm.s390.vcnf" => "__builtin_s390_vcnf", "llvm.s390.vcrnfs" => "__builtin_s390_vcrnfs", + "llvm.s390.verimb" => "__builtin_s390_verimb", + "llvm.s390.verimf" => "__builtin_s390_verimf", + "llvm.s390.verimg" => "__builtin_s390_verimg", + "llvm.s390.verimh" => "__builtin_s390_verimh", + "llvm.s390.verllb" => "__builtin_s390_verllb", + "llvm.s390.verllf" => "__builtin_s390_verllf", + "llvm.s390.verllg" => "__builtin_s390_verllg", + "llvm.s390.verllh" => "__builtin_s390_verllh", + "llvm.s390.verllvb" => "__builtin_s390_verllvb", + "llvm.s390.verllvf" => "__builtin_s390_verllvf", + "llvm.s390.verllvg" => "__builtin_s390_verllvg", + "llvm.s390.verllvh" => "__builtin_s390_verllvh", + "llvm.s390.vfaeb" => "__builtin_s390_vfaeb", + "llvm.s390.vfaef" => "__builtin_s390_vfaef", + "llvm.s390.vfaeh" => "__builtin_s390_vfaeh", + "llvm.s390.vfaezb" => "__builtin_s390_vfaezb", + "llvm.s390.vfaezf" => "__builtin_s390_vfaezf", + "llvm.s390.vfaezh" => "__builtin_s390_vfaezh", + "llvm.s390.vfeeb" => "__builtin_s390_vfeeb", + "llvm.s390.vfeef" => "__builtin_s390_vfeef", + "llvm.s390.vfeeh" => "__builtin_s390_vfeeh", + "llvm.s390.vfeezb" => "__builtin_s390_vfeezb", + "llvm.s390.vfeezf" => "__builtin_s390_vfeezf", + "llvm.s390.vfeezh" => "__builtin_s390_vfeezh", + "llvm.s390.vfeneb" => "__builtin_s390_vfeneb", + "llvm.s390.vfenef" => "__builtin_s390_vfenef", + "llvm.s390.vfeneh" => "__builtin_s390_vfeneh", + "llvm.s390.vfenezb" => "__builtin_s390_vfenezb", + "llvm.s390.vfenezf" => "__builtin_s390_vfenezf", + "llvm.s390.vfenezh" => "__builtin_s390_vfenezh", + "llvm.s390.vgfmab" => "__builtin_s390_vgfmab", + "llvm.s390.vgfmaf" => "__builtin_s390_vgfmaf", + "llvm.s390.vgfmag" => "__builtin_s390_vgfmag", + "llvm.s390.vgfmah" => "__builtin_s390_vgfmah", + "llvm.s390.vgfmb" => "__builtin_s390_vgfmb", + "llvm.s390.vgfmf" => "__builtin_s390_vgfmf", + "llvm.s390.vgfmg" => "__builtin_s390_vgfmg", + "llvm.s390.vgfmh" => "__builtin_s390_vgfmh", + "llvm.s390.vistrb" => "__builtin_s390_vistrb", + "llvm.s390.vistrf" => "__builtin_s390_vistrf", + "llvm.s390.vistrh" => "__builtin_s390_vistrh", "llvm.s390.vlbb" => "__builtin_s390_vlbb", "llvm.s390.vll" => "__builtin_s390_vll", "llvm.s390.vlrl" => "__builtin_s390_vlrl", + "llvm.s390.vmaeb" => "__builtin_s390_vmaeb", + "llvm.s390.vmaef" => "__builtin_s390_vmaef", + "llvm.s390.vmaeh" => "__builtin_s390_vmaeh", + "llvm.s390.vmahb" => "__builtin_s390_vmahb", + "llvm.s390.vmahf" => "__builtin_s390_vmahf", + "llvm.s390.vmahh" => "__builtin_s390_vmahh", + "llvm.s390.vmaleb" => "__builtin_s390_vmaleb", + "llvm.s390.vmalef" => "__builtin_s390_vmalef", + "llvm.s390.vmaleh" => "__builtin_s390_vmaleh", + "llvm.s390.vmalhb" => "__builtin_s390_vmalhb", + "llvm.s390.vmalhf" => "__builtin_s390_vmalhf", + "llvm.s390.vmalhh" => "__builtin_s390_vmalhh", + "llvm.s390.vmalob" => "__builtin_s390_vmalob", + "llvm.s390.vmalof" => "__builtin_s390_vmalof", + "llvm.s390.vmaloh" => "__builtin_s390_vmaloh", + "llvm.s390.vmaob" => "__builtin_s390_vmaob", + "llvm.s390.vmaof" => "__builtin_s390_vmaof", + "llvm.s390.vmaoh" => "__builtin_s390_vmaoh", + "llvm.s390.vmeb" => "__builtin_s390_vmeb", + "llvm.s390.vmef" => "__builtin_s390_vmef", + "llvm.s390.vmeh" => "__builtin_s390_vmeh", + "llvm.s390.vmhb" => "__builtin_s390_vmhb", + "llvm.s390.vmhf" => "__builtin_s390_vmhf", + "llvm.s390.vmhh" => "__builtin_s390_vmhh", + "llvm.s390.vmleb" => "__builtin_s390_vmleb", + "llvm.s390.vmlef" => "__builtin_s390_vmlef", + "llvm.s390.vmleh" => 
"__builtin_s390_vmleh", + "llvm.s390.vmlhb" => "__builtin_s390_vmlhb", + "llvm.s390.vmlhf" => "__builtin_s390_vmlhf", + "llvm.s390.vmlhh" => "__builtin_s390_vmlhh", + "llvm.s390.vmlob" => "__builtin_s390_vmlob", + "llvm.s390.vmlof" => "__builtin_s390_vmlof", + "llvm.s390.vmloh" => "__builtin_s390_vmloh", + "llvm.s390.vmob" => "__builtin_s390_vmob", + "llvm.s390.vmof" => "__builtin_s390_vmof", + "llvm.s390.vmoh" => "__builtin_s390_vmoh", "llvm.s390.vmslg" => "__builtin_s390_vmslg", "llvm.s390.vpdi" => "__builtin_s390_vpdi", "llvm.s390.vperm" => "__builtin_s390_vperm", + "llvm.s390.vpklsf" => "__builtin_s390_vpklsf", + "llvm.s390.vpklsg" => "__builtin_s390_vpklsg", + "llvm.s390.vpklsh" => "__builtin_s390_vpklsh", + "llvm.s390.vpksf" => "__builtin_s390_vpksf", + "llvm.s390.vpksg" => "__builtin_s390_vpksg", + "llvm.s390.vpksh" => "__builtin_s390_vpksh", + "llvm.s390.vsbcbiq" => "__builtin_s390_vsbcbiq", + "llvm.s390.vsbiq" => "__builtin_s390_vsbiq", + "llvm.s390.vscbib" => "__builtin_s390_vscbib", + "llvm.s390.vscbif" => "__builtin_s390_vscbif", + "llvm.s390.vscbig" => "__builtin_s390_vscbig", + "llvm.s390.vscbih" => "__builtin_s390_vscbih", + "llvm.s390.vscbiq" => "__builtin_s390_vscbiq", + "llvm.s390.vsl" => "__builtin_s390_vsl", + "llvm.s390.vslb" => "__builtin_s390_vslb", "llvm.s390.vsld" => "__builtin_s390_vsld", "llvm.s390.vsldb" => "__builtin_s390_vsldb", + "llvm.s390.vsq" => "__builtin_s390_vsq", + "llvm.s390.vsra" => "__builtin_s390_vsra", + "llvm.s390.vsrab" => "__builtin_s390_vsrab", "llvm.s390.vsrd" => "__builtin_s390_vsrd", + "llvm.s390.vsrl" => "__builtin_s390_vsrl", + "llvm.s390.vsrlb" => "__builtin_s390_vsrlb", "llvm.s390.vstl" => "__builtin_s390_vstl", + "llvm.s390.vstrcb" => "__builtin_s390_vstrcb", + "llvm.s390.vstrcf" => "__builtin_s390_vstrcf", + "llvm.s390.vstrch" => "__builtin_s390_vstrch", + "llvm.s390.vstrczb" => "__builtin_s390_vstrczb", + "llvm.s390.vstrczf" => "__builtin_s390_vstrczf", + "llvm.s390.vstrczh" => "__builtin_s390_vstrczh", "llvm.s390.vstrl" => "__builtin_s390_vstrl", + "llvm.s390.vsumb" => "__builtin_s390_vsumb", + "llvm.s390.vsumgf" => "__builtin_s390_vsumgf", + "llvm.s390.vsumgh" => "__builtin_s390_vsumgh", + "llvm.s390.vsumh" => "__builtin_s390_vsumh", + "llvm.s390.vsumqf" => "__builtin_s390_vsumqf", + "llvm.s390.vsumqg" => "__builtin_s390_vsumqg", + "llvm.s390.vtm" => "__builtin_s390_vtm", + "llvm.s390.vuphb" => "__builtin_s390_vuphb", + "llvm.s390.vuphf" => "__builtin_s390_vuphf", + "llvm.s390.vuphh" => "__builtin_s390_vuphh", + "llvm.s390.vuplb" => "__builtin_s390_vuplb", + "llvm.s390.vuplf" => "__builtin_s390_vuplf", + "llvm.s390.vuplhb" => "__builtin_s390_vuplhb", + "llvm.s390.vuplhf" => "__builtin_s390_vuplhf", + "llvm.s390.vuplhh" => "__builtin_s390_vuplhh", + "llvm.s390.vuplhw" => "__builtin_s390_vuplhw", + "llvm.s390.vupllb" => "__builtin_s390_vupllb", + "llvm.s390.vupllf" => "__builtin_s390_vupllf", + "llvm.s390.vupllh" => "__builtin_s390_vupllh", // ve + "llvm.ve.vl.andm.MMM" => "__builtin_ve_vl_andm_MMM", + "llvm.ve.vl.andm.mmm" => "__builtin_ve_vl_andm_mmm", + "llvm.ve.vl.eqvm.MMM" => "__builtin_ve_vl_eqvm_MMM", + "llvm.ve.vl.eqvm.mmm" => "__builtin_ve_vl_eqvm_mmm", "llvm.ve.vl.extract.vm512l" => "__builtin_ve_vl_extract_vm512l", "llvm.ve.vl.extract.vm512u" => "__builtin_ve_vl_extract_vm512u", + "llvm.ve.vl.fencec.s" => "__builtin_ve_vl_fencec_s", + "llvm.ve.vl.fencei" => "__builtin_ve_vl_fencei", + "llvm.ve.vl.fencem.s" => "__builtin_ve_vl_fencem_s", + "llvm.ve.vl.fidcr.sss" => "__builtin_ve_vl_fidcr_sss", "llvm.ve.vl.insert.vm512l" => 
"__builtin_ve_vl_insert_vm512l", "llvm.ve.vl.insert.vm512u" => "__builtin_ve_vl_insert_vm512u", + "llvm.ve.vl.lcr.sss" => "__builtin_ve_vl_lcr_sss", + "llvm.ve.vl.lsv.vvss" => "__builtin_ve_vl_lsv_vvss", + "llvm.ve.vl.lvm.MMss" => "__builtin_ve_vl_lvm_MMss", + "llvm.ve.vl.lvm.mmss" => "__builtin_ve_vl_lvm_mmss", + "llvm.ve.vl.lvsd.svs" => "__builtin_ve_vl_lvsd_svs", + "llvm.ve.vl.lvsl.svs" => "__builtin_ve_vl_lvsl_svs", + "llvm.ve.vl.lvss.svs" => "__builtin_ve_vl_lvss_svs", + "llvm.ve.vl.lzvm.sml" => "__builtin_ve_vl_lzvm_sml", + "llvm.ve.vl.negm.MM" => "__builtin_ve_vl_negm_MM", + "llvm.ve.vl.negm.mm" => "__builtin_ve_vl_negm_mm", + "llvm.ve.vl.nndm.MMM" => "__builtin_ve_vl_nndm_MMM", + "llvm.ve.vl.nndm.mmm" => "__builtin_ve_vl_nndm_mmm", + "llvm.ve.vl.orm.MMM" => "__builtin_ve_vl_orm_MMM", + "llvm.ve.vl.orm.mmm" => "__builtin_ve_vl_orm_mmm", "llvm.ve.vl.pack.f32a" => "__builtin_ve_vl_pack_f32a", "llvm.ve.vl.pack.f32p" => "__builtin_ve_vl_pack_f32p", + "llvm.ve.vl.pcvm.sml" => "__builtin_ve_vl_pcvm_sml", + "llvm.ve.vl.pfchv.ssl" => "__builtin_ve_vl_pfchv_ssl", + "llvm.ve.vl.pfchvnc.ssl" => "__builtin_ve_vl_pfchvnc_ssl", + "llvm.ve.vl.pvadds.vsvMvl" => "__builtin_ve_vl_pvadds_vsvMvl", + "llvm.ve.vl.pvadds.vsvl" => "__builtin_ve_vl_pvadds_vsvl", + "llvm.ve.vl.pvadds.vsvvl" => "__builtin_ve_vl_pvadds_vsvvl", + "llvm.ve.vl.pvadds.vvvMvl" => "__builtin_ve_vl_pvadds_vvvMvl", + "llvm.ve.vl.pvadds.vvvl" => "__builtin_ve_vl_pvadds_vvvl", + "llvm.ve.vl.pvadds.vvvvl" => "__builtin_ve_vl_pvadds_vvvvl", + "llvm.ve.vl.pvaddu.vsvMvl" => "__builtin_ve_vl_pvaddu_vsvMvl", + "llvm.ve.vl.pvaddu.vsvl" => "__builtin_ve_vl_pvaddu_vsvl", + "llvm.ve.vl.pvaddu.vsvvl" => "__builtin_ve_vl_pvaddu_vsvvl", + "llvm.ve.vl.pvaddu.vvvMvl" => "__builtin_ve_vl_pvaddu_vvvMvl", + "llvm.ve.vl.pvaddu.vvvl" => "__builtin_ve_vl_pvaddu_vvvl", + "llvm.ve.vl.pvaddu.vvvvl" => "__builtin_ve_vl_pvaddu_vvvvl", + "llvm.ve.vl.pvand.vsvMvl" => "__builtin_ve_vl_pvand_vsvMvl", + "llvm.ve.vl.pvand.vsvl" => "__builtin_ve_vl_pvand_vsvl", + "llvm.ve.vl.pvand.vsvvl" => "__builtin_ve_vl_pvand_vsvvl", + "llvm.ve.vl.pvand.vvvMvl" => "__builtin_ve_vl_pvand_vvvMvl", + "llvm.ve.vl.pvand.vvvl" => "__builtin_ve_vl_pvand_vvvl", + "llvm.ve.vl.pvand.vvvvl" => "__builtin_ve_vl_pvand_vvvvl", + "llvm.ve.vl.pvbrd.vsMvl" => "__builtin_ve_vl_pvbrd_vsMvl", + "llvm.ve.vl.pvbrd.vsl" => "__builtin_ve_vl_pvbrd_vsl", + "llvm.ve.vl.pvbrd.vsvl" => "__builtin_ve_vl_pvbrd_vsvl", + "llvm.ve.vl.pvbrv.vvMvl" => "__builtin_ve_vl_pvbrv_vvMvl", + "llvm.ve.vl.pvbrv.vvl" => "__builtin_ve_vl_pvbrv_vvl", + "llvm.ve.vl.pvbrv.vvvl" => "__builtin_ve_vl_pvbrv_vvvl", + "llvm.ve.vl.pvbrvlo.vvl" => "__builtin_ve_vl_pvbrvlo_vvl", + "llvm.ve.vl.pvbrvlo.vvmvl" => "__builtin_ve_vl_pvbrvlo_vvmvl", + "llvm.ve.vl.pvbrvlo.vvvl" => "__builtin_ve_vl_pvbrvlo_vvvl", + "llvm.ve.vl.pvbrvup.vvl" => "__builtin_ve_vl_pvbrvup_vvl", + "llvm.ve.vl.pvbrvup.vvmvl" => "__builtin_ve_vl_pvbrvup_vvmvl", + "llvm.ve.vl.pvbrvup.vvvl" => "__builtin_ve_vl_pvbrvup_vvvl", + "llvm.ve.vl.pvcmps.vsvMvl" => "__builtin_ve_vl_pvcmps_vsvMvl", + "llvm.ve.vl.pvcmps.vsvl" => "__builtin_ve_vl_pvcmps_vsvl", + "llvm.ve.vl.pvcmps.vsvvl" => "__builtin_ve_vl_pvcmps_vsvvl", + "llvm.ve.vl.pvcmps.vvvMvl" => "__builtin_ve_vl_pvcmps_vvvMvl", + "llvm.ve.vl.pvcmps.vvvl" => "__builtin_ve_vl_pvcmps_vvvl", + "llvm.ve.vl.pvcmps.vvvvl" => "__builtin_ve_vl_pvcmps_vvvvl", + "llvm.ve.vl.pvcmpu.vsvMvl" => "__builtin_ve_vl_pvcmpu_vsvMvl", + "llvm.ve.vl.pvcmpu.vsvl" => "__builtin_ve_vl_pvcmpu_vsvl", + "llvm.ve.vl.pvcmpu.vsvvl" => 
"__builtin_ve_vl_pvcmpu_vsvvl", + "llvm.ve.vl.pvcmpu.vvvMvl" => "__builtin_ve_vl_pvcmpu_vvvMvl", + "llvm.ve.vl.pvcmpu.vvvl" => "__builtin_ve_vl_pvcmpu_vvvl", + "llvm.ve.vl.pvcmpu.vvvvl" => "__builtin_ve_vl_pvcmpu_vvvvl", + "llvm.ve.vl.pvcvtsw.vvl" => "__builtin_ve_vl_pvcvtsw_vvl", + "llvm.ve.vl.pvcvtsw.vvvl" => "__builtin_ve_vl_pvcvtsw_vvvl", + "llvm.ve.vl.pvcvtws.vvMvl" => "__builtin_ve_vl_pvcvtws_vvMvl", + "llvm.ve.vl.pvcvtws.vvl" => "__builtin_ve_vl_pvcvtws_vvl", + "llvm.ve.vl.pvcvtws.vvvl" => "__builtin_ve_vl_pvcvtws_vvvl", + "llvm.ve.vl.pvcvtwsrz.vvMvl" => "__builtin_ve_vl_pvcvtwsrz_vvMvl", + "llvm.ve.vl.pvcvtwsrz.vvl" => "__builtin_ve_vl_pvcvtwsrz_vvl", + "llvm.ve.vl.pvcvtwsrz.vvvl" => "__builtin_ve_vl_pvcvtwsrz_vvvl", + "llvm.ve.vl.pveqv.vsvMvl" => "__builtin_ve_vl_pveqv_vsvMvl", + "llvm.ve.vl.pveqv.vsvl" => "__builtin_ve_vl_pveqv_vsvl", + "llvm.ve.vl.pveqv.vsvvl" => "__builtin_ve_vl_pveqv_vsvvl", + "llvm.ve.vl.pveqv.vvvMvl" => "__builtin_ve_vl_pveqv_vvvMvl", + "llvm.ve.vl.pveqv.vvvl" => "__builtin_ve_vl_pveqv_vvvl", + "llvm.ve.vl.pveqv.vvvvl" => "__builtin_ve_vl_pveqv_vvvvl", + "llvm.ve.vl.pvfadd.vsvMvl" => "__builtin_ve_vl_pvfadd_vsvMvl", + "llvm.ve.vl.pvfadd.vsvl" => "__builtin_ve_vl_pvfadd_vsvl", + "llvm.ve.vl.pvfadd.vsvvl" => "__builtin_ve_vl_pvfadd_vsvvl", + "llvm.ve.vl.pvfadd.vvvMvl" => "__builtin_ve_vl_pvfadd_vvvMvl", + "llvm.ve.vl.pvfadd.vvvl" => "__builtin_ve_vl_pvfadd_vvvl", + "llvm.ve.vl.pvfadd.vvvvl" => "__builtin_ve_vl_pvfadd_vvvvl", + "llvm.ve.vl.pvfcmp.vsvMvl" => "__builtin_ve_vl_pvfcmp_vsvMvl", + "llvm.ve.vl.pvfcmp.vsvl" => "__builtin_ve_vl_pvfcmp_vsvl", + "llvm.ve.vl.pvfcmp.vsvvl" => "__builtin_ve_vl_pvfcmp_vsvvl", + "llvm.ve.vl.pvfcmp.vvvMvl" => "__builtin_ve_vl_pvfcmp_vvvMvl", + "llvm.ve.vl.pvfcmp.vvvl" => "__builtin_ve_vl_pvfcmp_vvvl", + "llvm.ve.vl.pvfcmp.vvvvl" => "__builtin_ve_vl_pvfcmp_vvvvl", + "llvm.ve.vl.pvfmad.vsvvMvl" => "__builtin_ve_vl_pvfmad_vsvvMvl", + "llvm.ve.vl.pvfmad.vsvvl" => "__builtin_ve_vl_pvfmad_vsvvl", + "llvm.ve.vl.pvfmad.vsvvvl" => "__builtin_ve_vl_pvfmad_vsvvvl", + "llvm.ve.vl.pvfmad.vvsvMvl" => "__builtin_ve_vl_pvfmad_vvsvMvl", + "llvm.ve.vl.pvfmad.vvsvl" => "__builtin_ve_vl_pvfmad_vvsvl", + "llvm.ve.vl.pvfmad.vvsvvl" => "__builtin_ve_vl_pvfmad_vvsvvl", + "llvm.ve.vl.pvfmad.vvvvMvl" => "__builtin_ve_vl_pvfmad_vvvvMvl", + "llvm.ve.vl.pvfmad.vvvvl" => "__builtin_ve_vl_pvfmad_vvvvl", + "llvm.ve.vl.pvfmad.vvvvvl" => "__builtin_ve_vl_pvfmad_vvvvvl", + "llvm.ve.vl.pvfmax.vsvMvl" => "__builtin_ve_vl_pvfmax_vsvMvl", + "llvm.ve.vl.pvfmax.vsvl" => "__builtin_ve_vl_pvfmax_vsvl", + "llvm.ve.vl.pvfmax.vsvvl" => "__builtin_ve_vl_pvfmax_vsvvl", + "llvm.ve.vl.pvfmax.vvvMvl" => "__builtin_ve_vl_pvfmax_vvvMvl", + "llvm.ve.vl.pvfmax.vvvl" => "__builtin_ve_vl_pvfmax_vvvl", + "llvm.ve.vl.pvfmax.vvvvl" => "__builtin_ve_vl_pvfmax_vvvvl", + "llvm.ve.vl.pvfmin.vsvMvl" => "__builtin_ve_vl_pvfmin_vsvMvl", + "llvm.ve.vl.pvfmin.vsvl" => "__builtin_ve_vl_pvfmin_vsvl", + "llvm.ve.vl.pvfmin.vsvvl" => "__builtin_ve_vl_pvfmin_vsvvl", + "llvm.ve.vl.pvfmin.vvvMvl" => "__builtin_ve_vl_pvfmin_vvvMvl", + "llvm.ve.vl.pvfmin.vvvl" => "__builtin_ve_vl_pvfmin_vvvl", + "llvm.ve.vl.pvfmin.vvvvl" => "__builtin_ve_vl_pvfmin_vvvvl", + "llvm.ve.vl.pvfmkaf.Ml" => "__builtin_ve_vl_pvfmkaf_Ml", + "llvm.ve.vl.pvfmkat.Ml" => "__builtin_ve_vl_pvfmkat_Ml", + "llvm.ve.vl.pvfmkseq.MvMl" => "__builtin_ve_vl_pvfmkseq_MvMl", + "llvm.ve.vl.pvfmkseq.Mvl" => "__builtin_ve_vl_pvfmkseq_Mvl", + "llvm.ve.vl.pvfmkseqnan.MvMl" => "__builtin_ve_vl_pvfmkseqnan_MvMl", + "llvm.ve.vl.pvfmkseqnan.Mvl" => 
"__builtin_ve_vl_pvfmkseqnan_Mvl", + "llvm.ve.vl.pvfmksge.MvMl" => "__builtin_ve_vl_pvfmksge_MvMl", + "llvm.ve.vl.pvfmksge.Mvl" => "__builtin_ve_vl_pvfmksge_Mvl", + "llvm.ve.vl.pvfmksgenan.MvMl" => "__builtin_ve_vl_pvfmksgenan_MvMl", + "llvm.ve.vl.pvfmksgenan.Mvl" => "__builtin_ve_vl_pvfmksgenan_Mvl", + "llvm.ve.vl.pvfmksgt.MvMl" => "__builtin_ve_vl_pvfmksgt_MvMl", + "llvm.ve.vl.pvfmksgt.Mvl" => "__builtin_ve_vl_pvfmksgt_Mvl", + "llvm.ve.vl.pvfmksgtnan.MvMl" => "__builtin_ve_vl_pvfmksgtnan_MvMl", + "llvm.ve.vl.pvfmksgtnan.Mvl" => "__builtin_ve_vl_pvfmksgtnan_Mvl", + "llvm.ve.vl.pvfmksle.MvMl" => "__builtin_ve_vl_pvfmksle_MvMl", + "llvm.ve.vl.pvfmksle.Mvl" => "__builtin_ve_vl_pvfmksle_Mvl", + "llvm.ve.vl.pvfmkslenan.MvMl" => "__builtin_ve_vl_pvfmkslenan_MvMl", + "llvm.ve.vl.pvfmkslenan.Mvl" => "__builtin_ve_vl_pvfmkslenan_Mvl", + "llvm.ve.vl.pvfmksloeq.mvl" => "__builtin_ve_vl_pvfmksloeq_mvl", + "llvm.ve.vl.pvfmksloeq.mvml" => "__builtin_ve_vl_pvfmksloeq_mvml", + "llvm.ve.vl.pvfmksloeqnan.mvl" => "__builtin_ve_vl_pvfmksloeqnan_mvl", + "llvm.ve.vl.pvfmksloeqnan.mvml" => "__builtin_ve_vl_pvfmksloeqnan_mvml", + "llvm.ve.vl.pvfmksloge.mvl" => "__builtin_ve_vl_pvfmksloge_mvl", + "llvm.ve.vl.pvfmksloge.mvml" => "__builtin_ve_vl_pvfmksloge_mvml", + "llvm.ve.vl.pvfmkslogenan.mvl" => "__builtin_ve_vl_pvfmkslogenan_mvl", + "llvm.ve.vl.pvfmkslogenan.mvml" => "__builtin_ve_vl_pvfmkslogenan_mvml", + "llvm.ve.vl.pvfmkslogt.mvl" => "__builtin_ve_vl_pvfmkslogt_mvl", + "llvm.ve.vl.pvfmkslogt.mvml" => "__builtin_ve_vl_pvfmkslogt_mvml", + "llvm.ve.vl.pvfmkslogtnan.mvl" => "__builtin_ve_vl_pvfmkslogtnan_mvl", + "llvm.ve.vl.pvfmkslogtnan.mvml" => "__builtin_ve_vl_pvfmkslogtnan_mvml", + "llvm.ve.vl.pvfmkslole.mvl" => "__builtin_ve_vl_pvfmkslole_mvl", + "llvm.ve.vl.pvfmkslole.mvml" => "__builtin_ve_vl_pvfmkslole_mvml", + "llvm.ve.vl.pvfmkslolenan.mvl" => "__builtin_ve_vl_pvfmkslolenan_mvl", + "llvm.ve.vl.pvfmkslolenan.mvml" => "__builtin_ve_vl_pvfmkslolenan_mvml", + "llvm.ve.vl.pvfmkslolt.mvl" => "__builtin_ve_vl_pvfmkslolt_mvl", + "llvm.ve.vl.pvfmkslolt.mvml" => "__builtin_ve_vl_pvfmkslolt_mvml", + "llvm.ve.vl.pvfmksloltnan.mvl" => "__builtin_ve_vl_pvfmksloltnan_mvl", + "llvm.ve.vl.pvfmksloltnan.mvml" => "__builtin_ve_vl_pvfmksloltnan_mvml", + "llvm.ve.vl.pvfmkslonan.mvl" => "__builtin_ve_vl_pvfmkslonan_mvl", + "llvm.ve.vl.pvfmkslonan.mvml" => "__builtin_ve_vl_pvfmkslonan_mvml", + "llvm.ve.vl.pvfmkslone.mvl" => "__builtin_ve_vl_pvfmkslone_mvl", + "llvm.ve.vl.pvfmkslone.mvml" => "__builtin_ve_vl_pvfmkslone_mvml", + "llvm.ve.vl.pvfmkslonenan.mvl" => "__builtin_ve_vl_pvfmkslonenan_mvl", + "llvm.ve.vl.pvfmkslonenan.mvml" => "__builtin_ve_vl_pvfmkslonenan_mvml", + "llvm.ve.vl.pvfmkslonum.mvl" => "__builtin_ve_vl_pvfmkslonum_mvl", + "llvm.ve.vl.pvfmkslonum.mvml" => "__builtin_ve_vl_pvfmkslonum_mvml", + "llvm.ve.vl.pvfmkslt.MvMl" => "__builtin_ve_vl_pvfmkslt_MvMl", + "llvm.ve.vl.pvfmkslt.Mvl" => "__builtin_ve_vl_pvfmkslt_Mvl", + "llvm.ve.vl.pvfmksltnan.MvMl" => "__builtin_ve_vl_pvfmksltnan_MvMl", + "llvm.ve.vl.pvfmksltnan.Mvl" => "__builtin_ve_vl_pvfmksltnan_Mvl", + "llvm.ve.vl.pvfmksnan.MvMl" => "__builtin_ve_vl_pvfmksnan_MvMl", + "llvm.ve.vl.pvfmksnan.Mvl" => "__builtin_ve_vl_pvfmksnan_Mvl", + "llvm.ve.vl.pvfmksne.MvMl" => "__builtin_ve_vl_pvfmksne_MvMl", + "llvm.ve.vl.pvfmksne.Mvl" => "__builtin_ve_vl_pvfmksne_Mvl", + "llvm.ve.vl.pvfmksnenan.MvMl" => "__builtin_ve_vl_pvfmksnenan_MvMl", + "llvm.ve.vl.pvfmksnenan.Mvl" => "__builtin_ve_vl_pvfmksnenan_Mvl", + "llvm.ve.vl.pvfmksnum.MvMl" => 
"__builtin_ve_vl_pvfmksnum_MvMl", + "llvm.ve.vl.pvfmksnum.Mvl" => "__builtin_ve_vl_pvfmksnum_Mvl", + "llvm.ve.vl.pvfmksupeq.mvl" => "__builtin_ve_vl_pvfmksupeq_mvl", + "llvm.ve.vl.pvfmksupeq.mvml" => "__builtin_ve_vl_pvfmksupeq_mvml", + "llvm.ve.vl.pvfmksupeqnan.mvl" => "__builtin_ve_vl_pvfmksupeqnan_mvl", + "llvm.ve.vl.pvfmksupeqnan.mvml" => "__builtin_ve_vl_pvfmksupeqnan_mvml", + "llvm.ve.vl.pvfmksupge.mvl" => "__builtin_ve_vl_pvfmksupge_mvl", + "llvm.ve.vl.pvfmksupge.mvml" => "__builtin_ve_vl_pvfmksupge_mvml", + "llvm.ve.vl.pvfmksupgenan.mvl" => "__builtin_ve_vl_pvfmksupgenan_mvl", + "llvm.ve.vl.pvfmksupgenan.mvml" => "__builtin_ve_vl_pvfmksupgenan_mvml", + "llvm.ve.vl.pvfmksupgt.mvl" => "__builtin_ve_vl_pvfmksupgt_mvl", + "llvm.ve.vl.pvfmksupgt.mvml" => "__builtin_ve_vl_pvfmksupgt_mvml", + "llvm.ve.vl.pvfmksupgtnan.mvl" => "__builtin_ve_vl_pvfmksupgtnan_mvl", + "llvm.ve.vl.pvfmksupgtnan.mvml" => "__builtin_ve_vl_pvfmksupgtnan_mvml", + "llvm.ve.vl.pvfmksuple.mvl" => "__builtin_ve_vl_pvfmksuple_mvl", + "llvm.ve.vl.pvfmksuple.mvml" => "__builtin_ve_vl_pvfmksuple_mvml", + "llvm.ve.vl.pvfmksuplenan.mvl" => "__builtin_ve_vl_pvfmksuplenan_mvl", + "llvm.ve.vl.pvfmksuplenan.mvml" => "__builtin_ve_vl_pvfmksuplenan_mvml", + "llvm.ve.vl.pvfmksuplt.mvl" => "__builtin_ve_vl_pvfmksuplt_mvl", + "llvm.ve.vl.pvfmksuplt.mvml" => "__builtin_ve_vl_pvfmksuplt_mvml", + "llvm.ve.vl.pvfmksupltnan.mvl" => "__builtin_ve_vl_pvfmksupltnan_mvl", + "llvm.ve.vl.pvfmksupltnan.mvml" => "__builtin_ve_vl_pvfmksupltnan_mvml", + "llvm.ve.vl.pvfmksupnan.mvl" => "__builtin_ve_vl_pvfmksupnan_mvl", + "llvm.ve.vl.pvfmksupnan.mvml" => "__builtin_ve_vl_pvfmksupnan_mvml", + "llvm.ve.vl.pvfmksupne.mvl" => "__builtin_ve_vl_pvfmksupne_mvl", + "llvm.ve.vl.pvfmksupne.mvml" => "__builtin_ve_vl_pvfmksupne_mvml", + "llvm.ve.vl.pvfmksupnenan.mvl" => "__builtin_ve_vl_pvfmksupnenan_mvl", + "llvm.ve.vl.pvfmksupnenan.mvml" => "__builtin_ve_vl_pvfmksupnenan_mvml", + "llvm.ve.vl.pvfmksupnum.mvl" => "__builtin_ve_vl_pvfmksupnum_mvl", + "llvm.ve.vl.pvfmksupnum.mvml" => "__builtin_ve_vl_pvfmksupnum_mvml", + "llvm.ve.vl.pvfmkweq.MvMl" => "__builtin_ve_vl_pvfmkweq_MvMl", + "llvm.ve.vl.pvfmkweq.Mvl" => "__builtin_ve_vl_pvfmkweq_Mvl", + "llvm.ve.vl.pvfmkweqnan.MvMl" => "__builtin_ve_vl_pvfmkweqnan_MvMl", + "llvm.ve.vl.pvfmkweqnan.Mvl" => "__builtin_ve_vl_pvfmkweqnan_Mvl", + "llvm.ve.vl.pvfmkwge.MvMl" => "__builtin_ve_vl_pvfmkwge_MvMl", + "llvm.ve.vl.pvfmkwge.Mvl" => "__builtin_ve_vl_pvfmkwge_Mvl", + "llvm.ve.vl.pvfmkwgenan.MvMl" => "__builtin_ve_vl_pvfmkwgenan_MvMl", + "llvm.ve.vl.pvfmkwgenan.Mvl" => "__builtin_ve_vl_pvfmkwgenan_Mvl", + "llvm.ve.vl.pvfmkwgt.MvMl" => "__builtin_ve_vl_pvfmkwgt_MvMl", + "llvm.ve.vl.pvfmkwgt.Mvl" => "__builtin_ve_vl_pvfmkwgt_Mvl", + "llvm.ve.vl.pvfmkwgtnan.MvMl" => "__builtin_ve_vl_pvfmkwgtnan_MvMl", + "llvm.ve.vl.pvfmkwgtnan.Mvl" => "__builtin_ve_vl_pvfmkwgtnan_Mvl", + "llvm.ve.vl.pvfmkwle.MvMl" => "__builtin_ve_vl_pvfmkwle_MvMl", + "llvm.ve.vl.pvfmkwle.Mvl" => "__builtin_ve_vl_pvfmkwle_Mvl", + "llvm.ve.vl.pvfmkwlenan.MvMl" => "__builtin_ve_vl_pvfmkwlenan_MvMl", + "llvm.ve.vl.pvfmkwlenan.Mvl" => "__builtin_ve_vl_pvfmkwlenan_Mvl", + "llvm.ve.vl.pvfmkwloeq.mvl" => "__builtin_ve_vl_pvfmkwloeq_mvl", + "llvm.ve.vl.pvfmkwloeq.mvml" => "__builtin_ve_vl_pvfmkwloeq_mvml", + "llvm.ve.vl.pvfmkwloeqnan.mvl" => "__builtin_ve_vl_pvfmkwloeqnan_mvl", + "llvm.ve.vl.pvfmkwloeqnan.mvml" => "__builtin_ve_vl_pvfmkwloeqnan_mvml", + "llvm.ve.vl.pvfmkwloge.mvl" => "__builtin_ve_vl_pvfmkwloge_mvl", + "llvm.ve.vl.pvfmkwloge.mvml" => 
"__builtin_ve_vl_pvfmkwloge_mvml", + "llvm.ve.vl.pvfmkwlogenan.mvl" => "__builtin_ve_vl_pvfmkwlogenan_mvl", + "llvm.ve.vl.pvfmkwlogenan.mvml" => "__builtin_ve_vl_pvfmkwlogenan_mvml", + "llvm.ve.vl.pvfmkwlogt.mvl" => "__builtin_ve_vl_pvfmkwlogt_mvl", + "llvm.ve.vl.pvfmkwlogt.mvml" => "__builtin_ve_vl_pvfmkwlogt_mvml", + "llvm.ve.vl.pvfmkwlogtnan.mvl" => "__builtin_ve_vl_pvfmkwlogtnan_mvl", + "llvm.ve.vl.pvfmkwlogtnan.mvml" => "__builtin_ve_vl_pvfmkwlogtnan_mvml", + "llvm.ve.vl.pvfmkwlole.mvl" => "__builtin_ve_vl_pvfmkwlole_mvl", + "llvm.ve.vl.pvfmkwlole.mvml" => "__builtin_ve_vl_pvfmkwlole_mvml", + "llvm.ve.vl.pvfmkwlolenan.mvl" => "__builtin_ve_vl_pvfmkwlolenan_mvl", + "llvm.ve.vl.pvfmkwlolenan.mvml" => "__builtin_ve_vl_pvfmkwlolenan_mvml", + "llvm.ve.vl.pvfmkwlolt.mvl" => "__builtin_ve_vl_pvfmkwlolt_mvl", + "llvm.ve.vl.pvfmkwlolt.mvml" => "__builtin_ve_vl_pvfmkwlolt_mvml", + "llvm.ve.vl.pvfmkwloltnan.mvl" => "__builtin_ve_vl_pvfmkwloltnan_mvl", + "llvm.ve.vl.pvfmkwloltnan.mvml" => "__builtin_ve_vl_pvfmkwloltnan_mvml", + "llvm.ve.vl.pvfmkwlonan.mvl" => "__builtin_ve_vl_pvfmkwlonan_mvl", + "llvm.ve.vl.pvfmkwlonan.mvml" => "__builtin_ve_vl_pvfmkwlonan_mvml", + "llvm.ve.vl.pvfmkwlone.mvl" => "__builtin_ve_vl_pvfmkwlone_mvl", + "llvm.ve.vl.pvfmkwlone.mvml" => "__builtin_ve_vl_pvfmkwlone_mvml", + "llvm.ve.vl.pvfmkwlonenan.mvl" => "__builtin_ve_vl_pvfmkwlonenan_mvl", + "llvm.ve.vl.pvfmkwlonenan.mvml" => "__builtin_ve_vl_pvfmkwlonenan_mvml", + "llvm.ve.vl.pvfmkwlonum.mvl" => "__builtin_ve_vl_pvfmkwlonum_mvl", + "llvm.ve.vl.pvfmkwlonum.mvml" => "__builtin_ve_vl_pvfmkwlonum_mvml", + "llvm.ve.vl.pvfmkwlt.MvMl" => "__builtin_ve_vl_pvfmkwlt_MvMl", + "llvm.ve.vl.pvfmkwlt.Mvl" => "__builtin_ve_vl_pvfmkwlt_Mvl", + "llvm.ve.vl.pvfmkwltnan.MvMl" => "__builtin_ve_vl_pvfmkwltnan_MvMl", + "llvm.ve.vl.pvfmkwltnan.Mvl" => "__builtin_ve_vl_pvfmkwltnan_Mvl", + "llvm.ve.vl.pvfmkwnan.MvMl" => "__builtin_ve_vl_pvfmkwnan_MvMl", + "llvm.ve.vl.pvfmkwnan.Mvl" => "__builtin_ve_vl_pvfmkwnan_Mvl", + "llvm.ve.vl.pvfmkwne.MvMl" => "__builtin_ve_vl_pvfmkwne_MvMl", + "llvm.ve.vl.pvfmkwne.Mvl" => "__builtin_ve_vl_pvfmkwne_Mvl", + "llvm.ve.vl.pvfmkwnenan.MvMl" => "__builtin_ve_vl_pvfmkwnenan_MvMl", + "llvm.ve.vl.pvfmkwnenan.Mvl" => "__builtin_ve_vl_pvfmkwnenan_Mvl", + "llvm.ve.vl.pvfmkwnum.MvMl" => "__builtin_ve_vl_pvfmkwnum_MvMl", + "llvm.ve.vl.pvfmkwnum.Mvl" => "__builtin_ve_vl_pvfmkwnum_Mvl", + "llvm.ve.vl.pvfmkwupeq.mvl" => "__builtin_ve_vl_pvfmkwupeq_mvl", + "llvm.ve.vl.pvfmkwupeq.mvml" => "__builtin_ve_vl_pvfmkwupeq_mvml", + "llvm.ve.vl.pvfmkwupeqnan.mvl" => "__builtin_ve_vl_pvfmkwupeqnan_mvl", + "llvm.ve.vl.pvfmkwupeqnan.mvml" => "__builtin_ve_vl_pvfmkwupeqnan_mvml", + "llvm.ve.vl.pvfmkwupge.mvl" => "__builtin_ve_vl_pvfmkwupge_mvl", + "llvm.ve.vl.pvfmkwupge.mvml" => "__builtin_ve_vl_pvfmkwupge_mvml", + "llvm.ve.vl.pvfmkwupgenan.mvl" => "__builtin_ve_vl_pvfmkwupgenan_mvl", + "llvm.ve.vl.pvfmkwupgenan.mvml" => "__builtin_ve_vl_pvfmkwupgenan_mvml", + "llvm.ve.vl.pvfmkwupgt.mvl" => "__builtin_ve_vl_pvfmkwupgt_mvl", + "llvm.ve.vl.pvfmkwupgt.mvml" => "__builtin_ve_vl_pvfmkwupgt_mvml", + "llvm.ve.vl.pvfmkwupgtnan.mvl" => "__builtin_ve_vl_pvfmkwupgtnan_mvl", + "llvm.ve.vl.pvfmkwupgtnan.mvml" => "__builtin_ve_vl_pvfmkwupgtnan_mvml", + "llvm.ve.vl.pvfmkwuple.mvl" => "__builtin_ve_vl_pvfmkwuple_mvl", + "llvm.ve.vl.pvfmkwuple.mvml" => "__builtin_ve_vl_pvfmkwuple_mvml", + "llvm.ve.vl.pvfmkwuplenan.mvl" => "__builtin_ve_vl_pvfmkwuplenan_mvl", + "llvm.ve.vl.pvfmkwuplenan.mvml" => "__builtin_ve_vl_pvfmkwuplenan_mvml", + 
"llvm.ve.vl.pvfmkwuplt.mvl" => "__builtin_ve_vl_pvfmkwuplt_mvl", + "llvm.ve.vl.pvfmkwuplt.mvml" => "__builtin_ve_vl_pvfmkwuplt_mvml", + "llvm.ve.vl.pvfmkwupltnan.mvl" => "__builtin_ve_vl_pvfmkwupltnan_mvl", + "llvm.ve.vl.pvfmkwupltnan.mvml" => "__builtin_ve_vl_pvfmkwupltnan_mvml", + "llvm.ve.vl.pvfmkwupnan.mvl" => "__builtin_ve_vl_pvfmkwupnan_mvl", + "llvm.ve.vl.pvfmkwupnan.mvml" => "__builtin_ve_vl_pvfmkwupnan_mvml", + "llvm.ve.vl.pvfmkwupne.mvl" => "__builtin_ve_vl_pvfmkwupne_mvl", + "llvm.ve.vl.pvfmkwupne.mvml" => "__builtin_ve_vl_pvfmkwupne_mvml", + "llvm.ve.vl.pvfmkwupnenan.mvl" => "__builtin_ve_vl_pvfmkwupnenan_mvl", + "llvm.ve.vl.pvfmkwupnenan.mvml" => "__builtin_ve_vl_pvfmkwupnenan_mvml", + "llvm.ve.vl.pvfmkwupnum.mvl" => "__builtin_ve_vl_pvfmkwupnum_mvl", + "llvm.ve.vl.pvfmkwupnum.mvml" => "__builtin_ve_vl_pvfmkwupnum_mvml", + "llvm.ve.vl.pvfmsb.vsvvMvl" => "__builtin_ve_vl_pvfmsb_vsvvMvl", + "llvm.ve.vl.pvfmsb.vsvvl" => "__builtin_ve_vl_pvfmsb_vsvvl", + "llvm.ve.vl.pvfmsb.vsvvvl" => "__builtin_ve_vl_pvfmsb_vsvvvl", + "llvm.ve.vl.pvfmsb.vvsvMvl" => "__builtin_ve_vl_pvfmsb_vvsvMvl", + "llvm.ve.vl.pvfmsb.vvsvl" => "__builtin_ve_vl_pvfmsb_vvsvl", + "llvm.ve.vl.pvfmsb.vvsvvl" => "__builtin_ve_vl_pvfmsb_vvsvvl", + "llvm.ve.vl.pvfmsb.vvvvMvl" => "__builtin_ve_vl_pvfmsb_vvvvMvl", + "llvm.ve.vl.pvfmsb.vvvvl" => "__builtin_ve_vl_pvfmsb_vvvvl", + "llvm.ve.vl.pvfmsb.vvvvvl" => "__builtin_ve_vl_pvfmsb_vvvvvl", + "llvm.ve.vl.pvfmul.vsvMvl" => "__builtin_ve_vl_pvfmul_vsvMvl", + "llvm.ve.vl.pvfmul.vsvl" => "__builtin_ve_vl_pvfmul_vsvl", + "llvm.ve.vl.pvfmul.vsvvl" => "__builtin_ve_vl_pvfmul_vsvvl", + "llvm.ve.vl.pvfmul.vvvMvl" => "__builtin_ve_vl_pvfmul_vvvMvl", + "llvm.ve.vl.pvfmul.vvvl" => "__builtin_ve_vl_pvfmul_vvvl", + "llvm.ve.vl.pvfmul.vvvvl" => "__builtin_ve_vl_pvfmul_vvvvl", + "llvm.ve.vl.pvfnmad.vsvvMvl" => "__builtin_ve_vl_pvfnmad_vsvvMvl", + "llvm.ve.vl.pvfnmad.vsvvl" => "__builtin_ve_vl_pvfnmad_vsvvl", + "llvm.ve.vl.pvfnmad.vsvvvl" => "__builtin_ve_vl_pvfnmad_vsvvvl", + "llvm.ve.vl.pvfnmad.vvsvMvl" => "__builtin_ve_vl_pvfnmad_vvsvMvl", + "llvm.ve.vl.pvfnmad.vvsvl" => "__builtin_ve_vl_pvfnmad_vvsvl", + "llvm.ve.vl.pvfnmad.vvsvvl" => "__builtin_ve_vl_pvfnmad_vvsvvl", + "llvm.ve.vl.pvfnmad.vvvvMvl" => "__builtin_ve_vl_pvfnmad_vvvvMvl", + "llvm.ve.vl.pvfnmad.vvvvl" => "__builtin_ve_vl_pvfnmad_vvvvl", + "llvm.ve.vl.pvfnmad.vvvvvl" => "__builtin_ve_vl_pvfnmad_vvvvvl", + "llvm.ve.vl.pvfnmsb.vsvvMvl" => "__builtin_ve_vl_pvfnmsb_vsvvMvl", + "llvm.ve.vl.pvfnmsb.vsvvl" => "__builtin_ve_vl_pvfnmsb_vsvvl", + "llvm.ve.vl.pvfnmsb.vsvvvl" => "__builtin_ve_vl_pvfnmsb_vsvvvl", + "llvm.ve.vl.pvfnmsb.vvsvMvl" => "__builtin_ve_vl_pvfnmsb_vvsvMvl", + "llvm.ve.vl.pvfnmsb.vvsvl" => "__builtin_ve_vl_pvfnmsb_vvsvl", + "llvm.ve.vl.pvfnmsb.vvsvvl" => "__builtin_ve_vl_pvfnmsb_vvsvvl", + "llvm.ve.vl.pvfnmsb.vvvvMvl" => "__builtin_ve_vl_pvfnmsb_vvvvMvl", + "llvm.ve.vl.pvfnmsb.vvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvl", + "llvm.ve.vl.pvfnmsb.vvvvvl" => "__builtin_ve_vl_pvfnmsb_vvvvvl", + "llvm.ve.vl.pvfsub.vsvMvl" => "__builtin_ve_vl_pvfsub_vsvMvl", + "llvm.ve.vl.pvfsub.vsvl" => "__builtin_ve_vl_pvfsub_vsvl", + "llvm.ve.vl.pvfsub.vsvvl" => "__builtin_ve_vl_pvfsub_vsvvl", + "llvm.ve.vl.pvfsub.vvvMvl" => "__builtin_ve_vl_pvfsub_vvvMvl", + "llvm.ve.vl.pvfsub.vvvl" => "__builtin_ve_vl_pvfsub_vvvl", + "llvm.ve.vl.pvfsub.vvvvl" => "__builtin_ve_vl_pvfsub_vvvvl", + "llvm.ve.vl.pvldz.vvMvl" => "__builtin_ve_vl_pvldz_vvMvl", + "llvm.ve.vl.pvldz.vvl" => "__builtin_ve_vl_pvldz_vvl", + "llvm.ve.vl.pvldz.vvvl" => 
"__builtin_ve_vl_pvldz_vvvl", + "llvm.ve.vl.pvldzlo.vvl" => "__builtin_ve_vl_pvldzlo_vvl", + "llvm.ve.vl.pvldzlo.vvmvl" => "__builtin_ve_vl_pvldzlo_vvmvl", + "llvm.ve.vl.pvldzlo.vvvl" => "__builtin_ve_vl_pvldzlo_vvvl", + "llvm.ve.vl.pvldzup.vvl" => "__builtin_ve_vl_pvldzup_vvl", + "llvm.ve.vl.pvldzup.vvmvl" => "__builtin_ve_vl_pvldzup_vvmvl", + "llvm.ve.vl.pvldzup.vvvl" => "__builtin_ve_vl_pvldzup_vvvl", + "llvm.ve.vl.pvmaxs.vsvMvl" => "__builtin_ve_vl_pvmaxs_vsvMvl", + "llvm.ve.vl.pvmaxs.vsvl" => "__builtin_ve_vl_pvmaxs_vsvl", + "llvm.ve.vl.pvmaxs.vsvvl" => "__builtin_ve_vl_pvmaxs_vsvvl", + "llvm.ve.vl.pvmaxs.vvvMvl" => "__builtin_ve_vl_pvmaxs_vvvMvl", + "llvm.ve.vl.pvmaxs.vvvl" => "__builtin_ve_vl_pvmaxs_vvvl", + "llvm.ve.vl.pvmaxs.vvvvl" => "__builtin_ve_vl_pvmaxs_vvvvl", + "llvm.ve.vl.pvmins.vsvMvl" => "__builtin_ve_vl_pvmins_vsvMvl", + "llvm.ve.vl.pvmins.vsvl" => "__builtin_ve_vl_pvmins_vsvl", + "llvm.ve.vl.pvmins.vsvvl" => "__builtin_ve_vl_pvmins_vsvvl", + "llvm.ve.vl.pvmins.vvvMvl" => "__builtin_ve_vl_pvmins_vvvMvl", + "llvm.ve.vl.pvmins.vvvl" => "__builtin_ve_vl_pvmins_vvvl", + "llvm.ve.vl.pvmins.vvvvl" => "__builtin_ve_vl_pvmins_vvvvl", + "llvm.ve.vl.pvor.vsvMvl" => "__builtin_ve_vl_pvor_vsvMvl", + "llvm.ve.vl.pvor.vsvl" => "__builtin_ve_vl_pvor_vsvl", + "llvm.ve.vl.pvor.vsvvl" => "__builtin_ve_vl_pvor_vsvvl", + "llvm.ve.vl.pvor.vvvMvl" => "__builtin_ve_vl_pvor_vvvMvl", + "llvm.ve.vl.pvor.vvvl" => "__builtin_ve_vl_pvor_vvvl", + "llvm.ve.vl.pvor.vvvvl" => "__builtin_ve_vl_pvor_vvvvl", + "llvm.ve.vl.pvpcnt.vvMvl" => "__builtin_ve_vl_pvpcnt_vvMvl", + "llvm.ve.vl.pvpcnt.vvl" => "__builtin_ve_vl_pvpcnt_vvl", + "llvm.ve.vl.pvpcnt.vvvl" => "__builtin_ve_vl_pvpcnt_vvvl", + "llvm.ve.vl.pvpcntlo.vvl" => "__builtin_ve_vl_pvpcntlo_vvl", + "llvm.ve.vl.pvpcntlo.vvmvl" => "__builtin_ve_vl_pvpcntlo_vvmvl", + "llvm.ve.vl.pvpcntlo.vvvl" => "__builtin_ve_vl_pvpcntlo_vvvl", + "llvm.ve.vl.pvpcntup.vvl" => "__builtin_ve_vl_pvpcntup_vvl", + "llvm.ve.vl.pvpcntup.vvmvl" => "__builtin_ve_vl_pvpcntup_vvmvl", + "llvm.ve.vl.pvpcntup.vvvl" => "__builtin_ve_vl_pvpcntup_vvvl", + "llvm.ve.vl.pvrcp.vvl" => "__builtin_ve_vl_pvrcp_vvl", + "llvm.ve.vl.pvrcp.vvvl" => "__builtin_ve_vl_pvrcp_vvvl", + "llvm.ve.vl.pvrsqrt.vvl" => "__builtin_ve_vl_pvrsqrt_vvl", + "llvm.ve.vl.pvrsqrt.vvvl" => "__builtin_ve_vl_pvrsqrt_vvvl", + "llvm.ve.vl.pvrsqrtnex.vvl" => "__builtin_ve_vl_pvrsqrtnex_vvl", + "llvm.ve.vl.pvrsqrtnex.vvvl" => "__builtin_ve_vl_pvrsqrtnex_vvvl", + "llvm.ve.vl.pvseq.vl" => "__builtin_ve_vl_pvseq_vl", + "llvm.ve.vl.pvseq.vvl" => "__builtin_ve_vl_pvseq_vvl", + "llvm.ve.vl.pvseqlo.vl" => "__builtin_ve_vl_pvseqlo_vl", + "llvm.ve.vl.pvseqlo.vvl" => "__builtin_ve_vl_pvseqlo_vvl", + "llvm.ve.vl.pvsequp.vl" => "__builtin_ve_vl_pvsequp_vl", + "llvm.ve.vl.pvsequp.vvl" => "__builtin_ve_vl_pvsequp_vvl", + "llvm.ve.vl.pvsla.vvsMvl" => "__builtin_ve_vl_pvsla_vvsMvl", + "llvm.ve.vl.pvsla.vvsl" => "__builtin_ve_vl_pvsla_vvsl", + "llvm.ve.vl.pvsla.vvsvl" => "__builtin_ve_vl_pvsla_vvsvl", + "llvm.ve.vl.pvsla.vvvMvl" => "__builtin_ve_vl_pvsla_vvvMvl", + "llvm.ve.vl.pvsla.vvvl" => "__builtin_ve_vl_pvsla_vvvl", + "llvm.ve.vl.pvsla.vvvvl" => "__builtin_ve_vl_pvsla_vvvvl", + "llvm.ve.vl.pvsll.vvsMvl" => "__builtin_ve_vl_pvsll_vvsMvl", + "llvm.ve.vl.pvsll.vvsl" => "__builtin_ve_vl_pvsll_vvsl", + "llvm.ve.vl.pvsll.vvsvl" => "__builtin_ve_vl_pvsll_vvsvl", + "llvm.ve.vl.pvsll.vvvMvl" => "__builtin_ve_vl_pvsll_vvvMvl", + "llvm.ve.vl.pvsll.vvvl" => "__builtin_ve_vl_pvsll_vvvl", + "llvm.ve.vl.pvsll.vvvvl" => "__builtin_ve_vl_pvsll_vvvvl", + 
"llvm.ve.vl.pvsra.vvsMvl" => "__builtin_ve_vl_pvsra_vvsMvl", + "llvm.ve.vl.pvsra.vvsl" => "__builtin_ve_vl_pvsra_vvsl", + "llvm.ve.vl.pvsra.vvsvl" => "__builtin_ve_vl_pvsra_vvsvl", + "llvm.ve.vl.pvsra.vvvMvl" => "__builtin_ve_vl_pvsra_vvvMvl", + "llvm.ve.vl.pvsra.vvvl" => "__builtin_ve_vl_pvsra_vvvl", + "llvm.ve.vl.pvsra.vvvvl" => "__builtin_ve_vl_pvsra_vvvvl", + "llvm.ve.vl.pvsrl.vvsMvl" => "__builtin_ve_vl_pvsrl_vvsMvl", + "llvm.ve.vl.pvsrl.vvsl" => "__builtin_ve_vl_pvsrl_vvsl", + "llvm.ve.vl.pvsrl.vvsvl" => "__builtin_ve_vl_pvsrl_vvsvl", + "llvm.ve.vl.pvsrl.vvvMvl" => "__builtin_ve_vl_pvsrl_vvvMvl", + "llvm.ve.vl.pvsrl.vvvl" => "__builtin_ve_vl_pvsrl_vvvl", + "llvm.ve.vl.pvsrl.vvvvl" => "__builtin_ve_vl_pvsrl_vvvvl", + "llvm.ve.vl.pvsubs.vsvMvl" => "__builtin_ve_vl_pvsubs_vsvMvl", + "llvm.ve.vl.pvsubs.vsvl" => "__builtin_ve_vl_pvsubs_vsvl", + "llvm.ve.vl.pvsubs.vsvvl" => "__builtin_ve_vl_pvsubs_vsvvl", + "llvm.ve.vl.pvsubs.vvvMvl" => "__builtin_ve_vl_pvsubs_vvvMvl", + "llvm.ve.vl.pvsubs.vvvl" => "__builtin_ve_vl_pvsubs_vvvl", + "llvm.ve.vl.pvsubs.vvvvl" => "__builtin_ve_vl_pvsubs_vvvvl", + "llvm.ve.vl.pvsubu.vsvMvl" => "__builtin_ve_vl_pvsubu_vsvMvl", + "llvm.ve.vl.pvsubu.vsvl" => "__builtin_ve_vl_pvsubu_vsvl", + "llvm.ve.vl.pvsubu.vsvvl" => "__builtin_ve_vl_pvsubu_vsvvl", + "llvm.ve.vl.pvsubu.vvvMvl" => "__builtin_ve_vl_pvsubu_vvvMvl", + "llvm.ve.vl.pvsubu.vvvl" => "__builtin_ve_vl_pvsubu_vvvl", + "llvm.ve.vl.pvsubu.vvvvl" => "__builtin_ve_vl_pvsubu_vvvvl", + "llvm.ve.vl.pvxor.vsvMvl" => "__builtin_ve_vl_pvxor_vsvMvl", + "llvm.ve.vl.pvxor.vsvl" => "__builtin_ve_vl_pvxor_vsvl", + "llvm.ve.vl.pvxor.vsvvl" => "__builtin_ve_vl_pvxor_vsvvl", + "llvm.ve.vl.pvxor.vvvMvl" => "__builtin_ve_vl_pvxor_vvvMvl", + "llvm.ve.vl.pvxor.vvvl" => "__builtin_ve_vl_pvxor_vvvl", + "llvm.ve.vl.pvxor.vvvvl" => "__builtin_ve_vl_pvxor_vvvvl", + "llvm.ve.vl.scr.sss" => "__builtin_ve_vl_scr_sss", + "llvm.ve.vl.svm.sMs" => "__builtin_ve_vl_svm_sMs", + "llvm.ve.vl.svm.sms" => "__builtin_ve_vl_svm_sms", + "llvm.ve.vl.svob" => "__builtin_ve_vl_svob", + "llvm.ve.vl.tovm.sml" => "__builtin_ve_vl_tovm_sml", + "llvm.ve.vl.tscr.ssss" => "__builtin_ve_vl_tscr_ssss", + "llvm.ve.vl.vaddsl.vsvl" => "__builtin_ve_vl_vaddsl_vsvl", + "llvm.ve.vl.vaddsl.vsvmvl" => "__builtin_ve_vl_vaddsl_vsvmvl", + "llvm.ve.vl.vaddsl.vsvvl" => "__builtin_ve_vl_vaddsl_vsvvl", + "llvm.ve.vl.vaddsl.vvvl" => "__builtin_ve_vl_vaddsl_vvvl", + "llvm.ve.vl.vaddsl.vvvmvl" => "__builtin_ve_vl_vaddsl_vvvmvl", + "llvm.ve.vl.vaddsl.vvvvl" => "__builtin_ve_vl_vaddsl_vvvvl", + "llvm.ve.vl.vaddswsx.vsvl" => "__builtin_ve_vl_vaddswsx_vsvl", + "llvm.ve.vl.vaddswsx.vsvmvl" => "__builtin_ve_vl_vaddswsx_vsvmvl", + "llvm.ve.vl.vaddswsx.vsvvl" => "__builtin_ve_vl_vaddswsx_vsvvl", + "llvm.ve.vl.vaddswsx.vvvl" => "__builtin_ve_vl_vaddswsx_vvvl", + "llvm.ve.vl.vaddswsx.vvvmvl" => "__builtin_ve_vl_vaddswsx_vvvmvl", + "llvm.ve.vl.vaddswsx.vvvvl" => "__builtin_ve_vl_vaddswsx_vvvvl", + "llvm.ve.vl.vaddswzx.vsvl" => "__builtin_ve_vl_vaddswzx_vsvl", + "llvm.ve.vl.vaddswzx.vsvmvl" => "__builtin_ve_vl_vaddswzx_vsvmvl", + "llvm.ve.vl.vaddswzx.vsvvl" => "__builtin_ve_vl_vaddswzx_vsvvl", + "llvm.ve.vl.vaddswzx.vvvl" => "__builtin_ve_vl_vaddswzx_vvvl", + "llvm.ve.vl.vaddswzx.vvvmvl" => "__builtin_ve_vl_vaddswzx_vvvmvl", + "llvm.ve.vl.vaddswzx.vvvvl" => "__builtin_ve_vl_vaddswzx_vvvvl", + "llvm.ve.vl.vaddul.vsvl" => "__builtin_ve_vl_vaddul_vsvl", + "llvm.ve.vl.vaddul.vsvmvl" => "__builtin_ve_vl_vaddul_vsvmvl", + "llvm.ve.vl.vaddul.vsvvl" => "__builtin_ve_vl_vaddul_vsvvl", + 
"llvm.ve.vl.vaddul.vvvl" => "__builtin_ve_vl_vaddul_vvvl", + "llvm.ve.vl.vaddul.vvvmvl" => "__builtin_ve_vl_vaddul_vvvmvl", + "llvm.ve.vl.vaddul.vvvvl" => "__builtin_ve_vl_vaddul_vvvvl", + "llvm.ve.vl.vadduw.vsvl" => "__builtin_ve_vl_vadduw_vsvl", + "llvm.ve.vl.vadduw.vsvmvl" => "__builtin_ve_vl_vadduw_vsvmvl", + "llvm.ve.vl.vadduw.vsvvl" => "__builtin_ve_vl_vadduw_vsvvl", + "llvm.ve.vl.vadduw.vvvl" => "__builtin_ve_vl_vadduw_vvvl", + "llvm.ve.vl.vadduw.vvvmvl" => "__builtin_ve_vl_vadduw_vvvmvl", + "llvm.ve.vl.vadduw.vvvvl" => "__builtin_ve_vl_vadduw_vvvvl", + "llvm.ve.vl.vand.vsvl" => "__builtin_ve_vl_vand_vsvl", + "llvm.ve.vl.vand.vsvmvl" => "__builtin_ve_vl_vand_vsvmvl", + "llvm.ve.vl.vand.vsvvl" => "__builtin_ve_vl_vand_vsvvl", + "llvm.ve.vl.vand.vvvl" => "__builtin_ve_vl_vand_vvvl", + "llvm.ve.vl.vand.vvvmvl" => "__builtin_ve_vl_vand_vvvmvl", + "llvm.ve.vl.vand.vvvvl" => "__builtin_ve_vl_vand_vvvvl", + "llvm.ve.vl.vbrdd.vsl" => "__builtin_ve_vl_vbrdd_vsl", + "llvm.ve.vl.vbrdd.vsmvl" => "__builtin_ve_vl_vbrdd_vsmvl", + "llvm.ve.vl.vbrdd.vsvl" => "__builtin_ve_vl_vbrdd_vsvl", + "llvm.ve.vl.vbrdl.vsl" => "__builtin_ve_vl_vbrdl_vsl", + "llvm.ve.vl.vbrdl.vsmvl" => "__builtin_ve_vl_vbrdl_vsmvl", + "llvm.ve.vl.vbrdl.vsvl" => "__builtin_ve_vl_vbrdl_vsvl", + "llvm.ve.vl.vbrds.vsl" => "__builtin_ve_vl_vbrds_vsl", + "llvm.ve.vl.vbrds.vsmvl" => "__builtin_ve_vl_vbrds_vsmvl", + "llvm.ve.vl.vbrds.vsvl" => "__builtin_ve_vl_vbrds_vsvl", + "llvm.ve.vl.vbrdw.vsl" => "__builtin_ve_vl_vbrdw_vsl", + "llvm.ve.vl.vbrdw.vsmvl" => "__builtin_ve_vl_vbrdw_vsmvl", + "llvm.ve.vl.vbrdw.vsvl" => "__builtin_ve_vl_vbrdw_vsvl", + "llvm.ve.vl.vbrv.vvl" => "__builtin_ve_vl_vbrv_vvl", + "llvm.ve.vl.vbrv.vvmvl" => "__builtin_ve_vl_vbrv_vvmvl", + "llvm.ve.vl.vbrv.vvvl" => "__builtin_ve_vl_vbrv_vvvl", + "llvm.ve.vl.vcmpsl.vsvl" => "__builtin_ve_vl_vcmpsl_vsvl", + "llvm.ve.vl.vcmpsl.vsvmvl" => "__builtin_ve_vl_vcmpsl_vsvmvl", + "llvm.ve.vl.vcmpsl.vsvvl" => "__builtin_ve_vl_vcmpsl_vsvvl", + "llvm.ve.vl.vcmpsl.vvvl" => "__builtin_ve_vl_vcmpsl_vvvl", + "llvm.ve.vl.vcmpsl.vvvmvl" => "__builtin_ve_vl_vcmpsl_vvvmvl", + "llvm.ve.vl.vcmpsl.vvvvl" => "__builtin_ve_vl_vcmpsl_vvvvl", + "llvm.ve.vl.vcmpswsx.vsvl" => "__builtin_ve_vl_vcmpswsx_vsvl", + "llvm.ve.vl.vcmpswsx.vsvmvl" => "__builtin_ve_vl_vcmpswsx_vsvmvl", + "llvm.ve.vl.vcmpswsx.vsvvl" => "__builtin_ve_vl_vcmpswsx_vsvvl", + "llvm.ve.vl.vcmpswsx.vvvl" => "__builtin_ve_vl_vcmpswsx_vvvl", + "llvm.ve.vl.vcmpswsx.vvvmvl" => "__builtin_ve_vl_vcmpswsx_vvvmvl", + "llvm.ve.vl.vcmpswsx.vvvvl" => "__builtin_ve_vl_vcmpswsx_vvvvl", + "llvm.ve.vl.vcmpswzx.vsvl" => "__builtin_ve_vl_vcmpswzx_vsvl", + "llvm.ve.vl.vcmpswzx.vsvmvl" => "__builtin_ve_vl_vcmpswzx_vsvmvl", + "llvm.ve.vl.vcmpswzx.vsvvl" => "__builtin_ve_vl_vcmpswzx_vsvvl", + "llvm.ve.vl.vcmpswzx.vvvl" => "__builtin_ve_vl_vcmpswzx_vvvl", + "llvm.ve.vl.vcmpswzx.vvvmvl" => "__builtin_ve_vl_vcmpswzx_vvvmvl", + "llvm.ve.vl.vcmpswzx.vvvvl" => "__builtin_ve_vl_vcmpswzx_vvvvl", + "llvm.ve.vl.vcmpul.vsvl" => "__builtin_ve_vl_vcmpul_vsvl", + "llvm.ve.vl.vcmpul.vsvmvl" => "__builtin_ve_vl_vcmpul_vsvmvl", + "llvm.ve.vl.vcmpul.vsvvl" => "__builtin_ve_vl_vcmpul_vsvvl", + "llvm.ve.vl.vcmpul.vvvl" => "__builtin_ve_vl_vcmpul_vvvl", + "llvm.ve.vl.vcmpul.vvvmvl" => "__builtin_ve_vl_vcmpul_vvvmvl", + "llvm.ve.vl.vcmpul.vvvvl" => "__builtin_ve_vl_vcmpul_vvvvl", + "llvm.ve.vl.vcmpuw.vsvl" => "__builtin_ve_vl_vcmpuw_vsvl", + "llvm.ve.vl.vcmpuw.vsvmvl" => "__builtin_ve_vl_vcmpuw_vsvmvl", + "llvm.ve.vl.vcmpuw.vsvvl" => "__builtin_ve_vl_vcmpuw_vsvvl", + 
"llvm.ve.vl.vcmpuw.vvvl" => "__builtin_ve_vl_vcmpuw_vvvl", + "llvm.ve.vl.vcmpuw.vvvmvl" => "__builtin_ve_vl_vcmpuw_vvvmvl", + "llvm.ve.vl.vcmpuw.vvvvl" => "__builtin_ve_vl_vcmpuw_vvvvl", + "llvm.ve.vl.vcp.vvmvl" => "__builtin_ve_vl_vcp_vvmvl", + "llvm.ve.vl.vcvtdl.vvl" => "__builtin_ve_vl_vcvtdl_vvl", + "llvm.ve.vl.vcvtdl.vvvl" => "__builtin_ve_vl_vcvtdl_vvvl", + "llvm.ve.vl.vcvtds.vvl" => "__builtin_ve_vl_vcvtds_vvl", + "llvm.ve.vl.vcvtds.vvvl" => "__builtin_ve_vl_vcvtds_vvvl", + "llvm.ve.vl.vcvtdw.vvl" => "__builtin_ve_vl_vcvtdw_vvl", + "llvm.ve.vl.vcvtdw.vvvl" => "__builtin_ve_vl_vcvtdw_vvvl", + "llvm.ve.vl.vcvtld.vvl" => "__builtin_ve_vl_vcvtld_vvl", + "llvm.ve.vl.vcvtld.vvmvl" => "__builtin_ve_vl_vcvtld_vvmvl", + "llvm.ve.vl.vcvtld.vvvl" => "__builtin_ve_vl_vcvtld_vvvl", + "llvm.ve.vl.vcvtldrz.vvl" => "__builtin_ve_vl_vcvtldrz_vvl", + "llvm.ve.vl.vcvtldrz.vvmvl" => "__builtin_ve_vl_vcvtldrz_vvmvl", + "llvm.ve.vl.vcvtldrz.vvvl" => "__builtin_ve_vl_vcvtldrz_vvvl", + "llvm.ve.vl.vcvtsd.vvl" => "__builtin_ve_vl_vcvtsd_vvl", + "llvm.ve.vl.vcvtsd.vvvl" => "__builtin_ve_vl_vcvtsd_vvvl", + "llvm.ve.vl.vcvtsw.vvl" => "__builtin_ve_vl_vcvtsw_vvl", + "llvm.ve.vl.vcvtsw.vvvl" => "__builtin_ve_vl_vcvtsw_vvvl", + "llvm.ve.vl.vcvtwdsx.vvl" => "__builtin_ve_vl_vcvtwdsx_vvl", + "llvm.ve.vl.vcvtwdsx.vvmvl" => "__builtin_ve_vl_vcvtwdsx_vvmvl", + "llvm.ve.vl.vcvtwdsx.vvvl" => "__builtin_ve_vl_vcvtwdsx_vvvl", + "llvm.ve.vl.vcvtwdsxrz.vvl" => "__builtin_ve_vl_vcvtwdsxrz_vvl", + "llvm.ve.vl.vcvtwdsxrz.vvmvl" => "__builtin_ve_vl_vcvtwdsxrz_vvmvl", + "llvm.ve.vl.vcvtwdsxrz.vvvl" => "__builtin_ve_vl_vcvtwdsxrz_vvvl", + "llvm.ve.vl.vcvtwdzx.vvl" => "__builtin_ve_vl_vcvtwdzx_vvl", + "llvm.ve.vl.vcvtwdzx.vvmvl" => "__builtin_ve_vl_vcvtwdzx_vvmvl", + "llvm.ve.vl.vcvtwdzx.vvvl" => "__builtin_ve_vl_vcvtwdzx_vvvl", + "llvm.ve.vl.vcvtwdzxrz.vvl" => "__builtin_ve_vl_vcvtwdzxrz_vvl", + "llvm.ve.vl.vcvtwdzxrz.vvmvl" => "__builtin_ve_vl_vcvtwdzxrz_vvmvl", + "llvm.ve.vl.vcvtwdzxrz.vvvl" => "__builtin_ve_vl_vcvtwdzxrz_vvvl", + "llvm.ve.vl.vcvtwssx.vvl" => "__builtin_ve_vl_vcvtwssx_vvl", + "llvm.ve.vl.vcvtwssx.vvmvl" => "__builtin_ve_vl_vcvtwssx_vvmvl", + "llvm.ve.vl.vcvtwssx.vvvl" => "__builtin_ve_vl_vcvtwssx_vvvl", + "llvm.ve.vl.vcvtwssxrz.vvl" => "__builtin_ve_vl_vcvtwssxrz_vvl", + "llvm.ve.vl.vcvtwssxrz.vvmvl" => "__builtin_ve_vl_vcvtwssxrz_vvmvl", + "llvm.ve.vl.vcvtwssxrz.vvvl" => "__builtin_ve_vl_vcvtwssxrz_vvvl", + "llvm.ve.vl.vcvtwszx.vvl" => "__builtin_ve_vl_vcvtwszx_vvl", + "llvm.ve.vl.vcvtwszx.vvmvl" => "__builtin_ve_vl_vcvtwszx_vvmvl", + "llvm.ve.vl.vcvtwszx.vvvl" => "__builtin_ve_vl_vcvtwszx_vvvl", + "llvm.ve.vl.vcvtwszxrz.vvl" => "__builtin_ve_vl_vcvtwszxrz_vvl", + "llvm.ve.vl.vcvtwszxrz.vvmvl" => "__builtin_ve_vl_vcvtwszxrz_vvmvl", + "llvm.ve.vl.vcvtwszxrz.vvvl" => "__builtin_ve_vl_vcvtwszxrz_vvvl", + "llvm.ve.vl.vdivsl.vsvl" => "__builtin_ve_vl_vdivsl_vsvl", + "llvm.ve.vl.vdivsl.vsvmvl" => "__builtin_ve_vl_vdivsl_vsvmvl", + "llvm.ve.vl.vdivsl.vsvvl" => "__builtin_ve_vl_vdivsl_vsvvl", + "llvm.ve.vl.vdivsl.vvsl" => "__builtin_ve_vl_vdivsl_vvsl", + "llvm.ve.vl.vdivsl.vvsmvl" => "__builtin_ve_vl_vdivsl_vvsmvl", + "llvm.ve.vl.vdivsl.vvsvl" => "__builtin_ve_vl_vdivsl_vvsvl", + "llvm.ve.vl.vdivsl.vvvl" => "__builtin_ve_vl_vdivsl_vvvl", + "llvm.ve.vl.vdivsl.vvvmvl" => "__builtin_ve_vl_vdivsl_vvvmvl", + "llvm.ve.vl.vdivsl.vvvvl" => "__builtin_ve_vl_vdivsl_vvvvl", + "llvm.ve.vl.vdivswsx.vsvl" => "__builtin_ve_vl_vdivswsx_vsvl", + "llvm.ve.vl.vdivswsx.vsvmvl" => "__builtin_ve_vl_vdivswsx_vsvmvl", + 
"llvm.ve.vl.vdivswsx.vsvvl" => "__builtin_ve_vl_vdivswsx_vsvvl", + "llvm.ve.vl.vdivswsx.vvsl" => "__builtin_ve_vl_vdivswsx_vvsl", + "llvm.ve.vl.vdivswsx.vvsmvl" => "__builtin_ve_vl_vdivswsx_vvsmvl", + "llvm.ve.vl.vdivswsx.vvsvl" => "__builtin_ve_vl_vdivswsx_vvsvl", + "llvm.ve.vl.vdivswsx.vvvl" => "__builtin_ve_vl_vdivswsx_vvvl", + "llvm.ve.vl.vdivswsx.vvvmvl" => "__builtin_ve_vl_vdivswsx_vvvmvl", + "llvm.ve.vl.vdivswsx.vvvvl" => "__builtin_ve_vl_vdivswsx_vvvvl", + "llvm.ve.vl.vdivswzx.vsvl" => "__builtin_ve_vl_vdivswzx_vsvl", + "llvm.ve.vl.vdivswzx.vsvmvl" => "__builtin_ve_vl_vdivswzx_vsvmvl", + "llvm.ve.vl.vdivswzx.vsvvl" => "__builtin_ve_vl_vdivswzx_vsvvl", + "llvm.ve.vl.vdivswzx.vvsl" => "__builtin_ve_vl_vdivswzx_vvsl", + "llvm.ve.vl.vdivswzx.vvsmvl" => "__builtin_ve_vl_vdivswzx_vvsmvl", + "llvm.ve.vl.vdivswzx.vvsvl" => "__builtin_ve_vl_vdivswzx_vvsvl", + "llvm.ve.vl.vdivswzx.vvvl" => "__builtin_ve_vl_vdivswzx_vvvl", + "llvm.ve.vl.vdivswzx.vvvmvl" => "__builtin_ve_vl_vdivswzx_vvvmvl", + "llvm.ve.vl.vdivswzx.vvvvl" => "__builtin_ve_vl_vdivswzx_vvvvl", + "llvm.ve.vl.vdivul.vsvl" => "__builtin_ve_vl_vdivul_vsvl", + "llvm.ve.vl.vdivul.vsvmvl" => "__builtin_ve_vl_vdivul_vsvmvl", + "llvm.ve.vl.vdivul.vsvvl" => "__builtin_ve_vl_vdivul_vsvvl", + "llvm.ve.vl.vdivul.vvsl" => "__builtin_ve_vl_vdivul_vvsl", + "llvm.ve.vl.vdivul.vvsmvl" => "__builtin_ve_vl_vdivul_vvsmvl", + "llvm.ve.vl.vdivul.vvsvl" => "__builtin_ve_vl_vdivul_vvsvl", + "llvm.ve.vl.vdivul.vvvl" => "__builtin_ve_vl_vdivul_vvvl", + "llvm.ve.vl.vdivul.vvvmvl" => "__builtin_ve_vl_vdivul_vvvmvl", + "llvm.ve.vl.vdivul.vvvvl" => "__builtin_ve_vl_vdivul_vvvvl", + "llvm.ve.vl.vdivuw.vsvl" => "__builtin_ve_vl_vdivuw_vsvl", + "llvm.ve.vl.vdivuw.vsvmvl" => "__builtin_ve_vl_vdivuw_vsvmvl", + "llvm.ve.vl.vdivuw.vsvvl" => "__builtin_ve_vl_vdivuw_vsvvl", + "llvm.ve.vl.vdivuw.vvsl" => "__builtin_ve_vl_vdivuw_vvsl", + "llvm.ve.vl.vdivuw.vvsmvl" => "__builtin_ve_vl_vdivuw_vvsmvl", + "llvm.ve.vl.vdivuw.vvsvl" => "__builtin_ve_vl_vdivuw_vvsvl", + "llvm.ve.vl.vdivuw.vvvl" => "__builtin_ve_vl_vdivuw_vvvl", + "llvm.ve.vl.vdivuw.vvvmvl" => "__builtin_ve_vl_vdivuw_vvvmvl", + "llvm.ve.vl.vdivuw.vvvvl" => "__builtin_ve_vl_vdivuw_vvvvl", + "llvm.ve.vl.veqv.vsvl" => "__builtin_ve_vl_veqv_vsvl", + "llvm.ve.vl.veqv.vsvmvl" => "__builtin_ve_vl_veqv_vsvmvl", + "llvm.ve.vl.veqv.vsvvl" => "__builtin_ve_vl_veqv_vsvvl", + "llvm.ve.vl.veqv.vvvl" => "__builtin_ve_vl_veqv_vvvl", + "llvm.ve.vl.veqv.vvvmvl" => "__builtin_ve_vl_veqv_vvvmvl", + "llvm.ve.vl.veqv.vvvvl" => "__builtin_ve_vl_veqv_vvvvl", + "llvm.ve.vl.vex.vvmvl" => "__builtin_ve_vl_vex_vvmvl", + "llvm.ve.vl.vfaddd.vsvl" => "__builtin_ve_vl_vfaddd_vsvl", + "llvm.ve.vl.vfaddd.vsvmvl" => "__builtin_ve_vl_vfaddd_vsvmvl", + "llvm.ve.vl.vfaddd.vsvvl" => "__builtin_ve_vl_vfaddd_vsvvl", + "llvm.ve.vl.vfaddd.vvvl" => "__builtin_ve_vl_vfaddd_vvvl", + "llvm.ve.vl.vfaddd.vvvmvl" => "__builtin_ve_vl_vfaddd_vvvmvl", + "llvm.ve.vl.vfaddd.vvvvl" => "__builtin_ve_vl_vfaddd_vvvvl", + "llvm.ve.vl.vfadds.vsvl" => "__builtin_ve_vl_vfadds_vsvl", + "llvm.ve.vl.vfadds.vsvmvl" => "__builtin_ve_vl_vfadds_vsvmvl", + "llvm.ve.vl.vfadds.vsvvl" => "__builtin_ve_vl_vfadds_vsvvl", + "llvm.ve.vl.vfadds.vvvl" => "__builtin_ve_vl_vfadds_vvvl", + "llvm.ve.vl.vfadds.vvvmvl" => "__builtin_ve_vl_vfadds_vvvmvl", + "llvm.ve.vl.vfadds.vvvvl" => "__builtin_ve_vl_vfadds_vvvvl", + "llvm.ve.vl.vfcmpd.vsvl" => "__builtin_ve_vl_vfcmpd_vsvl", + "llvm.ve.vl.vfcmpd.vsvmvl" => "__builtin_ve_vl_vfcmpd_vsvmvl", + "llvm.ve.vl.vfcmpd.vsvvl" => 
"__builtin_ve_vl_vfcmpd_vsvvl", + "llvm.ve.vl.vfcmpd.vvvl" => "__builtin_ve_vl_vfcmpd_vvvl", + "llvm.ve.vl.vfcmpd.vvvmvl" => "__builtin_ve_vl_vfcmpd_vvvmvl", + "llvm.ve.vl.vfcmpd.vvvvl" => "__builtin_ve_vl_vfcmpd_vvvvl", + "llvm.ve.vl.vfcmps.vsvl" => "__builtin_ve_vl_vfcmps_vsvl", + "llvm.ve.vl.vfcmps.vsvmvl" => "__builtin_ve_vl_vfcmps_vsvmvl", + "llvm.ve.vl.vfcmps.vsvvl" => "__builtin_ve_vl_vfcmps_vsvvl", + "llvm.ve.vl.vfcmps.vvvl" => "__builtin_ve_vl_vfcmps_vvvl", + "llvm.ve.vl.vfcmps.vvvmvl" => "__builtin_ve_vl_vfcmps_vvvmvl", + "llvm.ve.vl.vfcmps.vvvvl" => "__builtin_ve_vl_vfcmps_vvvvl", + "llvm.ve.vl.vfdivd.vsvl" => "__builtin_ve_vl_vfdivd_vsvl", + "llvm.ve.vl.vfdivd.vsvmvl" => "__builtin_ve_vl_vfdivd_vsvmvl", + "llvm.ve.vl.vfdivd.vsvvl" => "__builtin_ve_vl_vfdivd_vsvvl", + "llvm.ve.vl.vfdivd.vvvl" => "__builtin_ve_vl_vfdivd_vvvl", + "llvm.ve.vl.vfdivd.vvvmvl" => "__builtin_ve_vl_vfdivd_vvvmvl", + "llvm.ve.vl.vfdivd.vvvvl" => "__builtin_ve_vl_vfdivd_vvvvl", + "llvm.ve.vl.vfdivs.vsvl" => "__builtin_ve_vl_vfdivs_vsvl", + "llvm.ve.vl.vfdivs.vsvmvl" => "__builtin_ve_vl_vfdivs_vsvmvl", + "llvm.ve.vl.vfdivs.vsvvl" => "__builtin_ve_vl_vfdivs_vsvvl", + "llvm.ve.vl.vfdivs.vvvl" => "__builtin_ve_vl_vfdivs_vvvl", + "llvm.ve.vl.vfdivs.vvvmvl" => "__builtin_ve_vl_vfdivs_vvvmvl", + "llvm.ve.vl.vfdivs.vvvvl" => "__builtin_ve_vl_vfdivs_vvvvl", + "llvm.ve.vl.vfmadd.vsvvl" => "__builtin_ve_vl_vfmadd_vsvvl", + "llvm.ve.vl.vfmadd.vsvvmvl" => "__builtin_ve_vl_vfmadd_vsvvmvl", + "llvm.ve.vl.vfmadd.vsvvvl" => "__builtin_ve_vl_vfmadd_vsvvvl", + "llvm.ve.vl.vfmadd.vvsvl" => "__builtin_ve_vl_vfmadd_vvsvl", + "llvm.ve.vl.vfmadd.vvsvmvl" => "__builtin_ve_vl_vfmadd_vvsvmvl", + "llvm.ve.vl.vfmadd.vvsvvl" => "__builtin_ve_vl_vfmadd_vvsvvl", + "llvm.ve.vl.vfmadd.vvvvl" => "__builtin_ve_vl_vfmadd_vvvvl", + "llvm.ve.vl.vfmadd.vvvvmvl" => "__builtin_ve_vl_vfmadd_vvvvmvl", + "llvm.ve.vl.vfmadd.vvvvvl" => "__builtin_ve_vl_vfmadd_vvvvvl", + "llvm.ve.vl.vfmads.vsvvl" => "__builtin_ve_vl_vfmads_vsvvl", + "llvm.ve.vl.vfmads.vsvvmvl" => "__builtin_ve_vl_vfmads_vsvvmvl", + "llvm.ve.vl.vfmads.vsvvvl" => "__builtin_ve_vl_vfmads_vsvvvl", + "llvm.ve.vl.vfmads.vvsvl" => "__builtin_ve_vl_vfmads_vvsvl", + "llvm.ve.vl.vfmads.vvsvmvl" => "__builtin_ve_vl_vfmads_vvsvmvl", + "llvm.ve.vl.vfmads.vvsvvl" => "__builtin_ve_vl_vfmads_vvsvvl", + "llvm.ve.vl.vfmads.vvvvl" => "__builtin_ve_vl_vfmads_vvvvl", + "llvm.ve.vl.vfmads.vvvvmvl" => "__builtin_ve_vl_vfmads_vvvvmvl", + "llvm.ve.vl.vfmads.vvvvvl" => "__builtin_ve_vl_vfmads_vvvvvl", + "llvm.ve.vl.vfmaxd.vsvl" => "__builtin_ve_vl_vfmaxd_vsvl", + "llvm.ve.vl.vfmaxd.vsvmvl" => "__builtin_ve_vl_vfmaxd_vsvmvl", + "llvm.ve.vl.vfmaxd.vsvvl" => "__builtin_ve_vl_vfmaxd_vsvvl", + "llvm.ve.vl.vfmaxd.vvvl" => "__builtin_ve_vl_vfmaxd_vvvl", + "llvm.ve.vl.vfmaxd.vvvmvl" => "__builtin_ve_vl_vfmaxd_vvvmvl", + "llvm.ve.vl.vfmaxd.vvvvl" => "__builtin_ve_vl_vfmaxd_vvvvl", + "llvm.ve.vl.vfmaxs.vsvl" => "__builtin_ve_vl_vfmaxs_vsvl", + "llvm.ve.vl.vfmaxs.vsvmvl" => "__builtin_ve_vl_vfmaxs_vsvmvl", + "llvm.ve.vl.vfmaxs.vsvvl" => "__builtin_ve_vl_vfmaxs_vsvvl", + "llvm.ve.vl.vfmaxs.vvvl" => "__builtin_ve_vl_vfmaxs_vvvl", + "llvm.ve.vl.vfmaxs.vvvmvl" => "__builtin_ve_vl_vfmaxs_vvvmvl", + "llvm.ve.vl.vfmaxs.vvvvl" => "__builtin_ve_vl_vfmaxs_vvvvl", + "llvm.ve.vl.vfmind.vsvl" => "__builtin_ve_vl_vfmind_vsvl", + "llvm.ve.vl.vfmind.vsvmvl" => "__builtin_ve_vl_vfmind_vsvmvl", + "llvm.ve.vl.vfmind.vsvvl" => "__builtin_ve_vl_vfmind_vsvvl", + "llvm.ve.vl.vfmind.vvvl" => "__builtin_ve_vl_vfmind_vvvl", + 
"llvm.ve.vl.vfmind.vvvmvl" => "__builtin_ve_vl_vfmind_vvvmvl", + "llvm.ve.vl.vfmind.vvvvl" => "__builtin_ve_vl_vfmind_vvvvl", + "llvm.ve.vl.vfmins.vsvl" => "__builtin_ve_vl_vfmins_vsvl", + "llvm.ve.vl.vfmins.vsvmvl" => "__builtin_ve_vl_vfmins_vsvmvl", + "llvm.ve.vl.vfmins.vsvvl" => "__builtin_ve_vl_vfmins_vsvvl", + "llvm.ve.vl.vfmins.vvvl" => "__builtin_ve_vl_vfmins_vvvl", + "llvm.ve.vl.vfmins.vvvmvl" => "__builtin_ve_vl_vfmins_vvvmvl", + "llvm.ve.vl.vfmins.vvvvl" => "__builtin_ve_vl_vfmins_vvvvl", + "llvm.ve.vl.vfmkdeq.mvl" => "__builtin_ve_vl_vfmkdeq_mvl", + "llvm.ve.vl.vfmkdeq.mvml" => "__builtin_ve_vl_vfmkdeq_mvml", + "llvm.ve.vl.vfmkdeqnan.mvl" => "__builtin_ve_vl_vfmkdeqnan_mvl", + "llvm.ve.vl.vfmkdeqnan.mvml" => "__builtin_ve_vl_vfmkdeqnan_mvml", + "llvm.ve.vl.vfmkdge.mvl" => "__builtin_ve_vl_vfmkdge_mvl", + "llvm.ve.vl.vfmkdge.mvml" => "__builtin_ve_vl_vfmkdge_mvml", + "llvm.ve.vl.vfmkdgenan.mvl" => "__builtin_ve_vl_vfmkdgenan_mvl", + "llvm.ve.vl.vfmkdgenan.mvml" => "__builtin_ve_vl_vfmkdgenan_mvml", + "llvm.ve.vl.vfmkdgt.mvl" => "__builtin_ve_vl_vfmkdgt_mvl", + "llvm.ve.vl.vfmkdgt.mvml" => "__builtin_ve_vl_vfmkdgt_mvml", + "llvm.ve.vl.vfmkdgtnan.mvl" => "__builtin_ve_vl_vfmkdgtnan_mvl", + "llvm.ve.vl.vfmkdgtnan.mvml" => "__builtin_ve_vl_vfmkdgtnan_mvml", + "llvm.ve.vl.vfmkdle.mvl" => "__builtin_ve_vl_vfmkdle_mvl", + "llvm.ve.vl.vfmkdle.mvml" => "__builtin_ve_vl_vfmkdle_mvml", + "llvm.ve.vl.vfmkdlenan.mvl" => "__builtin_ve_vl_vfmkdlenan_mvl", + "llvm.ve.vl.vfmkdlenan.mvml" => "__builtin_ve_vl_vfmkdlenan_mvml", + "llvm.ve.vl.vfmkdlt.mvl" => "__builtin_ve_vl_vfmkdlt_mvl", + "llvm.ve.vl.vfmkdlt.mvml" => "__builtin_ve_vl_vfmkdlt_mvml", + "llvm.ve.vl.vfmkdltnan.mvl" => "__builtin_ve_vl_vfmkdltnan_mvl", + "llvm.ve.vl.vfmkdltnan.mvml" => "__builtin_ve_vl_vfmkdltnan_mvml", + "llvm.ve.vl.vfmkdnan.mvl" => "__builtin_ve_vl_vfmkdnan_mvl", + "llvm.ve.vl.vfmkdnan.mvml" => "__builtin_ve_vl_vfmkdnan_mvml", + "llvm.ve.vl.vfmkdne.mvl" => "__builtin_ve_vl_vfmkdne_mvl", + "llvm.ve.vl.vfmkdne.mvml" => "__builtin_ve_vl_vfmkdne_mvml", + "llvm.ve.vl.vfmkdnenan.mvl" => "__builtin_ve_vl_vfmkdnenan_mvl", + "llvm.ve.vl.vfmkdnenan.mvml" => "__builtin_ve_vl_vfmkdnenan_mvml", + "llvm.ve.vl.vfmkdnum.mvl" => "__builtin_ve_vl_vfmkdnum_mvl", + "llvm.ve.vl.vfmkdnum.mvml" => "__builtin_ve_vl_vfmkdnum_mvml", + "llvm.ve.vl.vfmklaf.ml" => "__builtin_ve_vl_vfmklaf_ml", + "llvm.ve.vl.vfmklat.ml" => "__builtin_ve_vl_vfmklat_ml", + "llvm.ve.vl.vfmkleq.mvl" => "__builtin_ve_vl_vfmkleq_mvl", + "llvm.ve.vl.vfmkleq.mvml" => "__builtin_ve_vl_vfmkleq_mvml", + "llvm.ve.vl.vfmkleqnan.mvl" => "__builtin_ve_vl_vfmkleqnan_mvl", + "llvm.ve.vl.vfmkleqnan.mvml" => "__builtin_ve_vl_vfmkleqnan_mvml", + "llvm.ve.vl.vfmklge.mvl" => "__builtin_ve_vl_vfmklge_mvl", + "llvm.ve.vl.vfmklge.mvml" => "__builtin_ve_vl_vfmklge_mvml", + "llvm.ve.vl.vfmklgenan.mvl" => "__builtin_ve_vl_vfmklgenan_mvl", + "llvm.ve.vl.vfmklgenan.mvml" => "__builtin_ve_vl_vfmklgenan_mvml", + "llvm.ve.vl.vfmklgt.mvl" => "__builtin_ve_vl_vfmklgt_mvl", + "llvm.ve.vl.vfmklgt.mvml" => "__builtin_ve_vl_vfmklgt_mvml", + "llvm.ve.vl.vfmklgtnan.mvl" => "__builtin_ve_vl_vfmklgtnan_mvl", + "llvm.ve.vl.vfmklgtnan.mvml" => "__builtin_ve_vl_vfmklgtnan_mvml", + "llvm.ve.vl.vfmklle.mvl" => "__builtin_ve_vl_vfmklle_mvl", + "llvm.ve.vl.vfmklle.mvml" => "__builtin_ve_vl_vfmklle_mvml", + "llvm.ve.vl.vfmkllenan.mvl" => "__builtin_ve_vl_vfmkllenan_mvl", + "llvm.ve.vl.vfmkllenan.mvml" => "__builtin_ve_vl_vfmkllenan_mvml", + "llvm.ve.vl.vfmkllt.mvl" => "__builtin_ve_vl_vfmkllt_mvl", + 
"llvm.ve.vl.vfmkllt.mvml" => "__builtin_ve_vl_vfmkllt_mvml", + "llvm.ve.vl.vfmklltnan.mvl" => "__builtin_ve_vl_vfmklltnan_mvl", + "llvm.ve.vl.vfmklltnan.mvml" => "__builtin_ve_vl_vfmklltnan_mvml", + "llvm.ve.vl.vfmklnan.mvl" => "__builtin_ve_vl_vfmklnan_mvl", + "llvm.ve.vl.vfmklnan.mvml" => "__builtin_ve_vl_vfmklnan_mvml", + "llvm.ve.vl.vfmklne.mvl" => "__builtin_ve_vl_vfmklne_mvl", + "llvm.ve.vl.vfmklne.mvml" => "__builtin_ve_vl_vfmklne_mvml", + "llvm.ve.vl.vfmklnenan.mvl" => "__builtin_ve_vl_vfmklnenan_mvl", + "llvm.ve.vl.vfmklnenan.mvml" => "__builtin_ve_vl_vfmklnenan_mvml", + "llvm.ve.vl.vfmklnum.mvl" => "__builtin_ve_vl_vfmklnum_mvl", + "llvm.ve.vl.vfmklnum.mvml" => "__builtin_ve_vl_vfmklnum_mvml", + "llvm.ve.vl.vfmkseq.mvl" => "__builtin_ve_vl_vfmkseq_mvl", + "llvm.ve.vl.vfmkseq.mvml" => "__builtin_ve_vl_vfmkseq_mvml", + "llvm.ve.vl.vfmkseqnan.mvl" => "__builtin_ve_vl_vfmkseqnan_mvl", + "llvm.ve.vl.vfmkseqnan.mvml" => "__builtin_ve_vl_vfmkseqnan_mvml", + "llvm.ve.vl.vfmksge.mvl" => "__builtin_ve_vl_vfmksge_mvl", + "llvm.ve.vl.vfmksge.mvml" => "__builtin_ve_vl_vfmksge_mvml", + "llvm.ve.vl.vfmksgenan.mvl" => "__builtin_ve_vl_vfmksgenan_mvl", + "llvm.ve.vl.vfmksgenan.mvml" => "__builtin_ve_vl_vfmksgenan_mvml", + "llvm.ve.vl.vfmksgt.mvl" => "__builtin_ve_vl_vfmksgt_mvl", + "llvm.ve.vl.vfmksgt.mvml" => "__builtin_ve_vl_vfmksgt_mvml", + "llvm.ve.vl.vfmksgtnan.mvl" => "__builtin_ve_vl_vfmksgtnan_mvl", + "llvm.ve.vl.vfmksgtnan.mvml" => "__builtin_ve_vl_vfmksgtnan_mvml", + "llvm.ve.vl.vfmksle.mvl" => "__builtin_ve_vl_vfmksle_mvl", + "llvm.ve.vl.vfmksle.mvml" => "__builtin_ve_vl_vfmksle_mvml", + "llvm.ve.vl.vfmkslenan.mvl" => "__builtin_ve_vl_vfmkslenan_mvl", + "llvm.ve.vl.vfmkslenan.mvml" => "__builtin_ve_vl_vfmkslenan_mvml", + "llvm.ve.vl.vfmkslt.mvl" => "__builtin_ve_vl_vfmkslt_mvl", + "llvm.ve.vl.vfmkslt.mvml" => "__builtin_ve_vl_vfmkslt_mvml", + "llvm.ve.vl.vfmksltnan.mvl" => "__builtin_ve_vl_vfmksltnan_mvl", + "llvm.ve.vl.vfmksltnan.mvml" => "__builtin_ve_vl_vfmksltnan_mvml", + "llvm.ve.vl.vfmksnan.mvl" => "__builtin_ve_vl_vfmksnan_mvl", + "llvm.ve.vl.vfmksnan.mvml" => "__builtin_ve_vl_vfmksnan_mvml", + "llvm.ve.vl.vfmksne.mvl" => "__builtin_ve_vl_vfmksne_mvl", + "llvm.ve.vl.vfmksne.mvml" => "__builtin_ve_vl_vfmksne_mvml", + "llvm.ve.vl.vfmksnenan.mvl" => "__builtin_ve_vl_vfmksnenan_mvl", + "llvm.ve.vl.vfmksnenan.mvml" => "__builtin_ve_vl_vfmksnenan_mvml", + "llvm.ve.vl.vfmksnum.mvl" => "__builtin_ve_vl_vfmksnum_mvl", + "llvm.ve.vl.vfmksnum.mvml" => "__builtin_ve_vl_vfmksnum_mvml", + "llvm.ve.vl.vfmkweq.mvl" => "__builtin_ve_vl_vfmkweq_mvl", + "llvm.ve.vl.vfmkweq.mvml" => "__builtin_ve_vl_vfmkweq_mvml", + "llvm.ve.vl.vfmkweqnan.mvl" => "__builtin_ve_vl_vfmkweqnan_mvl", + "llvm.ve.vl.vfmkweqnan.mvml" => "__builtin_ve_vl_vfmkweqnan_mvml", + "llvm.ve.vl.vfmkwge.mvl" => "__builtin_ve_vl_vfmkwge_mvl", + "llvm.ve.vl.vfmkwge.mvml" => "__builtin_ve_vl_vfmkwge_mvml", + "llvm.ve.vl.vfmkwgenan.mvl" => "__builtin_ve_vl_vfmkwgenan_mvl", + "llvm.ve.vl.vfmkwgenan.mvml" => "__builtin_ve_vl_vfmkwgenan_mvml", + "llvm.ve.vl.vfmkwgt.mvl" => "__builtin_ve_vl_vfmkwgt_mvl", + "llvm.ve.vl.vfmkwgt.mvml" => "__builtin_ve_vl_vfmkwgt_mvml", + "llvm.ve.vl.vfmkwgtnan.mvl" => "__builtin_ve_vl_vfmkwgtnan_mvl", + "llvm.ve.vl.vfmkwgtnan.mvml" => "__builtin_ve_vl_vfmkwgtnan_mvml", + "llvm.ve.vl.vfmkwle.mvl" => "__builtin_ve_vl_vfmkwle_mvl", + "llvm.ve.vl.vfmkwle.mvml" => "__builtin_ve_vl_vfmkwle_mvml", + "llvm.ve.vl.vfmkwlenan.mvl" => "__builtin_ve_vl_vfmkwlenan_mvl", + "llvm.ve.vl.vfmkwlenan.mvml" => 
"__builtin_ve_vl_vfmkwlenan_mvml", + "llvm.ve.vl.vfmkwlt.mvl" => "__builtin_ve_vl_vfmkwlt_mvl", + "llvm.ve.vl.vfmkwlt.mvml" => "__builtin_ve_vl_vfmkwlt_mvml", + "llvm.ve.vl.vfmkwltnan.mvl" => "__builtin_ve_vl_vfmkwltnan_mvl", + "llvm.ve.vl.vfmkwltnan.mvml" => "__builtin_ve_vl_vfmkwltnan_mvml", + "llvm.ve.vl.vfmkwnan.mvl" => "__builtin_ve_vl_vfmkwnan_mvl", + "llvm.ve.vl.vfmkwnan.mvml" => "__builtin_ve_vl_vfmkwnan_mvml", + "llvm.ve.vl.vfmkwne.mvl" => "__builtin_ve_vl_vfmkwne_mvl", + "llvm.ve.vl.vfmkwne.mvml" => "__builtin_ve_vl_vfmkwne_mvml", + "llvm.ve.vl.vfmkwnenan.mvl" => "__builtin_ve_vl_vfmkwnenan_mvl", + "llvm.ve.vl.vfmkwnenan.mvml" => "__builtin_ve_vl_vfmkwnenan_mvml", + "llvm.ve.vl.vfmkwnum.mvl" => "__builtin_ve_vl_vfmkwnum_mvl", + "llvm.ve.vl.vfmkwnum.mvml" => "__builtin_ve_vl_vfmkwnum_mvml", + "llvm.ve.vl.vfmsbd.vsvvl" => "__builtin_ve_vl_vfmsbd_vsvvl", + "llvm.ve.vl.vfmsbd.vsvvmvl" => "__builtin_ve_vl_vfmsbd_vsvvmvl", + "llvm.ve.vl.vfmsbd.vsvvvl" => "__builtin_ve_vl_vfmsbd_vsvvvl", + "llvm.ve.vl.vfmsbd.vvsvl" => "__builtin_ve_vl_vfmsbd_vvsvl", + "llvm.ve.vl.vfmsbd.vvsvmvl" => "__builtin_ve_vl_vfmsbd_vvsvmvl", + "llvm.ve.vl.vfmsbd.vvsvvl" => "__builtin_ve_vl_vfmsbd_vvsvvl", + "llvm.ve.vl.vfmsbd.vvvvl" => "__builtin_ve_vl_vfmsbd_vvvvl", + "llvm.ve.vl.vfmsbd.vvvvmvl" => "__builtin_ve_vl_vfmsbd_vvvvmvl", + "llvm.ve.vl.vfmsbd.vvvvvl" => "__builtin_ve_vl_vfmsbd_vvvvvl", + "llvm.ve.vl.vfmsbs.vsvvl" => "__builtin_ve_vl_vfmsbs_vsvvl", + "llvm.ve.vl.vfmsbs.vsvvmvl" => "__builtin_ve_vl_vfmsbs_vsvvmvl", + "llvm.ve.vl.vfmsbs.vsvvvl" => "__builtin_ve_vl_vfmsbs_vsvvvl", + "llvm.ve.vl.vfmsbs.vvsvl" => "__builtin_ve_vl_vfmsbs_vvsvl", + "llvm.ve.vl.vfmsbs.vvsvmvl" => "__builtin_ve_vl_vfmsbs_vvsvmvl", + "llvm.ve.vl.vfmsbs.vvsvvl" => "__builtin_ve_vl_vfmsbs_vvsvvl", + "llvm.ve.vl.vfmsbs.vvvvl" => "__builtin_ve_vl_vfmsbs_vvvvl", + "llvm.ve.vl.vfmsbs.vvvvmvl" => "__builtin_ve_vl_vfmsbs_vvvvmvl", + "llvm.ve.vl.vfmsbs.vvvvvl" => "__builtin_ve_vl_vfmsbs_vvvvvl", + "llvm.ve.vl.vfmuld.vsvl" => "__builtin_ve_vl_vfmuld_vsvl", + "llvm.ve.vl.vfmuld.vsvmvl" => "__builtin_ve_vl_vfmuld_vsvmvl", + "llvm.ve.vl.vfmuld.vsvvl" => "__builtin_ve_vl_vfmuld_vsvvl", + "llvm.ve.vl.vfmuld.vvvl" => "__builtin_ve_vl_vfmuld_vvvl", + "llvm.ve.vl.vfmuld.vvvmvl" => "__builtin_ve_vl_vfmuld_vvvmvl", + "llvm.ve.vl.vfmuld.vvvvl" => "__builtin_ve_vl_vfmuld_vvvvl", + "llvm.ve.vl.vfmuls.vsvl" => "__builtin_ve_vl_vfmuls_vsvl", + "llvm.ve.vl.vfmuls.vsvmvl" => "__builtin_ve_vl_vfmuls_vsvmvl", + "llvm.ve.vl.vfmuls.vsvvl" => "__builtin_ve_vl_vfmuls_vsvvl", + "llvm.ve.vl.vfmuls.vvvl" => "__builtin_ve_vl_vfmuls_vvvl", + "llvm.ve.vl.vfmuls.vvvmvl" => "__builtin_ve_vl_vfmuls_vvvmvl", + "llvm.ve.vl.vfmuls.vvvvl" => "__builtin_ve_vl_vfmuls_vvvvl", + "llvm.ve.vl.vfnmadd.vsvvl" => "__builtin_ve_vl_vfnmadd_vsvvl", + "llvm.ve.vl.vfnmadd.vsvvmvl" => "__builtin_ve_vl_vfnmadd_vsvvmvl", + "llvm.ve.vl.vfnmadd.vsvvvl" => "__builtin_ve_vl_vfnmadd_vsvvvl", + "llvm.ve.vl.vfnmadd.vvsvl" => "__builtin_ve_vl_vfnmadd_vvsvl", + "llvm.ve.vl.vfnmadd.vvsvmvl" => "__builtin_ve_vl_vfnmadd_vvsvmvl", + "llvm.ve.vl.vfnmadd.vvsvvl" => "__builtin_ve_vl_vfnmadd_vvsvvl", + "llvm.ve.vl.vfnmadd.vvvvl" => "__builtin_ve_vl_vfnmadd_vvvvl", + "llvm.ve.vl.vfnmadd.vvvvmvl" => "__builtin_ve_vl_vfnmadd_vvvvmvl", + "llvm.ve.vl.vfnmadd.vvvvvl" => "__builtin_ve_vl_vfnmadd_vvvvvl", + "llvm.ve.vl.vfnmads.vsvvl" => "__builtin_ve_vl_vfnmads_vsvvl", + "llvm.ve.vl.vfnmads.vsvvmvl" => "__builtin_ve_vl_vfnmads_vsvvmvl", + "llvm.ve.vl.vfnmads.vsvvvl" => "__builtin_ve_vl_vfnmads_vsvvvl", + 
"llvm.ve.vl.vfnmads.vvsvl" => "__builtin_ve_vl_vfnmads_vvsvl", + "llvm.ve.vl.vfnmads.vvsvmvl" => "__builtin_ve_vl_vfnmads_vvsvmvl", + "llvm.ve.vl.vfnmads.vvsvvl" => "__builtin_ve_vl_vfnmads_vvsvvl", + "llvm.ve.vl.vfnmads.vvvvl" => "__builtin_ve_vl_vfnmads_vvvvl", + "llvm.ve.vl.vfnmads.vvvvmvl" => "__builtin_ve_vl_vfnmads_vvvvmvl", + "llvm.ve.vl.vfnmads.vvvvvl" => "__builtin_ve_vl_vfnmads_vvvvvl", + "llvm.ve.vl.vfnmsbd.vsvvl" => "__builtin_ve_vl_vfnmsbd_vsvvl", + "llvm.ve.vl.vfnmsbd.vsvvmvl" => "__builtin_ve_vl_vfnmsbd_vsvvmvl", + "llvm.ve.vl.vfnmsbd.vsvvvl" => "__builtin_ve_vl_vfnmsbd_vsvvvl", + "llvm.ve.vl.vfnmsbd.vvsvl" => "__builtin_ve_vl_vfnmsbd_vvsvl", + "llvm.ve.vl.vfnmsbd.vvsvmvl" => "__builtin_ve_vl_vfnmsbd_vvsvmvl", + "llvm.ve.vl.vfnmsbd.vvsvvl" => "__builtin_ve_vl_vfnmsbd_vvsvvl", + "llvm.ve.vl.vfnmsbd.vvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvl", + "llvm.ve.vl.vfnmsbd.vvvvmvl" => "__builtin_ve_vl_vfnmsbd_vvvvmvl", + "llvm.ve.vl.vfnmsbd.vvvvvl" => "__builtin_ve_vl_vfnmsbd_vvvvvl", + "llvm.ve.vl.vfnmsbs.vsvvl" => "__builtin_ve_vl_vfnmsbs_vsvvl", + "llvm.ve.vl.vfnmsbs.vsvvmvl" => "__builtin_ve_vl_vfnmsbs_vsvvmvl", + "llvm.ve.vl.vfnmsbs.vsvvvl" => "__builtin_ve_vl_vfnmsbs_vsvvvl", + "llvm.ve.vl.vfnmsbs.vvsvl" => "__builtin_ve_vl_vfnmsbs_vvsvl", + "llvm.ve.vl.vfnmsbs.vvsvmvl" => "__builtin_ve_vl_vfnmsbs_vvsvmvl", + "llvm.ve.vl.vfnmsbs.vvsvvl" => "__builtin_ve_vl_vfnmsbs_vvsvvl", + "llvm.ve.vl.vfnmsbs.vvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvl", + "llvm.ve.vl.vfnmsbs.vvvvmvl" => "__builtin_ve_vl_vfnmsbs_vvvvmvl", + "llvm.ve.vl.vfnmsbs.vvvvvl" => "__builtin_ve_vl_vfnmsbs_vvvvvl", + "llvm.ve.vl.vfrmaxdfst.vvl" => "__builtin_ve_vl_vfrmaxdfst_vvl", + "llvm.ve.vl.vfrmaxdfst.vvvl" => "__builtin_ve_vl_vfrmaxdfst_vvvl", + "llvm.ve.vl.vfrmaxdlst.vvl" => "__builtin_ve_vl_vfrmaxdlst_vvl", + "llvm.ve.vl.vfrmaxdlst.vvvl" => "__builtin_ve_vl_vfrmaxdlst_vvvl", + "llvm.ve.vl.vfrmaxsfst.vvl" => "__builtin_ve_vl_vfrmaxsfst_vvl", + "llvm.ve.vl.vfrmaxsfst.vvvl" => "__builtin_ve_vl_vfrmaxsfst_vvvl", + "llvm.ve.vl.vfrmaxslst.vvl" => "__builtin_ve_vl_vfrmaxslst_vvl", + "llvm.ve.vl.vfrmaxslst.vvvl" => "__builtin_ve_vl_vfrmaxslst_vvvl", + "llvm.ve.vl.vfrmindfst.vvl" => "__builtin_ve_vl_vfrmindfst_vvl", + "llvm.ve.vl.vfrmindfst.vvvl" => "__builtin_ve_vl_vfrmindfst_vvvl", + "llvm.ve.vl.vfrmindlst.vvl" => "__builtin_ve_vl_vfrmindlst_vvl", + "llvm.ve.vl.vfrmindlst.vvvl" => "__builtin_ve_vl_vfrmindlst_vvvl", + "llvm.ve.vl.vfrminsfst.vvl" => "__builtin_ve_vl_vfrminsfst_vvl", + "llvm.ve.vl.vfrminsfst.vvvl" => "__builtin_ve_vl_vfrminsfst_vvvl", + "llvm.ve.vl.vfrminslst.vvl" => "__builtin_ve_vl_vfrminslst_vvl", + "llvm.ve.vl.vfrminslst.vvvl" => "__builtin_ve_vl_vfrminslst_vvvl", + "llvm.ve.vl.vfsqrtd.vvl" => "__builtin_ve_vl_vfsqrtd_vvl", + "llvm.ve.vl.vfsqrtd.vvvl" => "__builtin_ve_vl_vfsqrtd_vvvl", + "llvm.ve.vl.vfsqrts.vvl" => "__builtin_ve_vl_vfsqrts_vvl", + "llvm.ve.vl.vfsqrts.vvvl" => "__builtin_ve_vl_vfsqrts_vvvl", + "llvm.ve.vl.vfsubd.vsvl" => "__builtin_ve_vl_vfsubd_vsvl", + "llvm.ve.vl.vfsubd.vsvmvl" => "__builtin_ve_vl_vfsubd_vsvmvl", + "llvm.ve.vl.vfsubd.vsvvl" => "__builtin_ve_vl_vfsubd_vsvvl", + "llvm.ve.vl.vfsubd.vvvl" => "__builtin_ve_vl_vfsubd_vvvl", + "llvm.ve.vl.vfsubd.vvvmvl" => "__builtin_ve_vl_vfsubd_vvvmvl", + "llvm.ve.vl.vfsubd.vvvvl" => "__builtin_ve_vl_vfsubd_vvvvl", + "llvm.ve.vl.vfsubs.vsvl" => "__builtin_ve_vl_vfsubs_vsvl", + "llvm.ve.vl.vfsubs.vsvmvl" => "__builtin_ve_vl_vfsubs_vsvmvl", + "llvm.ve.vl.vfsubs.vsvvl" => "__builtin_ve_vl_vfsubs_vsvvl", + "llvm.ve.vl.vfsubs.vvvl" => 
"__builtin_ve_vl_vfsubs_vvvl", + "llvm.ve.vl.vfsubs.vvvmvl" => "__builtin_ve_vl_vfsubs_vvvmvl", + "llvm.ve.vl.vfsubs.vvvvl" => "__builtin_ve_vl_vfsubs_vvvvl", + "llvm.ve.vl.vfsumd.vvl" => "__builtin_ve_vl_vfsumd_vvl", + "llvm.ve.vl.vfsumd.vvml" => "__builtin_ve_vl_vfsumd_vvml", + "llvm.ve.vl.vfsums.vvl" => "__builtin_ve_vl_vfsums_vvl", + "llvm.ve.vl.vfsums.vvml" => "__builtin_ve_vl_vfsums_vvml", + "llvm.ve.vl.vgt.vvssl" => "__builtin_ve_vl_vgt_vvssl", + "llvm.ve.vl.vgt.vvssml" => "__builtin_ve_vl_vgt_vvssml", + "llvm.ve.vl.vgt.vvssmvl" => "__builtin_ve_vl_vgt_vvssmvl", + "llvm.ve.vl.vgt.vvssvl" => "__builtin_ve_vl_vgt_vvssvl", + "llvm.ve.vl.vgtlsx.vvssl" => "__builtin_ve_vl_vgtlsx_vvssl", + "llvm.ve.vl.vgtlsx.vvssml" => "__builtin_ve_vl_vgtlsx_vvssml", + "llvm.ve.vl.vgtlsx.vvssmvl" => "__builtin_ve_vl_vgtlsx_vvssmvl", + "llvm.ve.vl.vgtlsx.vvssvl" => "__builtin_ve_vl_vgtlsx_vvssvl", + "llvm.ve.vl.vgtlsxnc.vvssl" => "__builtin_ve_vl_vgtlsxnc_vvssl", + "llvm.ve.vl.vgtlsxnc.vvssml" => "__builtin_ve_vl_vgtlsxnc_vvssml", + "llvm.ve.vl.vgtlsxnc.vvssmvl" => "__builtin_ve_vl_vgtlsxnc_vvssmvl", + "llvm.ve.vl.vgtlsxnc.vvssvl" => "__builtin_ve_vl_vgtlsxnc_vvssvl", + "llvm.ve.vl.vgtlzx.vvssl" => "__builtin_ve_vl_vgtlzx_vvssl", + "llvm.ve.vl.vgtlzx.vvssml" => "__builtin_ve_vl_vgtlzx_vvssml", + "llvm.ve.vl.vgtlzx.vvssmvl" => "__builtin_ve_vl_vgtlzx_vvssmvl", + "llvm.ve.vl.vgtlzx.vvssvl" => "__builtin_ve_vl_vgtlzx_vvssvl", + "llvm.ve.vl.vgtlzxnc.vvssl" => "__builtin_ve_vl_vgtlzxnc_vvssl", + "llvm.ve.vl.vgtlzxnc.vvssml" => "__builtin_ve_vl_vgtlzxnc_vvssml", + "llvm.ve.vl.vgtlzxnc.vvssmvl" => "__builtin_ve_vl_vgtlzxnc_vvssmvl", + "llvm.ve.vl.vgtlzxnc.vvssvl" => "__builtin_ve_vl_vgtlzxnc_vvssvl", + "llvm.ve.vl.vgtnc.vvssl" => "__builtin_ve_vl_vgtnc_vvssl", + "llvm.ve.vl.vgtnc.vvssml" => "__builtin_ve_vl_vgtnc_vvssml", + "llvm.ve.vl.vgtnc.vvssmvl" => "__builtin_ve_vl_vgtnc_vvssmvl", + "llvm.ve.vl.vgtnc.vvssvl" => "__builtin_ve_vl_vgtnc_vvssvl", + "llvm.ve.vl.vgtu.vvssl" => "__builtin_ve_vl_vgtu_vvssl", + "llvm.ve.vl.vgtu.vvssml" => "__builtin_ve_vl_vgtu_vvssml", + "llvm.ve.vl.vgtu.vvssmvl" => "__builtin_ve_vl_vgtu_vvssmvl", + "llvm.ve.vl.vgtu.vvssvl" => "__builtin_ve_vl_vgtu_vvssvl", + "llvm.ve.vl.vgtunc.vvssl" => "__builtin_ve_vl_vgtunc_vvssl", + "llvm.ve.vl.vgtunc.vvssml" => "__builtin_ve_vl_vgtunc_vvssml", + "llvm.ve.vl.vgtunc.vvssmvl" => "__builtin_ve_vl_vgtunc_vvssmvl", + "llvm.ve.vl.vgtunc.vvssvl" => "__builtin_ve_vl_vgtunc_vvssvl", + "llvm.ve.vl.vld.vssl" => "__builtin_ve_vl_vld_vssl", + "llvm.ve.vl.vld.vssvl" => "__builtin_ve_vl_vld_vssvl", + "llvm.ve.vl.vld2d.vssl" => "__builtin_ve_vl_vld2d_vssl", + "llvm.ve.vl.vld2d.vssvl" => "__builtin_ve_vl_vld2d_vssvl", + "llvm.ve.vl.vld2dnc.vssl" => "__builtin_ve_vl_vld2dnc_vssl", + "llvm.ve.vl.vld2dnc.vssvl" => "__builtin_ve_vl_vld2dnc_vssvl", + "llvm.ve.vl.vldl2dsx.vssl" => "__builtin_ve_vl_vldl2dsx_vssl", + "llvm.ve.vl.vldl2dsx.vssvl" => "__builtin_ve_vl_vldl2dsx_vssvl", + "llvm.ve.vl.vldl2dsxnc.vssl" => "__builtin_ve_vl_vldl2dsxnc_vssl", + "llvm.ve.vl.vldl2dsxnc.vssvl" => "__builtin_ve_vl_vldl2dsxnc_vssvl", + "llvm.ve.vl.vldl2dzx.vssl" => "__builtin_ve_vl_vldl2dzx_vssl", + "llvm.ve.vl.vldl2dzx.vssvl" => "__builtin_ve_vl_vldl2dzx_vssvl", + "llvm.ve.vl.vldl2dzxnc.vssl" => "__builtin_ve_vl_vldl2dzxnc_vssl", + "llvm.ve.vl.vldl2dzxnc.vssvl" => "__builtin_ve_vl_vldl2dzxnc_vssvl", + "llvm.ve.vl.vldlsx.vssl" => "__builtin_ve_vl_vldlsx_vssl", + "llvm.ve.vl.vldlsx.vssvl" => "__builtin_ve_vl_vldlsx_vssvl", + "llvm.ve.vl.vldlsxnc.vssl" => 
"__builtin_ve_vl_vldlsxnc_vssl", + "llvm.ve.vl.vldlsxnc.vssvl" => "__builtin_ve_vl_vldlsxnc_vssvl", + "llvm.ve.vl.vldlzx.vssl" => "__builtin_ve_vl_vldlzx_vssl", + "llvm.ve.vl.vldlzx.vssvl" => "__builtin_ve_vl_vldlzx_vssvl", + "llvm.ve.vl.vldlzxnc.vssl" => "__builtin_ve_vl_vldlzxnc_vssl", + "llvm.ve.vl.vldlzxnc.vssvl" => "__builtin_ve_vl_vldlzxnc_vssvl", + "llvm.ve.vl.vldnc.vssl" => "__builtin_ve_vl_vldnc_vssl", + "llvm.ve.vl.vldnc.vssvl" => "__builtin_ve_vl_vldnc_vssvl", + "llvm.ve.vl.vldu.vssl" => "__builtin_ve_vl_vldu_vssl", + "llvm.ve.vl.vldu.vssvl" => "__builtin_ve_vl_vldu_vssvl", + "llvm.ve.vl.vldu2d.vssl" => "__builtin_ve_vl_vldu2d_vssl", + "llvm.ve.vl.vldu2d.vssvl" => "__builtin_ve_vl_vldu2d_vssvl", + "llvm.ve.vl.vldu2dnc.vssl" => "__builtin_ve_vl_vldu2dnc_vssl", + "llvm.ve.vl.vldu2dnc.vssvl" => "__builtin_ve_vl_vldu2dnc_vssvl", + "llvm.ve.vl.vldunc.vssl" => "__builtin_ve_vl_vldunc_vssl", + "llvm.ve.vl.vldunc.vssvl" => "__builtin_ve_vl_vldunc_vssvl", + "llvm.ve.vl.vldz.vvl" => "__builtin_ve_vl_vldz_vvl", + "llvm.ve.vl.vldz.vvmvl" => "__builtin_ve_vl_vldz_vvmvl", + "llvm.ve.vl.vldz.vvvl" => "__builtin_ve_vl_vldz_vvvl", + "llvm.ve.vl.vmaxsl.vsvl" => "__builtin_ve_vl_vmaxsl_vsvl", + "llvm.ve.vl.vmaxsl.vsvmvl" => "__builtin_ve_vl_vmaxsl_vsvmvl", + "llvm.ve.vl.vmaxsl.vsvvl" => "__builtin_ve_vl_vmaxsl_vsvvl", + "llvm.ve.vl.vmaxsl.vvvl" => "__builtin_ve_vl_vmaxsl_vvvl", + "llvm.ve.vl.vmaxsl.vvvmvl" => "__builtin_ve_vl_vmaxsl_vvvmvl", + "llvm.ve.vl.vmaxsl.vvvvl" => "__builtin_ve_vl_vmaxsl_vvvvl", + "llvm.ve.vl.vmaxswsx.vsvl" => "__builtin_ve_vl_vmaxswsx_vsvl", + "llvm.ve.vl.vmaxswsx.vsvmvl" => "__builtin_ve_vl_vmaxswsx_vsvmvl", + "llvm.ve.vl.vmaxswsx.vsvvl" => "__builtin_ve_vl_vmaxswsx_vsvvl", + "llvm.ve.vl.vmaxswsx.vvvl" => "__builtin_ve_vl_vmaxswsx_vvvl", + "llvm.ve.vl.vmaxswsx.vvvmvl" => "__builtin_ve_vl_vmaxswsx_vvvmvl", + "llvm.ve.vl.vmaxswsx.vvvvl" => "__builtin_ve_vl_vmaxswsx_vvvvl", + "llvm.ve.vl.vmaxswzx.vsvl" => "__builtin_ve_vl_vmaxswzx_vsvl", + "llvm.ve.vl.vmaxswzx.vsvmvl" => "__builtin_ve_vl_vmaxswzx_vsvmvl", + "llvm.ve.vl.vmaxswzx.vsvvl" => "__builtin_ve_vl_vmaxswzx_vsvvl", + "llvm.ve.vl.vmaxswzx.vvvl" => "__builtin_ve_vl_vmaxswzx_vvvl", + "llvm.ve.vl.vmaxswzx.vvvmvl" => "__builtin_ve_vl_vmaxswzx_vvvmvl", + "llvm.ve.vl.vmaxswzx.vvvvl" => "__builtin_ve_vl_vmaxswzx_vvvvl", + "llvm.ve.vl.vminsl.vsvl" => "__builtin_ve_vl_vminsl_vsvl", + "llvm.ve.vl.vminsl.vsvmvl" => "__builtin_ve_vl_vminsl_vsvmvl", + "llvm.ve.vl.vminsl.vsvvl" => "__builtin_ve_vl_vminsl_vsvvl", + "llvm.ve.vl.vminsl.vvvl" => "__builtin_ve_vl_vminsl_vvvl", + "llvm.ve.vl.vminsl.vvvmvl" => "__builtin_ve_vl_vminsl_vvvmvl", + "llvm.ve.vl.vminsl.vvvvl" => "__builtin_ve_vl_vminsl_vvvvl", + "llvm.ve.vl.vminswsx.vsvl" => "__builtin_ve_vl_vminswsx_vsvl", + "llvm.ve.vl.vminswsx.vsvmvl" => "__builtin_ve_vl_vminswsx_vsvmvl", + "llvm.ve.vl.vminswsx.vsvvl" => "__builtin_ve_vl_vminswsx_vsvvl", + "llvm.ve.vl.vminswsx.vvvl" => "__builtin_ve_vl_vminswsx_vvvl", + "llvm.ve.vl.vminswsx.vvvmvl" => "__builtin_ve_vl_vminswsx_vvvmvl", + "llvm.ve.vl.vminswsx.vvvvl" => "__builtin_ve_vl_vminswsx_vvvvl", + "llvm.ve.vl.vminswzx.vsvl" => "__builtin_ve_vl_vminswzx_vsvl", + "llvm.ve.vl.vminswzx.vsvmvl" => "__builtin_ve_vl_vminswzx_vsvmvl", + "llvm.ve.vl.vminswzx.vsvvl" => "__builtin_ve_vl_vminswzx_vsvvl", + "llvm.ve.vl.vminswzx.vvvl" => "__builtin_ve_vl_vminswzx_vvvl", + "llvm.ve.vl.vminswzx.vvvmvl" => "__builtin_ve_vl_vminswzx_vvvmvl", + "llvm.ve.vl.vminswzx.vvvvl" => "__builtin_ve_vl_vminswzx_vvvvl", + "llvm.ve.vl.vmrg.vsvml" => 
"__builtin_ve_vl_vmrg_vsvml", + "llvm.ve.vl.vmrg.vsvmvl" => "__builtin_ve_vl_vmrg_vsvmvl", + "llvm.ve.vl.vmrg.vvvml" => "__builtin_ve_vl_vmrg_vvvml", + "llvm.ve.vl.vmrg.vvvmvl" => "__builtin_ve_vl_vmrg_vvvmvl", + "llvm.ve.vl.vmrgw.vsvMl" => "__builtin_ve_vl_vmrgw_vsvMl", + "llvm.ve.vl.vmrgw.vsvMvl" => "__builtin_ve_vl_vmrgw_vsvMvl", + "llvm.ve.vl.vmrgw.vvvMl" => "__builtin_ve_vl_vmrgw_vvvMl", + "llvm.ve.vl.vmrgw.vvvMvl" => "__builtin_ve_vl_vmrgw_vvvMvl", + "llvm.ve.vl.vmulsl.vsvl" => "__builtin_ve_vl_vmulsl_vsvl", + "llvm.ve.vl.vmulsl.vsvmvl" => "__builtin_ve_vl_vmulsl_vsvmvl", + "llvm.ve.vl.vmulsl.vsvvl" => "__builtin_ve_vl_vmulsl_vsvvl", + "llvm.ve.vl.vmulsl.vvvl" => "__builtin_ve_vl_vmulsl_vvvl", + "llvm.ve.vl.vmulsl.vvvmvl" => "__builtin_ve_vl_vmulsl_vvvmvl", + "llvm.ve.vl.vmulsl.vvvvl" => "__builtin_ve_vl_vmulsl_vvvvl", + "llvm.ve.vl.vmulslw.vsvl" => "__builtin_ve_vl_vmulslw_vsvl", + "llvm.ve.vl.vmulslw.vsvvl" => "__builtin_ve_vl_vmulslw_vsvvl", + "llvm.ve.vl.vmulslw.vvvl" => "__builtin_ve_vl_vmulslw_vvvl", + "llvm.ve.vl.vmulslw.vvvvl" => "__builtin_ve_vl_vmulslw_vvvvl", + "llvm.ve.vl.vmulswsx.vsvl" => "__builtin_ve_vl_vmulswsx_vsvl", + "llvm.ve.vl.vmulswsx.vsvmvl" => "__builtin_ve_vl_vmulswsx_vsvmvl", + "llvm.ve.vl.vmulswsx.vsvvl" => "__builtin_ve_vl_vmulswsx_vsvvl", + "llvm.ve.vl.vmulswsx.vvvl" => "__builtin_ve_vl_vmulswsx_vvvl", + "llvm.ve.vl.vmulswsx.vvvmvl" => "__builtin_ve_vl_vmulswsx_vvvmvl", + "llvm.ve.vl.vmulswsx.vvvvl" => "__builtin_ve_vl_vmulswsx_vvvvl", + "llvm.ve.vl.vmulswzx.vsvl" => "__builtin_ve_vl_vmulswzx_vsvl", + "llvm.ve.vl.vmulswzx.vsvmvl" => "__builtin_ve_vl_vmulswzx_vsvmvl", + "llvm.ve.vl.vmulswzx.vsvvl" => "__builtin_ve_vl_vmulswzx_vsvvl", + "llvm.ve.vl.vmulswzx.vvvl" => "__builtin_ve_vl_vmulswzx_vvvl", + "llvm.ve.vl.vmulswzx.vvvmvl" => "__builtin_ve_vl_vmulswzx_vvvmvl", + "llvm.ve.vl.vmulswzx.vvvvl" => "__builtin_ve_vl_vmulswzx_vvvvl", + "llvm.ve.vl.vmulul.vsvl" => "__builtin_ve_vl_vmulul_vsvl", + "llvm.ve.vl.vmulul.vsvmvl" => "__builtin_ve_vl_vmulul_vsvmvl", + "llvm.ve.vl.vmulul.vsvvl" => "__builtin_ve_vl_vmulul_vsvvl", + "llvm.ve.vl.vmulul.vvvl" => "__builtin_ve_vl_vmulul_vvvl", + "llvm.ve.vl.vmulul.vvvmvl" => "__builtin_ve_vl_vmulul_vvvmvl", + "llvm.ve.vl.vmulul.vvvvl" => "__builtin_ve_vl_vmulul_vvvvl", + "llvm.ve.vl.vmuluw.vsvl" => "__builtin_ve_vl_vmuluw_vsvl", + "llvm.ve.vl.vmuluw.vsvmvl" => "__builtin_ve_vl_vmuluw_vsvmvl", + "llvm.ve.vl.vmuluw.vsvvl" => "__builtin_ve_vl_vmuluw_vsvvl", + "llvm.ve.vl.vmuluw.vvvl" => "__builtin_ve_vl_vmuluw_vvvl", + "llvm.ve.vl.vmuluw.vvvmvl" => "__builtin_ve_vl_vmuluw_vvvmvl", + "llvm.ve.vl.vmuluw.vvvvl" => "__builtin_ve_vl_vmuluw_vvvvl", + "llvm.ve.vl.vmv.vsvl" => "__builtin_ve_vl_vmv_vsvl", + "llvm.ve.vl.vmv.vsvmvl" => "__builtin_ve_vl_vmv_vsvmvl", + "llvm.ve.vl.vmv.vsvvl" => "__builtin_ve_vl_vmv_vsvvl", + "llvm.ve.vl.vor.vsvl" => "__builtin_ve_vl_vor_vsvl", + "llvm.ve.vl.vor.vsvmvl" => "__builtin_ve_vl_vor_vsvmvl", + "llvm.ve.vl.vor.vsvvl" => "__builtin_ve_vl_vor_vsvvl", + "llvm.ve.vl.vor.vvvl" => "__builtin_ve_vl_vor_vvvl", + "llvm.ve.vl.vor.vvvmvl" => "__builtin_ve_vl_vor_vvvmvl", + "llvm.ve.vl.vor.vvvvl" => "__builtin_ve_vl_vor_vvvvl", + "llvm.ve.vl.vpcnt.vvl" => "__builtin_ve_vl_vpcnt_vvl", + "llvm.ve.vl.vpcnt.vvmvl" => "__builtin_ve_vl_vpcnt_vvmvl", + "llvm.ve.vl.vpcnt.vvvl" => "__builtin_ve_vl_vpcnt_vvvl", + "llvm.ve.vl.vrand.vvl" => "__builtin_ve_vl_vrand_vvl", + "llvm.ve.vl.vrand.vvml" => "__builtin_ve_vl_vrand_vvml", + "llvm.ve.vl.vrcpd.vvl" => "__builtin_ve_vl_vrcpd_vvl", + "llvm.ve.vl.vrcpd.vvvl" => 
"__builtin_ve_vl_vrcpd_vvvl", + "llvm.ve.vl.vrcps.vvl" => "__builtin_ve_vl_vrcps_vvl", + "llvm.ve.vl.vrcps.vvvl" => "__builtin_ve_vl_vrcps_vvvl", + "llvm.ve.vl.vrmaxslfst.vvl" => "__builtin_ve_vl_vrmaxslfst_vvl", + "llvm.ve.vl.vrmaxslfst.vvvl" => "__builtin_ve_vl_vrmaxslfst_vvvl", + "llvm.ve.vl.vrmaxsllst.vvl" => "__builtin_ve_vl_vrmaxsllst_vvl", + "llvm.ve.vl.vrmaxsllst.vvvl" => "__builtin_ve_vl_vrmaxsllst_vvvl", + "llvm.ve.vl.vrmaxswfstsx.vvl" => "__builtin_ve_vl_vrmaxswfstsx_vvl", + "llvm.ve.vl.vrmaxswfstsx.vvvl" => "__builtin_ve_vl_vrmaxswfstsx_vvvl", + "llvm.ve.vl.vrmaxswfstzx.vvl" => "__builtin_ve_vl_vrmaxswfstzx_vvl", + "llvm.ve.vl.vrmaxswfstzx.vvvl" => "__builtin_ve_vl_vrmaxswfstzx_vvvl", + "llvm.ve.vl.vrmaxswlstsx.vvl" => "__builtin_ve_vl_vrmaxswlstsx_vvl", + "llvm.ve.vl.vrmaxswlstsx.vvvl" => "__builtin_ve_vl_vrmaxswlstsx_vvvl", + "llvm.ve.vl.vrmaxswlstzx.vvl" => "__builtin_ve_vl_vrmaxswlstzx_vvl", + "llvm.ve.vl.vrmaxswlstzx.vvvl" => "__builtin_ve_vl_vrmaxswlstzx_vvvl", + "llvm.ve.vl.vrminslfst.vvl" => "__builtin_ve_vl_vrminslfst_vvl", + "llvm.ve.vl.vrminslfst.vvvl" => "__builtin_ve_vl_vrminslfst_vvvl", + "llvm.ve.vl.vrminsllst.vvl" => "__builtin_ve_vl_vrminsllst_vvl", + "llvm.ve.vl.vrminsllst.vvvl" => "__builtin_ve_vl_vrminsllst_vvvl", + "llvm.ve.vl.vrminswfstsx.vvl" => "__builtin_ve_vl_vrminswfstsx_vvl", + "llvm.ve.vl.vrminswfstsx.vvvl" => "__builtin_ve_vl_vrminswfstsx_vvvl", + "llvm.ve.vl.vrminswfstzx.vvl" => "__builtin_ve_vl_vrminswfstzx_vvl", + "llvm.ve.vl.vrminswfstzx.vvvl" => "__builtin_ve_vl_vrminswfstzx_vvvl", + "llvm.ve.vl.vrminswlstsx.vvl" => "__builtin_ve_vl_vrminswlstsx_vvl", + "llvm.ve.vl.vrminswlstsx.vvvl" => "__builtin_ve_vl_vrminswlstsx_vvvl", + "llvm.ve.vl.vrminswlstzx.vvl" => "__builtin_ve_vl_vrminswlstzx_vvl", + "llvm.ve.vl.vrminswlstzx.vvvl" => "__builtin_ve_vl_vrminswlstzx_vvvl", + "llvm.ve.vl.vror.vvl" => "__builtin_ve_vl_vror_vvl", + "llvm.ve.vl.vror.vvml" => "__builtin_ve_vl_vror_vvml", + "llvm.ve.vl.vrsqrtd.vvl" => "__builtin_ve_vl_vrsqrtd_vvl", + "llvm.ve.vl.vrsqrtd.vvvl" => "__builtin_ve_vl_vrsqrtd_vvvl", + "llvm.ve.vl.vrsqrtdnex.vvl" => "__builtin_ve_vl_vrsqrtdnex_vvl", + "llvm.ve.vl.vrsqrtdnex.vvvl" => "__builtin_ve_vl_vrsqrtdnex_vvvl", + "llvm.ve.vl.vrsqrts.vvl" => "__builtin_ve_vl_vrsqrts_vvl", + "llvm.ve.vl.vrsqrts.vvvl" => "__builtin_ve_vl_vrsqrts_vvvl", + "llvm.ve.vl.vrsqrtsnex.vvl" => "__builtin_ve_vl_vrsqrtsnex_vvl", + "llvm.ve.vl.vrsqrtsnex.vvvl" => "__builtin_ve_vl_vrsqrtsnex_vvvl", + "llvm.ve.vl.vrxor.vvl" => "__builtin_ve_vl_vrxor_vvl", + "llvm.ve.vl.vrxor.vvml" => "__builtin_ve_vl_vrxor_vvml", + "llvm.ve.vl.vsc.vvssl" => "__builtin_ve_vl_vsc_vvssl", + "llvm.ve.vl.vsc.vvssml" => "__builtin_ve_vl_vsc_vvssml", + "llvm.ve.vl.vscl.vvssl" => "__builtin_ve_vl_vscl_vvssl", + "llvm.ve.vl.vscl.vvssml" => "__builtin_ve_vl_vscl_vvssml", + "llvm.ve.vl.vsclnc.vvssl" => "__builtin_ve_vl_vsclnc_vvssl", + "llvm.ve.vl.vsclnc.vvssml" => "__builtin_ve_vl_vsclnc_vvssml", + "llvm.ve.vl.vsclncot.vvssl" => "__builtin_ve_vl_vsclncot_vvssl", + "llvm.ve.vl.vsclncot.vvssml" => "__builtin_ve_vl_vsclncot_vvssml", + "llvm.ve.vl.vsclot.vvssl" => "__builtin_ve_vl_vsclot_vvssl", + "llvm.ve.vl.vsclot.vvssml" => "__builtin_ve_vl_vsclot_vvssml", + "llvm.ve.vl.vscnc.vvssl" => "__builtin_ve_vl_vscnc_vvssl", + "llvm.ve.vl.vscnc.vvssml" => "__builtin_ve_vl_vscnc_vvssml", + "llvm.ve.vl.vscncot.vvssl" => "__builtin_ve_vl_vscncot_vvssl", + "llvm.ve.vl.vscncot.vvssml" => "__builtin_ve_vl_vscncot_vvssml", + "llvm.ve.vl.vscot.vvssl" => "__builtin_ve_vl_vscot_vvssl", + 
"llvm.ve.vl.vscot.vvssml" => "__builtin_ve_vl_vscot_vvssml", + "llvm.ve.vl.vscu.vvssl" => "__builtin_ve_vl_vscu_vvssl", + "llvm.ve.vl.vscu.vvssml" => "__builtin_ve_vl_vscu_vvssml", + "llvm.ve.vl.vscunc.vvssl" => "__builtin_ve_vl_vscunc_vvssl", + "llvm.ve.vl.vscunc.vvssml" => "__builtin_ve_vl_vscunc_vvssml", + "llvm.ve.vl.vscuncot.vvssl" => "__builtin_ve_vl_vscuncot_vvssl", + "llvm.ve.vl.vscuncot.vvssml" => "__builtin_ve_vl_vscuncot_vvssml", + "llvm.ve.vl.vscuot.vvssl" => "__builtin_ve_vl_vscuot_vvssl", + "llvm.ve.vl.vscuot.vvssml" => "__builtin_ve_vl_vscuot_vvssml", + "llvm.ve.vl.vseq.vl" => "__builtin_ve_vl_vseq_vl", + "llvm.ve.vl.vseq.vvl" => "__builtin_ve_vl_vseq_vvl", + "llvm.ve.vl.vsfa.vvssl" => "__builtin_ve_vl_vsfa_vvssl", + "llvm.ve.vl.vsfa.vvssmvl" => "__builtin_ve_vl_vsfa_vvssmvl", + "llvm.ve.vl.vsfa.vvssvl" => "__builtin_ve_vl_vsfa_vvssvl", + "llvm.ve.vl.vshf.vvvsl" => "__builtin_ve_vl_vshf_vvvsl", + "llvm.ve.vl.vshf.vvvsvl" => "__builtin_ve_vl_vshf_vvvsvl", + "llvm.ve.vl.vslal.vvsl" => "__builtin_ve_vl_vslal_vvsl", + "llvm.ve.vl.vslal.vvsmvl" => "__builtin_ve_vl_vslal_vvsmvl", + "llvm.ve.vl.vslal.vvsvl" => "__builtin_ve_vl_vslal_vvsvl", + "llvm.ve.vl.vslal.vvvl" => "__builtin_ve_vl_vslal_vvvl", + "llvm.ve.vl.vslal.vvvmvl" => "__builtin_ve_vl_vslal_vvvmvl", + "llvm.ve.vl.vslal.vvvvl" => "__builtin_ve_vl_vslal_vvvvl", + "llvm.ve.vl.vslawsx.vvsl" => "__builtin_ve_vl_vslawsx_vvsl", + "llvm.ve.vl.vslawsx.vvsmvl" => "__builtin_ve_vl_vslawsx_vvsmvl", + "llvm.ve.vl.vslawsx.vvsvl" => "__builtin_ve_vl_vslawsx_vvsvl", + "llvm.ve.vl.vslawsx.vvvl" => "__builtin_ve_vl_vslawsx_vvvl", + "llvm.ve.vl.vslawsx.vvvmvl" => "__builtin_ve_vl_vslawsx_vvvmvl", + "llvm.ve.vl.vslawsx.vvvvl" => "__builtin_ve_vl_vslawsx_vvvvl", + "llvm.ve.vl.vslawzx.vvsl" => "__builtin_ve_vl_vslawzx_vvsl", + "llvm.ve.vl.vslawzx.vvsmvl" => "__builtin_ve_vl_vslawzx_vvsmvl", + "llvm.ve.vl.vslawzx.vvsvl" => "__builtin_ve_vl_vslawzx_vvsvl", + "llvm.ve.vl.vslawzx.vvvl" => "__builtin_ve_vl_vslawzx_vvvl", + "llvm.ve.vl.vslawzx.vvvmvl" => "__builtin_ve_vl_vslawzx_vvvmvl", + "llvm.ve.vl.vslawzx.vvvvl" => "__builtin_ve_vl_vslawzx_vvvvl", + "llvm.ve.vl.vsll.vvsl" => "__builtin_ve_vl_vsll_vvsl", + "llvm.ve.vl.vsll.vvsmvl" => "__builtin_ve_vl_vsll_vvsmvl", + "llvm.ve.vl.vsll.vvsvl" => "__builtin_ve_vl_vsll_vvsvl", + "llvm.ve.vl.vsll.vvvl" => "__builtin_ve_vl_vsll_vvvl", + "llvm.ve.vl.vsll.vvvmvl" => "__builtin_ve_vl_vsll_vvvmvl", + "llvm.ve.vl.vsll.vvvvl" => "__builtin_ve_vl_vsll_vvvvl", + "llvm.ve.vl.vsral.vvsl" => "__builtin_ve_vl_vsral_vvsl", + "llvm.ve.vl.vsral.vvsmvl" => "__builtin_ve_vl_vsral_vvsmvl", + "llvm.ve.vl.vsral.vvsvl" => "__builtin_ve_vl_vsral_vvsvl", + "llvm.ve.vl.vsral.vvvl" => "__builtin_ve_vl_vsral_vvvl", + "llvm.ve.vl.vsral.vvvmvl" => "__builtin_ve_vl_vsral_vvvmvl", + "llvm.ve.vl.vsral.vvvvl" => "__builtin_ve_vl_vsral_vvvvl", + "llvm.ve.vl.vsrawsx.vvsl" => "__builtin_ve_vl_vsrawsx_vvsl", + "llvm.ve.vl.vsrawsx.vvsmvl" => "__builtin_ve_vl_vsrawsx_vvsmvl", + "llvm.ve.vl.vsrawsx.vvsvl" => "__builtin_ve_vl_vsrawsx_vvsvl", + "llvm.ve.vl.vsrawsx.vvvl" => "__builtin_ve_vl_vsrawsx_vvvl", + "llvm.ve.vl.vsrawsx.vvvmvl" => "__builtin_ve_vl_vsrawsx_vvvmvl", + "llvm.ve.vl.vsrawsx.vvvvl" => "__builtin_ve_vl_vsrawsx_vvvvl", + "llvm.ve.vl.vsrawzx.vvsl" => "__builtin_ve_vl_vsrawzx_vvsl", + "llvm.ve.vl.vsrawzx.vvsmvl" => "__builtin_ve_vl_vsrawzx_vvsmvl", + "llvm.ve.vl.vsrawzx.vvsvl" => "__builtin_ve_vl_vsrawzx_vvsvl", + "llvm.ve.vl.vsrawzx.vvvl" => "__builtin_ve_vl_vsrawzx_vvvl", + "llvm.ve.vl.vsrawzx.vvvmvl" => 
"__builtin_ve_vl_vsrawzx_vvvmvl", + "llvm.ve.vl.vsrawzx.vvvvl" => "__builtin_ve_vl_vsrawzx_vvvvl", + "llvm.ve.vl.vsrl.vvsl" => "__builtin_ve_vl_vsrl_vvsl", + "llvm.ve.vl.vsrl.vvsmvl" => "__builtin_ve_vl_vsrl_vvsmvl", + "llvm.ve.vl.vsrl.vvsvl" => "__builtin_ve_vl_vsrl_vvsvl", + "llvm.ve.vl.vsrl.vvvl" => "__builtin_ve_vl_vsrl_vvvl", + "llvm.ve.vl.vsrl.vvvmvl" => "__builtin_ve_vl_vsrl_vvvmvl", + "llvm.ve.vl.vsrl.vvvvl" => "__builtin_ve_vl_vsrl_vvvvl", + "llvm.ve.vl.vst.vssl" => "__builtin_ve_vl_vst_vssl", + "llvm.ve.vl.vst.vssml" => "__builtin_ve_vl_vst_vssml", + "llvm.ve.vl.vst2d.vssl" => "__builtin_ve_vl_vst2d_vssl", + "llvm.ve.vl.vst2d.vssml" => "__builtin_ve_vl_vst2d_vssml", + "llvm.ve.vl.vst2dnc.vssl" => "__builtin_ve_vl_vst2dnc_vssl", + "llvm.ve.vl.vst2dnc.vssml" => "__builtin_ve_vl_vst2dnc_vssml", + "llvm.ve.vl.vst2dncot.vssl" => "__builtin_ve_vl_vst2dncot_vssl", + "llvm.ve.vl.vst2dncot.vssml" => "__builtin_ve_vl_vst2dncot_vssml", + "llvm.ve.vl.vst2dot.vssl" => "__builtin_ve_vl_vst2dot_vssl", + "llvm.ve.vl.vst2dot.vssml" => "__builtin_ve_vl_vst2dot_vssml", + "llvm.ve.vl.vstl.vssl" => "__builtin_ve_vl_vstl_vssl", + "llvm.ve.vl.vstl.vssml" => "__builtin_ve_vl_vstl_vssml", + "llvm.ve.vl.vstl2d.vssl" => "__builtin_ve_vl_vstl2d_vssl", + "llvm.ve.vl.vstl2d.vssml" => "__builtin_ve_vl_vstl2d_vssml", + "llvm.ve.vl.vstl2dnc.vssl" => "__builtin_ve_vl_vstl2dnc_vssl", + "llvm.ve.vl.vstl2dnc.vssml" => "__builtin_ve_vl_vstl2dnc_vssml", + "llvm.ve.vl.vstl2dncot.vssl" => "__builtin_ve_vl_vstl2dncot_vssl", + "llvm.ve.vl.vstl2dncot.vssml" => "__builtin_ve_vl_vstl2dncot_vssml", + "llvm.ve.vl.vstl2dot.vssl" => "__builtin_ve_vl_vstl2dot_vssl", + "llvm.ve.vl.vstl2dot.vssml" => "__builtin_ve_vl_vstl2dot_vssml", + "llvm.ve.vl.vstlnc.vssl" => "__builtin_ve_vl_vstlnc_vssl", + "llvm.ve.vl.vstlnc.vssml" => "__builtin_ve_vl_vstlnc_vssml", + "llvm.ve.vl.vstlncot.vssl" => "__builtin_ve_vl_vstlncot_vssl", + "llvm.ve.vl.vstlncot.vssml" => "__builtin_ve_vl_vstlncot_vssml", + "llvm.ve.vl.vstlot.vssl" => "__builtin_ve_vl_vstlot_vssl", + "llvm.ve.vl.vstlot.vssml" => "__builtin_ve_vl_vstlot_vssml", + "llvm.ve.vl.vstnc.vssl" => "__builtin_ve_vl_vstnc_vssl", + "llvm.ve.vl.vstnc.vssml" => "__builtin_ve_vl_vstnc_vssml", + "llvm.ve.vl.vstncot.vssl" => "__builtin_ve_vl_vstncot_vssl", + "llvm.ve.vl.vstncot.vssml" => "__builtin_ve_vl_vstncot_vssml", + "llvm.ve.vl.vstot.vssl" => "__builtin_ve_vl_vstot_vssl", + "llvm.ve.vl.vstot.vssml" => "__builtin_ve_vl_vstot_vssml", + "llvm.ve.vl.vstu.vssl" => "__builtin_ve_vl_vstu_vssl", + "llvm.ve.vl.vstu.vssml" => "__builtin_ve_vl_vstu_vssml", + "llvm.ve.vl.vstu2d.vssl" => "__builtin_ve_vl_vstu2d_vssl", + "llvm.ve.vl.vstu2d.vssml" => "__builtin_ve_vl_vstu2d_vssml", + "llvm.ve.vl.vstu2dnc.vssl" => "__builtin_ve_vl_vstu2dnc_vssl", + "llvm.ve.vl.vstu2dnc.vssml" => "__builtin_ve_vl_vstu2dnc_vssml", + "llvm.ve.vl.vstu2dncot.vssl" => "__builtin_ve_vl_vstu2dncot_vssl", + "llvm.ve.vl.vstu2dncot.vssml" => "__builtin_ve_vl_vstu2dncot_vssml", + "llvm.ve.vl.vstu2dot.vssl" => "__builtin_ve_vl_vstu2dot_vssl", + "llvm.ve.vl.vstu2dot.vssml" => "__builtin_ve_vl_vstu2dot_vssml", + "llvm.ve.vl.vstunc.vssl" => "__builtin_ve_vl_vstunc_vssl", + "llvm.ve.vl.vstunc.vssml" => "__builtin_ve_vl_vstunc_vssml", + "llvm.ve.vl.vstuncot.vssl" => "__builtin_ve_vl_vstuncot_vssl", + "llvm.ve.vl.vstuncot.vssml" => "__builtin_ve_vl_vstuncot_vssml", + "llvm.ve.vl.vstuot.vssl" => "__builtin_ve_vl_vstuot_vssl", + "llvm.ve.vl.vstuot.vssml" => "__builtin_ve_vl_vstuot_vssml", + "llvm.ve.vl.vsubsl.vsvl" => "__builtin_ve_vl_vsubsl_vsvl", 
+ "llvm.ve.vl.vsubsl.vsvmvl" => "__builtin_ve_vl_vsubsl_vsvmvl", + "llvm.ve.vl.vsubsl.vsvvl" => "__builtin_ve_vl_vsubsl_vsvvl", + "llvm.ve.vl.vsubsl.vvvl" => "__builtin_ve_vl_vsubsl_vvvl", + "llvm.ve.vl.vsubsl.vvvmvl" => "__builtin_ve_vl_vsubsl_vvvmvl", + "llvm.ve.vl.vsubsl.vvvvl" => "__builtin_ve_vl_vsubsl_vvvvl", + "llvm.ve.vl.vsubswsx.vsvl" => "__builtin_ve_vl_vsubswsx_vsvl", + "llvm.ve.vl.vsubswsx.vsvmvl" => "__builtin_ve_vl_vsubswsx_vsvmvl", + "llvm.ve.vl.vsubswsx.vsvvl" => "__builtin_ve_vl_vsubswsx_vsvvl", + "llvm.ve.vl.vsubswsx.vvvl" => "__builtin_ve_vl_vsubswsx_vvvl", + "llvm.ve.vl.vsubswsx.vvvmvl" => "__builtin_ve_vl_vsubswsx_vvvmvl", + "llvm.ve.vl.vsubswsx.vvvvl" => "__builtin_ve_vl_vsubswsx_vvvvl", + "llvm.ve.vl.vsubswzx.vsvl" => "__builtin_ve_vl_vsubswzx_vsvl", + "llvm.ve.vl.vsubswzx.vsvmvl" => "__builtin_ve_vl_vsubswzx_vsvmvl", + "llvm.ve.vl.vsubswzx.vsvvl" => "__builtin_ve_vl_vsubswzx_vsvvl", + "llvm.ve.vl.vsubswzx.vvvl" => "__builtin_ve_vl_vsubswzx_vvvl", + "llvm.ve.vl.vsubswzx.vvvmvl" => "__builtin_ve_vl_vsubswzx_vvvmvl", + "llvm.ve.vl.vsubswzx.vvvvl" => "__builtin_ve_vl_vsubswzx_vvvvl", + "llvm.ve.vl.vsubul.vsvl" => "__builtin_ve_vl_vsubul_vsvl", + "llvm.ve.vl.vsubul.vsvmvl" => "__builtin_ve_vl_vsubul_vsvmvl", + "llvm.ve.vl.vsubul.vsvvl" => "__builtin_ve_vl_vsubul_vsvvl", + "llvm.ve.vl.vsubul.vvvl" => "__builtin_ve_vl_vsubul_vvvl", + "llvm.ve.vl.vsubul.vvvmvl" => "__builtin_ve_vl_vsubul_vvvmvl", + "llvm.ve.vl.vsubul.vvvvl" => "__builtin_ve_vl_vsubul_vvvvl", + "llvm.ve.vl.vsubuw.vsvl" => "__builtin_ve_vl_vsubuw_vsvl", + "llvm.ve.vl.vsubuw.vsvmvl" => "__builtin_ve_vl_vsubuw_vsvmvl", + "llvm.ve.vl.vsubuw.vsvvl" => "__builtin_ve_vl_vsubuw_vsvvl", + "llvm.ve.vl.vsubuw.vvvl" => "__builtin_ve_vl_vsubuw_vvvl", + "llvm.ve.vl.vsubuw.vvvmvl" => "__builtin_ve_vl_vsubuw_vvvmvl", + "llvm.ve.vl.vsubuw.vvvvl" => "__builtin_ve_vl_vsubuw_vvvvl", + "llvm.ve.vl.vsuml.vvl" => "__builtin_ve_vl_vsuml_vvl", + "llvm.ve.vl.vsuml.vvml" => "__builtin_ve_vl_vsuml_vvml", + "llvm.ve.vl.vsumwsx.vvl" => "__builtin_ve_vl_vsumwsx_vvl", + "llvm.ve.vl.vsumwsx.vvml" => "__builtin_ve_vl_vsumwsx_vvml", + "llvm.ve.vl.vsumwzx.vvl" => "__builtin_ve_vl_vsumwzx_vvl", + "llvm.ve.vl.vsumwzx.vvml" => "__builtin_ve_vl_vsumwzx_vvml", + "llvm.ve.vl.vxor.vsvl" => "__builtin_ve_vl_vxor_vsvl", + "llvm.ve.vl.vxor.vsvmvl" => "__builtin_ve_vl_vxor_vsvmvl", + "llvm.ve.vl.vxor.vsvvl" => "__builtin_ve_vl_vxor_vsvvl", + "llvm.ve.vl.vxor.vvvl" => "__builtin_ve_vl_vxor_vvvl", + "llvm.ve.vl.vxor.vvvmvl" => "__builtin_ve_vl_vxor_vvvmvl", + "llvm.ve.vl.vxor.vvvvl" => "__builtin_ve_vl_vxor_vvvvl", + "llvm.ve.vl.xorm.MMM" => "__builtin_ve_vl_xorm_MMM", + "llvm.ve.vl.xorm.mmm" => "__builtin_ve_vl_xorm_mmm", // x86 "llvm.x86.3dnow.pavgusb" => "__builtin_ia32_pavgusb", "llvm.x86.3dnow.pf2id" => "__builtin_ia32_pf2id", @@ -3430,6 +5706,10 @@ match name { "llvm.x86.3dnowa.pfnacc" => "__builtin_ia32_pfnacc", "llvm.x86.3dnowa.pfpnacc" => "__builtin_ia32_pfpnacc", "llvm.x86.3dnowa.pi2fw" => "__builtin_ia32_pi2fw", + "llvm.x86.aadd32" => "__builtin_ia32_aadd32", + "llvm.x86.aadd64" => "__builtin_ia32_aadd64", + "llvm.x86.aand32" => "__builtin_ia32_aand32", + "llvm.x86.aand64" => "__builtin_ia32_aand64", "llvm.x86.addcarry.u32" => "__builtin_ia32_addcarry_u32", "llvm.x86.addcarry.u64" => "__builtin_ia32_addcarry_u64", "llvm.x86.addcarryx.u32" => "__builtin_ia32_addcarryx_u32", @@ -3448,6 +5728,8 @@ match name { "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_aesenclast512", "llvm.x86.aesni.aesimc" => "__builtin_ia32_aesimc128", 
"llvm.x86.aesni.aeskeygenassist" => "__builtin_ia32_aeskeygenassist128", + "llvm.x86.aor32" => "__builtin_ia32_aor32", + "llvm.x86.aor64" => "__builtin_ia32_aor64", "llvm.x86.avx.addsub.pd.256" => "__builtin_ia32_addsubpd256", "llvm.x86.avx.addsub.ps.256" => "__builtin_ia32_addsubps256", "llvm.x86.avx.blend.pd.256" => "__builtin_ia32_blendpd256", @@ -3660,6 +5942,18 @@ match name { "llvm.x86.avx2.vbroadcast.ss.ps.256" => "__builtin_ia32_vbroadcastss_ps256", "llvm.x86.avx2.vextracti128" => "__builtin_ia32_extract128i256", "llvm.x86.avx2.vinserti128" => "__builtin_ia32_insert128i256", + "llvm.x86.avx2.vpdpbssd.128" => "__builtin_ia32_vpdpbssd128", + "llvm.x86.avx2.vpdpbssd.256" => "__builtin_ia32_vpdpbssd256", + "llvm.x86.avx2.vpdpbssds.128" => "__builtin_ia32_vpdpbssds128", + "llvm.x86.avx2.vpdpbssds.256" => "__builtin_ia32_vpdpbssds256", + "llvm.x86.avx2.vpdpbsud.128" => "__builtin_ia32_vpdpbsud128", + "llvm.x86.avx2.vpdpbsud.256" => "__builtin_ia32_vpdpbsud256", + "llvm.x86.avx2.vpdpbsuds.128" => "__builtin_ia32_vpdpbsuds128", + "llvm.x86.avx2.vpdpbsuds.256" => "__builtin_ia32_vpdpbsuds256", + "llvm.x86.avx2.vpdpbuud.128" => "__builtin_ia32_vpdpbuud128", + "llvm.x86.avx2.vpdpbuud.256" => "__builtin_ia32_vpdpbuud256", + "llvm.x86.avx2.vpdpbuuds.128" => "__builtin_ia32_vpdpbuuds128", + "llvm.x86.avx2.vpdpbuuds.256" => "__builtin_ia32_vpdpbuuds256", "llvm.x86.avx2.vperm2i128" => "__builtin_ia32_permti256", "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512", "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512", @@ -3779,8 +6073,8 @@ match name { "llvm.x86.avx512.mask.add.ps.128" => "__builtin_ia32_addps128_mask", "llvm.x86.avx512.mask.add.ps.256" => "__builtin_ia32_addps256_mask", "llvm.x86.avx512.mask.add.ps.512" => "__builtin_ia32_addps512_mask", - "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask", - "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask", "llvm.x86.avx512.mask.and.pd.128" => "__builtin_ia32_andpd128_mask", "llvm.x86.avx512.mask.and.pd.256" => "__builtin_ia32_andpd256_mask", "llvm.x86.avx512.mask.and.pd.512" => "__builtin_ia32_andpd512_mask", @@ -3894,8 +6188,8 @@ match name { "llvm.x86.avx512.mask.cvtqq2ps.128" => "__builtin_ia32_cvtqq2ps128_mask", "llvm.x86.avx512.mask.cvtqq2ps.256" => "__builtin_ia32_cvtqq2ps256_mask", "llvm.x86.avx512.mask.cvtqq2ps.512" => "__builtin_ia32_cvtqq2ps512_mask", - "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask", - "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask", "llvm.x86.avx512.mask.cvttpd2dq.128" => "__builtin_ia32_cvttpd2dq128_mask", "llvm.x86.avx512.mask.cvttpd2dq.256" => "__builtin_ia32_cvttpd2dq256_mask", "llvm.x86.avx512.mask.cvttpd2dq.512" => "__builtin_ia32_cvttpd2dq512_mask", @@ -3941,8 +6235,8 @@ match name { "llvm.x86.avx512.mask.div.ps.128" => "__builtin_ia32_divps_mask", "llvm.x86.avx512.mask.div.ps.256" => "__builtin_ia32_divps256_mask", "llvm.x86.avx512.mask.div.ps.512" => "__builtin_ia32_divps512_mask", - "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask", - 
"llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask", "llvm.x86.avx512.mask.expand.d.128" => "__builtin_ia32_expandsi128_mask", "llvm.x86.avx512.mask.expand.d.256" => "__builtin_ia32_expandsi256_mask", "llvm.x86.avx512.mask.expand.d.512" => "__builtin_ia32_expandsi512_mask", @@ -3989,16 +6283,16 @@ match name { "llvm.x86.avx512.mask.getexp.ps.128" => "__builtin_ia32_getexpps128_mask", "llvm.x86.avx512.mask.getexp.ps.256" => "__builtin_ia32_getexpps256_mask", "llvm.x86.avx512.mask.getexp.ps.512" => "__builtin_ia32_getexpps512_mask", - "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask", - "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask", "llvm.x86.avx512.mask.getmant.pd.128" => "__builtin_ia32_getmantpd128_mask", "llvm.x86.avx512.mask.getmant.pd.256" => "__builtin_ia32_getmantpd256_mask", "llvm.x86.avx512.mask.getmant.pd.512" => "__builtin_ia32_getmantpd512_mask", "llvm.x86.avx512.mask.getmant.ps.128" => "__builtin_ia32_getmantps128_mask", "llvm.x86.avx512.mask.getmant.ps.256" => "__builtin_ia32_getmantps256_mask", "llvm.x86.avx512.mask.getmant.ps.512" => "__builtin_ia32_getmantps512_mask", - "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask", - "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask", "llvm.x86.avx512.mask.insertf32x4.256" => "__builtin_ia32_insertf32x4_256_mask", "llvm.x86.avx512.mask.insertf32x4.512" => "__builtin_ia32_insertf32x4_mask", "llvm.x86.avx512.mask.insertf32x8.512" => "__builtin_ia32_insertf32x8_mask", @@ -4023,16 +6317,16 @@ match name { "llvm.x86.avx512.mask.max.ps.128" => "__builtin_ia32_maxps_mask", "llvm.x86.avx512.mask.max.ps.256" => "__builtin_ia32_maxps256_mask", "llvm.x86.avx512.mask.max.ps.512" => "__builtin_ia32_maxps512_mask", - "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask", - "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask", "llvm.x86.avx512.mask.min.pd.128" => "__builtin_ia32_minpd_mask", "llvm.x86.avx512.mask.min.pd.256" => "__builtin_ia32_minpd256_mask", "llvm.x86.avx512.mask.min.pd.512" => "__builtin_ia32_minpd512_mask", "llvm.x86.avx512.mask.min.ps.128" => "__builtin_ia32_minps_mask", "llvm.x86.avx512.mask.min.ps.256" => "__builtin_ia32_minps256_mask", "llvm.x86.avx512.mask.min.ps.512" => "__builtin_ia32_minps512_mask", - "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask", - "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask", 
"llvm.x86.avx512.mask.move.sd" => "__builtin_ia32_movsd_mask", "llvm.x86.avx512.mask.move.ss" => "__builtin_ia32_movss_mask", "llvm.x86.avx512.mask.mul.pd.128" => "__builtin_ia32_mulpd_mask", @@ -4041,8 +6335,8 @@ match name { "llvm.x86.avx512.mask.mul.ps.128" => "__builtin_ia32_mulps_mask", "llvm.x86.avx512.mask.mul.ps.256" => "__builtin_ia32_mulps256_mask", "llvm.x86.avx512.mask.mul.ps.512" => "__builtin_ia32_mulps512_mask", - "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask", - "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask", "llvm.x86.avx512.mask.or.pd.128" => "__builtin_ia32_orpd128_mask", "llvm.x86.avx512.mask.or.pd.256" => "__builtin_ia32_orpd256_mask", "llvm.x86.avx512.mask.or.pd.512" => "__builtin_ia32_orpd512_mask", @@ -4527,8 +6821,8 @@ match name { "llvm.x86.avx512.mask.range.ps.128" => "__builtin_ia32_rangeps128_mask", "llvm.x86.avx512.mask.range.ps.256" => "__builtin_ia32_rangeps256_mask", "llvm.x86.avx512.mask.range.ps.512" => "__builtin_ia32_rangeps512_mask", - "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask", - "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask", "llvm.x86.avx512.mask.reduce.pd.128" => "__builtin_ia32_reducepd128_mask", "llvm.x86.avx512.mask.reduce.pd.256" => "__builtin_ia32_reducepd256_mask", "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask", @@ -4543,16 +6837,16 @@ match name { "llvm.x86.avx512.mask.rndscale.ps.128" => "__builtin_ia32_rndscaleps_128_mask", "llvm.x86.avx512.mask.rndscale.ps.256" => "__builtin_ia32_rndscaleps_256_mask", "llvm.x86.avx512.mask.rndscale.ps.512" => "__builtin_ia32_rndscaleps_mask", - "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask", - "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask", "llvm.x86.avx512.mask.scalef.pd.128" => "__builtin_ia32_scalefpd128_mask", "llvm.x86.avx512.mask.scalef.pd.256" => "__builtin_ia32_scalefpd256_mask", "llvm.x86.avx512.mask.scalef.pd.512" => "__builtin_ia32_scalefpd512_mask", "llvm.x86.avx512.mask.scalef.ps.128" => "__builtin_ia32_scalefps128_mask", "llvm.x86.avx512.mask.scalef.ps.256" => "__builtin_ia32_scalefps256_mask", "llvm.x86.avx512.mask.scalef.ps.512" => "__builtin_ia32_scalefps512_mask", - "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask", - "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask", "llvm.x86.avx512.mask.shuf.f32x4" => "__builtin_ia32_shuf_f32x4_mask", "llvm.x86.avx512.mask.shuf.f32x4.256" => "__builtin_ia32_shuf_f32x4_256_mask", "llvm.x86.avx512.mask.shuf.f64x2" => "__builtin_ia32_shuf_f64x2_mask", @@ -4573,8 +6867,8 @@ match name { 
"llvm.x86.avx512.mask.sqrt.ps.128" => "__builtin_ia32_sqrtps128_mask", "llvm.x86.avx512.mask.sqrt.ps.256" => "__builtin_ia32_sqrtps256_mask", "llvm.x86.avx512.mask.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask", - "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask", - "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask", "llvm.x86.avx512.mask.store.ss" => "__builtin_ia32_storess_mask", "llvm.x86.avx512.mask.storeu.d.512" => "__builtin_ia32_storedqusi512_mask", "llvm.x86.avx512.mask.storeu.pd.512" => "__builtin_ia32_storeupd512_mask", @@ -4586,8 +6880,8 @@ match name { "llvm.x86.avx512.mask.sub.ps.128" => "__builtin_ia32_subps128_mask", "llvm.x86.avx512.mask.sub.ps.256" => "__builtin_ia32_subps256_mask", "llvm.x86.avx512.mask.sub.ps.512" => "__builtin_ia32_subps512_mask", - "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask", - "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask", "llvm.x86.avx512.mask.valign.d.128" => "__builtin_ia32_alignd128_mask", "llvm.x86.avx512.mask.valign.d.256" => "__builtin_ia32_alignd256_mask", "llvm.x86.avx512.mask.valign.d.512" => "__builtin_ia32_alignd512_mask", @@ -4905,9 +7199,9 @@ match name { "llvm.x86.avx512.rcp14.ss" => "__builtin_ia32_rcp14ss_mask", "llvm.x86.avx512.rcp28.pd" => "__builtin_ia32_rcp28pd_mask", "llvm.x86.avx512.rcp28.ps" => "__builtin_ia32_rcp28ps_mask", - "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask", - "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask", "llvm.x86.avx512.rndscale.sd" => "__builtin_ia32_rndscalesd", "llvm.x86.avx512.rndscale.ss" => "__builtin_ia32_rndscaless", @@ -4921,9 +7215,9 @@ match name { "llvm.x86.avx512.rsqrt14.ss" => "__builtin_ia32_rsqrt14ss_mask", "llvm.x86.avx512.rsqrt28.pd" => "__builtin_ia32_rsqrt28pd_mask", "llvm.x86.avx512.rsqrt28.ps" => "__builtin_ia32_rsqrt28ps_mask", - "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask", - "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask", // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask", "llvm.x86.avx512.scatter.dpd.512" => "__builtin_ia32_scattersiv8df", "llvm.x86.avx512.scatter.dpi.512" => "__builtin_ia32_scattersiv16si", @@ -5021,21 +7315,21 @@ match name { "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_512", "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512", "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512", - "llvm.x86.avx512fp16.mask.add.sh.round" => 
"__builtin_ia32_addsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_round_mask", "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask", - "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask", "llvm.x86.avx512fp16.mask.fpclass.sh" => "__builtin_ia32_fpclasssh_mask", "llvm.x86.avx512fp16.mask.getexp.ph.128" => "__builtin_ia32_getexpph128_mask", "llvm.x86.avx512fp16.mask.getexp.ph.256" => "__builtin_ia32_getexpph256_mask", "llvm.x86.avx512fp16.mask.getexp.ph.512" => "__builtin_ia32_getexpph512_mask", - "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask", "llvm.x86.avx512fp16.mask.getmant.ph.128" => "__builtin_ia32_getmantph128_mask", "llvm.x86.avx512fp16.mask.getmant.ph.256" => "__builtin_ia32_getmantph256_mask", "llvm.x86.avx512fp16.mask.getmant.ph.512" => "__builtin_ia32_getmantph512_mask", - "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask", - "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask", - "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask", - "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask", "llvm.x86.avx512fp16.mask.rcp.ph.128" => "__builtin_ia32_rcpph128_mask", "llvm.x86.avx512fp16.mask.rcp.ph.256" => "__builtin_ia32_rcpph256_mask", "llvm.x86.avx512fp16.mask.rcp.ph.512" => "__builtin_ia32_rcpph512_mask", @@ -5047,7 +7341,7 @@ match name { "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph_128_mask", "llvm.x86.avx512fp16.mask.rndscale.ph.256" => "__builtin_ia32_rndscaleph_256_mask", "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph_mask", - "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask", "llvm.x86.avx512fp16.mask.rsqrt.ph.128" => "__builtin_ia32_rsqrtph128_mask", "llvm.x86.avx512fp16.mask.rsqrt.ph.256" => "__builtin_ia32_rsqrtph256_mask", "llvm.x86.avx512fp16.mask.rsqrt.ph.512" => "__builtin_ia32_rsqrtph512_mask", @@ -5055,8 +7349,8 @@ match name { "llvm.x86.avx512fp16.mask.scalef.ph.128" => "__builtin_ia32_scalefph128_mask", "llvm.x86.avx512fp16.mask.scalef.ph.256" => "__builtin_ia32_scalefph256_mask", "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask", - "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask", - "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask", "llvm.x86.avx512fp16.mask.vcvtdq2ph.128" => "__builtin_ia32_vcvtdq2ph128_mask", 
"llvm.x86.avx512fp16.mask.vcvtpd2ph.128" => "__builtin_ia32_vcvtpd2ph128_mask", "llvm.x86.avx512fp16.mask.vcvtpd2ph.256" => "__builtin_ia32_vcvtpd2ph256_mask", @@ -5090,10 +7384,10 @@ match name { "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask", "llvm.x86.avx512fp16.mask.vcvtqq2ph.128" => "__builtin_ia32_vcvtqq2ph128_mask", "llvm.x86.avx512fp16.mask.vcvtqq2ph.256" => "__builtin_ia32_vcvtqq2ph256_mask", - "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask", - "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask", - "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_round_mask", - "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_round_mask", + // [INVALID CONVERSION]: "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask", "llvm.x86.avx512fp16.mask.vcvttph2dq.128" => "__builtin_ia32_vcvttph2dq128_mask", "llvm.x86.avx512fp16.mask.vcvttph2dq.256" => "__builtin_ia32_vcvttph2dq256_mask", "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask", @@ -5162,6 +7456,8 @@ match name { "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi642sh", "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph", "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256", + "llvm.x86.axor32" => "__builtin_ia32_axor32", + "llvm.x86.axor64" => "__builtin_ia32_axor64", "llvm.x86.bmi.bextr.32" => "__builtin_ia32_bextr_u32", "llvm.x86.bmi.bextr.64" => "__builtin_ia32_bextr_u64", "llvm.x86.bmi.bzhi.32" => "__builtin_ia32_bzhi_si", @@ -5176,6 +7472,8 @@ match name { "llvm.x86.clui" => "__builtin_ia32_clui", "llvm.x86.clwb" => "__builtin_ia32_clwb", "llvm.x86.clzero" => "__builtin_ia32_clzero", + "llvm.x86.cmpccxadd32" => "__builtin_ia32_cmpccxadd32", + "llvm.x86.cmpccxadd64" => "__builtin_ia32_cmpccxadd64", "llvm.x86.directstore32" => "__builtin_ia32_directstore_u32", "llvm.x86.directstore64" => "__builtin_ia32_directstore_u64", "llvm.x86.enqcmd" => "__builtin_ia32_enqcmd", @@ -5329,6 +7627,7 @@ match name { "llvm.x86.rdpid" => "__builtin_ia32_rdpid", "llvm.x86.rdpkru" => "__builtin_ia32_rdpkru", "llvm.x86.rdpmc" => "__builtin_ia32_rdpmc", + "llvm.x86.rdpru" => "__builtin_ia32_rdpru", "llvm.x86.rdsspd" => "__builtin_ia32_rdsspd", "llvm.x86.rdsspq" => "__builtin_ia32_rdsspq", "llvm.x86.rdtsc" => "__builtin_ia32_rdtsc", @@ -5606,6 +7905,8 @@ match name { "llvm.x86.tdpbusd.internal" => "__builtin_ia32_tdpbusd_internal", "llvm.x86.tdpbuud" => "__builtin_ia32_tdpbuud", "llvm.x86.tdpbuud.internal" => "__builtin_ia32_tdpbuud_internal", + "llvm.x86.tdpfp16ps" => "__builtin_ia32_tdpfp16ps", + "llvm.x86.tdpfp16ps.internal" => "__builtin_ia32_tdpfp16ps_internal", "llvm.x86.testui" => "__builtin_ia32_testui", "llvm.x86.tileloadd64" => "__builtin_ia32_tileloadd64", "llvm.x86.tileloadd64.internal" => "__builtin_ia32_tileloadd64_internal", @@ -5619,6 +7920,20 @@ match name { "llvm.x86.tpause" => "__builtin_ia32_tpause", "llvm.x86.umonitor" => "__builtin_ia32_umonitor", "llvm.x86.umwait" => "__builtin_ia32_umwait", + "llvm.x86.vbcstnebf162ps128" => 
"__builtin_ia32_vbcstnebf162ps128", + "llvm.x86.vbcstnebf162ps256" => "__builtin_ia32_vbcstnebf162ps256", + "llvm.x86.vbcstnesh2ps128" => "__builtin_ia32_vbcstnesh2ps128", + "llvm.x86.vbcstnesh2ps256" => "__builtin_ia32_vbcstnesh2ps256", + "llvm.x86.vcvtneebf162ps128" => "__builtin_ia32_vcvtneebf162ps128", + "llvm.x86.vcvtneebf162ps256" => "__builtin_ia32_vcvtneebf162ps256", + "llvm.x86.vcvtneeph2ps128" => "__builtin_ia32_vcvtneeph2ps128", + "llvm.x86.vcvtneeph2ps256" => "__builtin_ia32_vcvtneeph2ps256", + "llvm.x86.vcvtneobf162ps128" => "__builtin_ia32_vcvtneobf162ps128", + "llvm.x86.vcvtneobf162ps256" => "__builtin_ia32_vcvtneobf162ps256", + "llvm.x86.vcvtneoph2ps128" => "__builtin_ia32_vcvtneoph2ps128", + "llvm.x86.vcvtneoph2ps256" => "__builtin_ia32_vcvtneoph2ps256", + "llvm.x86.vcvtneps2bf16128" => "__builtin_ia32_vcvtneps2bf16128", + "llvm.x86.vcvtneps2bf16256" => "__builtin_ia32_vcvtneps2bf16256", "llvm.x86.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps", "llvm.x86.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256", "llvm.x86.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph", diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs index 1b089f08f764a..0edec566be309 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs @@ -1,159 +1,387 @@ use std::borrow::Cow; -use gccjit::{Function, FunctionPtrType, RValue, ToRValue}; +use gccjit::{Function, FunctionPtrType, RValue, ToRValue, UnaryOp}; +use rustc_codegen_ssa::traits::BuilderMethods; use crate::{context::CodegenCx, builder::Builder}; -pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str) -> Cow<'b, [RValue<'gcc>]> { +pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str, original_function_name: Option<&String>) -> Cow<'b, [RValue<'gcc>]> { // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing // arguments here. if gcc_func.get_param_count() != args.len() { match &*func_name { - "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask" - // FIXME(antoyo): the following intrinsics has 4 (or 5) arguments according to the doc, but is defined with 2 (or 3) arguments in library/stdarch/crates/core_arch/src/x86/avx512f.rs. + // NOTE: the following intrinsics have a different number of parameters in LLVM and GCC. 
+ "__builtin_ia32_prold512_mask" | "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask" | "__builtin_ia32_pmaxsd512_mask" | "__builtin_ia32_pmaxsq512_mask" | "__builtin_ia32_pmaxsq256_mask" - | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" - | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask" | "__builtin_ia32_pmaxuq256_mask" - | "__builtin_ia32_pmaxuq128_mask" + | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask" | "__builtin_ia32_pminsd512_mask" | "__builtin_ia32_pminsq512_mask" | "__builtin_ia32_pminsq256_mask" - | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" - | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask" | "__builtin_ia32_pminuq256_mask" - | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" + | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask" + | "__builtin_ia32_prolq512_mask" | "__builtin_ia32_prorq512_mask" | "__builtin_ia32_pslldi512_mask" + | "__builtin_ia32_psrldi512_mask" | "__builtin_ia32_psllqi512_mask" | "__builtin_ia32_psrlqi512_mask" + | "__builtin_ia32_pslld512_mask" | "__builtin_ia32_psrld512_mask" | "__builtin_ia32_psllq512_mask" + | "__builtin_ia32_psrlq512_mask" | "__builtin_ia32_psrad512_mask" | "__builtin_ia32_psraq512_mask" + | "__builtin_ia32_psradi512_mask" | "__builtin_ia32_psraqi512_mask" | "__builtin_ia32_psrav16si_mask" + | "__builtin_ia32_psrav8di_mask" | "__builtin_ia32_prolvd512_mask" | "__builtin_ia32_prorvd512_mask" + | "__builtin_ia32_prolvq512_mask" | "__builtin_ia32_prorvq512_mask" | "__builtin_ia32_psllv16si_mask" + | "__builtin_ia32_psrlv16si_mask" | "__builtin_ia32_psllv8di_mask" | "__builtin_ia32_psrlv8di_mask" + | "__builtin_ia32_permvarsi512_mask" | "__builtin_ia32_vpermilvarps512_mask" + | "__builtin_ia32_vpermilvarpd512_mask" | "__builtin_ia32_permvardi512_mask" + | "__builtin_ia32_permvarsf512_mask" | "__builtin_ia32_permvarqi512_mask" + | "__builtin_ia32_permvarqi256_mask" | "__builtin_ia32_permvarqi128_mask" + | "__builtin_ia32_vpmultishiftqb512_mask" | "__builtin_ia32_vpmultishiftqb256_mask" + | "__builtin_ia32_vpmultishiftqb128_mask" => { - // TODO: refactor by separating those intrinsics outside of this branch. 
- let add_before_last_arg = - match &*func_name { - "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" - | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" - | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => true, - _ => false, - }; - let new_first_arg_is_zero = - match &*func_name { - "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask" - | "__builtin_ia32_pminuq256_mask" | "__builtin_ia32_pminuq128_mask" => true, - _ => false - }; - let arg3_index = - match &*func_name { - "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 1, - _ => 2, - }; - let mut new_args = args.to_vec(); - let arg3_type = gcc_func.get_param_type(arg3_index); - let first_arg = - if new_first_arg_is_zero { - let vector_type = arg3_type.dyncast_vector().expect("vector type"); - let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); - let num_units = vector_type.get_num_units(); - builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]) - } - else { - builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue() - }; - if add_before_last_arg { - new_args.insert(new_args.len() - 1, first_arg); - } - else { - new_args.push(first_arg); - } - let arg4_index = - match &*func_name { - "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 2, - _ => 3, - }; - let arg4_type = gcc_func.get_param_type(arg4_index); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - if add_before_last_arg { - new_args.insert(new_args.len() - 1, minus_one); - } - else { - new_args.push(minus_one); + let mut new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let first_arg = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(first_arg); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask" | "__builtin_ia32_pminuq256_mask" + | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_prold256_mask" | "__builtin_ia32_prold128_mask" + | "__builtin_ia32_prord512_mask" | "__builtin_ia32_prord256_mask" | "__builtin_ia32_prord128_mask" + | "__builtin_ia32_prolq256_mask" | "__builtin_ia32_prolq128_mask" | "__builtin_ia32_prorq256_mask" + | "__builtin_ia32_prorq128_mask" | "__builtin_ia32_psraq256_mask" | "__builtin_ia32_psraq128_mask" + | "__builtin_ia32_psraqi256_mask" | "__builtin_ia32_psraqi128_mask" | "__builtin_ia32_psravq256_mask" + | "__builtin_ia32_psravq128_mask" | "__builtin_ia32_prolvd256_mask" | "__builtin_ia32_prolvd128_mask" + | "__builtin_ia32_prorvd256_mask" | "__builtin_ia32_prorvd128_mask" | "__builtin_ia32_prolvq256_mask" + | "__builtin_ia32_prolvq128_mask" | "__builtin_ia32_prorvq256_mask" | "__builtin_ia32_prorvq128_mask" + | "__builtin_ia32_permvardi256_mask" | "__builtin_ia32_permvardf512_mask" | "__builtin_ia32_permvardf256_mask" + | "__builtin_ia32_pmulhuw512_mask" | "__builtin_ia32_pmulhw512_mask" | "__builtin_ia32_pmulhrsw512_mask" + | "__builtin_ia32_pmaxuw512_mask" | "__builtin_ia32_pmaxub512_mask" | "__builtin_ia32_pmaxsw512_mask" + | "__builtin_ia32_pmaxsb512_mask" | "__builtin_ia32_pminuw512_mask" | "__builtin_ia32_pminub512_mask" + | "__builtin_ia32_pminsw512_mask" | "__builtin_ia32_pminsb512_mask" + | "__builtin_ia32_pmaddwd512_mask" | "__builtin_ia32_pmaddubsw512_mask" | 
"__builtin_ia32_packssdw512_mask" + | "__builtin_ia32_packsswb512_mask" | "__builtin_ia32_packusdw512_mask" | "__builtin_ia32_packuswb512_mask" + | "__builtin_ia32_pavgw512_mask" | "__builtin_ia32_pavgb512_mask" | "__builtin_ia32_psllw512_mask" + | "__builtin_ia32_psllwi512_mask" | "__builtin_ia32_psllv32hi_mask" | "__builtin_ia32_psrlw512_mask" + | "__builtin_ia32_psrlwi512_mask" | "__builtin_ia32_psllv16hi_mask" | "__builtin_ia32_psllv8hi_mask" + | "__builtin_ia32_psrlv32hi_mask" | "__builtin_ia32_psraw512_mask" | "__builtin_ia32_psrawi512_mask" + | "__builtin_ia32_psrlv16hi_mask" | "__builtin_ia32_psrlv8hi_mask" | "__builtin_ia32_psrav32hi_mask" + | "__builtin_ia32_permvarhi512_mask" | "__builtin_ia32_pshufb512_mask" | "__builtin_ia32_psrav16hi_mask" + | "__builtin_ia32_psrav8hi_mask" | "__builtin_ia32_permvarhi256_mask" | "__builtin_ia32_permvarhi128_mask" + => { + let mut new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let vector_type = arg3_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_dbpsadbw512_mask" | "__builtin_ia32_dbpsadbw256_mask" | "__builtin_ia32_dbpsadbw128_mask" => { + let mut new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let vector_type = arg4_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg5_type = gcc_func.get_param_type(4); + let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask" + | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => { + let mut new_args = args.to_vec(); + // Remove last arg as it doesn't seem to be used in GCC and is always false. 
+ new_args.pop(); + let arg2_type = gcc_func.get_param_type(1); + let vector_type = arg2_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vpconflictsi_512_mask" | "__builtin_ia32_vpconflictsi_256_mask" + | "__builtin_ia32_vpconflictsi_128_mask" | "__builtin_ia32_vpconflictdi_512_mask" + | "__builtin_ia32_vpconflictdi_256_mask" | "__builtin_ia32_vpconflictdi_128_mask" => { + let mut new_args = args.to_vec(); + let arg2_type = gcc_func.get_param_type(1); + let vector_type = arg2_type.dyncast_vector().expect("vector type"); + let zero = builder.context.new_rvalue_zero(vector_type.get_element_type()); + let num_units = vector_type.get_num_units(); + let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]); + new_args.push(first_arg); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask" + | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask" + | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => { + let mut new_args = args.to_vec(); + let arg5_type = gcc_func.get_param_type(4); + let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { + let mut new_args = args.to_vec(); + + let mut last_arg = None; + if args.len() == 4 { + last_arg = new_args.pop(); + } + + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + + if args.len() == 3 { + // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 maps to + // the same GCC intrinsic, but the former has 3 parameters and the + // latter has 4 so it doesn't require this additional argument. 
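(Editor's note: the constant 4 pushed just below appears to be the x86 rounding-control value _MM_FROUND_CUR_DIRECTION (0x04), i.e. "round using the current MXCSR mode", which is a reasonable default when the three-argument LLVM form carries no explicit rounding operand.)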
+ let arg5_type = gcc_func.get_param_type(4); + new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4)); + } + + if let Some(last_arg) = last_arg { + new_args.push(last_arg); + } + + args = new_args.into(); + }, + "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" + | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" + | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" + | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" + | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" + | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg3_type = gcc_func.get_param_type(2); + let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(undefined); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_vpermi2vard512_mask" | "__builtin_ia32_vpermi2vard256_mask" + | "__builtin_ia32_vpermi2vard128_mask" | "__builtin_ia32_vpermi2varq512_mask" + | "__builtin_ia32_vpermi2varq256_mask" | "__builtin_ia32_vpermi2varq128_mask" + | "__builtin_ia32_vpermi2varps512_mask" | "__builtin_ia32_vpermi2varps256_mask" + | "__builtin_ia32_vpermi2varps128_mask" | "__builtin_ia32_vpermi2varpd512_mask" + | "__builtin_ia32_vpermi2varpd256_mask" | "__builtin_ia32_vpermi2varpd128_mask" | "__builtin_ia32_vpmadd52huq512_mask" + | "__builtin_ia32_vpmadd52luq512_mask" | "__builtin_ia32_vpmadd52huq256_mask" | "__builtin_ia32_vpmadd52luq256_mask" + | "__builtin_ia32_vpmadd52huq128_mask" + => { + let mut new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + new_args.push(minus_one); + args = new_args.into(); + }, + "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" + | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => { + let mut new_args = args.to_vec(); + let last_arg = new_args.pop().expect("last arg"); + let arg2_type = gcc_func.get_param_type(1); + let undefined = builder.current_func().new_local(None, arg2_type, "undefined_for_intrinsic").to_rvalue(); + new_args.push(undefined); + let arg3_type = gcc_func.get_param_type(2); + let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1); + new_args.push(minus_one); + new_args.push(last_arg); + args = new_args.into(); + }, + "__builtin_ia32_stmxcsr" => { + args = vec![].into(); + }, + "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => { + let mut new_args = args.to_vec(); + let arg2_type = gcc_func.get_param_type(1); + let variable = builder.current_func().new_local(None, arg2_type, "addcarryResult"); + new_args.push(variable.get_address(None)); + args = new_args.into(); + }, + "__builtin_ia32_vpermt2varqi512_mask" | "__builtin_ia32_vpermt2varqi256_mask" + | "__builtin_ia32_vpermt2varqi128_mask" | 
"__builtin_ia32_vpermt2varhi512_mask" + | "__builtin_ia32_vpermt2varhi256_mask" | "__builtin_ia32_vpermt2varhi128_mask" + => { + let new_args = args.to_vec(); + let arg4_type = gcc_func.get_param_type(3); + let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); + args = vec![new_args[1], new_args[0], new_args[2], minus_one].into(); + }, + "__builtin_ia32_xrstor" | "__builtin_ia32_xsavec" => { + let new_args = args.to_vec(); + let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32); + let arg2 = new_args[1] << thirty_two | new_args[2]; + let arg2_type = gcc_func.get_param_type(1); + let arg2 = builder.context.new_cast(None, arg2, arg2_type); + args = vec![new_args[0], arg2].into(); + }, + "__builtin_prefetch" => { + let mut new_args = args.to_vec(); + new_args.pop(); + args = new_args.into(); + }, + _ => (), + } + } + else { + match &*func_name { + "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => { + let new_args = args.to_vec(); + let arg3_type = gcc_func.get_param_type(2); + let arg3 = builder.context.new_cast(None, new_args[4], arg3_type); + let arg4_type = gcc_func.get_param_type(3); + let arg4 = builder.context.new_bitcast(None, new_args[2], arg4_type); + args = vec![new_args[0], new_args[1], arg3, arg4, new_args[3], new_args[5]].into(); + }, + // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors. + // FIXME: the intrinsics like _mm_mask_fmadd_sd should probably directly call the GCC + // instrinsic to avoid this. + "__builtin_ia32_vfmaddss3_round" => { + let new_args = args.to_vec(); + let arg1_type = gcc_func.get_param_type(0); + let arg2_type = gcc_func.get_param_type(1); + let arg3_type = gcc_func.get_param_type(2); + let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 4]); + let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 4]); + let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 4]); + args = vec![a, b, c, new_args[3]].into(); + }, + "__builtin_ia32_vfmaddsd3_round" => { + let new_args = args.to_vec(); + let arg1_type = gcc_func.get_param_type(0); + let arg2_type = gcc_func.get_param_type(1); + let arg3_type = gcc_func.get_param_type(2); + let a = builder.context.new_rvalue_from_vector(None, arg1_type, &[new_args[0]; 2]); + let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 2]); + let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]); + args = vec![a, b, c, new_args[3]].into(); + }, + "__builtin_ia32_vfmaddsubpd256" | "__builtin_ia32_vfmaddsubps" | "__builtin_ia32_vfmaddsubps256" + | "__builtin_ia32_vfmaddsubpd" => { + if let Some(original_function_name) = original_function_name { + match &**original_function_name { + "llvm.x86.fma.vfmsubadd.pd.256" | "llvm.x86.fma.vfmsubadd.ps" | "llvm.x86.fma.vfmsubadd.ps.256" + | "llvm.x86.fma.vfmsubadd.pd" => { + // NOTE: since both llvm.x86.fma.vfmsubadd.ps and llvm.x86.fma.vfmaddsub.ps maps to + // __builtin_ia32_vfmaddsubps, only add minus if this comes from a + // subadd LLVM intrinsic, e.g. _mm256_fmsubadd_pd. 
+ let mut new_args = args.to_vec(); + let arg3 = &mut new_args[2]; + *arg3 = builder.context.new_unary_op(None, UnaryOp::Minus, arg3.get_type(), *arg3); + args = new_args.into(); + }, + _ => (), } - args = new_args.into(); - }, - "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask" - | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask" - | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => { - let mut new_args = args.to_vec(); - let arg5_type = gcc_func.get_param_type(4); - let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1); - new_args.push(minus_one); - args = new_args.into(); - }, - "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { - let mut new_args = args.to_vec(); - - let mut last_arg = None; - if args.len() == 4 { - last_arg = new_args.pop(); - } - - let arg4_type = gcc_func.get_param_type(3); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - new_args.push(minus_one); - - if args.len() == 3 { - // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 maps to - // the same GCC intrinsic, but the former has 3 parameters and the - // latter has 4 so it doesn't require this additional argument. - let arg5_type = gcc_func.get_param_type(4); - new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4)); - } - - if let Some(last_arg) = last_arg { - new_args.push(last_arg); - } - - args = new_args.into(); - }, - "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" - | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" - | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" - | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" => { - let mut new_args = args.to_vec(); - let last_arg = new_args.pop().expect("last arg"); - let arg3_type = gcc_func.get_param_type(2); - let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue(); - new_args.push(undefined); - let arg4_type = gcc_func.get_param_type(3); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - new_args.push(minus_one); - new_args.push(last_arg); - args = new_args.into(); - }, - "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { - let mut new_args = args.to_vec(); - let last_arg = new_args.pop().expect("last arg"); - let arg4_type = gcc_func.get_param_type(3); - let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1); - new_args.push(minus_one); - new_args.push(last_arg); - args = new_args.into(); - }, - _ => (), + } + }, + "__builtin_ia32_ldmxcsr" => { + // The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer, + // so dereference the pointer. 
+ let mut new_args = args.to_vec(); + let uint_ptr_type = builder.uint_type.make_pointer(); + let arg1 = builder.context.new_cast(None, args[0], uint_ptr_type); + new_args[0] = arg1.dereference(None).to_rvalue(); + args = new_args.into(); + }, + "__builtin_ia32_rcp14sd_mask" | "__builtin_ia32_rcp14ss_mask" | "__builtin_ia32_rsqrt14sd_mask" + | "__builtin_ia32_rsqrt14ss_mask" => { + let new_args = args.to_vec(); + args = vec![new_args[1], new_args[0], new_args[2], new_args[3]].into(); + }, + "__builtin_ia32_sqrtsd_mask_round" | "__builtin_ia32_sqrtss_mask_round" => { + let new_args = args.to_vec(); + args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into(); + }, + _ => (), } } args } +pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, mut return_value: RValue<'gcc>, func_name: &str, args: &[RValue<'gcc>], args_adjusted: bool, orig_args: &[RValue<'gcc>]) -> RValue<'gcc> { + match func_name { + "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => { + #[cfg(feature="master")] + { + let zero = builder.context.new_rvalue_zero(builder.int_type); + return_value = builder.context.new_vector_access(None, return_value, zero).to_rvalue(); + } + }, + "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => { + // Both llvm.x86.addcarry.32 and llvm.x86.addcarryx.u32 points to the same GCC builtin, + // but only the former requires adjusting the return value. + // Those 2 LLVM intrinsics differ by their argument count, that's why we check if the + // arguments were adjusted. + if args_adjusted { + let last_arg = args.last().expect("last arg"); + let field1 = builder.context.new_field(None, builder.u8_type, "carryFlag"); + let field2 = builder.context.new_field(None, args[1].get_type(), "carryResult"); + let struct_type = builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]); + return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[return_value, last_arg.dereference(None).to_rvalue()]); + } + }, + "__builtin_ia32_stmxcsr" => { + // The builtin __builtin_ia32_stmxcsr returns a value while llvm.x86.sse.stmxcsr writes + // the result in its pointer argument. + // We removed the argument since __builtin_ia32_stmxcsr takes no arguments, so we need + // to get back the original argument to get the pointer we need to write the result to. + let uint_ptr_type = builder.uint_type.make_pointer(); + let ptr = builder.context.new_cast(None, orig_args[0], uint_ptr_type); + builder.llbb().add_assignment(None, ptr.dereference(None), return_value); + // The return value was assigned to the result pointer above. In order to not call the + // builtin twice, we overwrite the return value with a dummy value. + return_value = builder.context.new_rvalue_zero(builder.int_type); + }, + _ => (), + } + + return_value +} + pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { - // NOTE: these intrinsics have missing parameters before the last one, so ignore the - // last argument type check. // FIXME(antoyo): find a way to refactor in order to avoid this hack. match func_name { + // NOTE: these intrinsics have missing parameters before the last one, so ignore the + // last argument type check. 
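Earlier in this hunk, the addcarry/subborrow builtins get an extra out-pointer argument (the "addcarryResult" local), and adjust_intrinsic_return_value then packs the carry flag and the pointed-to result back into a struct. A minimal standalone model of why both adjustments are needed (sketch only, not the real codegen):

// Stand-in for the GCC-style builtin: returns only the carry flag and writes the
// sum through an out pointer.
fn addcarry_builtin(carry_in: u8, a: u64, b: u64, out: &mut u64) -> u8 {
    let (sum1, c1) = a.overflowing_add(b);
    let (sum2, c2) = sum1.overflowing_add(carry_in as u64);
    *out = sum2;
    (c1 | c2) as u8
}

// Shape expected from the LLVM intrinsic: a (carry, sum) pair.
fn addcarry_intrinsic(carry_in: u8, a: u64, b: u64) -> (u8, u64) {
    let mut result = 0; // plays the role of the "addcarryResult" local
    let carry = addcarry_builtin(carry_in, a, b, &mut result);
    (carry, result)     // plays the role of the struct built in adjust_intrinsic_return_value
}

fn main() {
    assert_eq!(addcarry_intrinsic(1, u64::MAX, 0), (1, 0));
    assert_eq!(addcarry_intrinsic(0, 2, 3), (0, 5));
}
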
"__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask" | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask" | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask" | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask" | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" - | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => { + | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" + | "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" => { if index == args_len - 1 { return true; } }, + "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => { + if index == 2 || index == 3 { + return true; + } + }, "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => { // Since there are two LLVM intrinsics that map to each of these GCC builtins and only // one of them has a missing parameter before the last one, we check the number of @@ -162,6 +390,14 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { return true; } }, + // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors. + "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => return true, + "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask" + | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => { + if index == args_len - 1 { + return true; + } + }, _ => (), } @@ -171,7 +407,7 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool { #[cfg(not(feature="master"))] pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { match name { - "llvm.x86.xgetbv" => { + "llvm.x86.xgetbv" | "llvm.x86.sse2.pause" => { let gcc_name = "__builtin_trap"; let func = cx.context.get_builtin_function(gcc_name); cx.functions.borrow_mut().insert(gcc_name.to_string(), func); @@ -183,24 +419,26 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function #[cfg(feature="master")] pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> { + match name { + "llvm.prefetch" => { + let gcc_name = "__builtin_prefetch"; + let func = cx.context.get_builtin_function(gcc_name); + cx.functions.borrow_mut().insert(gcc_name.to_string(), func); + return func + }, + _ => (), + } + let gcc_name = match name { "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv", // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html "llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd", "llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask", "llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask", - "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask", - "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask", "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask", "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512_mask", - "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask", - "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask", - "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask", - "llvm.x86.avx512.mask.pmins.q.128" => 
"__builtin_ia32_pminsq128_mask", "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512_mask", "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512_mask", - "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask", - "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask", "llvm.fma.v16f32" => "__builtin_ia32_vfmaddps512_mask", "llvm.fma.v8f64" => "__builtin_ia32_vfmaddpd512_mask", "llvm.x86.avx512.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask", @@ -221,6 +459,153 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512_mask", "llvm.x86.avx512.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask", "llvm.x86.avx512.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask", + "llvm.x86.avx512.sitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtdq2ps512_mask", + "llvm.x86.avx512.uitofp.round.v16f32.v16i32" => "__builtin_ia32_cvtudq2ps512_mask", + "llvm.x86.avx512.mask.ucmp.d.512" => "__builtin_ia32_ucmpd512_mask", + "llvm.x86.avx512.mask.ucmp.d.256" => "__builtin_ia32_ucmpd256_mask", + "llvm.x86.avx512.mask.ucmp.d.128" => "__builtin_ia32_ucmpd128_mask", + "llvm.x86.avx512.mask.cmp.d.512" => "__builtin_ia32_cmpd512_mask", + "llvm.x86.avx512.mask.cmp.d.256" => "__builtin_ia32_cmpd256_mask", + "llvm.x86.avx512.mask.cmp.d.128" => "__builtin_ia32_cmpd128_mask", + "llvm.x86.avx512.mask.ucmp.q.512" => "__builtin_ia32_ucmpq512_mask", + "llvm.x86.avx512.mask.ucmp.q.256" => "__builtin_ia32_ucmpq256_mask", + "llvm.x86.avx512.mask.ucmp.q.128" => "__builtin_ia32_ucmpq128_mask", + "llvm.x86.avx512.mask.cmp.q.512" => "__builtin_ia32_cmpq512_mask", + "llvm.x86.avx512.mask.cmp.q.256" => "__builtin_ia32_cmpq256_mask", + "llvm.x86.avx512.mask.cmp.q.128" => "__builtin_ia32_cmpq128_mask", + "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_mask_round", + "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_mask_round", + "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_mask_round", + "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_mask_round", + "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_mask_round", + "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_mask_round", + "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss_mask_round", + "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd_mask_round", + "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_mask_round", + "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_mask_round", + "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_mask_round", + "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_mask_round", + "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_mask_round", + "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_mask_round", + "llvm.x86.avx512.vfmadd.f32" => "__builtin_ia32_vfmaddss3_round", + "llvm.x86.avx512.vfmadd.f64" => "__builtin_ia32_vfmaddsd3_round", + "llvm.ceil.v4f64" => "__builtin_ia32_ceilpd256", + "llvm.ceil.v8f32" => "__builtin_ia32_ceilps256", + "llvm.floor.v4f64" => "__builtin_ia32_floorpd256", + "llvm.floor.v8f32" => "__builtin_ia32_floorps256", + "llvm.sqrt.v4f64" => "__builtin_ia32_sqrtpd256", + "llvm.x86.sse.stmxcsr" => "__builtin_ia32_stmxcsr", + "llvm.x86.sse.ldmxcsr" => "__builtin_ia32_ldmxcsr", + "llvm.ctpop.v16i32" => "__builtin_ia32_vpopcountd_v16si", + "llvm.ctpop.v8i32" => "__builtin_ia32_vpopcountd_v8si", + "llvm.ctpop.v4i32" => 
"__builtin_ia32_vpopcountd_v4si", + "llvm.ctpop.v8i64" => "__builtin_ia32_vpopcountq_v8di", + "llvm.ctpop.v4i64" => "__builtin_ia32_vpopcountq_v4di", + "llvm.ctpop.v2i64" => "__builtin_ia32_vpopcountq_v2di", + "llvm.x86.addcarry.64" => "__builtin_ia32_addcarryx_u64", + "llvm.x86.subborrow.64" => "__builtin_ia32_sbb_u64", + "llvm.floor.v2f64" => "__builtin_ia32_floorpd", + "llvm.floor.v4f32" => "__builtin_ia32_floorps", + "llvm.ceil.v2f64" => "__builtin_ia32_ceilpd", + "llvm.ceil.v4f32" => "__builtin_ia32_ceilps", + "llvm.fma.v2f64" => "__builtin_ia32_vfmaddpd", + "llvm.fma.v4f64" => "__builtin_ia32_vfmaddpd256", + "llvm.fma.v4f32" => "__builtin_ia32_vfmaddps", + "llvm.fma.v8f32" => "__builtin_ia32_vfmaddps256", + "llvm.ctlz.v16i32" => "__builtin_ia32_vplzcntd_512_mask", + "llvm.ctlz.v8i32" => "__builtin_ia32_vplzcntd_256_mask", + "llvm.ctlz.v4i32" => "__builtin_ia32_vplzcntd_128_mask", + "llvm.ctlz.v8i64" => "__builtin_ia32_vplzcntq_512_mask", + "llvm.ctlz.v4i64" => "__builtin_ia32_vplzcntq_256_mask", + "llvm.ctlz.v2i64" => "__builtin_ia32_vplzcntq_128_mask", + "llvm.ctpop.v32i16" => "__builtin_ia32_vpopcountw_v32hi", + "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd3", + "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss3", + "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmaddsubpd", + "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmaddsubpd256", + "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmaddsubps", + "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmaddsubps256", + "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd3", + "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss3", + "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd3", + "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss3", + "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask", + "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask", + "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask", + "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask", + "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask", + "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask", + "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask", + "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask", + "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask", + "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask", + "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask", + "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask", + "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask", + "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask", + "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask", + "llvm.ctpop.v16i16" => "__builtin_ia32_vpopcountw_v16hi", + "llvm.ctpop.v8i16" => "__builtin_ia32_vpopcountw_v8hi", + "llvm.ctpop.v64i8" => "__builtin_ia32_vpopcountb_v64qi", + "llvm.ctpop.v32i8" => "__builtin_ia32_vpopcountb_v32qi", + "llvm.ctpop.v16i8" => "__builtin_ia32_vpopcountb_v16qi", + "llvm.x86.avx512.mask.vpshufbitqmb.512" => "__builtin_ia32_vpshufbitqmb512_mask", + "llvm.x86.avx512.mask.vpshufbitqmb.256" => "__builtin_ia32_vpshufbitqmb256_mask", + "llvm.x86.avx512.mask.vpshufbitqmb.128" => "__builtin_ia32_vpshufbitqmb128_mask", + "llvm.x86.avx512.mask.ucmp.w.512" => "__builtin_ia32_ucmpw512_mask", 
+ "llvm.x86.avx512.mask.ucmp.w.256" => "__builtin_ia32_ucmpw256_mask", + "llvm.x86.avx512.mask.ucmp.w.128" => "__builtin_ia32_ucmpw128_mask", + "llvm.x86.avx512.mask.ucmp.b.512" => "__builtin_ia32_ucmpb512_mask", + "llvm.x86.avx512.mask.ucmp.b.256" => "__builtin_ia32_ucmpb256_mask", + "llvm.x86.avx512.mask.ucmp.b.128" => "__builtin_ia32_ucmpb128_mask", + "llvm.x86.avx512.mask.cmp.w.512" => "__builtin_ia32_cmpw512_mask", + "llvm.x86.avx512.mask.cmp.w.256" => "__builtin_ia32_cmpw256_mask", + "llvm.x86.avx512.mask.cmp.w.128" => "__builtin_ia32_cmpw128_mask", + "llvm.x86.avx512.mask.cmp.b.512" => "__builtin_ia32_cmpb512_mask", + "llvm.x86.avx512.mask.cmp.b.256" => "__builtin_ia32_cmpb256_mask", + "llvm.x86.avx512.mask.cmp.b.128" => "__builtin_ia32_cmpb128_mask", + "llvm.x86.xrstor" => "__builtin_ia32_xrstor", + "llvm.x86.xsavec" => "__builtin_ia32_xsavec", + "llvm.x86.addcarry.32" => "__builtin_ia32_addcarryx_u32", + "llvm.x86.subborrow.32" => "__builtin_ia32_sbb_u32", + "llvm.x86.avx512.mask.compress.store.w.512" => "__builtin_ia32_compressstoreuhi512_mask", + "llvm.x86.avx512.mask.compress.store.w.256" => "__builtin_ia32_compressstoreuhi256_mask", + "llvm.x86.avx512.mask.compress.store.w.128" => "__builtin_ia32_compressstoreuhi128_mask", + "llvm.x86.avx512.mask.compress.store.b.512" => "__builtin_ia32_compressstoreuqi512_mask", + "llvm.x86.avx512.mask.compress.store.b.256" => "__builtin_ia32_compressstoreuqi256_mask", + "llvm.x86.avx512.mask.compress.store.b.128" => "__builtin_ia32_compressstoreuqi128_mask", + "llvm.x86.avx512.mask.compress.w.512" => "__builtin_ia32_compresshi512_mask", + "llvm.x86.avx512.mask.compress.w.256" => "__builtin_ia32_compresshi256_mask", + "llvm.x86.avx512.mask.compress.w.128" => "__builtin_ia32_compresshi128_mask", + "llvm.x86.avx512.mask.compress.b.512" => "__builtin_ia32_compressqi512_mask", + "llvm.x86.avx512.mask.compress.b.256" => "__builtin_ia32_compressqi256_mask", + "llvm.x86.avx512.mask.compress.b.128" => "__builtin_ia32_compressqi128_mask", + "llvm.x86.avx512.mask.expand.w.512" => "__builtin_ia32_expandhi512_mask", + "llvm.x86.avx512.mask.expand.w.256" => "__builtin_ia32_expandhi256_mask", + "llvm.x86.avx512.mask.expand.w.128" => "__builtin_ia32_expandhi128_mask", + "llvm.x86.avx512.mask.expand.b.512" => "__builtin_ia32_expandqi512_mask", + "llvm.x86.avx512.mask.expand.b.256" => "__builtin_ia32_expandqi256_mask", + "llvm.x86.avx512.mask.expand.b.128" => "__builtin_ia32_expandqi128_mask", + "llvm.fshl.v8i64" => "__builtin_ia32_vpshldv_v8di", + "llvm.fshl.v4i64" => "__builtin_ia32_vpshldv_v4di", + "llvm.fshl.v2i64" => "__builtin_ia32_vpshldv_v2di", + "llvm.fshl.v16i32" => "__builtin_ia32_vpshldv_v16si", + "llvm.fshl.v8i32" => "__builtin_ia32_vpshldv_v8si", + "llvm.fshl.v4i32" => "__builtin_ia32_vpshldv_v4si", + "llvm.fshl.v32i16" => "__builtin_ia32_vpshldv_v32hi", + "llvm.fshl.v16i16" => "__builtin_ia32_vpshldv_v16hi", + "llvm.fshl.v8i16" => "__builtin_ia32_vpshldv_v8hi", + "llvm.fshr.v8i64" => "__builtin_ia32_vpshrdv_v8di", + "llvm.fshr.v4i64" => "__builtin_ia32_vpshrdv_v4di", + "llvm.fshr.v2i64" => "__builtin_ia32_vpshrdv_v2di", + "llvm.fshr.v16i32" => "__builtin_ia32_vpshrdv_v16si", + "llvm.fshr.v8i32" => "__builtin_ia32_vpshrdv_v8si", + "llvm.fshr.v4i32" => "__builtin_ia32_vpshrdv_v4si", + "llvm.fshr.v32i16" => "__builtin_ia32_vpshrdv_v32hi", + "llvm.fshr.v16i16" => "__builtin_ia32_vpshrdv_v16hi", + "llvm.fshr.v8i16" => "__builtin_ia32_vpshrdv_v8hi", + "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd3", + "llvm.x86.fma.vfmadd.ss" => 
"__builtin_ia32_vfmaddss3", // The above doc points to unknown builtins for the following, so override them: "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gathersiv4si", @@ -239,7 +624,151 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherdiv4di", "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherdiv2df", "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherdiv4df", - "" => "", + "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512_mask", + "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512_mask", + "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512_mask", + "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512_mask", + "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512_mask", + "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512_mask", + "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512_mask", + "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512_mask", + "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512_mask", + "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512_mask", + "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256_mask", + "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128_mask", + "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512_mask", + "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512_mask", + "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256_mask", + "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128_mask", + "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si_mask", + "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di_mask", + "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256_mask", + "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128_mask", + "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si_mask", + "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si_mask", + "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di_mask", + "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di_mask", + "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512_mask", + "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask", + "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask", + "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512_mask", + "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256_mask", + "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask", + "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512_mask", + "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256_mask", + "llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask", + "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask", + "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask", + "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask", + "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask", + "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask", + "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask", + "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask", + "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask", + "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask", + "llvm.x86.avx512.vpermi2var.pd.256" => 
"__builtin_ia32_vpermi2varpd256_mask", + "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask", + "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_mask_round", + "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_mask_round", + "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_mask_round", + "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_mask_round", + "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_mask_round", + "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_mask_round", + "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_mask_round", + "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_mask_round", + "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_mask_round", + "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_mask_round", + "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_mask_round", + "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_mask_round", + "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask_round", + "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask_round", + "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask_round", + "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask_round", + "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_mask_round", + "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_mask_round", + "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_mask_round", + "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_mask_round", + "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_mask_round", + "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_mask_round", + "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_mask_round", + "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_mask_round", + "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_mask_round", + "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_mask_round", + "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_mask_round", + "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_vaesenc_v32qi", + "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_vaesenclast_v32qi", + "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_vaesdec_v32qi", + "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_vaesdeclast_v32qi", + "llvm.x86.aesni.aesenc.512" => "__builtin_ia32_vaesenc_v64qi", + "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_vaesenclast_v64qi", + "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_vaesdec_v64qi", + "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_vaesdeclast_v64qi", + "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_v8bf", + "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_v16bf", + "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_v32bf", + "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_v8sf", + "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_v16sf", + "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_v4sf", + "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_v8sf", + "llvm.x86.avx512bf16.dpbf16ps.512" => 
"__builtin_ia32_dpbf16ps_v16sf", + "llvm.x86.pclmulqdq.512" => "__builtin_ia32_vpclmulqdq_v8di", + "llvm.x86.pclmulqdq.256" => "__builtin_ia32_vpclmulqdq_v4di", + "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask", + "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask", + "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask", + "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask", + "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask", + "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512_mask", + "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512_mask", + "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512_mask", + "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512_mask", + "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512_mask", + "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512_mask", + "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512_mask", + "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512_mask", + "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi_mask", + "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi_mask", + "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi_mask", + "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512_mask", + "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512_mask", + "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi_mask", + "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi_mask", + "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi_mask", + "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512_mask", + "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512_mask", + "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi_mask", + "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi_mask", + "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi_mask", + "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask", + "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask", + "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask", + "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask", + "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask", + "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask", + "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512_mask", + "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask", + "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask", + "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask", + "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask", + "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask", + "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask", + "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask", + "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask", + "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd_v16si", + "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd_v8si", + "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd_v4si", + "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds_v16si", + "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds_v8si", + "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds_v4si", + "llvm.x86.avx512.vpdpbusd.512" => 
"__builtin_ia32_vpdpbusd_v16si", + "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd_v8si", + "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd_v4si", + "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds_v16si", + "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds_v8si", + "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds_v4si", + // NOTE: this file is generated by https://github.com/GuillaumeGomez/llvmint/blob/master/generate_list.py _ => include!("archs.rs"), }; diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 35e650c65a081..94dc8c9e93b0d 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -1,6 +1,9 @@ pub mod llvm; mod simd; +#[cfg(feature="master")] +use std::iter; + use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType}; use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::base::wants_msvc_seh; @@ -8,15 +11,23 @@ use rustc_codegen_ssa::common::IntPredicate; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods}; +#[cfg(feature="master")] +use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods}; use rustc_middle::bug; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::ty::layout::LayoutOf; +#[cfg(feature="master")] +use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_span::{Span, Symbol, symbol::kw, sym}; use rustc_target::abi::HasDataLayout; use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; use rustc_target::spec::PanicStrategy; +#[cfg(feature="master")] +use rustc_target::spec::abi::Abi; use crate::abi::GccType; +#[cfg(feature="master")] +use crate::abi::FnAbiGccExt; use crate::builder::Builder; use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; @@ -93,7 +104,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { let name = tcx.item_name(def_id); let name_str = name.as_str(); - let llret_ty = self.layout_of(ret_ty).gcc_type(self, true); + let llret_ty = self.layout_of(ret_ty).gcc_type(self); let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout); let simple = get_simple_intrinsic(self, name); @@ -406,7 +417,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { /// Gets the LLVM type for a place of the original Rust type of /// this argument/return, i.e., the result of `type_of::type_of`. fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { - self.layout.gcc_type(cx, true) + self.layout.gcc_type(cx) } /// Stores a direct/indirect value described by this ArgAbi into a @@ -1122,10 +1133,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } -fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) { - // NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too - if bx.sess().panic_strategy() == PanicStrategy::Abort || true { - // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done. 
+fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) { + if bx.sess().panic_strategy() == PanicStrategy::Abort { bx.call(bx.type_void(), None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. @@ -1136,6 +1145,141 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue< unimplemented!(); } else { + #[cfg(feature="master")] + codegen_gnu_try(bx, try_func, data, _catch_func, dest); + #[cfg(not(feature="master"))] unimplemented!(); } } + +// Definition of the standard `try` function for Rust using the GNU-like model +// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke` +// instructions). +// +// This codegen is a little surprising because we always call a shim +// function instead of inlining the call to `invoke` manually here. This is done +// because in LLVM we're only allowed to have one personality per function +// definition. The call to the `try` intrinsic is being inlined into the +// function calling it, and that function may already have other personality +// functions in play. By calling a shim we're guaranteed that our shim will have +// the right personality function. +#[cfg(feature="master")] +fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>, data: RValue<'gcc>, catch_func: RValue<'gcc>, dest: RValue<'gcc>) { + let cx: &CodegenCx<'gcc, '_> = bx.cx; + let (llty, func) = get_rust_try_fn(cx, &mut |mut bx| { + // Codegens the shims described above: + // + // bx: + // invoke %try_func(%data) normal %normal unwind %catch + // + // normal: + // ret 0 + // + // catch: + // (%ptr, _) = landingpad + // call %catch_func(%data, %ptr) + // ret 1 + let then = bx.append_sibling_block("then"); + let catch = bx.append_sibling_block("catch"); + + let func = bx.current_func(); + let try_func = func.get_param(0).to_rvalue(); + let data = func.get_param(1).to_rvalue(); + let catch_func = func.get_param(2).to_rvalue(); + let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + + let current_block = bx.block.clone(); + + bx.switch_to_block(then); + bx.ret(bx.const_i32(0)); + + // Type indicator for the exception being thrown. + // + // The value is a pointer to the exception object + // being thrown. + bx.switch_to_block(catch); + bx.set_personality_fn(bx.eh_personality()); + + let eh_pointer_builtin = bx.cx.context.get_target_builtin_function("__builtin_eh_pointer"); + let zero = bx.cx.context.new_rvalue_zero(bx.int_type); + let ptr = bx.cx.context.new_call(None, eh_pointer_builtin, &[zero]); + let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + bx.call(catch_ty, None, catch_func, &[data, ptr], None); + bx.ret(bx.const_i32(1)); + + // NOTE: the blocks must be filled before adding the try/catch, otherwise gcc will not + // generate a try/catch. + // FIXME(antoyo): add a check in the libgccjit API to prevent this. + bx.switch_to_block(current_block); + bx.invoke(try_func_ty, None, try_func, &[data], then, catch, None); + }); + + let func = unsafe { std::mem::transmute(func) }; + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). 
+ let ret = bx.call(llty, None, func, &[try_func, data, catch_func], None); + let i32_align = bx.tcx().data_layout.i32_align.abi; + bx.store(ret, dest, i32_align); +} + + +// Helper function used to get a handle to the `__rust_try` function used to +// catch exceptions. +// +// This function is only generated once and is then cached. +#[cfg(feature="master")] +fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) { + if let Some(llfn) = cx.rust_try_fn.get() { + return llfn; + } + + // Define the type up front for the signature of the rust_try function. + let tcx = cx.tcx; + let i8p = tcx.mk_mut_ptr(tcx.types.i8); + // `unsafe fn(*mut i8) -> ()` + let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig( + iter::once(i8p), + tcx.mk_unit(), + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + ))); + // `unsafe fn(*mut i8, *mut i8) -> ()` + let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig( + [i8p, i8p].iter().cloned(), + tcx.mk_unit(), + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + ))); + // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32` + let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig( + [try_fn_ty, i8p, catch_fn_ty], + tcx.types.i32, + false, + rustc_hir::Unsafety::Unsafe, + Abi::Rust, + )); + let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen); + cx.rust_try_fn.set(Some(rust_try)); + rust_try +} + +// Helper function to give a Block to a closure to codegen a shim function. +// This is currently primarily used for the `try` intrinsic functions above. +#[cfg(feature="master")] +fn gen_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, name: &str, rust_fn_sig: ty::PolyFnSig<'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) { + let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty()); + let (typ, _, _, _) = fn_abi.gcc_type(cx); + // FIXME(eddyb) find a nicer way to do this. 
+ cx.linkage.set(FunctionType::Internal); + let func = cx.declare_fn(name, fn_abi); + let func_val = unsafe { std::mem::transmute(func) }; + cx.set_frame_pointer_type(func_val); + cx.apply_target_cpu_attr(func_val); + let block = Builder::append_block(cx, func_val, "entry-block"); + let bx = Builder::build(cx, block); + codegen(bx); + (typ, func) +} diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index cb8168b407184..b59c3a64f5728 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -1,8 +1,13 @@ -use std::cmp::Ordering; +#[cfg(feature="master")] +use gccjit::{ComparisonOp, UnaryOp}; +use gccjit::ToRValue; +use gccjit::{BinaryOp, RValue, Type}; -use gccjit::{BinaryOp, RValue, ToRValue, Type}; use rustc_codegen_ssa::base::compare_simd_types; -use rustc_codegen_ssa::common::TypeKind; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; +#[cfg(feature="master")] +use rustc_codegen_ssa::errors::ExpectedPointerMutability; +use rustc_codegen_ssa::errors::InvalidMonomorphization; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods}; @@ -14,18 +19,21 @@ use rustc_span::{sym, Span, Symbol}; use rustc_target::abi::Align; use crate::builder::Builder; +#[cfg(feature="master")] +use crate::context::CodegenCx; +#[cfg(feature="master")] +use crate::errors::{InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationInsertedType}; use crate::errors::{ - InvalidMonomorphizationExpectedSignedUnsigned, InvalidMonomorphizationExpectedSimd, - InvalidMonomorphizationInsertedType, InvalidMonomorphizationInvalidBitmask, + InvalidMonomorphizationExpectedSimd, + InvalidMonomorphizationInvalidBitmask, InvalidMonomorphizationInvalidFloatVector, InvalidMonomorphizationMaskType, InvalidMonomorphizationMismatchedLengths, InvalidMonomorphizationNotFloat, InvalidMonomorphizationReturnElement, InvalidMonomorphizationReturnIntegerType, InvalidMonomorphizationReturnLength, InvalidMonomorphizationReturnLengthInputType, InvalidMonomorphizationReturnType, InvalidMonomorphizationSimdShuffle, - InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedCast, - InvalidMonomorphizationUnsupportedElement, InvalidMonomorphizationUnsupportedOperation, + InvalidMonomorphizationUnrecognized, InvalidMonomorphizationUnsupportedElement, + InvalidMonomorphizationUnsupportedOperation, }; -use crate::intrinsic; pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( bx: &mut Builder<'a, 'gcc, 'tcx>, @@ -105,14 +113,19 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type"); let arg1_element_type = arg1_vector_type.get_element_type(); + // NOTE: since the arguments can be vectors of floats, make sure the mask is a vector of + // integer. 
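As the NOTE above says, the mask handed to vector_select below is rebuilt as an integer vector whose lanes match the width of the data lanes, with bit i of the incoming integer mask deciding lane i. A scalar sketch of that selection (assuming, as the vector_select call suggests, that a set bit takes the lane from the first operand):

// Mirrors the loop below: test the low bit of the mask for each lane, then shift.
fn select_by_bitmask<const N: usize>(mask: u32, if_set: [f32; N], if_clear: [f32; N]) -> [f32; N] {
    let mut out = [0.0f32; N];
    for i in 0..N {
        let bit = (mask >> i) & 1;
        out[i] = if bit == 1 { if_set[i] } else { if_clear[i] };
    }
    out
}

fn main() {
    let a = [1.0, 2.0, 3.0, 4.0];
    let b = [9.0, 9.0, 9.0, 9.0];
    // 0b0101: lanes 0 and 2 come from `a`, lanes 1 and 3 from `b`.
    assert_eq!(select_by_bitmask(0b0101, a, b), [1.0, 9.0, 3.0, 9.0]);
}
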
+ let mask_element_type = bx.type_ix(arg1_element_type.get_size() as u64 * 8); + let vector_mask_type = bx.context.new_vector_type(mask_element_type, arg1_vector_type.get_num_units() as u64); + let mut elements = vec![]; let one = bx.context.new_rvalue_one(mask.get_type()); for _ in 0..len { - let element = bx.context.new_cast(None, mask & one, arg1_element_type); + let element = bx.context.new_cast(None, mask & one, mask_element_type); elements.push(element); mask = mask >> one; } - let vector_mask = bx.context.new_rvalue_from_vector(None, arg1_type, &elements); + let vector_mask = bx.context.new_rvalue_from_vector(None, vector_mask_type, &elements); return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate())); } @@ -210,48 +223,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let vector = args[0].immediate(); let index = args[1].immediate(); let value = args[2].immediate(); - // TODO(antoyo): use a recursive unqualified() here. - let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type"); - let element_type = vector_type.get_element_type(); - // NOTE: we cannot cast to an array and assign to its element here because the value might - // not be an l-value. So, call a builtin to set the element. - // TODO(antoyo): perhaps we could create a new vector or maybe there's a GIMPLE instruction for that? - // TODO(antoyo): don't use target specific builtins here. - let func_name = match in_len { - 2 => { - if element_type == bx.i64_type { - "__builtin_ia32_vec_set_v2di" - } else { - unimplemented!(); - } - } - 4 => { - if element_type == bx.i32_type { - "__builtin_ia32_vec_set_v4si" - } else { - unimplemented!(); - } - } - 8 => { - if element_type == bx.i16_type { - "__builtin_ia32_vec_set_v8hi" - } else { - unimplemented!(); - } - } - _ => unimplemented!("Len: {}", in_len), - }; - let builtin = bx.context.get_target_builtin_function(func_name); - let param1_type = builtin.get_param(0).to_rvalue().get_type(); - // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. - let vector = bx.cx.bitcast_if_needed(vector, param1_type); - let result = bx.context.new_call( - None, - builtin, - &[vector, value, bx.context.new_cast(None, index, bx.int_type)], - ); - // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. - return Ok(bx.context.new_bitcast(None, result, vector.get_type())); + let variable = bx.current_func().new_local(None, vector.get_type(), "new_vector"); + bx.llbb().add_assignment(None, variable, vector); + let lvalue = bx.context.new_vector_access(None, variable.to_rvalue(), index); + // TODO(antoyo): if simd_insert is constant, use BIT_REF. + bx.llbb().add_assignment(None, lvalue, value); + return Ok(variable.to_rvalue()); } #[cfg(feature = "master")] @@ -280,7 +257,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate())); } - if name == sym::simd_cast { + #[cfg(feature="master")] + if name == sym::simd_cast || name == sym::simd_as { require_simd!(ret_ty, "return"); let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx()); require!( @@ -301,125 +279,40 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( enum Style { Float, - Int(/* is signed? */ bool), + Int, Unsupported, } - let (in_style, in_width) = match in_elem.kind() { - // vectors of pointer-sized integers should've been - // disallowed before here, so this unwrap is safe. 
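The simd_insert lowering above no longer goes through target-specific __builtin_ia32_vec_set_* builtins: it copies the vector into a local, assigns one lane through a vector access, and returns the local. A plain-Rust equivalent of that behaviour (sketch only):

fn simd_insert_model<const N: usize>(vector: [i32; N], index: usize, value: i32) -> [i32; N] {
    let mut new_vector = vector; // the "new_vector" local in the code above
    new_vector[index] = value;   // the assignment through new_vector_access
    new_vector
}

fn main() {
    assert_eq!(simd_insert_model([10, 20, 30, 40], 2, -1), [10, 20, -1, 40]);
}
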
- ty::Int(i) => ( - Style::Int(true), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(false), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - let (out_style, out_width) = match out_elem.kind() { - ty::Int(i) => ( - Style::Int(true), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(false), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - - let extend = |in_type, out_type| { - let vector_type = bx.context.new_vector_type(out_type, 8); - let vector = args[0].immediate(); - let array_type = bx.context.new_array_type(None, in_type, 8); - // TODO(antoyo): switch to using new_vector_access or __builtin_convertvector for vector casting. - let array = bx.context.new_bitcast(None, vector, array_type); - - let cast_vec_element = |index| { - let index = bx.context.new_rvalue_from_int(bx.int_type, index); - bx.context.new_cast( - None, - bx.context.new_array_access(None, array, index).to_rvalue(), - out_type, - ) + let in_style = + match in_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => Style::Float, + _ => Style::Unsupported, }; - bx.context.new_rvalue_from_vector( - None, - vector_type, - &[ - cast_vec_element(0), - cast_vec_element(1), - cast_vec_element(2), - cast_vec_element(3), - cast_vec_element(4), - cast_vec_element(5), - cast_vec_element(6), - cast_vec_element(7), - ], - ) - }; + let out_style = + match out_elem.kind() { + ty::Int(_) | ty::Uint(_) => Style::Int, + ty::Float(_) => Style::Float, + _ => Style::Unsupported, + }; match (in_style, out_style) { - (Style::Int(in_is_signed), Style::Int(_)) => { - return Ok(match in_width.cmp(&out_width) { - Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => { - if in_is_signed { - match (in_width, out_width) { - // FIXME(antoyo): the function _mm_cvtepi8_epi16 should directly - // call an intrinsic equivalent to __builtin_ia32_pmovsxbw128 so that - // we can generate a call to it. - (8, 16) => extend(bx.i8_type, bx.i16_type), - (8, 32) => extend(bx.i8_type, bx.i32_type), - (8, 64) => extend(bx.i8_type, bx.i64_type), - (16, 32) => extend(bx.i16_type, bx.i32_type), - (32, 64) => extend(bx.i32_type, bx.i64_type), - (16, 64) => extend(bx.i16_type, bx.i64_type), - _ => unimplemented!("in: {}, out: {}", in_width, out_width), - } - } else { - match (in_width, out_width) { - (8, 16) => extend(bx.u8_type, bx.u16_type), - (8, 32) => extend(bx.u8_type, bx.u32_type), - (8, 64) => extend(bx.u8_type, bx.u64_type), - (16, 32) => extend(bx.u16_type, bx.u32_type), - (16, 64) => extend(bx.u16_type, bx.u64_type), - (32, 64) => extend(bx.u32_type, bx.u64_type), - _ => unimplemented!("in: {}, out: {}", in_width, out_width), - } - } + (Style::Unsupported, Style::Unsupported) => { + require!( + false, + InvalidMonomorphization::UnsupportedCast { + span, + name, + in_ty, + in_elem, + ret_ty, + out_elem } - }); - } - (Style::Int(_), Style::Float) => { - // TODO: add support for internal functions in libgccjit to get access to IFN_VEC_CONVERT which is - // doing like __builtin_convertvector? - // Or maybe provide convert_vector as an API since it might not easy to get the - // types of internal functions. 
- unimplemented!(); - } - (Style::Float, Style::Int(_)) => { - unimplemented!(); - } - (Style::Float, Style::Float) => { - unimplemented!(); - } - _ => { /* Unsupported. Fallthrough. */ } + ); + }, + _ => return Ok(bx.context.convert_vector(None, args[0].immediate(), llret_ty)), } - return_error!(InvalidMonomorphizationUnsupportedCast { - span, - name, - in_ty, - in_elem, - ret_ty, - out_elem - }); } macro_rules! arith_binary { @@ -436,6 +329,71 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } } + if name == sym::simd_bitmask { + // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a + // vector mask and returns the most significant bit (MSB) of each lane in the form + // of either: + // * an unsigned integer + // * an array of `u8` + // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits. + // + // The bit order of the result depends on the byte endianness, LSB-first for little + // endian and MSB-first for big endian. + + let vector = args[0].immediate(); + let vector_type = vector.get_type().dyncast_vector().expect("vector type"); + let elem_type = vector_type.get_element_type(); + + let expected_int_bits = in_len.max(8); + let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64); + + // FIXME(antoyo): that's not going to work for masks bigger than 128 bits. + let result_type = bx.type_ix(expected_int_bits); + let mut result = bx.context.new_rvalue_zero(result_type); + + let elem_size = elem_type.get_size() * 8; + let sign_shift = bx.context.new_rvalue_from_int(elem_type, elem_size as i32 - 1); + let one = bx.context.new_rvalue_one(elem_type); + + let mut shift = 0; + for i in 0..in_len { + let elem = bx.extract_element(vector, bx.context.new_rvalue_from_int(bx.int_type, i as i32)); + let shifted = elem >> sign_shift; + let masked = shifted & one; + result = result | (bx.context.new_cast(None, masked, result_type) << bx.context.new_rvalue_from_int(result_type, shift)); + shift += 1; + } + + match ret_ty.kind() { + ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => { + // Zero-extend iN to the bitmask type: + return Ok(result); + } + ty::Array(elem, len) + if matches!(elem.kind(), ty::Uint(ty::UintTy::U8)) + && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all()) + == Some(expected_bytes) => + { + // Zero-extend iN to the array length: + let ze = bx.zext(result, bx.type_ix(expected_bytes * 8)); + + // Convert the integer to a byte array + let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); + bx.store(ze, ptr, Align::ONE); + let array_ty = bx.type_array(bx.type_i8(), expected_bytes); + let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty)); + return Ok(bx.load(array_ty, ptr, Align::ONE)); + } + _ => return_error!(InvalidMonomorphization::CannotReturn { + span, + name, + ret_ty, + expected_int_bits, + expected_bytes + }), + } + } + fn simd_simple_float_intrinsic<'gcc, 'tcx>( name: Symbol, in_elem: Ty<'_>, @@ -451,55 +409,66 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return Err(()); }}; } - let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() { - let elem_ty = bx.cx.type_float_from_ty(*f); - match f.bit_width() { - 32 => ("f32", elem_ty), - 64 => ("f64", elem_ty), - _ => { - return_error!(InvalidMonomorphizationInvalidFloatVector { - span, - name, - elem_ty: f.name_str(), - vec_ty: in_ty - }); + let (elem_ty_str, elem_ty) = + if let ty::Float(f) = in_elem.kind() { + let elem_ty = bx.cx.type_float_from_ty(*f); + match f.bit_width() { + 32 => ("f", elem_ty), + 64 => 
("", elem_ty), + _ => { + return_error!(InvalidMonomorphizationInvalidFloatVector { span, name, elem_ty: f.name_str(), vec_ty: in_ty }); + } } } - } else { - return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); - }; + else { + return_error!(InvalidMonomorphizationNotFloat { span, name, ty: in_ty }); + }; let vec_ty = bx.cx.type_vector(elem_ty, in_len); - let (intr_name, fn_ty) = match name { - sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103 - sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), - sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), - sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), - sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), - sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)), - _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }), - }; - let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); - let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx); - let function: RValue<'gcc> = unsafe { std::mem::transmute(function) }; - let c = bx.call( - fn_ty, - None, - function, - &args.iter().map(|arg| arg.immediate()).collect::>(), - None, - ); + let intr_name = + match name { + sym::simd_ceil => "ceil", + sym::simd_fabs => "fabs", // TODO(antoyo): pand with 170141183420855150465331762880109871103 + sym::simd_fcos => "cos", + sym::simd_fexp2 => "exp2", + sym::simd_fexp => "exp", + sym::simd_flog10 => "log10", + sym::simd_flog2 => "log2", + sym::simd_flog => "log", + sym::simd_floor => "floor", + sym::simd_fma => "fma", + sym::simd_fpowi => "__builtin_powi", + sym::simd_fpow => "pow", + sym::simd_fsin => "sin", + sym::simd_fsqrt => "sqrt", + sym::simd_round => "round", + sym::simd_trunc => "trunc", + _ => return_error!(InvalidMonomorphizationUnrecognized { span, name }) + }; + let builtin_name = format!("{}{}", intr_name, elem_ty_str); + let funcs = bx.cx.functions.borrow(); + let function = funcs.get(&builtin_name).unwrap_or_else(|| panic!("unable to find builtin function {}", builtin_name)); + + // TODO(antoyo): add platform-specific behavior here for architectures that have these + // intrinsics as instructions (for instance, gpus) + let mut vector_elements = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64); + // we have to treat fpowi specially, since fpowi's second argument is always an i32 + let arguments = if name == sym::simd_fpowi { + vec![ + bx.extract_element(args[0].immediate(), index).to_rvalue(), + args[1].immediate(), + ] + } else { + args.iter() + .map(|arg| bx.extract_element(arg.immediate(), index).to_rvalue()) + .collect() + }; + vector_elements.push(bx.context.new_call(None, *function, &arguments)); + } + let c = 
bx.context.new_rvalue_from_vector(None, vec_ty, &vector_elements); Ok(c) } @@ -525,6 +494,297 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args); } + #[cfg(feature="master")] + fn vector_ty<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, elem_ty: Ty<'tcx>, vec_len: u64) -> Type<'gcc> { + // FIXME: use cx.layout_of(ty).llvm_type() ? + let elem_ty = match *elem_ty.kind() { + ty::Int(v) => cx.type_int_from_ty(v), + ty::Uint(v) => cx.type_uint_from_ty(v), + ty::Float(v) => cx.type_float_from_ty(v), + _ => unreachable!(), + }; + cx.type_vector(elem_ty, vec_len) + } + + #[cfg(feature="master")] + fn gather<'a, 'gcc, 'tcx>(default: RValue<'gcc>, pointers: RValue<'gcc>, mask: RValue<'gcc>, pointer_count: usize, bx: &mut Builder<'a, 'gcc, 'tcx>, in_len: u64, underlying_ty: Ty<'tcx>, invert: bool) -> RValue<'gcc> { + let vector_type = + if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } + else { + vector_ty(bx, underlying_ty, in_len) + }; + let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); + + let mut values = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64); + let int = bx.context.new_vector_access(None, pointers, index).to_rvalue(); + + let ptr_type = elem_type.make_pointer(); + let ptr = bx.context.new_bitcast(None, int, ptr_type); + let value = ptr.dereference(None).to_rvalue(); + values.push(value); + } + + let vector = bx.context.new_rvalue_from_vector(None, vector_type, &values); + + let mut mask_types = vec![]; + let mut mask_values = vec![]; + for i in 0..in_len { + let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64); + mask_types.push(bx.context.new_field(None, bx.i32_type, "m")); + let mask_value = bx.context.new_vector_access(None, mask, index).to_rvalue(); + let masked = bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value; + let value = index + masked; + mask_values.push(value); + } + let mask_type = bx.context.new_struct_type(None, "mask_type", &mask_types); + let mask = bx.context.new_struct_constructor(None, mask_type.as_type(), None, &mask_values); + + if invert { + bx.shuffle_vector(vector, default, mask) + } + else { + bx.shuffle_vector(default, vector, mask) + } + } + + #[cfg(feature="master")] + if name == sym::simd_gather { + // simd_gather(values: , pointers: , + // mask: ) -> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + require_simd!(ret_ty, "return"); + + // Of the same length: + let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == out_len, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len + } + ); + require!( + in_len == out_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: out_len2 + } + ); + + // The return type must match the first argument type + require!( + ret_ty == in_ty, + InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty } + ); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) 
=> 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)), + _ => { + require!( + false, + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Not, + } + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count - 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: element_ty2, + third_arg: arg_tys[2] + } + ); + } + } + + return Ok(gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, false)); + } + + #[cfg(feature="master")] + if name == sym::simd_scatter { + // simd_scatter(values: , pointers: , + // mask: ) -> () + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + + // Of the same length: + let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + require!( + in_len == element_len1, + InvalidMonomorphization::SecondArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[1], + out_len: element_len1 + } + ); + require!( + in_len == element_len2, + InvalidMonomorphization::ThirdArgumentLength { + span, + name, + in_len, + in_ty, + arg_ty: arg_tys[2], + out_len: element_len2 + } + ); + + // This counts how many pointers + fn ptr_count(t: Ty<'_>) -> usize { + match t.kind() { + ty::RawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: Ty<'_>) -> Ty<'_> { + match t.kind() { + ty::RawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (pointer_count, underlying_ty) = match element_ty1.kind() { + ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => { + (ptr_count(element_ty1), non_ptr(element_ty1)) + } + _ => { + require!( + false, + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Mut, + } + ); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert_eq!(pointer_count 
- 1, ptr_count(element_ty0)); + assert_eq!(underlying_ty, non_ptr(element_ty0)); + + // The element type of the third argument must be a signed integer type of any width: + match element_ty2.kind() { + ty::Int(_) => (), + _ => { + require!( + false, + InvalidMonomorphization::ThirdArgElementType { + span, + name, + expected_element: element_ty2, + third_arg: arg_tys[2] + } + ); + } + } + + let result = gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), pointer_count, bx, in_len, underlying_ty, true); + + let pointers = args[1].immediate(); + + let vector_type = + if pointer_count > 1 { + bx.context.new_vector_type(bx.usize_type, in_len) + } + else { + vector_ty(bx, underlying_ty, in_len) + }; + let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type(); + + for i in 0..in_len { + let index = bx.context.new_rvalue_from_int(bx.int_type, i as i32); + let value = bx.context.new_vector_access(None, result, index); + + let int = bx.context.new_vector_access(None, pointers, index).to_rvalue(); + let ptr_type = elem_type.make_pointer(); + let ptr = bx.context.new_bitcast(None, int, ptr_type); + bx.llbb().add_assignment(None, ptr.dereference(None), value); + } + + return Ok(bx.context.new_rvalue_zero(bx.i32_type)); + } + arith_binary! { simd_add: Uint, Int => add, Float => fadd; simd_sub: Uint, Int => sub, Float => fsub; @@ -536,6 +796,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( simd_and: Uint, Int => and; simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors. simd_xor: Uint, Int => xor; + simd_fmin: Float => vector_fmin; + simd_fmax: Float => vector_fmax; } macro_rules! arith_unary { @@ -562,10 +824,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let rhs = args[1].immediate(); let is_add = name == sym::simd_saturating_add; let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; - let (signed, elem_width, elem_ty) = match *in_elem.kind() { - ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)), - ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)), - _ => { + let (signed, elem_width, elem_ty) = + match *in_elem.kind() { + ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_int_from_ty(i)), + ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits) / 8, bx.cx.type_uint_from_ty(i)), + _ => { return_error!(InvalidMonomorphizationExpectedSignedUnsigned { span, name, @@ -574,33 +837,78 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }); } }; - let builtin_name = match (signed, is_add, in_len, elem_width) { - (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned. 
- (false, true, 32, 8) => "__builtin_ia32_paddusb256", - (true, true, 16, 16) => "__builtin_ia32_paddsw256", - (false, true, 16, 16) => "__builtin_ia32_paddusw256", - (true, false, 16, 16) => "__builtin_ia32_psubsw256", - (false, false, 16, 16) => "__builtin_ia32_psubusw256", - (true, false, 32, 8) => "__builtin_ia32_psubsb256", - (false, false, 32, 8) => "__builtin_ia32_psubusb256", - _ => unimplemented!( - "signed: {}, is_add: {}, in_len: {}, elem_width: {}", - signed, - is_add, - in_len, - elem_width - ), - }; - let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); - - let func = bx.context.get_target_builtin_function(builtin_name); - let param1_type = func.get_param(0).to_rvalue().get_type(); - let param2_type = func.get_param(1).to_rvalue().get_type(); - let lhs = bx.cx.bitcast_if_needed(lhs, param1_type); - let rhs = bx.cx.bitcast_if_needed(rhs, param2_type); - let result = bx.context.new_call(None, func, &[lhs, rhs]); - // TODO(antoyo): perhaps use __builtin_convertvector for vector casting. - return Ok(bx.context.new_bitcast(None, result, vec_ty)); + + let result = + match (signed, is_add) { + (false, true) => { + let res = lhs + rhs; + let cmp = bx.context.new_comparison(None, ComparisonOp::LessThan, res, lhs); + res | cmp + }, + (true, true) => { + // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition + // TODO(antoyo): improve using conditional operators if possible. + let arg_type = lhs.get_type(); + // TODO(antoyo): convert lhs and rhs to unsigned. + let sum = lhs + rhs; + let vector_type = arg_type.dyncast_vector().expect("vector type"); + let unit = vector_type.get_num_units(); + let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); + let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); + + let xor1 = lhs ^ rhs; + let xor2 = lhs ^ sum; + let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; + let mask = and >> width; + + let one = bx.context.new_rvalue_one(elem_ty); + let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); + let shift1 = ones << width; + let shift2 = sum >> width; + let mask_min = shift1 ^ shift2; + + let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; + let and2 = mask & mask_min; + + and1 + and2 + }, + (false, false) => { + let res = lhs - rhs; + let cmp = bx.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs); + res & cmp + }, + (true, false) => { + let arg_type = lhs.get_type(); + // TODO(antoyo): this uses the same algorithm from saturating add, but add the + // negative of the right operand. Find a proper subtraction algorithm. + let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs); + + // TODO(antoyo): convert lhs and rhs to unsigned. 
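            // Illustrative aside (not part of this patch): the same branch-free trick
            // used for saturating addition above, written for a single i32 lane with
            // the wrapping semantics the vector code relies on:
            //
            //     fn saturating_add_i32(lhs: i32, rhs: i32) -> i32 {
            //         let sum = lhs.wrapping_add(rhs);
            //         // Overflow iff lhs and rhs share a sign bit but sum does not.
            //         let overflow = !(lhs ^ rhs) & (lhs ^ sum);
            //         // Arithmetic shift turns the sign bit into an all-ones/all-zeros mask.
            //         let mask = overflow >> 31;
            //         // i32::MAX on positive overflow, i32::MIN on negative overflow.
            //         let saturated = (1i32 << 31) ^ (sum >> 31);
            //         (!mask & sum) | (mask & saturated)
            //     }
            //
            //     e.g. saturating_add_i32(i32::MAX, 1) == i32::MAX
            //          saturating_add_i32(i32::MIN, -1) == i32::MIN
            //
            // The subtraction branch here reuses this by negating `rhs` first.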
+ let sum = lhs + rhs; + let vector_type = arg_type.dyncast_vector().expect("vector type"); + let unit = vector_type.get_num_units(); + let a = bx.context.new_rvalue_from_int(elem_ty, ((elem_width as i32) << 3) - 1); + let width = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![a; unit]); + + let xor1 = lhs ^ rhs; + let xor2 = lhs ^ sum; + let and = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, xor1) & xor2; + let mask = and >> width; + + let one = bx.context.new_rvalue_one(elem_ty); + let ones = bx.context.new_rvalue_from_vector(None, lhs.get_type(), &vec![one; unit]); + let shift1 = ones << width; + let shift2 = sum >> width; + let mask_min = shift1 ^ shift2; + + let and1 = bx.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, mask) & sum; + let and2 = mask & mask_min; + + and1 + and2 + } + }; + + return Ok(result); } macro_rules! arith_red { @@ -650,33 +958,50 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( add, 0.0 // TODO: Use this argument. ); - arith_red!(simd_reduce_mul_unordered: BinaryOp::Mult, vector_reduce_fmul_fast, false, mul, 1.0); + arith_red!( + simd_reduce_mul_unordered: BinaryOp::Mult, + vector_reduce_fmul_fast, + false, + mul, + 1.0 + ); + arith_red!( + simd_reduce_add_ordered: BinaryOp::Plus, + vector_reduce_fadd, + true, + add, + 0.0 + ); + arith_red!( + simd_reduce_mul_ordered: BinaryOp::Mult, + vector_reduce_fmul, + true, + mul, + 1.0 + ); + macro_rules! minmax_red { - ($name:ident: $reduction:ident) => { + ($name:ident: $int_red:ident, $float_red:ident) => { if name == sym::$name { require!( ret_ty == in_elem, InvalidMonomorphizationReturnType { span, name, in_elem, in_ty, ret_ty } ); return match in_elem.kind() { - ty::Int(_) | ty::Uint(_) | ty::Float(_) => { - Ok(bx.$reduction(args[0].immediate())) - } - _ => return_error!(InvalidMonomorphizationUnsupportedElement { - span, - name, - in_ty, - elem_ty: in_elem, - ret_ty - }), + ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())), + ty::Float(_) => Ok(bx.$float_red(args[0].immediate())), + _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, name, in_ty, elem_ty: in_elem, ret_ty }), }; } }; } - minmax_red!(simd_reduce_min: vector_reduce_min); - minmax_red!(simd_reduce_max: vector_reduce_max); + minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin); + minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax); + // TODO(sadlerap): revisit these intrinsics to generate more optimal reductions + minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin); + minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax); macro_rules! 
bitwise_red { ($name:ident : $op:expr, $boolean:expr) => { @@ -699,15 +1024,12 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }), } - // boolean reductions operate on vectors of i1s: - let i1 = bx.type_i1(); - let i1xn = bx.type_vector(i1, in_len as u64); - bx.trunc(args[0].immediate(), i1xn) + args[0].immediate() }; return match in_elem.kind() { ty::Int(_) | ty::Uint(_) => { let r = bx.vector_reduce_op(input, $op); - Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) }) + Ok(if !$boolean { r } else { bx.icmp(IntPredicate::IntNE, r, bx.context.new_rvalue_zero(r.get_type())) }) } _ => return_error!(InvalidMonomorphizationUnsupportedElement { span, @@ -723,6 +1045,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( bitwise_red!(simd_reduce_and: BinaryOp::BitwiseAnd, false); bitwise_red!(simd_reduce_or: BinaryOp::BitwiseOr, false); + bitwise_red!(simd_reduce_xor: BinaryOp::BitwiseXor, false); + bitwise_red!(simd_reduce_all: BinaryOp::BitwiseAnd, true); + bitwise_red!(simd_reduce_any: BinaryOp::BitwiseOr, true); unimplemented!("simd {}", name); } diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 44538b415283c..1b7feb5f8a18e 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -1,7 +1,7 @@ /* * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?) * TODO(antoyo): support #[inline] attributes. - * TODO(antoyo): support LTO (gcc's equivalent to Thin LTO is enabled by -fwhopr: https://stackoverflow.com/questions/64954525/does-gcc-have-thin-lto). + * TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one — https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html). * * TODO(antoyo): remove the patches. */ @@ -23,6 +23,7 @@ extern crate rustc_apfloat; extern crate rustc_ast; +extern crate rustc_attr; extern crate rustc_codegen_ssa; extern crate rustc_data_structures; extern crate rustc_errors; @@ -43,6 +44,7 @@ mod abi; mod allocator; mod archive; mod asm; +mod attributes; mod back; mod base; mod builder; @@ -314,9 +316,12 @@ pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec { .filter(|_feature| { // TODO(antoyo): implement a way to get enabled feature in libgccjit. // Probably using the equivalent of __builtin_cpu_supports. + // TODO(antoyo): maybe use whatever outputs the following command: + // gcc -march=native -Q --help=target #[cfg(feature="master")] { - _feature.contains("sse") || _feature.contains("avx") + // NOTE: the CPU in the CI doesn't support sse4a, so disable it to make the stdarch tests pass in the CI. 
+ (_feature.contains("sse") || _feature.contains("avx")) && !_feature.contains("avx512") && !_feature.contains("sse4a") } #[cfg(not(feature="master"))] { diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs index a7c868354fb27..c1f6340866cac 100644 --- a/compiler/rustc_codegen_gcc/src/mono_item.rs +++ b/compiler/rustc_codegen_gcc/src/mono_item.rs @@ -1,38 +1,66 @@ +#[cfg(feature="master")] +use gccjit::{VarAttribute, FnAttribute}; use rustc_codegen_ssa::traits::PreDefineMethods; +use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{Linkage, Visibility}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; -use rustc_span::def_id::DefId; +use crate::attributes; use crate::base; use crate::context::CodegenCx; use crate::type_of::LayoutGccExt; impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> { - fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + #[cfg_attr(not(feature="master"), allow(unused_variables))] + fn predefine_static(&self, def_id: DefId, _linkage: Linkage, visibility: Visibility, symbol_name: &str) { let attrs = self.tcx.codegen_fn_attrs(def_id); let instance = Instance::mono(self.tcx, def_id); let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); - let gcc_type = self.layout_of(ty).gcc_type(self, true); + let gcc_type = self.layout_of(ty).gcc_type(self); let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section); + #[cfg(feature="master")] + global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility))); - // TODO(antoyo): set linkage and visibility. + // TODO(antoyo): set linkage. self.instances.borrow_mut().insert(instance, global); } - fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) { + #[cfg_attr(not(feature="master"), allow(unused_variables))] + fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) { assert!(!instance.substs.needs_infer()); let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); self.linkage.set(base::linkage_to_gcc(linkage)); - let _decl = self.declare_fn(symbol_name, &fn_abi); + let decl = self.declare_fn(symbol_name, &fn_abi); //let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + attributes::from_fn_attrs(self, decl, instance); + + // If we're compiling the compiler-builtins crate, e.g., the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal + && linkage != Linkage::Private + && self.tcx.is_compiler_builtins(LOCAL_CRATE) + { + #[cfg(feature="master")] + decl.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)); + } + else { + #[cfg(feature="master")] + decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility))); + } + // TODO(antoyo): call set_link_section() to allow initializing argc/argv. // TODO(antoyo): set unique comdat. // TODO(antoyo): use inline attribute from there in linkage.set() above. 
+ + self.functions.borrow_mut().insert(symbol_name.to_string(), decl); + self.function_instances.borrow_mut().insert(instance, unsafe { std::mem::transmute(decl) }); } } diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs index 89a415cdb36cd..daa661f35c4c1 100644 --- a/compiler/rustc_codegen_gcc/src/type_.rs +++ b/compiler/rustc_codegen_gcc/src/type_.rs @@ -1,5 +1,3 @@ -use std::convert::TryInto; - use gccjit::{RValue, Struct, Type}; use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods}; use rustc_codegen_ssa::common::TypeKind; @@ -202,8 +200,9 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { value.get_type() } - fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> { - if let Some(struct_type) = ty.is_struct() { + fn type_array(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> { + // TODO: remove this as well? + /*if let Some(struct_type) = ty.is_struct() { if struct_type.get_field_count() == 0 { // NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a // size of usize::MAX in test_binary_search, we workaround this by setting the size to @@ -211,14 +210,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { // FIXME(antoyo): fix gccjit API. len = 0; } - } - - // NOTE: see note above. Some other test uses usize::MAX. - if len == u64::MAX { - len = 0; - } - - let len: i32 = len.try_into().expect("array len"); + }*/ self.context.new_array_type(None, ty, len) } @@ -247,10 +239,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> { self.context.new_opaque_struct_type(None, name) } - - pub fn type_bool(&self) -> Type<'gcc> { - self.context.new_type::() - } } pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec>, bool) { @@ -273,7 +261,7 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout assert_eq!(offset.align_to(padding_align) + padding, target_offset); result.push(cx.type_padding_filler(padding, padding_align)); - result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box. 
+ result.push(field.gcc_type(cx)); offset = target_offset + field.size; prev_effective_align = effective_field_align; } diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index ea2ce765053ff..5df8c1a209db2 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -6,7 +6,7 @@ use rustc_middle::bug; use rustc_middle::ty::{self, Ty, TypeVisitableExt}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use rustc_middle::ty::print::with_no_trimmed_paths; -use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; +use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants}; use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; use crate::abi::{FnAbiGccExt, GccType}; @@ -50,11 +50,25 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } -pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> { +impl<'a, 'tcx> CodegenCx<'a, 'tcx> { + pub fn align_of(&self, ty: Ty<'tcx>) -> Align { + self.layout_of(ty).align.abi + } +} + +fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> { match layout.abi { Abi::Scalar(_) => bug!("handled elsewhere"), Abi::Vector { ref element, count } => { let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); + let element = + // NOTE: gcc doesn't allow pointer types in vectors. + if element.get_pointee().is_some() { + cx.usize_type + } + else { + element + }; return cx.context.new_vector_type(element, count); }, Abi::ScalarPair(..) => { @@ -114,7 +128,7 @@ pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLa }, } } - FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count), + FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx), count), FieldsShape::Arbitrary { .. } => match name { None => { @@ -133,7 +147,7 @@ pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLa pub trait LayoutGccExt<'tcx> { fn is_gcc_immediate(&self) -> bool; fn is_gcc_scalar_pair(&self) -> bool; - fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>; + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>; fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>; fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>; @@ -168,8 +182,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. - //TODO(antoyo): do we still need the set_fields parameter? - fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> { + fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { if let Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). 
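Illustrative aside (standalone, not part of the patch): the thin/fat distinction the comment above refers to can be checked directly in plain Rust; references to sized types are one machine word, while references to DSTs carry metadata (length or vtable pointer) and are two.

use std::mem::size_of;

fn main() {
    // Thin pointer: the pointee is Sized, so a reference is one word.
    assert_eq!(size_of::<&u64>(), size_of::<usize>());
    // Fat pointers: unsized pointees carry metadata alongside the data pointer.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());               // ptr + len
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // ptr + vtable
    println!("thin vs. fat pointer sizes check out");
}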
@@ -179,10 +192,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { let ty = match *self.ty.kind() { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields)) + cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true)) + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx)) } ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())), _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO), @@ -199,13 +212,6 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { }; let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned(); if let Some(ty) = cached_type { - let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty); - if let Some((struct_type, layout)) = type_to_set_fields { - // Since we might be trying to generate a type containing another type which is not - // completely generated yet, we deferred setting the fields until now. - let (fields, packed) = struct_fields(cx, layout); - cx.set_struct_body(struct_type, &fields, packed); - } return ty; } @@ -222,7 +228,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { if let Some(v) = variant_index { layout = layout.for_variant(cx, v); } - layout.gcc_type(cx, true) + layout.gcc_type(cx) } else { uncached_gcc_type(cx, *self, &mut defer) @@ -230,9 +236,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { cx.types.borrow_mut().insert((self.ty, variant_index), ty); - if let Some((ty, layout)) = defer { + if let Some((deferred_ty, layout)) = defer { let (fields, packed) = struct_fields(cx, layout); - cx.set_struct_body(ty, &fields, packed); + cx.set_struct_body(deferred_ty, &fields, packed); } ty @@ -244,7 +250,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { return cx.type_i1(); } } - self.gcc_type(cx, true) + self.gcc_type(cx) } fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> { @@ -273,7 +279,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // pointee types, to avoid bitcasting every `OperandRef::deref`. match self.ty.kind() { ty::Ref(..) 
| ty::RawPtr(_) => { - return self.field(cx, index).gcc_type(cx, true); + return self.field(cx, index).gcc_type(cx); } // only wide pointer boxes are handled as pointers // thin pointer boxes with scalar allocators are handled by the general logic below @@ -343,7 +349,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> { - layout.gcc_type(self, true) + layout.gcc_type(self) } fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> { diff --git a/compiler/rustc_codegen_gcc/test.sh b/compiler/rustc_codegen_gcc/test.sh index c5ffb763673d2..6139892aefca7 100755 --- a/compiler/rustc_codegen_gcc/test.sh +++ b/compiler/rustc_codegen_gcc/test.sh @@ -17,17 +17,20 @@ export LIBRARY_PATH="$GCC_PATH" flags= gcc_master_branch=1 channel="debug" -func=all +funcs=() build_only=0 +nb_parts=0 +current_part=0 while [[ $# -gt 0 ]]; do case $1 in --release) codegen_channel=release + channel="release" shift ;; --release-sysroot) - sysroot_channel=release + sysroot_channel="--release" shift ;; --no-default-features) @@ -40,43 +43,83 @@ while [[ $# -gt 0 ]]; do flags="$flags --features $1" shift ;; - --release) - channel="release" + "--test-rustc") + funcs+=(test_rustc) shift ;; - "--test-rustc") - func=test_rustc + "--test-successful-rustc") + funcs+=(test_successful_rustc) + shift + ;; + "--test-failing-rustc") + funcs+=(test_failing_rustc) shift ;; "--test-libcore") - func=test_libcore + funcs+=(test_libcore) shift ;; "--clean-ui-tests") - func=clean_ui_tests + funcs+=(clean_ui_tests) + shift + ;; + "--clean") + funcs+=(clean) shift ;; "--std-tests") - func=std_tests + funcs+=(std_tests) + shift + ;; + + "--asm-tests") + funcs+=(asm_tests) shift ;; "--extended-tests") - func=extended_sysroot_tests + funcs+=(extended_sysroot_tests) + shift + ;; + "--extended-rand-tests") + funcs+=(extended_rand_tests) + shift + ;; + "--extended-regex-example-tests") + funcs+=(extended_regex_example_tests) + shift + ;; + "--extended-regex-tests") + funcs+=(extended_regex_tests) + shift + ;; + + "--mini-tests") + funcs+=(mini_tests) shift ;; "--build-sysroot") - func=build_sysroot + funcs+=(build_sysroot) shift ;; "--build") build_only=1 shift ;; + "--nb-parts") + shift + nb_parts=$1 + shift + ;; + "--current-part") + shift + current_part=$1 + shift + ;; *) echo "Unknown option $1" exit 1 @@ -87,7 +130,6 @@ done if [[ $channel == "release" ]]; then export CHANNEL='release' CARGO_INCREMENTAL=1 cargo rustc --release $flags - shift else echo $LD_LIBRARY_PATH export CHANNEL='debug' @@ -95,6 +137,7 @@ else fi if (( $build_only == 1 )); then + echo "Since it's 'build-only', exiting..." 
exit fi @@ -119,7 +162,7 @@ function mini_tests() { function build_sysroot() { echo "[BUILD] sysroot" - time ./build_sysroot/build_sysroot.sh + time ./build_sysroot/build_sysroot.sh $sysroot_channel } function std_tests() { @@ -148,17 +191,57 @@ function std_tests() { $RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE echo "[AOT] subslice-patterns-const-eval" - $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE + $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin $TEST_FLAGS --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/subslice-patterns-const-eval echo "[AOT] track-caller-attribute" - $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE + $RUSTC example/track-caller-attribute.rs --crate-type bin $TEST_FLAGS --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/track-caller-attribute echo "[BUILD] mod_bench" $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE } +function setup_rustc() { + rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') + + git clone https://github.com/rust-lang/rust.git || true + cd rust + git fetch + git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') + export RUSTFLAGS= + + rm config.toml || true + + cat > config.toml < res.txt diff -u res.txt examples/regexdna-output.txt + popd +} + +function extended_regex_tests() { + if (( $gcc_master_branch == 0 )); then + return + fi + pushd regex echo "[TEST] rust-lang/regex tests" + export CG_RUSTFLAGS="--cap-lints warn" # newer aho_corasick versions throw a deprecation warning ../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q popd } -function test_rustc() { - echo - echo "[TEST] rust-lang/rust" - - rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/') - - git clone https://github.com/rust-lang/rust.git || true - cd rust - git fetch - git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') - export RUSTFLAGS= - - git apply ../rustc_patches/compile_test.patch || true +function extended_sysroot_tests() { + #pushd simple-raytracer + #echo "[BENCH COMPILE] ebobby/simple-raytracer" + #hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \ + #"RUSTC=rustc RUSTFLAGS='' cargo build" \ + #"../cargo.sh build" - rm config.toml || true + #echo "[BENCH RUN] ebobby/simple-raytracer" + #cp ./target/debug/main ./raytracer_cg_gcc + #hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_gcc + #popd - cat > config.toml < ui_tests + # To ensure it'll be always the same sub files, we sort the content. + sort ui_tests -o ui_tests + count=$((`wc -l < ui_tests` / $nb_parts)) + # We increment the number of tests by one because if this is an odd number, we would skip + # one test. + count=$((count + 1)) + split -d -l $count -a 1 ui_tests ui_tests.split + # Removing all tests. + find tests/ui -type f -name '*.rs' -not -path "*/auxiliary/*" -delete + # Putting back only the ones we want to test. 
+ xargs -a "ui_tests.split$current_part" -d'\n' git checkout -- + fi echo "[TEST] rustc test suite" COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 tests/ui/ --rustc-args "$RUSTC_ARGS" } +function test_failing_rustc() { + test_rustc "1" +} + +function test_successful_rustc() { + test_rustc "0" +} + function clean_ui_tests() { - find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -exec rm -rf {} \; + find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -delete } function all() { @@ -283,9 +403,17 @@ function all() { mini_tests build_sysroot std_tests + #asm_tests test_libcore extended_sysroot_tests test_rustc } -$func +if [ ${#funcs[@]} -eq 0 ]; then + echo "No command passed, running '--all'..." + all +else + for t in ${funcs[@]}; do + $t + done +fi diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs index 8e378177e2457..06de26f7efc9f 100644 --- a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs +++ b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs @@ -46,11 +46,15 @@ pub fn main_inner(profile: Profile) { &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir), "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir), "-Zno-parallel-llvm", - "-C", "panic=abort", "-C", "link-arg=-lc", "-o", exe.to_str().expect("to_str"), path.to_str().expect("to_str"), ]); + if let Some(flags) = option_env!("TEST_FLAGS") { + for flag in flags.split_whitespace() { + compiler.arg(&flag); + } + } match profile { Profile::Debug => {} Profile::Release => { diff --git a/compiler/rustc_codegen_gcc/tests/run/abort1.rs b/compiler/rustc_codegen_gcc/tests/run/abort1.rs index 291af5993aa25..25041d93e748a 100644 --- a/compiler/rustc_codegen_gcc/tests/run/abort1.rs +++ b/compiler/rustc_codegen_gcc/tests/run/abort1.rs @@ -33,6 +33,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tests/run/abort2.rs b/compiler/rustc_codegen_gcc/tests/run/abort2.rs index 3c87c567892b5..e7443c8dbe5b2 100644 --- a/compiler/rustc_codegen_gcc/tests/run/abort2.rs +++ b/compiler/rustc_codegen_gcc/tests/run/abort2.rs @@ -33,6 +33,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tests/run/array.rs b/compiler/rustc_codegen_gcc/tests/run/array.rs index 8b621d8a3530f..49b28d98f2fec 100644 --- a/compiler/rustc_codegen_gcc/tests/run/array.rs +++ b/compiler/rustc_codegen_gcc/tests/run/array.rs @@ -79,7 +79,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -105,6 +105,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! 
{ mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tests/run/assign.rs b/compiler/rustc_codegen_gcc/tests/run/assign.rs index eb38a8a383575..427c1a2503397 100644 --- a/compiler/rustc_codegen_gcc/tests/run/assign.rs +++ b/compiler/rustc_codegen_gcc/tests/run/assign.rs @@ -57,6 +57,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -64,7 +65,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); libc::fflush(libc::stdout); diff --git a/compiler/rustc_codegen_gcc/tests/run/closure.rs b/compiler/rustc_codegen_gcc/tests/run/closure.rs index 7121a5f0d5221..8daa681abf7da 100644 --- a/compiler/rustc_codegen_gcc/tests/run/closure.rs +++ b/compiler/rustc_codegen_gcc/tests/run/closure.rs @@ -97,10 +97,14 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } +#[lang = "tuple_trait"] +pub trait Tuple {} + #[lang = "unsize"] pub trait Unsize {} @@ -114,7 +118,7 @@ impl, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} #[lang = "fn_once"] #[rustc_paren_sugar] -pub trait FnOnce { +pub trait FnOnce { #[lang = "fn_once_output"] type Output; @@ -123,7 +127,7 @@ pub trait FnOnce { #[lang = "fn_mut"] #[rustc_paren_sugar] -pub trait FnMut: FnOnce { +pub trait FnMut: FnOnce { extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; } @@ -177,7 +181,7 @@ impl Add for isize { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); diff --git a/compiler/rustc_codegen_gcc/tests/run/condition.rs b/compiler/rustc_codegen_gcc/tests/run/condition.rs index 6a2e2d5bb11a9..b7a13081deae0 100644 --- a/compiler/rustc_codegen_gcc/tests/run/condition.rs +++ b/compiler/rustc_codegen_gcc/tests/run/condition.rs @@ -82,7 +82,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -108,6 +108,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs index a226fff79e51b..8a196f774c82b 100644 --- a/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs +++ b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs @@ -76,7 +76,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -102,6 +102,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! 
{ mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tests/run/int.rs b/compiler/rustc_codegen_gcc/tests/run/int.rs index 75779622b54cd..bfe73c38435a3 100644 --- a/compiler/rustc_codegen_gcc/tests/run/int.rs +++ b/compiler/rustc_codegen_gcc/tests/run/int.rs @@ -3,22 +3,14 @@ // Run-time: // status: 0 -#![feature(const_black_box, core_intrinsics, start)] - -#![no_std] - -#[panic_handler] -fn panic_handler(_: &core::panic::PanicInfo) -> ! { - core::intrinsics::abort(); -} +#![feature(const_black_box)] /* * Code */ -#[start] -fn main(_argc: isize, _argv: *const *const u8) -> isize { - use core::hint::black_box; +fn main() { + use std::hint::black_box; macro_rules! check { ($ty:ty, $expr:expr) => { @@ -335,6 +327,4 @@ fn main(_argc: isize, _argv: *const *const u8) -> isize { const VAL5: T = 73236519889708027473620326106273939584_i128; check_ops128!(); } - - 0 } diff --git a/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs b/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs index ea2c5add962ae..c3fcb3c0a2a06 100644 --- a/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs +++ b/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs @@ -55,6 +55,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -62,7 +63,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { // Panicking is expected iff overflow checking is enabled. #[cfg(debug_assertions)] diff --git a/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs b/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs index 52de20021f3e0..2a2ea8b8bf0ab 100644 --- a/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs +++ b/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs @@ -59,6 +59,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -66,7 +67,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); libc::fflush(libc::stdout); diff --git a/compiler/rustc_codegen_gcc/tests/run/operations.rs b/compiler/rustc_codegen_gcc/tests/run/operations.rs index e078b37b4aba9..67b9f241dbbb3 100644 --- a/compiler/rustc_codegen_gcc/tests/run/operations.rs +++ b/compiler/rustc_codegen_gcc/tests/run/operations.rs @@ -65,6 +65,7 @@ mod libc { mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } @@ -72,7 +73,7 @@ mod intrinsics { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); libc::fflush(libc::stdout); diff --git a/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs index 6ac099ea145c2..da8a8295d564c 100644 --- a/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs +++ b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs @@ -76,7 +76,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { #[lang = "panic"] #[track_caller] #[no_mangle] -pub fn panic(_msg: &str) -> ! { +pub fn panic(_msg: &'static str) -> ! { unsafe { libc::puts("Panicking\0" as *const str as *const u8); intrinsics::abort(); @@ -102,6 +102,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! 
{ mod intrinsics { extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tests/run/slice.rs b/compiler/rustc_codegen_gcc/tests/run/slice.rs index ad9258ed0bdeb..96f1c4792e58f 100644 --- a/compiler/rustc_codegen_gcc/tests/run/slice.rs +++ b/compiler/rustc_codegen_gcc/tests/run/slice.rs @@ -102,6 +102,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tests/run/static.rs b/compiler/rustc_codegen_gcc/tests/run/static.rs index 294add968449a..19201f1df2667 100644 --- a/compiler/rustc_codegen_gcc/tests/run/static.rs +++ b/compiler/rustc_codegen_gcc/tests/run/static.rs @@ -45,6 +45,7 @@ mod intrinsics { use super::Sized; extern "rust-intrinsic" { + #[rustc_safe_intrinsic] pub fn abort() -> !; } } diff --git a/compiler/rustc_codegen_gcc/tools/check_intrinsics_duplicates.py b/compiler/rustc_codegen_gcc/tools/check_intrinsics_duplicates.py new file mode 100644 index 0000000000000..c09fb3c759f3b --- /dev/null +++ b/compiler/rustc_codegen_gcc/tools/check_intrinsics_duplicates.py @@ -0,0 +1,67 @@ +import sys + + +def check_duplicates(): + auto_content = "" + manual_content = "" + + with open("src/intrinsic/llvm.rs", "r", encoding="utf8") as f: + manual_content = f.read() + with open("src/intrinsic/archs.rs", "r", encoding="utf8") as f: + auto_content = f.read() + + intrinsics_map = {} + for line in auto_content.splitlines(): + line = line.strip() + if not line.startswith('"'): + continue + parts = line.split('"') + if len(parts) != 5: + continue + intrinsics_map[parts[1]] = parts[3] + + if len(intrinsics_map) == 0: + print("No intrinsics found in auto code... Aborting.") + return 1 + print("Found {} intrinsics in auto code".format(len(intrinsics_map))) + errors = [] + lines = manual_content.splitlines() + pos = 0 + found = 0 + while pos < len(lines): + line = lines[pos].strip() + # This is our marker. + if line == "let gcc_name = match name {": + while pos < len(lines): + line = lines[pos].strip() + pos += 1 + if line == "};": + # We're done! + if found == 0: + print("No intrinsics found in manual code even though we found the " + "marker... Aborting...") + return 1 + for error in errors: + print("ERROR => {}".format(error)) + return 1 if len(errors) != 0 else 0 + parts = line.split('"') + if len(parts) != 5: + continue + found += 1 + if parts[1] in intrinsics_map: + if parts[3] != intrinsics_map[parts[1]]: + print("Same intrinsics (`{}` at line {}) but different GCC " + "translations: `{}` != `{}`".format( + parts[1], pos, intrinsics_map[parts[1]], parts[3])) + else: + errors.append("Duplicated intrinsics: `{}` at line {}. Please remove it " + " from manual code".format(parts[1], pos)) + # Weird but whatever... + return 1 if len(errors) != 0 else 0 + pos += 1 + print("No intrinsics found in manual code... 
Aborting") + return 1 + + +if __name__ == "__main__": + sys.exit(check_duplicates()) diff --git a/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py b/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py index 849c6e9c98164..6188924b0d50a 100644 --- a/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py +++ b/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py @@ -13,7 +13,7 @@ def run_command(command, cwd=None): sys.exit(1) -def clone_repository(repo_name, path, repo_url, sub_path=None): +def clone_repository(repo_name, path, repo_url, sub_paths=None): if os.path.exists(path): while True: choice = input("There is already a `{}` folder, do you want to update it? [y/N]".format(path)) @@ -27,12 +27,12 @@ def clone_repository(repo_name, path, repo_url, sub_path=None): else: print("Didn't understand answer...") print("Cloning {} repository...".format(repo_name)) - if sub_path is None: + if sub_paths is None: run_command(["git", "clone", repo_url, "--depth", "1", path]) else: run_command(["git", "clone", repo_url, "--filter=tree:0", "--no-checkout", path]) run_command(["git", "sparse-checkout", "init"], cwd=path) - run_command(["git", "sparse-checkout", "set", "add", sub_path], cwd=path) + run_command(["git", "sparse-checkout", "set", *sub_paths], cwd=path) run_command(["git", "checkout"], cwd=path) @@ -40,56 +40,45 @@ def append_intrinsic(array, intrinsic_name, translation): array.append((intrinsic_name, translation)) -def extract_instrinsics(intrinsics, file): - print("Extracting intrinsics from `{}`...".format(file)) - with open(file, "r", encoding="utf8") as f: - content = f.read() +def convert_to_string(content): + if content.__class__.__name__ == 'bytes': + return content.decode('utf-8') + return content - lines = content.splitlines() + +def extract_instrinsics_from_llvm(llvm_path, intrinsics): + p = subprocess.Popen( + ["llvm-tblgen", "llvm/IR/Intrinsics.td"], + cwd=os.path.join(llvm_path, "llvm/include"), + stdout=subprocess.PIPE) + output, err = p.communicate() + lines = convert_to_string(output).splitlines() pos = 0 - current_arch = None while pos < len(lines): - line = lines[pos].strip() - if line.startswith("let TargetPrefix ="): - current_arch = line.split('"')[1].strip() - if len(current_arch) == 0: - current_arch = None - elif current_arch is None: - pass - elif line == "}": - current_arch = None - elif line.startswith("def "): - content = "" - while not content.endswith(";") and not content.endswith("}") and pos < len(lines): - line = lines[pos].split(" // ")[0].strip() - content += line - pos += 1 - entries = re.findall('GCCBuiltin<"(\\w+)">', content) - if len(entries) > 0: - intrinsic = content.split("def ")[1].strip().split(":")[0].strip() - intrinsic = intrinsic.split("_") - if len(intrinsic) < 2 or intrinsic[0] != "int": - continue - intrinsic[0] = "llvm" - intrinsic = ".".join(intrinsic) - if current_arch not in intrinsics: - intrinsics[current_arch] = [] - for entry in entries: - append_intrinsic(intrinsics[current_arch], intrinsic, entry) + line = lines[pos] + if not line.startswith("def "): + pos += 1 continue - pos += 1 - continue - print("Done!") - - -def extract_instrinsics_from_llvm(llvm_path, intrinsics): - files = [] - intrinsics_path = os.path.join(llvm_path, "llvm/include/llvm/IR") - for (dirpath, dirnames, filenames) in walk(intrinsics_path): - files.extend([os.path.join(intrinsics_path, f) for f in filenames if f.endswith(".td")]) - - for file in files: - extract_instrinsics(intrinsics, file) + intrinsic = line.split(" ")[1].strip() + content = 
line + while pos < len(lines): + line = lines[pos].split(" // ")[0].strip() + content += line + pos += 1 + if line == "}": + break + entries = re.findall('string ClangBuiltinName = "(\\w+)";', content) + current_arch = re.findall('string TargetPrefix = "(\\w+)";', content) + if len(entries) == 1 and len(current_arch) == 1: + current_arch = current_arch[0] + intrinsic = intrinsic.split("_") + if len(intrinsic) < 2 or intrinsic[0] != "int": + continue + intrinsic[0] = "llvm" + intrinsic = ".".join(intrinsic) + if current_arch not in intrinsics: + intrinsics[current_arch] = [] + append_intrinsic(intrinsics[current_arch], intrinsic, entries[0]) def append_translation(json_data, p, array): @@ -193,6 +182,8 @@ def update_intrinsics(llvm_path, llvmint, llvmint2): for entry in intrinsics[arch]: if entry[2] == True: # if it is a duplicate out.write(' // [DUPLICATE]: "{}" => "{}",\n'.format(entry[0], entry[1])) + elif "_round_mask" in entry[1]: + out.write(' // [INVALID CONVERSION]: "{}" => "{}",\n'.format(entry[0], entry[1])) else: out.write(' "{}" => "{}",\n'.format(entry[0], entry[1])) out.write(' _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),\n') @@ -219,7 +210,7 @@ def main(): "llvm-project", llvm_path, "https://github.com/llvm/llvm-project", - sub_path="llvm/include/llvm/IR", + sub_paths=["llvm/include/llvm/IR", "llvm/include/llvm/CodeGen/"], ) clone_repository( "llvmint", diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 253c2ca7c768e..509cb0fef565e 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -640,9 +640,6 @@ extern "C" { pub struct Builder<'a>(InvariantOpaque<'a>); #[repr(C)] pub struct PassManager<'a>(InvariantOpaque<'a>); -extern "C" { - pub type PassManagerBuilder; -} extern "C" { pub type Pass; } diff --git a/compiler/rustc_hir_analysis/locales/en-US.ftl b/compiler/rustc_hir_analysis/locales/en-US.ftl index 1d313945b529c..50e857ef60d1d 100644 --- a/compiler/rustc_hir_analysis/locales/en-US.ftl +++ b/compiler/rustc_hir_analysis/locales/en-US.ftl @@ -155,3 +155,11 @@ hir_analysis_cannot_capture_late_bound_ty_in_anon_const = hir_analysis_cannot_capture_late_bound_const_in_anon_const = cannot capture late-bound const parameter in a constant .label = parameter defined here + +hir_analysis_variances_of = {$variances_of} + +hir_analysis_pass_to_variadic_function = can't pass `{$ty}` to variadic function + .suggestion = cast the value to `{$cast_ty}` + .help = cast the value to `{$cast_ty}` + +hir_analysis_cast_thin_pointer_to_fat_pointer = cannot cast thin pointer `{$expr_ty}` to fat pointer `{$cast_ty}` diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs index 3e0692757754f..74fec93d91e1e 100644 --- a/compiler/rustc_hir_analysis/src/errors.rs +++ b/compiler/rustc_hir_analysis/src/errors.rs @@ -399,3 +399,34 @@ pub(crate) enum CannotCaptureLateBoundInAnonConst { def_span: Span, }, } + +#[derive(Diagnostic)] +#[diag(hir_analysis_variances_of)] +pub(crate) struct VariancesOf { + #[primary_span] + pub span: Span, + pub variances_of: String, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_pass_to_variadic_function, code = "E0617")] +pub(crate) struct PassToVariadicFunction<'tcx, 'a> { + #[primary_span] + pub span: Span, + pub ty: Ty<'tcx>, + pub cast_ty: &'a str, + #[suggestion(code = "{replace}", applicability = "machine-applicable")] + pub sugg_span: Option, + pub replace: String, + #[help] + pub help: 
Option<()>, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_cast_thin_pointer_to_fat_pointer, code = "E0607")] +pub(crate) struct CastThinPointerToFatPointer<'tcx> { + #[primary_span] + pub span: Span, + pub expr_ty: Ty<'tcx>, + pub cast_ty: String, +} diff --git a/compiler/rustc_hir_analysis/src/structured_errors/missing_cast_for_variadic_arg.rs b/compiler/rustc_hir_analysis/src/structured_errors/missing_cast_for_variadic_arg.rs index 089491bef5ea5..0bfbf99cb0b58 100644 --- a/compiler/rustc_hir_analysis/src/structured_errors/missing_cast_for_variadic_arg.rs +++ b/compiler/rustc_hir_analysis/src/structured_errors/missing_cast_for_variadic_arg.rs @@ -1,5 +1,5 @@ -use crate::structured_errors::StructuredDiagnostic; -use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticId, ErrorGuaranteed}; +use crate::{errors, structured_errors::StructuredDiagnostic}; +use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorGuaranteed}; use rustc_middle::ty::{Ty, TypeVisitableExt}; use rustc_session::Session; use rustc_span::Span; @@ -21,27 +21,26 @@ impl<'tcx> StructuredDiagnostic<'tcx> for MissingCastForVariadicArg<'tcx, '_> { } fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - let mut err = self.sess.struct_span_err_with_code( - self.span, - &format!("can't pass `{}` to variadic function", self.ty), - self.code(), - ); + let (sugg_span, replace, help) = + if let Ok(snippet) = self.sess.source_map().span_to_snippet(self.span) { + (Some(self.span), format!("{} as {}", snippet, self.cast_ty), None) + } else { + (None, "".to_string(), Some(())) + }; + + let mut err = self.sess.create_err(errors::PassToVariadicFunction { + span: self.span, + ty: self.ty, + cast_ty: self.cast_ty, + help, + replace, + sugg_span, + }); if self.ty.references_error() { err.downgrade_to_delayed_bug(); } - if let Ok(snippet) = self.sess.source_map().span_to_snippet(self.span) { - err.span_suggestion( - self.span, - &format!("cast the value to `{}`", self.cast_ty), - format!("{} as {}", snippet, self.cast_ty), - Applicability::MachineApplicable, - ); - } else { - err.help(&format!("cast the value to `{}`", self.cast_ty)); - } - err } diff --git a/compiler/rustc_hir_analysis/src/structured_errors/sized_unsized_cast.rs b/compiler/rustc_hir_analysis/src/structured_errors/sized_unsized_cast.rs index 3b9fb367813d8..910417abe6e7c 100644 --- a/compiler/rustc_hir_analysis/src/structured_errors/sized_unsized_cast.rs +++ b/compiler/rustc_hir_analysis/src/structured_errors/sized_unsized_cast.rs @@ -1,4 +1,4 @@ -use crate::structured_errors::StructuredDiagnostic; +use crate::{errors, structured_errors::StructuredDiagnostic}; use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorGuaranteed}; use rustc_middle::ty::{Ty, TypeVisitableExt}; use rustc_session::Session; @@ -21,14 +21,11 @@ impl<'tcx> StructuredDiagnostic<'tcx> for SizedUnsizedCast<'tcx> { } fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> { - let mut err = self.sess.struct_span_err_with_code( - self.span, - &format!( - "cannot cast thin pointer `{}` to fat pointer `{}`", - self.expr_ty, self.cast_ty - ), - self.code(), - ); + let mut err = self.sess.create_err(errors::CastThinPointerToFatPointer { + span: self.span, + expr_ty: self.expr_ty, + cast_ty: self.cast_ty.to_owned(), + }); if self.expr_ty.references_error() { err.downgrade_to_delayed_bug(); diff --git a/compiler/rustc_hir_analysis/src/variance/test.rs b/compiler/rustc_hir_analysis/src/variance/test.rs index 5feeb92d33782..64614831f560f 100644 --- 
a/compiler/rustc_hir_analysis/src/variance/test.rs +++ b/compiler/rustc_hir_analysis/src/variance/test.rs @@ -1,6 +1,8 @@ use rustc_middle::ty::TyCtxt; use rustc_span::symbol::sym; +use crate::errors; + pub fn test_variance(tcx: TyCtxt<'_>) { // For unit testing: check for a special "rustc_variance" // attribute and report an error with various results if found. @@ -8,7 +10,10 @@ pub fn test_variance(tcx: TyCtxt<'_>) { if tcx.has_attr(id.owner_id.to_def_id(), sym::rustc_variance) { let variances_of = tcx.variances_of(id.owner_id); - tcx.sess.struct_span_err(tcx.def_span(id.owner_id), format!("{variances_of:?}")).emit(); + tcx.sess.emit_err(errors::VariancesOf { + span: tcx.def_span(id.owner_id), + variances_of: format!("{variances_of:?}"), + }); } } } diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index 4761ce83fabf1..e604e44a7157b 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -26,7 +26,6 @@ #endif #include "llvm/Support/Host.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Transforms/IPO/PassManagerBuilder.h" #include "llvm/Transforms/IPO/AlwaysInliner.h" #include "llvm/Transforms/IPO/FunctionImport.h" #include "llvm/Transforms/IPO/Internalize.h" @@ -35,7 +34,6 @@ #include "llvm/Transforms/Utils/FunctionImportUtils.h" #include "llvm/LTO/LTO.h" #include "llvm/Bitcode/BitcodeWriter.h" -#include "llvm-c/Transforms/PassManagerBuilder.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation/AddressSanitizer.h" diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs index 18159778a8e39..70c9de91fbfef 100644 --- a/compiler/rustc_ty_utils/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -142,12 +142,14 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> { && tcx.associated_item(def_id).container == ty::AssocItemContainer::TraitContainer { let sig = tcx.fn_sig(def_id).subst_identity(); - sig.visit_with(&mut ImplTraitInTraitFinder { + // We accounted for the binder of the fn sig, so skip the binder. + sig.skip_binder().visit_with(&mut ImplTraitInTraitFinder { tcx, fn_def_id: def_id, bound_vars: sig.bound_vars(), predicates: &mut predicates, seen: FxHashSet::default(), + depth: ty::INNERMOST, }); } @@ -244,15 +246,36 @@ struct ImplTraitInTraitFinder<'a, 'tcx> { fn_def_id: DefId, bound_vars: &'tcx ty::List, seen: FxHashSet, + depth: ty::DebruijnIndex, } impl<'tcx> TypeVisitor> for ImplTraitInTraitFinder<'_, 'tcx> { + fn visit_binder>>( + &mut self, + binder: &ty::Binder<'tcx, T>, + ) -> std::ops::ControlFlow { + self.depth.shift_in(1); + let binder = binder.super_visit_with(self); + self.depth.shift_out(1); + binder + } + fn visit_ty(&mut self, ty: Ty<'tcx>) -> std::ops::ControlFlow { if let ty::Alias(ty::Projection, alias_ty) = *ty.kind() && self.tcx.def_kind(alias_ty.def_id) == DefKind::ImplTraitPlaceholder && self.tcx.impl_trait_in_trait_parent(alias_ty.def_id) == self.fn_def_id && self.seen.insert(alias_ty.def_id) { + // We have entered some binders as we've walked into the + // bounds of the RPITIT. Shift these binders back out when + // constructing the top-level projection predicate. 
diff --git a/config.toml.example b/config.toml.example
index a12c4e0700c88..dee0d8f254b63 100644
--- a/config.toml.example
+++ b/config.toml.example
@@ -659,6 +659,10 @@ changelog-seen = 2
 # Build compiler with the optimization enabled and -Zvalidate-mir, currently only for `std`
 #validate-mir-opts = 3
+# Copy the linker, DLLs, and various libraries from MinGW into the rustc toolchain.
+# Only applies when the host or target is pc-windows-gnu.
+#include-mingw-linker = true
+
 # =============================================================================
 # Options for specific targets
 #
diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs
index b33fc02f49c24..3d48db8660a26 100644
--- a/src/bootstrap/builder.rs
+++ b/src/bootstrap/builder.rs
@@ -910,14 +910,16 @@ impl<'a> Builder<'a> {
     /// new artifacts, it can't be used to rely on the presence of a particular
     /// sysroot.
     ///
-    /// See `force_use_stage1` for documentation on what each argument is.
+    /// See `force_use_stage1` and `force_use_stage2` for documentation on what each argument is.
     pub fn compiler_for(
         &self,
         stage: u32,
         host: TargetSelection,
         target: TargetSelection,
     ) -> Compiler {
-        if self.build.force_use_stage1(Compiler { stage, host }, target) {
+        if self.build.force_use_stage2() {
+            self.compiler(2, self.config.build)
+        } else if self.build.force_use_stage1(Compiler { stage, host }, target) {
             self.compiler(1, self.config.build)
         } else {
             self.compiler(stage, host)
diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs
index 6e64bc20d2079..4f417d36511ba 100644
--- a/src/bootstrap/config.rs
+++ b/src/bootstrap/config.rs
@@ -190,6 +190,7 @@ pub struct Config {
     pub dist_sign_folder: Option<PathBuf>,
     pub dist_upload_addr: Option<String>,
     pub dist_compression_formats: Option<Vec<String>>,
+    pub dist_include_mingw_linker: bool,
     // libstd features
     pub backtrace: bool, // support for RUST_BACKTRACE
@@ -700,6 +701,7 @@ define_config! {
         src_tarball: Option<bool> = "src-tarball",
         missing_tools: Option<bool> = "missing-tools",
         compression_formats: Option<Vec<String>> = "compression-formats",
+        include_mingw_linker: Option<bool> = "include-mingw-linker",
     }
 }
@@ -816,6 +818,7 @@ impl Config {
         config.rust_codegen_backends = vec![INTERNER.intern_str("llvm")];
         config.deny_warnings = true;
         config.bindir = "bin".into();
+        config.dist_include_mingw_linker = true;
         // set by build.rs
         config.build = TargetSelection::from_user(&env!("BUILD_TRIPLE"));
@@ -1299,6 +1302,7 @@ impl Config {
             config.dist_compression_formats = t.compression_formats;
             set(&mut config.rust_dist_src, t.src_tarball);
             set(&mut config.missing_tools, t.missing_tools);
+            set(&mut config.dist_include_mingw_linker, t.include_mingw_linker)
         }
         if let Some(r) = build.rustfmt {
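The new `include-mingw-linker` option follows bootstrap's usual default-plus-override pattern: `dist_include_mingw_linker` starts out `true` and is only overwritten when the key is actually present in `config.toml`. A stripped-down model of that flow (the `set` helper mirrors the one bootstrap already uses; the one-field `Config` is a simplification):

```rust
// Simplified sketch of the default-plus-override flow for the new option.
struct Config {
    dist_include_mingw_linker: bool,
}

// Overwrite the field only when the TOML key was present.
fn set<T>(field: &mut T, val: Option<T>) {
    if let Some(v) = val {
        *field = v;
    }
}

fn main() {
    // Default set up front, as in the `impl Config` hunk above.
    let mut config = Config { dist_include_mingw_linker: true };

    // No `include-mingw-linker` key in config.toml: keep the default.
    set(&mut config.dist_include_mingw_linker, None);
    assert!(config.dist_include_mingw_linker);

    // `include-mingw-linker = false`: skip bundling the MinGW linker/DLLs.
    set(&mut config.dist_include_mingw_linker, Some(false));
    assert!(!config.dist_include_mingw_linker);
}
```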
diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs
index d7008df41791e..c9384004100b2 100644
--- a/src/bootstrap/dist.rs
+++ b/src/bootstrap/dist.rs
@@ -324,7 +324,7 @@ impl Step for Mingw {
     /// without any extra installed software (e.g., we bundle gcc, libraries, etc).
     fn run(self, builder: &Builder<'_>) -> Option<GeneratedTarball> {
         let host = self.host;
-        if !host.ends_with("pc-windows-gnu") {
+        if !host.ends_with("pc-windows-gnu") || !builder.config.dist_include_mingw_linker {
             return None;
         }
@@ -380,7 +380,7 @@ impl Step for Rustc {
         // anything requiring us to distribute a license, but it's likely the
         // install will *also* include the rust-mingw package, which also needs
         // licenses, so to be safe we just include it here in all MinGW packages.
-        if host.ends_with("pc-windows-gnu") {
+        if host.ends_with("pc-windows-gnu") && builder.config.dist_include_mingw_linker {
             make_win_dist(tarball.image_dir(), &tmpdir(builder), host, builder);
             tarball.add_dir(builder.src.join("src/etc/third-party"), "share/doc");
         }
diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs
index f4abdf1cc5758..ebd42bcf678cb 100644
--- a/src/bootstrap/lib.rs
+++ b/src/bootstrap/lib.rs
@@ -1209,6 +1209,15 @@ impl Build {
             && (self.hosts.iter().any(|h| *h == target) || target == self.build)
     }
+    /// Checks whether the `compiler` compiling for `target` should be forced to
+    /// use a stage2 compiler instead.
+    ///
+    /// When we download the pre-compiled version of rustc it should be forced to
+    /// use a stage2 compiler.
+    fn force_use_stage2(&self) -> bool {
+        self.config.download_rustc()
+    }
+
     /// Given `num` in the form "a.b.c" return a "release string" which
     /// describes the release version number.
     ///
diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs
index 41c23ff86b25e..a127875b55d5f 100644
--- a/src/tools/compiletest/src/runtest.rs
+++ b/src/tools/compiletest/src/runtest.rs
@@ -983,7 +983,12 @@ impl<'test> TestCx<'test> {
             &["-quiet".as_ref(), "-batch".as_ref(), "-nx".as_ref(), &debugger_script];
         let mut gdb = Command::new(self.config.gdb.as_ref().unwrap());
-        gdb.args(debugger_opts).env("PYTHONPATH", rust_pp_module_abs_path);
+        let pythonpath = if let Ok(pp) = std::env::var("PYTHONPATH") {
+            format!("{pp}:{rust_pp_module_abs_path}")
+        } else {
+            rust_pp_module_abs_path
+        };
+        gdb.args(debugger_opts).env("PYTHONPATH", pythonpath);
         debugger_run_result =
             self.compose_and_run(gdb, self.config.run_lib_path.to_str().unwrap(), None, None);
@@ -1149,13 +1154,18 @@ impl<'test> TestCx<'test> {
     ) -> ProcRes {
         // Prepare the lldb_batchmode which executes the debugger script
         let lldb_script_path = rust_src_root.join("src/etc/lldb_batchmode.py");
+        let pythonpath = if let Ok(pp) = std::env::var("PYTHONPATH") {
+            format!("{pp}:{}", self.config.lldb_python_dir.as_ref().unwrap())
+        } else {
+            self.config.lldb_python_dir.as_ref().unwrap().to_string()
+        };
         self.cmd2procres(
             Command::new(&self.config.python)
                 .arg(&lldb_script_path)
                 .arg(test_executable)
                 .arg(debugger_script)
                 .env("PYTHONUNBUFFERED", "1") // Help debugging #78665
-                .env("PYTHONPATH", self.config.lldb_python_dir.as_ref().unwrap()),
+                .env("PYTHONPATH", pythonpath),
         )
     }
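Both compiletest hunks apply the same fix: instead of overwriting `PYTHONPATH`, the debugger helper path is appended to whatever the environment already provides. The join logic, pulled out into a standalone helper for illustration (hypothetical name; the `:` separator matches the Unix-only code paths above):

```rust
// Sketch of the PYTHONPATH handling used for the gdb and lldb runs above.
fn join_pythonpath(existing: Option<String>, extra: &str) -> String {
    match existing {
        // Keep the caller's PYTHONPATH and append our path after a ':'.
        Some(pp) => format!("{pp}:{extra}"),
        // Nothing was set: just use our path.
        None => extra.to_string(),
    }
}

fn main() {
    // In compiletest, `existing` comes from std::env::var("PYTHONPATH").
    assert_eq!(
        join_pythonpath(Some("/opt/site-packages".into()), "/rust/src/etc"),
        "/opt/site-packages:/rust/src/etc"
    );
    assert_eq!(join_pythonpath(None, "/rust/src/etc"), "/rust/src/etc");
}
```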
diff --git a/tests/ui/impl-trait/in-trait/default-method-binder-shifting.rs b/tests/ui/impl-trait/in-trait/default-method-binder-shifting.rs
new file mode 100644
index 0000000000000..5cf90c5d93c24
--- /dev/null
+++ b/tests/ui/impl-trait/in-trait/default-method-binder-shifting.rs
@@ -0,0 +1,14 @@
+// check-pass
+
+#![feature(return_position_impl_trait_in_trait)]
+//~^ WARN the feature `return_position_impl_trait_in_trait` is incomplete
+
+trait Trait {
+    type Type;
+
+    // Check that we're adjusting bound vars correctly when installing the default
+    // method projection assumptions.
+    fn method(&self) -> impl Trait;
+}
+
+fn main() {}
diff --git a/tests/ui/impl-trait/in-trait/default-method-binder-shifting.stderr b/tests/ui/impl-trait/in-trait/default-method-binder-shifting.stderr
new file mode 100644
index 0000000000000..7c7ebcdb7e717
--- /dev/null
+++ b/tests/ui/impl-trait/in-trait/default-method-binder-shifting.stderr
@@ -0,0 +1,11 @@
+warning: the feature `return_position_impl_trait_in_trait` is incomplete and may not be safe to use and/or cause compiler crashes
+  --> $DIR/default-method-binder-shifting.rs:3:12
+   |
+LL | #![feature(return_position_impl_trait_in_trait)]
+   |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |
+   = note: see issue #91611 for more information
+   = note: `#[warn(incomplete_features)]` on by default
+
+warning: 1 warning emitted
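The new UI test only declares the trait whose RPITIT signature exercises the binder shifting above. For context, a hypothetical user-side program on a nightly toolchain with the feature enabled (not part of the patch) could look like this:

```rust
// Hypothetical illustration of the trait from the check-pass test in use.
#![feature(return_position_impl_trait_in_trait)]
#![allow(incomplete_features)]

trait Trait {
    type Type;

    fn method(&self) -> impl Trait;
}

impl Trait for u32 {
    type Type = ();

    // The opaque return type only needs to implement `Trait` itself;
    // `u32` does, so we can simply return another `u32`.
    fn method(&self) -> impl Trait {
        *self
    }
}

fn main() {
    // Chaining works because the returned opaque type implements `Trait` too.
    let _chained = 1u32.method().method();
}
```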