Commit ff223d3

Auto merge of #141309 - RalfJung:x86-simd-abi, r=tgross35,nikic,workingjubilee
x86 (32/64): go back to passing SIMD vectors by-ptr

Fixes #139029 by partially reverting #135408 and going back to passing SIMD vectors by-ptr on x86. Sadly, by-val confuses the LLVM inliner so much that it's not worth it... Also fixes #141848 by no longer actually using vector registers with the "Rust" ABI.

r? `@tgross35`
Cc `@nikic`

try-job: `test-various*`
try-job: dist-i586-gnu-i586-i686-musl
try-job: x86_64-gnu-nopt
try-job: `x86_64-msvc*`
try-job: `i686-msvc*`
2 parents 4b27a04 + f01bbfc commit ff223d3
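
To see the headline change outside the test suite, here is a stand-alone sketch (not part of this PR; it assumes an x86_64 host and uses the stable `__m128` type) that can be compiled with `rustc --crate-type=lib --emit=llvm-ir -O`. With this commit, the emitted Rust-ABI signature should use pointer parameters (an `sret` out-pointer plus a by-ref argument, as the updated tests below check) rather than `<4 x float>` values:

    use std::arch::x86_64::__m128;

    // With this commit, the Rust-ABI lowering of this function should pass the
    // argument by pointer and return the result through an `sret` out-pointer,
    // rather than using `<4 x float>` directly.
    #[no_mangle]
    pub fn pass_through(x: __m128) -> __m128 {
        x
    }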

File tree

5 files changed (+34, -51 lines)


compiler/rustc_monomorphize/src/mono_checks/abi_check.rs

Lines changed: 12 additions & 5 deletions
@@ -35,8 +35,6 @@ fn do_check_simd_vector_abi<'tcx>(
     is_call: bool,
     loc: impl Fn() -> (Span, HirId),
 ) {
-    // We check this on all functions, including those using the "Rust" ABI.
-    // For the "Rust" ABI it would be a bug if the lint ever triggered, but better safe than sorry.
     let feature_def = tcx.sess.target.features_for_correct_vector_abi();
     let codegen_attrs = tcx.codegen_fn_attrs(def_id);
     let have_feature = |feat: Symbol| {
@@ -123,8 +121,9 @@ fn do_check_wasm_abi<'tcx>(
     is_call: bool,
     loc: impl Fn() -> (Span, HirId),
 ) {
-    // Only proceed for `extern "C" fn` on wasm32-unknown-unknown (same check as what `adjust_for_foreign_abi` uses to call `compute_wasm_abi_info`),
-    // and only proceed if `wasm_c_abi_opt` indicates we should emit the lint.
+    // Only proceed for `extern "C" fn` on wasm32-unknown-unknown (same check as what
+    // `adjust_for_foreign_abi` uses to call `compute_wasm_abi_info`), and only proceed if
+    // `wasm_c_abi_opt` indicates we should emit the lint.
     if !(tcx.sess.target.arch == "wasm32"
         && tcx.sess.target.os == "unknown"
         && tcx.wasm_c_abi_opt() == WasmCAbi::Legacy { with_lint: true }
@@ -157,8 +156,15 @@ fn check_instance_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) {
     else {
         // An error will be reported during codegen if we cannot determine the ABI of this
         // function.
+        tcx.dcx().delayed_bug("ABI computation failure should lead to compilation failure");
         return;
     };
+    // Unlike the call-site check, we do also check "Rust" ABI functions here.
+    // This should never trigger, *except* if we start making use of vector registers
+    // for the "Rust" ABI and the user disables those vector registers (which should trigger a
+    // warning as that's clearly disabling a "required" target feature for this target).
+    // Using such a function is where disabling the vector register actually can start leading
+    // to soundness issues, so erroring here seems good.
     let loc = || {
         let def_id = instance.def_id();
         (
@@ -179,7 +185,8 @@ fn check_call_site_abi<'tcx>(
     loc: impl Fn() -> (Span, HirId) + Copy,
 ) {
     if callee.fn_sig(tcx).abi().is_rustic_abi() {
-        // we directly handle the soundness of Rust ABIs
+        // We directly handle the soundness of Rust ABIs -- so let's skip the majority of
+        // call sites to avoid a perf regression.
         return;
     }
     let typing_env = ty::TypingEnv::fully_monomorphized();
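
For context, the situation the instance-level check guards against looks roughly like the sketch below (an illustration, not code from this PR; it needs a nightly compiler for `#![feature(repr_simd)]`, and the 256-bit vector type is hypothetical). Depending on the compiler version, such a definition is reported as a future-incompatibility warning or rejected outright when the required target feature is missing:

    // Build with a nightly rustc on x86_64: rustc --crate-type=lib vector_abi.rs
    #![feature(repr_simd)]

    // Hypothetical 256-bit vector type, for illustration only.
    #[repr(simd)]
    pub struct F32x8([f32; 8]);

    // Passing `F32x8` by value over the "C" ABI needs AVX registers. If AVX is
    // not enabled (the plain x86_64 baseline does not include it), this is the
    // kind of signature the vector-ABI check flags.
    pub extern "C" fn takes_vector(v: F32x8) -> F32x8 {
        v
    }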

compiler/rustc_target/src/callconv/mod.rs

Lines changed: 5 additions & 22 deletions
@@ -7,7 +7,7 @@ use rustc_abi::{
 use rustc_macros::HashStable_Generic;
 
 pub use crate::spec::AbiMap;
-use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, RustcAbi, WasmCAbi};
+use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
 
 mod aarch64;
 mod amdgpu;
@@ -696,24 +696,6 @@ impl<'a, Ty> FnAbi<'a, Ty> {
             _ => {}
         };
 
-        // Decides whether we can pass the given SIMD argument via `PassMode::Direct`.
-        // May only return `true` if the target will always pass those arguments the same way,
-        // no matter what the user does with `-Ctarget-feature`! In other words, whatever
-        // target features are required to pass a SIMD value in registers must be listed in
-        // the `abi_required_features` for the current target and ABI.
-        let can_pass_simd_directly = |arg: &ArgAbi<'_, Ty>| match &*spec.arch {
-            // On x86, if we have SSE2 (which we have by default for x86_64), we can always pass up
-            // to 128-bit-sized vectors.
-            "x86" if spec.rustc_abi == Some(RustcAbi::X86Sse2) => arg.layout.size.bits() <= 128,
-            "x86_64" if spec.rustc_abi != Some(RustcAbi::X86Softfloat) => {
-                // FIXME once https://github.com/bytecodealliance/wasmtime/issues/10254 is fixed
-                // accept vectors up to 128bit rather than vectors of exactly 128bit.
-                arg.layout.size.bits() == 128
-            }
-            // So far, we haven't implemented this logic for any other target.
-            _ => false,
-        };
-
         for (arg_idx, arg) in self
             .args
             .iter_mut()
@@ -813,9 +795,10 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 // target feature sets. Some more information about this
                 // issue can be found in #44367.
                 //
-                // Note that the intrinsic ABI is exempt here as those are not
-                // real functions anyway, and the backend expects very specific types.
-                if spec.simd_types_indirect && !can_pass_simd_directly(arg) {
+                // We *could* do better in some cases, e.g. on x86_64 targets where SSE2 is
+                // required. However, it turns out that that makes LLVM worse at optimizing this
+                // code, so we pass things indirectly even there. See #139029 for more on that.
+                if spec.simd_types_indirect {
                     arg.make_indirect();
                 }
            }
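
Note that the removed fast path only affected the adjustment for the unstable "Rust" ABI; explicit foreign ABIs keep going through the per-architecture lowering. A small sketch of the contrast (my own example, assuming an x86_64 SysV target such as x86_64-unknown-linux-gnu, compiled like the earlier sketch):

    use std::arch::x86_64::__m128;

    // Rust ABI: after this change the vector is always passed and returned
    // indirectly (by pointer), even though SSE2 is an x86_64 baseline feature.
    #[no_mangle]
    pub fn rust_abi(x: __m128) -> __m128 {
        x
    }

    // "C" ABI: still lowered by the x86_64 SysV classification, which places a
    // 128-bit vector in an SSE register, so `<4 x float>` should remain in the
    // IR for this one.
    #[no_mangle]
    pub extern "C" fn c_abi(x: __m128) -> __m128 {
        x
    }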

tests/codegen/abi-x86-sse.rs

Lines changed: 3 additions & 2 deletions
@@ -27,8 +27,9 @@ trait Copy {}
 #[repr(simd)]
 pub struct Sse([f32; 4]);
 
-// x86-64: <4 x float> @sse_id(<4 x float> {{[^,]*}})
-// x86-32: <4 x float> @sse_id(<4 x float> {{[^,]*}})
+// FIXME: due to #139029 we are passing them all indirectly.
+// x86-64: void @sse_id(ptr{{( [^,]*)?}} sret([16 x i8]){{( .*)?}}, ptr{{( [^,]*)?}})
+// x86-32: void @sse_id(ptr{{( [^,]*)?}} sret([16 x i8]){{( .*)?}}, ptr{{( [^,]*)?}})
 // x86-32-nosse: void @sse_id(ptr{{( [^,]*)?}} sret([16 x i8]){{( .*)?}}, ptr{{( [^,]*)?}})
 #[no_mangle]
 pub fn sse_id(x: Sse) -> Sse {

tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs

Lines changed: 5 additions & 15 deletions
@@ -1,14 +1,8 @@
 //
 //@ compile-flags: -C no-prepopulate-passes
-// LLVM IR isn't very portable and the one tested here depends on the ABI
-// which is different between x86 (where we use SSE registers) and others.
-// `x86-64` and `x86-32-sse2` are identical, but compiletest does not support
-// taking the union of multiple `only` annotations.
-//@ revisions: x86-64 x86-32-sse2 other
-//@[x86-64] only-x86_64
-//@[x86-32-sse2] only-rustc_abi-x86-sse2
-//@[other] ignore-rustc_abi-x86-sse2
-//@[other] ignore-x86_64
+// 32bit MSVC does not align things properly so we suppress high alignment annotations (#112480)
+//@ ignore-i686-pc-windows-msvc
+//@ ignore-i686-pc-windows-gnu
 
 #![crate_type = "lib"]
 #![allow(non_camel_case_types)]
@@ -47,9 +41,7 @@ pub fn build_array_s(x: [f32; 4]) -> S<4> {
 #[no_mangle]
 pub fn build_array_transmute_s(x: [f32; 4]) -> S<4> {
     // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
-    // x86-32: ret <4 x float> %[[VAL:.+]]
-    // x86-64: ret <4 x float> %[[VAL:.+]]
-    // other: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }
 
@@ -64,8 +56,6 @@ pub fn build_array_t(x: [f32; 4]) -> T {
 #[no_mangle]
 pub fn build_array_transmute_t(x: [f32; 4]) -> T {
     // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
-    // x86-32: ret <4 x float> %[[VAL:.+]]
-    // x86-64: ret <4 x float> %[[VAL:.+]]
-    // other: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }
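
A stand-alone analogue of the operation this test exercises (my own example, using the stable `__m128` type instead of the test's local `#[repr(simd)]` wrappers):

    use std::arch::x86_64::__m128;
    use std::mem::transmute;

    // `[f32; 4]` and `__m128` are both 16 bytes, so a by-value transmute is
    // size-compatible; the CHECK lines above verify that the loaded vector is
    // then written back through the return pointer.
    fn array_to_vec(x: [f32; 4]) -> __m128 {
        unsafe { transmute(x) }
    }

    fn main() {
        let v = array_to_vec([1.0, 2.0, 3.0, 4.0]);
        let roundtrip: [f32; 4] = unsafe { transmute(v) };
        assert_eq!(roundtrip, [1.0, 2.0, 3.0, 4.0]);
    }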

tests/codegen/simd/packed-simd.rs

Lines changed: 9 additions & 7 deletions
@@ -30,16 +30,18 @@ fn load<T, const N: usize>(v: PackedSimd<T, N>) -> FullSimd<T, N> {
     }
 }
 
-// CHECK-LABEL: define <3 x float> @square_packed_full(ptr{{[a-z_ ]*}} align 4 {{[^,]*}})
+// CHECK-LABEL: square_packed_full
+// CHECK-SAME: ptr{{[a-z_ ]*}} sret([[RET_TYPE:[^)]+]]) [[RET_ALIGN:align (8|16)]]{{[^%]*}} [[RET_VREG:%[_0-9]*]]
+// CHECK-SAME: ptr{{[a-z_ ]*}} align 4
 #[no_mangle]
 pub fn square_packed_full(x: PackedSimd<f32, 3>) -> FullSimd<f32, 3> {
-    // The unoptimized version of this is not very interesting to check
-    // since `load` does not get inlined.
-    // opt3-NEXT: start:
-    // opt3-NEXT: load <3 x float>
+    // CHECK-NEXT: start
+    // noopt: alloca [[RET_TYPE]], [[RET_ALIGN]]
+    // CHECK: load <3 x float>
     let x = load(x);
-    // opt3-NEXT: [[VREG:%[a-z0-9_]+]] = fmul <3 x float>
-    // opt3-NEXT: ret <3 x float> [[VREG:%[a-z0-9_]+]]
+    // CHECK: [[VREG:%[a-z0-9_]+]] = fmul <3 x float>
+    // CHECK-NEXT: store <3 x float> [[VREG]], ptr [[RET_VREG]], [[RET_ALIGN]]
+    // CHECK-NEXT: ret void
     unsafe { intrinsics::simd_mul(x, x) }
 }
 
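
The `align 4` argument pointer and the `align (8|16)` `sret` return pointer in the new CHECK lines come from the different layouts of the two SIMD wrappers. A nightly-only sketch mirroring the test's type shapes (the printed values are what one would typically see on x86_64):

    #![feature(repr_simd)]

    #[repr(simd, packed)]
    struct PackedSimd<T, const N: usize>([T; N]);

    #[repr(simd)]
    struct FullSimd<T, const N: usize>([T; N]);

    fn main() {
        // Packed SIMD keeps element alignment, hence the `align 4` argument pointer.
        println!("packed: {}", std::mem::align_of::<PackedSimd<f32, 3>>());
        // Full SIMD gets vector alignment, which is why the return value goes
        // through a separately aligned `sret` pointer.
        println!("full:   {}", std::mem::align_of::<FullSimd<f32, 3>>());
    }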
