@@ -157,41 +157,35 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Shift ops can have an RHS with a different numeric type.
         if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
-            let size = u128::from(left_layout.size.bits());
-            // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
-            // zero-extended form). This matches the codegen backend:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
-            // The overflow check is also ignorant to the sign:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
-            // This would behave rather strangely if we had integer types of size 256: a shift by
-            // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
-            // shift by -1i16 though would be considered overflowing. If we had integers of size
-            // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
-            // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
-            // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
-            // consistent results for the same value represented at different bit widths.
-            assert!(size <= 128);
-            let original_r = r;
-            let overflow = r >= size;
-            // The shift offset is implicitly masked to the type size, to make sure this operation
-            // is always defined. This is the one MIR operator that does *not* directly map to a
-            // single LLVM operation. See
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
-            // for the corresponding truncation in our codegen backends.
-            let r = r % size;
-            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
+            let size = left_layout.size.bits();
+            // The shift offset is implicitly masked to the type size. (This is the one MIR operator
+            // that does *not* directly map to a single LLVM operation.) Compute how much we
+            // actually shift and whether there was an overflow due to shifting too much.
+            let (shift_amount, overflow) = if right_layout.abi.is_signed() {
+                let shift_amount = self.sign_extend(r, right_layout) as i128;
+                let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
+                let masked_amount = (shift_amount as u128) % u128::from(size);
+                debug_assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                (masked_amount, overflow)
+            } else {
+                let shift_amount = r;
+                let masked_amount = shift_amount % u128::from(size);
+                (masked_amount, shift_amount != masked_amount)
+            };
+            let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
+            // Compute the shifted result.
             let result = if left_layout.abi.is_signed() {
                 let l = self.sign_extend(l, left_layout) as i128;
                 let result = match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 };
                 result as u128
             } else {
                 match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 }
             };
@@ -200,7 +194,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                 throw_ub_custom!(
                     fluent::const_eval_overflow_shift,
-                    val = original_r,
+                    val = if right_layout.abi.is_signed() {
+                        (self.sign_extend(r, right_layout) as i128).to_string()
+                    } else {
+                        r.to_string()
+                    },
                     name = intrinsic_name
                 );
             }
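For illustration, the following is a minimal standalone sketch of the masking and overflow rule that the added lines implement: a signed shift amount is sign-extended first, a negative or too-large amount counts as overflow, and the amount actually applied is the value masked to the bit width of the left-hand type. This is not the interpreter code itself, and the helper names below are made up for the example.

```rust
// Sketch of the new shift-amount handling, assuming a 128-bit RHS value `r`
// and `size` = bit width of the LHS type (at most 128).
fn shift_amount_signed(r: i128, size: u32) -> (u32, bool) {
    // A negative or too-large signed shift amount always overflows...
    let overflow = r < 0 || r >= i128::from(size);
    // ...but the shift that is actually performed uses the amount masked to the type size.
    let masked = (r as u128) % u128::from(size);
    debug_assert_eq!(overflow, r != masked as i128);
    (u32::try_from(masked).unwrap(), overflow)
}

fn shift_amount_unsigned(r: u128, size: u32) -> (u32, bool) {
    // Unsigned RHS: overflow iff masking changed the value.
    let masked = r % u128::from(size);
    (u32::try_from(masked).unwrap(), r != masked)
}

fn main() {
    // Shifting a 32-bit value by -1: the masked amount is 31, but the operation
    // is still reported as overflowing because the amount is negative.
    assert_eq!(shift_amount_signed(-1, 32), (31, true));
    // Shifting by 3 is in range: shift by 3, no overflow.
    assert_eq!(shift_amount_signed(3, 32), (3, false));
    // Unsigned RHS of 40 on a 32-bit type: masks to 8 and counts as overflow.
    assert_eq!(shift_amount_unsigned(40, 32), (8, true));
}
```

Compared to the removed code, which zero-extended the RHS and then checked `r >= size`, this makes negative shift amounts overflow consistently regardless of the RHS bit width, and the overflow message now reports the signed value the user actually wrote.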