@@ -2,12 +2,12 @@ use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir};
+use rustc_middle::{bug, mir, span_bug};
 use rustc_session::config::OptLevel;
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandRefBuilder, OperandValue};
-use super::place::{PlaceRef, codegen_tag_value};
+use super::place::{PlaceRef, PlaceValue, codegen_tag_value};
 use super::{FunctionCx, LocalRef};
 use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
@@ -229,6 +229,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
     ) -> OperandValue<Bx::Value> {
+        if let abi::BackendRepr::Memory { .. } = cast.backend_repr
+            && !cast.is_zst()
+        {
+            span_bug!(self.mir.span, "Use `codegen_transmute` to transmute to {cast:?}");
+        }
+
+        // `Layout` is interned, so we can do a cheap check for things that are
+        // exactly the same and thus don't need any handling.
+        if abi::Layout::eq(&operand.layout.layout, &cast.layout) {
+            return operand.val;
+        }
+
         // Check for transmutes that are always UB.
         if operand.layout.size != cast.size
             || operand.layout.is_uninhabited()
@@ -241,11 +253,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return OperandValue::poison(bx, cast);
         }
 
+        let cx = bx.cx();
         match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
             _ if cast.is_zst() => OperandValue::ZeroSized,
-            (_, _, abi::BackendRepr::Memory { .. }) => {
-                bug!("Cannot `codegen_transmute_operand` to non-ZST memory-ABI output {cast:?}");
-            }
             (OperandValue::Ref(source_place_val), abi::BackendRepr::Memory { .. }, _) => {
                 assert_eq!(source_place_val.llextra, None);
                 // The existing alignment is part of `source_place_val`,
@@ -256,16 +266,38 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandValue::Immediate(imm),
                 abi::BackendRepr::Scalar(from_scalar),
                 abi::BackendRepr::Scalar(to_scalar),
-            ) => OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar)),
+            ) if from_scalar.size(cx) == to_scalar.size(cx) => {
+                OperandValue::Immediate(transmute_scalar(bx, imm, from_scalar, to_scalar))
+            }
             (
                 OperandValue::Pair(imm_a, imm_b),
                 abi::BackendRepr::ScalarPair(in_a, in_b),
                 abi::BackendRepr::ScalarPair(out_a, out_b),
-            ) => OperandValue::Pair(
-                transmute_scalar(bx, imm_a, in_a, out_a),
-                transmute_scalar(bx, imm_b, in_b, out_b),
-            ),
-            _ => bug!("Cannot `codegen_transmute_operand` {operand:?} to {cast:?}"),
+            ) if in_a.size(cx) == out_a.size(cx) && in_b.size(cx) == out_b.size(cx) => {
+                OperandValue::Pair(
+                    transmute_scalar(bx, imm_a, in_a, out_a),
+                    transmute_scalar(bx, imm_b, in_b, out_b),
+                )
+            }
+            _ => {
+                // For any other potentially-tricky cases, make a temporary instead.
+                // If anything else wants the target local to be in memory this won't
+                // be hit, as `codegen_transmute` will get called directly. Thus this
+                // is only for places where everything else wants the operand form,
+                // and thus it's not worth making those places get it from memory.
+                //
+                // Notably, Scalar ⇌ ScalarPair cases go here to avoid padding
+                // and endianness issues, as do SimdVector ones to avoid worrying
+                // about things like f32x8 ⇌ ptrx4 that would need multiple steps.
+                let align = Ord::max(operand.layout.align.abi, cast.align.abi);
+                let size = Ord::max(operand.layout.size, cast.size);
+                let temp = PlaceValue::alloca(bx, size, align);
+                bx.lifetime_start(temp.llval, size);
+                operand.val.store(bx, temp.with_type(operand.layout));
+                let val = bx.load_operand(temp.with_type(cast)).val;
+                bx.lifetime_end(temp.llval, size);
+                val
+            }
         }
     }
 
@@ -949,37 +981,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// layout in this code when the right thing will happen anyway.
     pub(crate) fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
         match *rvalue {
-            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
-                let operand_ty = operand.ty(self.mir, self.cx.tcx());
-                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
-                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-                match (operand_layout.backend_repr, cast_layout.backend_repr) {
-                    // When the output will be in memory anyway, just use its place
-                    // (instead of the operand path) unless it's the trivial ZST case.
-                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
-
-                    // Otherwise (for a non-memory output) if the input is memory
-                    // then we can just read the value from the place.
-                    (abi::BackendRepr::Memory { .. }, _) => true,
-
-                    // When we have scalar immediates, we can only convert things
-                    // where the sizes match, to avoid endianness questions.
-                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
-                        a.size(self.cx) == b.size(self.cx),
-                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
-                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
-
-                    // Mixing Scalars and ScalarPairs can get quite complicated when
-                    // padding and undef get involved, so leave that to the memory path.
-                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
-                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
-
-                    // SIMD vectors aren't worth the trouble of dealing with complex
-                    // cases like from vectors of f32 to vectors of pointers or
-                    // from fat pointers to vectors of u16. (See #143194 #110021 ...)
-                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
-                }
-            }
             mir::Rvalue::Ref(..) |
             mir::Rvalue::CopyForDeref(..) |
             mir::Rvalue::RawPtr(..) |
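For context, here is a minimal user-level sketch (not part of this diff) of the Scalar ⇌ ScalarPair situation the new fallback arm's comment mentions: transmuting a `u64` (a `Scalar` layout) into `(u32, u32)` (a `ScalarPair` layout). The function name `split_halves` and the constant are purely illustrative.

```rust
use std::mem::transmute;

/// Illustrative only: a Scalar -> ScalarPair transmute, the kind of case
/// that the operand path now handles by spilling through a stack temporary
/// rather than attempting a register-level conversion.
fn split_halves(x: u64) -> (u32, u32) {
    // SAFETY: `u64` and `(u32, u32)` are both 8 bytes with no padding;
    // which half ends up in which field depends on the target's endianness.
    unsafe { transmute::<u64, (u32, u32)>(x) }
}

fn main() {
    let (a, b) = split_halves(0x1122_3344_5566_7788);
    println!("{a:#010x} {b:#010x}");
}
```

The endianness dependence visible here is the same padding/endianness concern the in-code comment cites as the reason these conversions go through memory instead of being rebuilt immediate-by-immediate.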