@@ -64,6 +64,7 @@ impl AArch64InlineAsmRegClass {
                 neon: I8, I16, I32, I64, F16, F32, F64, F128,
                     VecI8(8), VecI16(4), VecI32(2), VecI64(1), VecF16(4), VecF32(2), VecF64(1),
                     VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF16(8), VecF32(4), VecF64(2);
+                // Note: When adding support for SVE vector types, they must be rejected for Arm64EC.
             },
             Self::preg => &[],
         }
@@ -96,7 +97,7 @@ fn restricted_for_arm64ec(
     _is_clobber: bool,
 ) -> Result<(), &'static str> {
     if arch == InlineAsmArch::Arm64EC {
-        Err("x13, x14, x23, x24, x28, v16-v31 cannot be used for Arm64EC")
+        Err("x13, x14, x23, x24, x28, v16-v31, p*, ffr cannot be used for Arm64EC")
     } else {
         Ok(())
     }
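
(Context note, not part of the diff: the `%` annotation in the `def_regs!` hunk below attaches `restricted_for_arm64ec` as a per-register validation callback, so naming a restricted register when targeting Arm64EC yields the error above. A minimal, self-contained sketch of that callback pattern, using hypothetical free-standing types rather than the real `def_regs!` machinery:)

#[derive(Clone, Copy, PartialEq)]
enum InlineAsmArch {
    AArch64,
    Arm64EC,
}

// Same shape as the filter in the hunk above: Ok(()) permits the
// register, Err(msg) rejects it with a diagnostic message.
fn restricted_for_arm64ec(arch: InlineAsmArch) -> Result<(), &'static str> {
    if arch == InlineAsmArch::Arm64EC {
        Err("x13, x14, x23, x24, x28, v16-v31, p*, ffr cannot be used for Arm64EC")
    } else {
        Ok(())
    }
}

fn main() {
    // Rejected when targeting Arm64EC, allowed on plain AArch64.
    assert!(restricted_for_arm64ec(InlineAsmArch::Arm64EC).is_err());
    assert!(restricted_for_arm64ec(InlineAsmArch::AArch64).is_ok());
}
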
@@ -165,23 +166,23 @@ def_regs! {
         v29: vreg = ["v29", "b29", "h29", "s29", "d29", "q29", "z29"] % restricted_for_arm64ec,
         v30: vreg = ["v30", "b30", "h30", "s30", "d30", "q30", "z30"] % restricted_for_arm64ec,
         v31: vreg = ["v31", "b31", "h31", "s31", "d31", "q31", "z31"] % restricted_for_arm64ec,
-        p0: preg = ["p0"],
-        p1: preg = ["p1"],
-        p2: preg = ["p2"],
-        p3: preg = ["p3"],
-        p4: preg = ["p4"],
-        p5: preg = ["p5"],
-        p6: preg = ["p6"],
-        p7: preg = ["p7"],
-        p8: preg = ["p8"],
-        p9: preg = ["p9"],
-        p10: preg = ["p10"],
-        p11: preg = ["p11"],
-        p12: preg = ["p12"],
-        p13: preg = ["p13"],
-        p14: preg = ["p14"],
-        p15: preg = ["p15"],
-        ffr: preg = ["ffr"],
+        p0: preg = ["p0"] % restricted_for_arm64ec,
+        p1: preg = ["p1"] % restricted_for_arm64ec,
+        p2: preg = ["p2"] % restricted_for_arm64ec,
+        p3: preg = ["p3"] % restricted_for_arm64ec,
+        p4: preg = ["p4"] % restricted_for_arm64ec,
+        p5: preg = ["p5"] % restricted_for_arm64ec,
+        p6: preg = ["p6"] % restricted_for_arm64ec,
+        p7: preg = ["p7"] % restricted_for_arm64ec,
+        p8: preg = ["p8"] % restricted_for_arm64ec,
+        p9: preg = ["p9"] % restricted_for_arm64ec,
+        p10: preg = ["p10"] % restricted_for_arm64ec,
+        p11: preg = ["p11"] % restricted_for_arm64ec,
+        p12: preg = ["p12"] % restricted_for_arm64ec,
+        p13: preg = ["p13"] % restricted_for_arm64ec,
+        p14: preg = ["p14"] % restricted_for_arm64ec,
+        p15: preg = ["p15"] % restricted_for_arm64ec,
+        ffr: preg = ["ffr"] % restricted_for_arm64ec,
         #error = ["x19", "w19"] =>
             "x19 is used internally by LLVM and cannot be used as an operand for inline asm",
         #error = ["x29", "w29", "fp", "wfp"] =>
@@ -200,12 +201,66 @@ impl AArch64InlineAsmReg {
         _arch: InlineAsmArch,
         modifier: Option<char>,
     ) -> fmt::Result {
-        let (prefix, index) = if (self as u32) < Self::v0 as u32 {
-            (modifier.unwrap_or('x'), self as u32 - Self::x0 as u32)
+        let (prefix, index) = if let Some(index) = self.reg_index() {
+            (modifier.unwrap_or('x'), index)
+        } else if let Some(index) = self.vreg_index() {
+            (modifier.unwrap_or('v'), index)
         } else {
-            (modifier.unwrap_or('v'), self as u32 - Self::v0 as u32)
+            return out.write_str(self.name());
         };
         assert!(index < 32);
         write!(out, "{prefix}{index}")
     }
+
+    /// If the register is an integer register then return its index.
+    pub fn reg_index(self) -> Option<u32> {
+        // Unlike `vreg_index`, we can't subtract `x0` to get the u32 because
+        // `x19` and `x29` are missing and the integer constants for the
+        // `x0`..`x30` enum variants don't all match the register number. E.g. the
+        // integer constant for `x18` is 18, but the constant for `x20` is 19.
+        use AArch64InlineAsmReg::*;
+        Some(match self {
+            x0 => 0,
+            x1 => 1,
+            x2 => 2,
+            x3 => 3,
+            x4 => 4,
+            x5 => 5,
+            x6 => 6,
+            x7 => 7,
+            x8 => 8,
+            x9 => 9,
+            x10 => 10,
+            x11 => 11,
+            x12 => 12,
+            x13 => 13,
+            x14 => 14,
+            x15 => 15,
+            x16 => 16,
+            x17 => 17,
+            x18 => 18,
+            // x19 is reserved
+            x20 => 20,
+            x21 => 21,
+            x22 => 22,
+            x23 => 23,
+            x24 => 24,
+            x25 => 25,
+            x26 => 26,
+            x27 => 27,
+            x28 => 28,
+            // x29 is reserved
+            x30 => 30,
+            _ => return None,
+        })
+    }
+
+    /// If the register is a vector register then return its index.
+    pub fn vreg_index(self) -> Option<u32> {
+        use AArch64InlineAsmReg::*;
+        if self as u32 >= v0 as u32 && self as u32 <= v31 as u32 {
+            return Some(self as u32 - v0 as u32);
+        }
+        None
+    }
 }
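
(Side note on the comment in `reg_index`: because `x19` and `x29` have no enum variants, Rust shifts the discriminants of every later variant, so `self as u32 - x0 as u32` would drift past each gap. `vreg_index`, by contrast, can subtract because `v0`..`v31` are declared contiguously. A self-contained illustration with a hypothetical `Reg` enum, not the real `AArch64InlineAsmReg`:)

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Reg {
    X17 = 17,
    X18, // discriminant 18, matching the register number
    // X19 is reserved and has no variant, so...
    X20, // ...this discriminant becomes 19, not 20.
}

fn main() {
    assert_eq!(Reg::X18 as u32, 18);
    assert_eq!(Reg::X20 as u32, 19); // off by one, hence the explicit match in reg_index
}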