// reference: https://github.com/espressif/clang-xtensa/commit/6fb488d2553f06029e6611cf81c6efbd45b56e47#diff-aa74ae1e1ab6b7149789237edb78e688R8450

use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
use crate::abi::{Abi, Size};

// Number of general-purpose registers available for passing arguments.
// NOTE(review): presumably the Xtensa windowed-call registers a2..a7 — confirm
// against the referenced clang commit.
const NUM_ARG_GPRS: u64 = 6;
// Largest argument (in bits) that may be passed in registers; anything wider
// goes to the stack / indirectly (4 registers * 32 bits).
const MAX_ARG_IN_REGS_SIZE: u64 = 4 * 32;
// Largest return value (in bits) that comes back in registers; wider returns
// are made indirect and consume one argument GPR for the out-pointer (see
// `compute_abi_info`).
const MAX_RET_IN_REGS_SIZE: u64 = 2 * 32;
fn classify_ret_ty < Ty > ( arg : & mut ArgAbi < ' _ , Ty > , xlen : u64 ) {
11
- // The rules for return and argument types are the same, so defer to
12
- // classify_arg_ty.
13
- let mut remaining_gpr = 2 ;
11
+ if arg. is_ignore ( ) {
12
+ return ;
13
+ }
14
+
15
+ // The rules for return and argument types are the same,
16
+ // so defer to `classify_arg_ty`.
17
+ let mut arg_gprs_left = 2 ;
14
18
let fixed = true ;
15
- classify_arg_ty ( arg, xlen, fixed, & mut remaining_gpr ) ;
19
+ classify_arg_ty ( arg, xlen, fixed, & mut arg_gprs_left ) ;
16
20
}
17
21
18
- fn classify_arg_ty < Ty > ( arg : & mut ArgAbi < ' _ , Ty > , xlen : u64 , fixed : bool , remaining_gpr : & mut u64 ) {
19
- assert ! ( * remaining_gpr <= NUM_ARG_GPR , "Arg GPR tracking underflow" ) ;
22
+ fn classify_arg_ty < Ty > ( arg : & mut ArgAbi < ' _ , Ty > , xlen : u64 , fixed : bool , arg_gprs_left : & mut u64 ) {
23
+ assert ! ( * arg_gprs_left <= NUM_ARG_GPRS , "Arg GPR tracking underflow" ) ;
20
24
21
- let arg_size = arg. layout . size ;
22
- let alignment = arg. layout . align . abi ;
25
+ // Ignore empty structs/unions.
26
+ if arg. layout . is_zst ( ) {
27
+ return ;
28
+ }
29
+
30
+ let size = arg. layout . size . bits ( ) ;
31
+ let needed_align = arg. layout . align . abi . bits ( ) ;
32
+ let mut must_use_stack = false ;
23
33
24
34
// Determine the number of GPRs needed to pass the current argument
25
35
// according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
26
36
// register pairs, so may consume 3 registers.
27
- let mut required_gpr = 1u64 ;
37
+ let mut needed_arg_gprs = 1u64 ;
28
38
29
- if !fixed && alignment . bits ( ) == 2 * xlen {
30
- required_gpr = 2 + ( * remaining_gpr % 2 ) ;
31
- } else if arg_size . bits ( ) > xlen && arg_size . bits ( ) <= MAX_ARG_IN_REGS_SIZE {
32
- required_gpr = ( arg_size . bits ( ) + xlen - 1 ) / xlen;
39
+ if !fixed && needed_align == 2 * xlen {
40
+ needed_arg_gprs = 2 + ( * arg_gprs_left % 2 ) ;
41
+ } else if size > xlen && size <= MAX_ARG_IN_REGS_SIZE {
42
+ needed_arg_gprs = ( size + xlen - 1 ) / xlen;
33
43
}
34
44
35
- let mut stack_required = false ;
36
- if required_gpr > * remaining_gpr {
37
- stack_required = true ;
38
- required_gpr = * remaining_gpr;
45
+ if needed_arg_gprs > * arg_gprs_left {
46
+ must_use_stack = true ;
47
+ needed_arg_gprs = * arg_gprs_left;
39
48
}
40
- * remaining_gpr -= required_gpr ;
49
+ * arg_gprs_left -= needed_arg_gprs ;
41
50
42
- if !arg. layout . is_aggregate ( ) {
43
- // All integral types are promoted to XLen width, unless passed on the
44
- // stack.
45
- if arg_size . bits ( ) < xlen && !stack_required {
51
+ if !arg. layout . is_aggregate ( ) && ! matches ! ( arg . layout . abi , Abi :: Vector { .. } ) {
52
+ // All integral types are promoted to `xlen`
53
+ // width, unless passed on the stack.
54
+ if size < xlen && !must_use_stack {
46
55
arg. extend_integer_width_to ( xlen) ;
47
56
return ;
48
57
}
49
58
50
59
return ;
51
60
}
52
61
53
- // Aggregates which are <= 4 * 32 will be passed in registers if possible,
54
- // so coerce to integers.
55
- if arg_size. bits ( ) as u64 <= MAX_ARG_IN_REGS_SIZE {
56
- // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
57
- // required, and a 2-element XLen array if only XLen alignment is
62
+ // Aggregates which are <= 4 * 32 will be passed in
63
+ // registers if possible, so coerce to integers.
64
+ if size as u64 <= MAX_ARG_IN_REGS_SIZE {
65
+ let alignment = arg. layout . align . abi . bits ( ) ;
66
+
67
+ // Use a single `xlen` int if possible, 2 * `xlen` if 2 * `xlen` alignment
68
+ // is required, and a 2-element `xlen` array if only `xlen` alignment is
58
69
// required.
59
- if arg_size . bits ( ) <= xlen {
60
- arg. cast_to ( Uniform { unit : Reg :: i32 ( ) , total : arg_size } ) ;
70
+ if size <= xlen {
71
+ arg. cast_to ( Reg :: i32 ( ) ) ;
61
72
return ;
62
- } else if alignment. bits ( ) == 2 * xlen {
63
- arg. cast_to ( Uniform { unit : Reg :: i64 ( ) , total : arg_size } ) ;
73
+ } else if alignment == 2 * xlen {
74
+ arg. cast_to ( Reg :: i64 ( ) ) ;
64
75
return ;
65
76
} else {
66
- arg. extend_integer_width_to ( ( arg_size. bits ( ) + xlen - 1 ) / xlen) ;
77
+ let total = Size :: from_bits ( ( ( size + xlen - 1 ) / xlen) * xlen) ;
78
+ arg. cast_to ( Uniform { unit : Reg :: i32 ( ) , total } ) ;
67
79
return ;
68
80
}
69
81
}
@@ -72,20 +84,15 @@ fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, fixed: bool, remaini
72
84
}
73
85
74
86
pub fn compute_abi_info < Ty > ( fty : & mut FnAbi < ' _ , Ty > , xlen : u64 ) {
75
- if !fty. ret . is_ignore ( ) {
76
- classify_ret_ty ( & mut fty. ret , xlen) ;
77
- }
87
+ classify_ret_ty ( & mut fty. ret , xlen) ;
78
88
79
- let return_indirect =
89
+ let is_ret_indirect =
80
90
fty. ret . is_indirect ( ) || fty. ret . layout . size . bits ( ) > MAX_RET_IN_REGS_SIZE ;
81
91
82
- let mut remaining_gpr = if return_indirect { NUM_ARG_GPR - 1 } else { NUM_ARG_GPR } ;
92
+ let mut arg_gprs_left = if is_ret_indirect { NUM_ARG_GPRS - 1 } else { NUM_ARG_GPRS } ;
83
93
84
94
for arg in & mut fty. args {
85
- if arg. is_ignore ( ) {
86
- continue ;
87
- }
88
95
let fixed = true ;
89
- classify_arg_ty ( arg, xlen, fixed, & mut remaining_gpr ) ;
96
+ classify_arg_ty ( arg, xlen, fixed, & mut arg_gprs_left ) ;
90
97
}
91
98
}
0 commit comments