From 94c0ac4a1c663bccd76163db457d96dca0104ebf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Ber=C3=A1nek?=
Date: Wed, 30 Apr 2025 09:19:28 +0200
Subject: [PATCH] Remove `cranelift-codegen-0.82.1` benchmark

---
 collector/compile-benchmarks/README.md | 2 -
 collector/compile-benchmarks/REUSE.toml | 5 -
 .../.cargo_vcs_info.json | 6 -
 .../cranelift-codegen-0.82.1/0-println.patch | 12 -
 .../cranelift-codegen-0.82.1/Cargo.lock | 966 -
 .../cranelift-codegen-0.82.1/Cargo.toml | 107 -
 .../cranelift-codegen-0.82.1/Cargo.toml.orig | 103 -
 .../cranelift-codegen-0.82.1/LICENSE | 220 -
 .../cranelift-codegen-0.82.1/README.md | 2 -
 .../benches/x64-evex-encoding.rs | 55 -
 .../cranelift-codegen-0.82.1/build.rs | 495 -
 .../cranelift-codegen-0.82.1/perf-config.json | 4 -
 .../src/binemit/mod.rs | 95 -
 .../src/binemit/stack_map.rs | 152 -
 .../cranelift-codegen-0.82.1/src/bitset.rs | 163 -
 .../src/cfg_printer.rs | 83 -
 .../cranelift-codegen-0.82.1/src/clif.isle | 1700 --
 .../src/constant_hash.rs | 62 -
 .../cranelift-codegen-0.82.1/src/context.rs | 352 -
 .../cranelift-codegen-0.82.1/src/cursor.rs | 665 -
 .../src/data_value.rs | 309 -
 .../cranelift-codegen-0.82.1/src/dbg.rs | 28 -
 .../cranelift-codegen-0.82.1/src/dce.rs | 36 -
 .../src/divconst_magic_numbers.rs | 1083 --
 .../src/dominator_tree.rs | 837 -
 .../cranelift-codegen-0.82.1/src/flowgraph.rs | 350 -
 .../cranelift-codegen-0.82.1/src/fx.rs | 111 -
 .../src/inst_predicates.rs | 80 -
 .../src/ir/atomic_rmw_op.rs | 72 -
 .../src/ir/builder.rs | 265 -
 .../src/ir/condcodes.rs | 410 -
 .../src/ir/constant.rs | 460 -
 .../cranelift-codegen-0.82.1/src/ir/dfg.rs | 1324 --
 .../src/ir/entities.rs | 533 -
 .../src/ir/extfunc.rs | 474 -
 .../src/ir/extname.rs | 167 -
 .../src/ir/function.rs | 356 -
 .../src/ir/globalvalue.rs | 159 -
 .../cranelift-codegen-0.82.1/src/ir/heap.rs | 67 -
 .../src/ir/immediates.rs | 1330 --
 .../src/ir/instructions.rs | 924 --
 .../src/ir/jumptable.rs | 128 -
 .../cranelift-codegen-0.82.1/src/ir/layout.rs | 1324 --
 .../src/ir/libcall.rs | 215 -
 .../src/ir/memflags.rs | 169 -
 .../cranelift-codegen-0.82.1/src/ir/mod.rs | 102 -
 .../src/ir/progpoint.rs | 164 -
 .../src/ir/sourceloc.rs | 66 -
 .../src/ir/stackslot.rs | 125 -
 .../cranelift-codegen-0.82.1/src/ir/table.rs | 40 -
 .../src/ir/trapcode.rs | 134 -
 .../cranelift-codegen-0.82.1/src/ir/types.rs | 625 -
 .../src/isa/aarch64/abi.rs | 1346 --
 .../src/isa/aarch64/inst.isle | 1999 ---
 .../src/isa/aarch64/inst/args.rs | 755 -
 .../src/isa/aarch64/inst/emit.rs | 2828 ----
 .../src/isa/aarch64/inst/emit_tests.rs | 6742 --------
 .../src/isa/aarch64/inst/imms.rs | 1257 --
 .../src/isa/aarch64/inst/mod.rs | 3676 -----
 .../src/isa/aarch64/inst/regs.rs | 351 -
 .../src/isa/aarch64/inst/unwind.rs | 2 -
 .../src/isa/aarch64/inst/unwind/systemv.rs | 177 -
 .../src/isa/aarch64/lower.isle | 1126 --
 .../src/isa/aarch64/lower.rs | 1581 --
 .../src/isa/aarch64/lower/isle.rs | 289 -
 .../lower/isle/generated_code.manifest | 4 -
 .../isa/aarch64/lower/isle/generated_code.rs | 6490 --------
 .../src/isa/aarch64/lower_inst.rs | 2787 ----
 .../src/isa/aarch64/mod.rs | 297 -
 .../src/isa/aarch64/settings.rs | 9 -
 .../src/isa/call_conv.rs | 148 -
 .../cranelift-codegen-0.82.1/src/isa/mod.rs | 359 -
 .../src/isa/s390x/abi.rs | 803 -
 .../src/isa/s390x/inst.isle | 3249 ----
 .../src/isa/s390x/inst/args.rs | 267 -
 .../src/isa/s390x/inst/emit.rs | 2159 ---
 .../src/isa/s390x/inst/emit_tests.rs | 8278 ----------
 .../src/isa/s390x/inst/imms.rs | 231 -
 .../src/isa/s390x/inst/mod.rs | 2884 ----
 .../src/isa/s390x/inst/regs.rs | 168 -
.../src/isa/s390x/inst/unwind.rs | 2 - .../src/isa/s390x/inst/unwind/systemv.rs | 199 - .../src/isa/s390x/lower.isle | 2210 --- .../src/isa/s390x/lower.rs | 448 - .../src/isa/s390x/lower/isle.rs | 533 - .../s390x/lower/isle/generated_code.manifest | 4 - .../isa/s390x/lower/isle/generated_code.rs | 13743 ---------------- .../src/isa/s390x/mod.rs | 307 - .../src/isa/s390x/settings.rs | 9 - .../src/isa/unwind.rs | 182 - .../src/isa/unwind/systemv.rs | 271 - .../src/isa/unwind/winx64.rs | 334 - .../src/isa/x64/abi.rs | 1041 -- .../src/isa/x64/encoding/evex.rs | 403 - .../src/isa/x64/encoding/mod.rs | 60 - .../src/isa/x64/encoding/rex.rs | 506 - .../src/isa/x64/encoding/vex.rs | 2 - .../src/isa/x64/inst.isle | 2579 --- .../src/isa/x64/inst/args.rs | 1862 --- .../src/isa/x64/inst/emit.rs | 2915 ---- .../src/isa/x64/inst/emit_tests.rs | 4747 ------ .../src/isa/x64/inst/mod.rs | 3053 ---- .../src/isa/x64/inst/regs.rs | 304 - .../src/isa/x64/inst/unwind.rs | 5 - .../src/isa/x64/inst/unwind/systemv.rs | 201 - .../src/isa/x64/inst/unwind/winx64.rs | 16 - .../src/isa/x64/lower.isle | 2252 --- .../src/isa/x64/lower.rs | 4022 ----- .../src/isa/x64/lower/isle.rs | 572 - .../x64/lower/isle/generated_code.manifest | 4 - .../src/isa/x64/lower/isle/generated_code.rs | 9338 ----------- .../src/isa/x64/mod.rs | 367 - .../src/isa/x64/settings.rs | 9 - .../cranelift-codegen-0.82.1/src/iterators.rs | 93 - .../src/legalizer/globalvalue.rs | 127 - .../src/legalizer/heap.rs | 259 - .../src/legalizer/mod.rs | 363 - .../src/legalizer/table.rs | 87 - .../cranelift-codegen-0.82.1/src/lib.rs | 119 - .../cranelift-codegen-0.82.1/src/licm.rs | 245 - .../cranelift-codegen-0.82.1/src/log.rs | 39 - .../src/loop_analysis.rs | 349 - .../src/machinst/abi.rs | 245 - .../src/machinst/abi_impl.rs | 1736 -- .../src/machinst/blockorder.rs | 634 - .../src/machinst/buffer.rs | 2096 --- .../src/machinst/compile.rs | 114 - .../src/machinst/debug.rs | 525 - .../src/machinst/helpers.rs | 43 - .../src/machinst/inst_common.rs | 94 - .../src/machinst/isle.rs | 545 - .../src/machinst/lower.rs | 1375 -- .../src/machinst/mod.rs | 423 - .../src/machinst/regmapping.rs | 108 - .../src/machinst/valueregs.rs | 130 - .../src/machinst/vcode.rs | 962 -- .../src/nan_canonicalization.rs | 106 - .../cranelift-codegen-0.82.1/src/prelude.isle | 562 - .../src/preopt.peepmatic | 196 - .../src/print_errors.rs | 220 - .../src/remove_constant_phis.rs | 391 - .../cranelift-codegen-0.82.1/src/result.rs | 76 - .../src/scoped_hash_map.rs | 233 - .../cranelift-codegen-0.82.1/src/settings.rs | 581 - .../src/simple_gvn.rs | 152 - .../src/simple_preopt.rs | 1054 -- .../src/souper_harvest.rs | 581 - .../cranelift-codegen-0.82.1/src/timing.rs | 260 - .../src/unreachable_code.rs | 58 - .../src/value_label.rs | 69 - .../src/verifier/flags.rs | 161 - .../src/verifier/mod.rs | 1885 --- .../cranelift-codegen-0.82.1/src/write.rs | 697 - 153 files changed, 138471 deletions(-) delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/.cargo_vcs_info.json delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/0-println.patch delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.lock delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml.orig delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/LICENSE delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/README.md delete mode 
100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/benches/x64-evex-encoding.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/build.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/perf-config.json delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/stack_map.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/bitset.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cfg_printer.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/clif.isle delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/constant_hash.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/context.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cursor.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/data_value.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dbg.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dce.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/divconst_magic_numbers.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dominator_tree.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/flowgraph.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/fx.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/inst_predicates.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/atomic_rmw_op.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/builder.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/condcodes.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/constant.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/dfg.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/entities.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extfunc.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extname.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/function.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/globalvalue.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/heap.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/immediates.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/instructions.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/jumptable.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/layout.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/libcall.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/memflags.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/progpoint.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/sourceloc.rs 
delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/stackslot.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/table.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/trapcode.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/types.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/abi.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst.isle delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/args.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/emit.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/emit_tests.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/imms.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/regs.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/unwind.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/unwind/systemv.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/lower.isle delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/lower.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/lower/isle.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/lower/isle/generated_code.manifest delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/lower/isle/generated_code.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/lower_inst.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/settings.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/call_conv.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/abi.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst.isle delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/args.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/emit.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/emit_tests.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/imms.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/regs.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/unwind.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/inst/unwind/systemv.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/lower.isle delete mode 100644 
collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/lower.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/lower/isle.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/lower/isle/generated_code.manifest delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/lower/isle/generated_code.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/s390x/settings.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/unwind.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/unwind/systemv.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/unwind/winx64.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/abi.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/encoding/evex.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/encoding/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/encoding/rex.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/encoding/vex.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst.isle delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/args.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/emit.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/emit_tests.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/regs.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/unwind.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/unwind/systemv.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/inst/unwind/winx64.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/lower.isle delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/lower.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/lower/isle.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/lower/isle/generated_code.manifest delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/lower/isle/generated_code.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/x64/settings.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/iterators.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/legalizer/globalvalue.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/legalizer/heap.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/legalizer/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/legalizer/table.rs delete mode 100644 
collector/compile-benchmarks/cranelift-codegen-0.82.1/src/lib.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/licm.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/log.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/loop_analysis.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/abi.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/abi_impl.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/blockorder.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/buffer.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/compile.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/debug.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/helpers.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/inst_common.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/isle.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/lower.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/regmapping.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/valueregs.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/machinst/vcode.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/nan_canonicalization.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/prelude.isle delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/preopt.peepmatic delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/print_errors.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/remove_constant_phis.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/result.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/scoped_hash_map.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/settings.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/simple_gvn.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/simple_preopt.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/souper_harvest.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/timing.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/unreachable_code.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/value_label.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/verifier/flags.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/verifier/mod.rs delete mode 100644 collector/compile-benchmarks/cranelift-codegen-0.82.1/src/write.rs diff --git a/collector/compile-benchmarks/README.md b/collector/compile-benchmarks/README.md index 3e8164423..0bc715bc6 100644 --- a/collector/compile-benchmarks/README.md +++ b/collector/compile-benchmarks/README.md @@ -22,8 +22,6 @@ They mostly consist of real-world crates. 
part of the Rust ecosystem. - **clap-3.1.6**: A command line argument parser library. A crate used by many Rust programs. - **clap_derive-4.5.32**: The proc macro sub-crate of a command line argument parser library that is used by many Rust programs. -- **cranelift-codegen-0.82.1**: The largest crate from a code generator. Used by - wasmtime. Stresses obligation processing. - **cranelift-codegen-0.119.0**: The largest crate from a code generator. Used by wasmtime. Stresses obligation processing. - **diesel-1.4.8**: A type-safe SQL query builder. Utilizes the type system to ensure a lot of invariants. Stresses anything related to resolving trait bounds, by having a lot of trait impls for a large number of different types. - **diesel-2.2:10**: A type-safe SQL query builder. Utilizes the type system to ensure a lot of invariants. Stresses anything related to resolving trait bounds, by having a lot of trait impls for a large number of different types. diff --git a/collector/compile-benchmarks/REUSE.toml b/collector/compile-benchmarks/REUSE.toml index bc9a75cca..4ae8bf6a2 100644 --- a/collector/compile-benchmarks/REUSE.toml +++ b/collector/compile-benchmarks/REUSE.toml @@ -42,11 +42,6 @@ path = "coercions/**" SPDX-FileCopyrightText = "The Rust Project Developers (see https://thanks.rust-lang.org)" SPDX-License-Identifier = "MIT" -[[annotations]] -path = "cranelift-codegen-0.82.1/**" -SPDX-FileCopyrightText = "The Cranelift Project Developers" -SPDX-License-Identifier = "Apache-2.0 WITH LLVM-exception" - [[annotations]] path = "cranelift-codegen-0.119.0/**" SPDX-FileCopyrightText = "The Cranelift Project Developers" diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/.cargo_vcs_info.json b/collector/compile-benchmarks/cranelift-codegen-0.82.1/.cargo_vcs_info.json deleted file mode 100644 index 9bc20ffe4..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/.cargo_vcs_info.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "git": { - "sha1": "5b09b74f4c5e0fd817febd3263947ee3682759bd" - }, - "path_in_vcs": "cranelift/codegen" -} \ No newline at end of file diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/0-println.patch b/collector/compile-benchmarks/cranelift-codegen-0.82.1/0-println.patch deleted file mode 100644 index 970f57deb..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/0-println.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/src/loop_analysis.rs b/src/loop_analysis.rs -index 0e8715ae..4e44b2bd 100644 ---- a/src/loop_analysis.rs -+++ b/src/loop_analysis.rs -@@ -163,6 +163,7 @@ impl LoopAnalysis { - domtree: &DominatorTree, - layout: &Layout, - ) { -+ println!("testing"); - let mut stack: Vec = Vec::new(); - // We handle each loop header in reverse order, corresponding to a pseudo postorder - // traversal of the graph. diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.lock b/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.lock deleted file mode 100644 index 1706bd2d5..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.lock +++ /dev/null @@ -1,966 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "ahash" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" - -[[package]] -name = "aho-corasick" -version = "0.7.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" -dependencies = [ - "memchr", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "backtrace" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bstr" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - -[[package]] -name = "bumpalo" -version = "3.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" - -[[package]] -name = "cast" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" -dependencies = [ - "rustc_version", -] - -[[package]] -name = "cc" -version = "1.0.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "bitflags", - "textwrap 0.11.0", - "unicode-width", -] - -[[package]] -name = "cranelift-bforest" -version = "0.82.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16922317bd7dd104d509a373887822caa0242fc1def00de66abb538db221db4" -dependencies = [ - "cranelift-entity", -] - -[[package]] -name = "cranelift-codegen" -version = "0.82.1" -dependencies = [ - "bincode", - "cranelift-bforest", - "cranelift-codegen-meta", - "cranelift-codegen-shared", - "cranelift-entity", - "cranelift-isle", - "criterion", - "gimli", - "hashbrown 0.9.1", - "log", - "miette", - "regalloc", - "serde", - "smallvec", - "souper-ir", - "target-lexicon", -] - -[[package]] -name = "cranelift-codegen-meta" -version = "0.82.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703d0ed7d3bc6c7a814ca12858175bf4e93167a3584127858c686e4b5dd6e432" -dependencies = [ - "cranelift-codegen-shared", -] - -[[package]] -name = "cranelift-codegen-shared" -version = "0.82.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80f52311e1c90de12dcf8c4b9999c6ebfd1ed360373e88c357160936844511f6" - -[[package]] -name = "cranelift-entity" -version = "0.82.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66bc82ef522c1f643baf7d4d40b7c52643ee4549d8960b0e6a047daacb83f897" -dependencies = [ - "serde", -] - -[[package]] -name = "cranelift-isle" -version = "0.82.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f787aaa065700a108f9f62764f5e1224f97ff8a2e2f86ddf7071ab2981639c" -dependencies = [ - "log", - "miette", - "thiserror", -] - -[[package]] -name = "criterion" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" -dependencies = [ - "atty", - "cast", - "clap", - "criterion-plot", - "csv", - "itertools", - "lazy_static", - "num-traits", - "oorandom", - "plotters", - "rayon", - "regex", - "serde", - "serde_cbor", - "serde_derive", - "serde_json", - "tinytemplate", - "walkdir", -] - -[[package]] -name = "criterion-plot" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" -dependencies = [ - "cast", - "itertools", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" -dependencies = [ - "cfg-if", - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "lazy_static", - "memoffset", - "scopeguard", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" -dependencies = [ - "cfg-if", - "lazy_static", -] - -[[package]] -name = "csv" -version = "1.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" 
-dependencies = [ - "bstr", - "csv-core", - "itoa 0.4.8", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" -dependencies = [ - "memchr", -] - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "gimli" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" -dependencies = [ - "indexmap", -] - -[[package]] -name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "id-arena" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005" - -[[package]] -name = "indexmap" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" -dependencies = [ - "autocfg", - "hashbrown 0.11.2", -] - -[[package]] -name = "is_ci" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616cde7c720bb2bb5824a224687d8f77bfd38922027f01d825cd7453be5099fb" - -[[package]] -name = "itertools" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - -[[package]] -name = "itoa" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" - -[[package]] -name = "js-sys" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.121" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" - -[[package]] -name = "log" -version = "0.4.14" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "memchr" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" - -[[package]] -name = "memoffset" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" -dependencies = [ - "autocfg", -] - -[[package]] -name = "miette" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd2adcfcced5d625bf90a958a82ae5b93231f57f3df1383fee28c9b5096d35ed" -dependencies = [ - "atty", - "backtrace", - "miette-derive", - "once_cell", - "owo-colors", - "supports-color", - "supports-hyperlinks", - "supports-unicode", - "terminal_size", - "textwrap 0.14.2", - "thiserror", -] - -[[package]] -name = "miette-derive" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c01a8b61312d367ce87956bb686731f87e4c6dd5dbc550e8f06e3c24fb1f67f" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "num-traits" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" - -[[package]] -name = "oorandom" -version = "11.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" - -[[package]] -name = "owo-colors" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e72e30578e0d0993c8ae20823dd9cff2bc5517d2f586a8aef462a581e8a03eb" - -[[package]] -name = "plotters" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" -dependencies = [ - "num-traits", - "plotters-backend", - "plotters-svg", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "plotters-backend" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c" - -[[package]] -name = "plotters-svg" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9" 
-dependencies = [ - "plotters-backend", -] - -[[package]] -name = "proc-macro2" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "quote" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4af2ec4714533fcdf07e886f17025ace8b997b9ce51204ee69b6da831c3da57" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rayon" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" -dependencies = [ - "autocfg", - "crossbeam-deque", - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" -dependencies = [ - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-utils", - "lazy_static", - "num_cpus", -] - -[[package]] -name = "regalloc" -version = "0.0.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62446b1d3ebf980bdc68837700af1d77b37bc430e524bf95319c6eada2a4cc02" -dependencies = [ - "log", - "rustc-hash", - "serde", - "smallvec", -] - -[[package]] -name = "regex" -version = "1.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" - -[[package]] -name = "regex-syntax" -version = "0.6.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" - -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc_version" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] - -[[package]] -name = "ryu" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "semver" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d" - -[[package]] -name = "serde" 
-version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" -dependencies = [ - "itoa 1.0.1", - "ryu", - "serde", -] - -[[package]] -name = "smallvec" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" - -[[package]] -name = "smawk" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043" - -[[package]] -name = "souper-ir" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50c18ce33988e1973003afbaa66e6a465ad7a614dc33f246879ccc209c2c044" -dependencies = [ - "id-arena", -] - -[[package]] -name = "supports-color" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4872ced36b91d47bae8a214a683fe54e7078875b399dfa251df346c9b547d1f9" -dependencies = [ - "atty", - "is_ci", -] - -[[package]] -name = "supports-hyperlinks" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "590b34f7c5f01ecc9d78dba4b3f445f31df750a67621cf31626f3b7441ce6406" -dependencies = [ - "atty", -] - -[[package]] -name = "supports-unicode" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8b945e45b417b125a8ec51f1b7df2f8df7920367700d1f98aedd21e5735f8b2" -dependencies = [ - "atty", -] - -[[package]] -name = "syn" -version = "1.0.89" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea297be220d52398dcc07ce15a209fce436d361735ac1db700cab3b6cdfb9f54" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "target-lexicon" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fa7e55043acb85fca6b3c01485a2eeb6b69c5d21002e273c79e465f43b7ac1" - -[[package]] -name = "terminal_size" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "textwrap" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" -dependencies = [ - "smawk", - "unicode-linebreak", - "unicode-width", -] - 
-[[package]] -name = "thiserror" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tinytemplate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "unicode-linebreak" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a52dcaab0c48d931f7cc8ef826fa51690a08e1ea55117ef26f89864f532383f" -dependencies = [ - "regex", -] - -[[package]] -name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "walkdir" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = [ - "same-file", - "winapi", - "winapi-util", -] - -[[package]] -name = "wasm-bindgen" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" - -[[package]] -name = "web-sys" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", 
- "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml b/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml deleted file mode 100644 index 73f3b7a00..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml +++ /dev/null @@ -1,107 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). -# See Cargo.toml.orig for the original contents. - -[package] -edition = "2018" -name = "cranelift-codegen" -version = "0.82.1" -authors = ["The Cranelift Project Developers"] -build = "build.rs" -description = "Low-level code generator library" -documentation = "https://docs.rs/cranelift-codegen" -readme = "README.md" -keywords = ["compile", "compiler", "jit"] -categories = ["no-std"] -license = "Apache-2.0 WITH LLVM-exception" -repository = "https://github.com/bytecodealliance/wasmtime" -resolver = "2" - -[[bench]] -name = "x64-evex-encoding" -harness = false -[dependencies.bincode] -version = "1.2.1" -optional = true - -[dependencies.cranelift-bforest] -version = "0.82.1" - -[dependencies.cranelift-codegen-shared] -version = "0.82.1" - -[dependencies.cranelift-entity] -version = "0.82.1" - -[dependencies.gimli] -version = "0.26.0" -features = ["write"] -optional = true -default-features = false - -[dependencies.hashbrown] -version = "0.9.1" -optional = true - -[dependencies.log] -version = "0.4.6" -default-features = false - -[dependencies.regalloc] -version = "0.0.34" - -[dependencies.serde] -version = "1.0.94" -features = ["derive"] -optional = true - -[dependencies.smallvec] -version = "1.6.1" - -[dependencies.souper-ir] -version = "2.1.0" -optional = true - -[dependencies.target-lexicon] -version = "0.12" -[dev-dependencies.criterion] -version = "0.3" -[build-dependencies.cranelift-codegen-meta] -version = "0.82.1" - -[build-dependencies.cranelift-isle] -version = "=0.82.1" -optional = true - -[build-dependencies.miette] -version = "3" -features = ["fancy"] -optional = true - -[features] -all-arch = ["x86", "arm64", "s390x"] -arm64 = [] -completely-skip-isle-for-ci-deterministic-check = [] -core = ["hashbrown"] -default = ["std", "unwind"] -enable-serde = ["serde", "regalloc/enable-serde", "cranelift-entity/enable-serde"] -experimental_x64 = [] -rebuild-isle = ["cranelift-isle", "miette", "cranelift-codegen-meta/rebuild-isle"] -regalloc-snapshot = ["bincode", "regalloc/enable-serde"] -s390x = [] -souper-harvest = ["souper-ir", "souper-ir/stringify"] -std = [] -testing_hooks = [] 
-unwind = ["gimli"] -x86 = [] -[badges.maintenance] -status = "experimental" - -[workspace] diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml.orig b/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml.orig deleted file mode 100644 index de3fabfee..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/Cargo.toml.orig +++ /dev/null @@ -1,103 +0,0 @@ -[package] -authors = ["The Cranelift Project Developers"] -name = "cranelift-codegen" -version = "0.82.1" -description = "Low-level code generator library" -license = "Apache-2.0 WITH LLVM-exception" -documentation = "https://docs.rs/cranelift-codegen" -repository = "https://github.com/bytecodealliance/wasmtime" -categories = ["no-std"] -readme = "README.md" -keywords = ["compile", "compiler", "jit"] -build = "build.rs" -edition = "2018" - -[dependencies] -cranelift-codegen-shared = { path = "./shared", version = "0.82.1" } -cranelift-entity = { path = "../entity", version = "0.82.1" } -cranelift-bforest = { path = "../bforest", version = "0.82.1" } -hashbrown = { version = "0.9.1", optional = true } -target-lexicon = "0.12" -log = { version = "0.4.6", default-features = false } -serde = { version = "1.0.94", features = ["derive"], optional = true } -bincode = { version = "1.2.1", optional = true } -gimli = { version = "0.26.0", default-features = false, features = ["write"], optional = true } -smallvec = { version = "1.6.1" } -regalloc = "0.0.34" -souper-ir = { version = "2.1.0", optional = true } -# It is a goal of the cranelift-codegen crate to have minimal external dependencies. -# Please don't add any unless they are essential to the task of creating binary -# machine code. Integration tests that need external dependencies can be -# accomodated in `tests`. - -[dev-dependencies] -criterion = "0.3" - -[build-dependencies] -cranelift-codegen-meta = { path = "meta", version = "0.82.1" } -cranelift-isle = { path = "../isle/isle", version = "=0.82.1", optional = true } -miette = { version = "3", features = ["fancy"], optional = true } - -[features] -default = ["std", "unwind"] - -# The "std" feature enables use of libstd. The "core" feature enables use -# of some minimal std-like replacement libraries. At least one of these two -# features need to be enabled. -std = [] - -# The "core" features enables use of "hashbrown" since core doesn't have -# a HashMap implementation, and a workaround for Cargo #4866. -core = ["hashbrown"] - -# This enables some additional functions useful for writing tests, but which -# can significantly increase the size of the library. -testing_hooks = [] - -# This enables unwind info generation functionality. -unwind = ["gimli"] - -# ISA targets for which we should build. -# If no ISA targets are explicitly enabled, the ISA target for the host machine is enabled. -x86 = [] -arm64 = [] -s390x = [] - -# Stub feature that does nothing, for Cargo-features compatibility: the new -# backend is the default now. -experimental_x64 = [] - -# Option to enable all architectures. -all-arch = [ - "x86", - "arm64", - "s390x" -] - -# For dependent crates that want to serialize some parts of cranelift -enable-serde = [ - "serde", - "regalloc/enable-serde", - "cranelift-entity/enable-serde", -] - -# Allow snapshotting regalloc test cases. Useful only to report bad register -# allocation failures, or for regalloc.rs developers. -regalloc-snapshot = ["bincode", "regalloc/enable-serde"] - -# Enable support for the Souper harvester. 
-souper-harvest = ["souper-ir", "souper-ir/stringify"] - -# Recompile ISLE DSL source files into their generated Rust code. -rebuild-isle = ["cranelift-isle", "miette", "cranelift-codegen-meta/rebuild-isle"] - -# A hack to skip the ISLE-rebuild logic when testing for determinism -# with the "Meta deterministic check" CI job. -completely-skip-isle-for-ci-deterministic-check = [] - -[badges] -maintenance = { status = "experimental" } - -[[bench]] -name = "x64-evex-encoding" -harness = false diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/LICENSE b/collector/compile-benchmarks/cranelift-codegen-0.82.1/LICENSE deleted file mode 100644 index f9d81955f..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/LICENSE +++ /dev/null @@ -1,220 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - ---- LLVM Exceptions to the Apache 2.0 License ---- - -As an exception, if, as a result of your compiling your source code, portions -of this Software are embedded into an Object form of such source code, you -may redistribute such embedded portions in such Object form without complying -with the conditions of Sections 4(a), 4(b) and 4(d) of the License. - -In addition, if you combine or link compiled forms of this Software with -software that is licensed under the GPLv2 ("Combined Software") and if a -court of competent jurisdiction determines that the patent provision (Section -3), the indemnity provision (Section 9) or other Section of the License -conflicts with the conditions of the GPLv2, you may retroactively and -prospectively choose to deem waived or otherwise exclude such Section(s) of -the License, but only in their entirety and only with respect to the Combined -Software. - diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/README.md b/collector/compile-benchmarks/cranelift-codegen-0.82.1/README.md deleted file mode 100644 index 18b9756aa..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This crate contains the core Cranelift code generator. It translates code from an -intermediate representation into executable machine code. diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/benches/x64-evex-encoding.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/benches/x64-evex-encoding.rs deleted file mode 100644 index 550e88a0d..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/benches/x64-evex-encoding.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! Measure instruction encoding latency using various approaches; the -//! benchmarking is feature-gated on `x86` since it only measures the encoding -//! mechanism of that backend. - -#[cfg(feature = "x86")] -mod x86 { - use cranelift_codegen::isa::x64::encoding::{ - evex::{EvexInstruction, EvexVectorLength, Register}, - rex::{LegacyPrefixes, OpcodeMap}, - }; - use criterion::{criterion_group, Criterion}; - - // Define the benchmarks. 
- fn x64_evex_encoding_benchmarks(c: &mut Criterion) { - let mut group = c.benchmark_group("x64 EVEX encoding"); - let rax = Register::from(0); - let rdx = Register::from(2); - - group.bench_function("EvexInstruction (builder pattern)", |b| { - let mut sink = vec![]; - b.iter(|| { - sink.clear(); - EvexInstruction::new() - .prefix(LegacyPrefixes::_66) - .map(OpcodeMap::_0F38) - .w(true) - .opcode(0x1F) - .reg(rax) - .rm(rdx) - .length(EvexVectorLength::V128) - .encode(&mut sink); - }); - }); - } - criterion_group!(benches, x64_evex_encoding_benchmarks); - - /// Using an inner module to feature-gate the benchmarks means that we must - /// manually specify how to run the benchmarks (see `criterion_main!`). - pub fn run_benchmarks() { - criterion::__warn_about_html_reports_feature(); - criterion::__warn_about_cargo_bench_support_feature(); - benches(); - Criterion::default().configure_from_args().final_summary(); - } -} - -fn main() { - #[cfg(feature = "x86")] - x86::run_benchmarks(); - - #[cfg(not(feature = "x86"))] - println!( - "Unable to run the x64-evex-encoding benchmark; the `x86` feature must be enabled in Cargo.", - ); -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/build.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/build.rs deleted file mode 100644 index 1b95bf02a..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/build.rs +++ /dev/null @@ -1,495 +0,0 @@ -// Build script. -// -// This program is run by Cargo when building cranelift-codegen. It is used to generate Rust code from -// the language definitions in the cranelift-codegen/meta directory. -// -// Environment: -// -// OUT_DIR -// Directory where generated files should be placed. -// -// TARGET -// Target triple provided by Cargo. -// -// The build script expects to be run from the directory where this build.rs file lives. The -// current directory is used to find the sources. - -use cranelift_codegen_meta as meta; - -use std::env; -use std::io::Read; -use std::process; -use std::time::Instant; - -fn main() { - let start_time = Instant::now(); - - let out_dir = env::var("OUT_DIR").expect("The OUT_DIR environment variable must be set"); - let target_triple = env::var("TARGET").expect("The TARGET environment variable must be set"); - - let isa_targets = meta::isa::Isa::all() - .iter() - .cloned() - .filter(|isa| { - let env_key = format!("CARGO_FEATURE_{}", isa.to_string().to_uppercase()); - env::var(env_key).is_ok() - }) - .collect::>(); - - let isas = if isa_targets.is_empty() { - // Try to match native target. - let target_name = target_triple.split('-').next().unwrap(); - let isa = meta::isa_from_arch(&target_name).expect("error when identifying target"); - println!("cargo:rustc-cfg=feature=\"{}\"", isa); - vec![isa] - } else { - isa_targets - }; - - let cur_dir = env::current_dir().expect("Can't access current working directory"); - let crate_dir = cur_dir.as_path(); - - println!("cargo:rerun-if-changed=build.rs"); - - if let Err(err) = meta::generate(&isas, &out_dir, crate_dir) { - eprintln!("Error: {}", err); - process::exit(1); - } - - if env::var("CRANELIFT_VERBOSE").is_ok() { - for isa in &isas { - println!("cargo:warning=Includes support for {} ISA", isa.to_string()); - } - println!( - "cargo:warning=Build step took {:?}.", - Instant::now() - start_time - ); - println!("cargo:warning=Generated files are in {}", out_dir); - } - - // The "Meta deterministic check" CI job runs this build script N - // times to ensure it produces the same output - // consistently. 
However, it runs the script in a fresh directory, - // without any of the source tree present; this breaks our - // manifest check (we need the ISLE source to be present). To keep - // things simple, we just disable all ISLE-related logic for this - // specific CI job. - #[cfg(not(feature = "completely-skip-isle-for-ci-deterministic-check"))] - { - maybe_rebuild_isle(crate_dir).expect("Unhandled failure in ISLE rebuild"); - } - - let pkg_version = env::var("CARGO_PKG_VERSION").unwrap(); - let mut cmd = std::process::Command::new("git"); - cmd.arg("rev-parse") - .arg("HEAD") - .stdout(std::process::Stdio::piped()) - .current_dir(env::var("CARGO_MANIFEST_DIR").unwrap()); - let version = if let Ok(mut child) = cmd.spawn() { - let mut git_rev = String::new(); - child - .stdout - .as_mut() - .unwrap() - .read_to_string(&mut git_rev) - .unwrap(); - let status = child.wait().unwrap(); - if status.success() { - let git_rev = git_rev.trim().chars().take(9).collect::(); - format!("{}-{}", pkg_version, git_rev) - } else { - // not a git repo - pkg_version - } - } else { - // git not available - pkg_version - }; - std::fs::write( - std::path::Path::new(&out_dir).join("version.rs"), - format!( - "/// Version number of this crate. \n\ - pub const VERSION: &str = \"{}\";", - version - ), - ) - .unwrap(); -} - -/// Strip the current directory from the file paths, because `islec` -/// includes them in the generated source, and this helps us maintain -/// deterministic builds that don't include those local file paths. -fn make_isle_source_path_relative( - cur_dir: &std::path::PathBuf, - filename: std::path::PathBuf, -) -> std::path::PathBuf { - if let Ok(suffix) = filename.strip_prefix(&cur_dir) { - suffix.to_path_buf() - } else { - filename - } -} - -/// A list of compilations (transformations from ISLE source to -/// generated Rust source) that exist in the repository. -/// -/// This list is used either to regenerate the Rust source in-tree (if -/// the `rebuild-isle` feature is enabled), or to verify that the ISLE -/// source in-tree corresponds to the ISLE source that was last used -/// to rebuild the Rust source (if the `rebuild-isle` feature is not -/// enabled). -#[derive(Clone, Debug)] -struct IsleCompilations { - items: Vec, -} - -#[derive(Clone, Debug)] -struct IsleCompilation { - output: std::path::PathBuf, - inputs: Vec, -} - -impl IsleCompilation { - /// Compute the manifest filename for the given generated Rust file. - fn manifest_filename(&self) -> std::path::PathBuf { - self.output.with_extension("manifest") - } - - /// Compute the content of the source manifest for all ISLE source - /// files that go into the compilation of one Rust file. - /// - /// We store this alongside the `.rs` file as - /// `.manifest` and use it to verify that a - /// rebuild was done if necessary. - fn compute_manifest(&self) -> Result> { - // We use the deprecated SipHasher from std::hash in order to verify - // that ISLE sources haven't changed since the generated source was - // last regenerated. - // - // We use this instead of a stronger and more usual content hash, like - // SHA-{160,256,512}, because it's built into the standard library and - // we don't want to pull in a separate crate. We try to keep Cranelift - // crate dependencies as intentionally small as possible. In fact, we - // used to use the `sha2` crate for SHA-512 and this turns out to be - // undesirable for downstream consumers (see #3609). - // - // Why not the recommended replacement - // `std::collections::hash_map::DefaultHasher`? 
Because we need the - // hash to be deterministic, both between runs (i.e., not seeded with - // random state) and across Rust versions. - // - // If `SipHasher` is ever actually removed from `std`, we'll need to - // find a new option, either a very small crate or something else - // that's built-in. - #![allow(deprecated)] - use std::fmt::Write; - use std::hash::{Hasher, SipHasher}; - - let mut manifest = String::new(); - - for filename in &self.inputs { - // Our source must be valid UTF-8 for this to work, else user - // will get an error on build. This is not expected to be an - // issue. - let content = std::fs::read_to_string(filename)?; - // On Windows, source is checked out with line-endings changed - // to `\r\n`; canonicalize the source that we hash to - // Unix-style (`\n`) so hashes will match. - let content = content.replace("\r\n", "\n"); - // One line in the manifest: . - let mut hasher = SipHasher::new_with_keys(0, 0); // fixed keys for determinism - hasher.write(content.as_bytes()); - let filename = format!("{}", filename.display()).replace("\\", "/"); - writeln!(&mut manifest, "{} {:x}", filename, hasher.finish())?; - } - - Ok(manifest) - } -} - -/// Construct the list of compilations (transformations from ISLE -/// source to generated Rust source) that exist in the repository. -fn get_isle_compilations(crate_dir: &std::path::Path) -> Result { - let cur_dir = std::env::current_dir()?; - - let clif_isle = - make_isle_source_path_relative(&cur_dir, crate_dir.join("src").join("clif.isle")); - let prelude_isle = - make_isle_source_path_relative(&cur_dir, crate_dir.join("src").join("prelude.isle")); - let src_isa_x64 = - make_isle_source_path_relative(&cur_dir, crate_dir.join("src").join("isa").join("x64")); - let src_isa_aarch64 = - make_isle_source_path_relative(&cur_dir, crate_dir.join("src").join("isa").join("aarch64")); - let src_isa_s390x = - make_isle_source_path_relative(&cur_dir, crate_dir.join("src").join("isa").join("s390x")); - - // This is a set of ISLE compilation units. - // - // The format of each entry is: - // - // (output Rust code file, input ISLE source files) - // - // There should be one entry for each backend that uses ISLE for lowering, - // and if/when we replace our peephole optimization passes with ISLE, there - // should be an entry for each of those as well. - Ok(IsleCompilations { - items: vec![ - // The x86-64 instruction selector. - IsleCompilation { - output: src_isa_x64 - .join("lower") - .join("isle") - .join("generated_code.rs"), - inputs: vec![ - clif_isle.clone(), - prelude_isle.clone(), - src_isa_x64.join("inst.isle"), - src_isa_x64.join("lower.isle"), - ], - }, - // The aarch64 instruction selector. - IsleCompilation { - output: src_isa_aarch64 - .join("lower") - .join("isle") - .join("generated_code.rs"), - inputs: vec![ - clif_isle.clone(), - prelude_isle.clone(), - src_isa_aarch64.join("inst.isle"), - src_isa_aarch64.join("lower.isle"), - ], - }, - // The s390x instruction selector. - IsleCompilation { - output: src_isa_s390x - .join("lower") - .join("isle") - .join("generated_code.rs"), - inputs: vec![ - clif_isle.clone(), - prelude_isle.clone(), - src_isa_s390x.join("inst.isle"), - src_isa_s390x.join("lower.isle"), - ], - }, - ], - }) -} - -/// Check the manifest for the ISLE generated code, which documents -/// what ISLE source went into generating the Rust, and if there is a -/// mismatch, either invoke the ISLE compiler (if we have the -/// `rebuild-isle` feature) or exit with an error (if not). 
-/// -/// We do this by computing a hash of the ISLE source and checking it -/// against a "manifest" that is also checked into git, alongside the -/// generated Rust. -/// -/// (Why not include the `rebuild-isle` feature by default? Because -/// the build process must not modify the checked-in source by -/// default; any checked-in source is a human-managed bit of data, and -/// we can only act as an agent of the human developer when explicitly -/// requested to do so. This manifest check is a middle ground that -/// ensures this explicit control while also avoiding the easy footgun -/// of "I changed the ISLE, why isn't the compiler updated?!".) -fn maybe_rebuild_isle( - crate_dir: &std::path::Path, -) -> Result<(), Box> { - let isle_compilations = get_isle_compilations(crate_dir)?; - let mut rebuild_compilations = vec![]; - - for compilation in &isle_compilations.items { - for file in &compilation.inputs { - println!("cargo:rerun-if-changed={}", file.display()); - } - - let manifest = - std::fs::read_to_string(compilation.manifest_filename()).unwrap_or(String::new()); - // Canonicalize Windows line-endings into Unix line-endings in - // the manifest text itself. - let manifest = manifest.replace("\r\n", "\n"); - let expected_manifest = compilation.compute_manifest()?.replace("\r\n", "\n"); - if manifest != expected_manifest { - rebuild_compilations.push((compilation, expected_manifest)); - } - } - - #[cfg(feature = "rebuild-isle")] - { - if !rebuild_compilations.is_empty() { - set_miette_hook(); - } - let mut had_error = false; - for (compilation, expected_manifest) in rebuild_compilations { - if let Err(e) = rebuild_isle(compilation, &expected_manifest) { - eprintln!("Error building ISLE files: {:?}", e); - let mut source = e.source(); - while let Some(e) = source { - eprintln!("{:?}", e); - source = e.source(); - } - had_error = true; - } - } - - if had_error { - std::process::exit(1); - } - } - - #[cfg(not(feature = "rebuild-isle"))] - { - if !rebuild_compilations.is_empty() { - for (compilation, _) in rebuild_compilations { - eprintln!(""); - eprintln!( - "Error: the ISLE source files that resulted in the generated Rust source" - ); - eprintln!(""); - eprintln!(" * {}", compilation.output.display()); - eprintln!(""); - eprintln!( - "have changed but the generated source was not rebuilt! These ISLE source" - ); - eprintln!("files are:"); - eprintln!(""); - for file in &compilation.inputs { - eprintln!(" * {}", file.display()); - } - } - - eprintln!(""); - eprintln!("Please add `--features rebuild-isle` to your `cargo build` command"); - eprintln!("if you wish to rebuild the generated source, then include these changes"); - eprintln!("in any git commits you make that include the changes to the ISLE."); - eprintln!(""); - eprintln!("For example:"); - eprintln!(""); - eprintln!(" $ cargo build -p cranelift-codegen --features rebuild-isle"); - eprintln!(""); - eprintln!("(This build script cannot do this for you by default because we cannot"); - eprintln!("modify checked-into-git source without your explicit opt-in.)"); - eprintln!(""); - std::process::exit(1); - } - } - - Ok(()) -} - -#[cfg(feature = "rebuild-isle")] -fn set_miette_hook() { - use std::sync::Once; - static SET_MIETTE_HOOK: Once = Once::new(); - SET_MIETTE_HOOK.call_once(|| { - let _ = miette::set_hook(Box::new(|_| { - Box::new( - miette::MietteHandlerOpts::new() - // This is necessary for `miette` to properly display errors - // until https://github.com/zkat/miette/issues/93 is fixed. 
- .force_graphical(true) - .build(), - ) - })); - }); -} - -/// Rebuild ISLE DSL source text into generated Rust code. -/// -/// NB: This must happen *after* the `cranelift-codegen-meta` functions, since -/// it consumes files generated by them. -#[cfg(feature = "rebuild-isle")] -fn rebuild_isle( - compilation: &IsleCompilation, - manifest: &str, -) -> Result<(), Box> { - use cranelift_isle as isle; - - println!("Rebuilding {}", compilation.output.display()); - - // First, remove the manifest, if any; we will recreate it - // below if the compilation is successful. Ignore error if no - // manifest was present. - let manifest_filename = compilation.manifest_filename(); - let _ = std::fs::remove_file(&manifest_filename); - - let code = (|| { - let lexer = isle::lexer::Lexer::from_files(&compilation.inputs[..])?; - let defs = isle::parser::parse(lexer)?; - isle::compile::compile(&defs) - })() - .map_err(|e| { - // Make sure to include the source snippets location info along with - // the error messages. - - let report = miette::Report::new(e); - return DebugReport(report); - - struct DebugReport(miette::Report); - - impl std::fmt::Display for DebugReport { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - self.0.handler().debug(&*self.0, f) - } - } - - impl std::fmt::Debug for DebugReport { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - std::fmt::Display::fmt(self, f) - } - } - - impl std::error::Error for DebugReport {} - })?; - - let code = rustfmt(&code).unwrap_or_else(|e| { - println!( - "cargo:warning=Failed to run `rustfmt` on ISLE-generated code: {:?}", - e - ); - code - }); - - println!( - "Writing ISLE-generated Rust code to {}", - compilation.output.display() - ); - std::fs::write(&compilation.output, code)?; - - // Write the manifest so that, in the default build configuration - // without the `rebuild-isle` feature, we can at least verify that - // no changes were made that will not be picked up. Note that we - // only write this *after* we write the source above, so no - // manifest is produced if there was an error. 
- std::fs::write(&manifest_filename, manifest)?; - - return Ok(()); - - fn rustfmt(code: &str) -> std::io::Result { - use std::io::Write; - - let mut rustfmt = std::process::Command::new("rustfmt") - .stdin(std::process::Stdio::piped()) - .stdout(std::process::Stdio::piped()) - .spawn()?; - - let mut stdin = rustfmt.stdin.take().unwrap(); - stdin.write_all(code.as_bytes())?; - drop(stdin); - - let mut stdout = rustfmt.stdout.take().unwrap(); - let mut data = vec![]; - stdout.read_to_end(&mut data)?; - - let status = rustfmt.wait()?; - if !status.success() { - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - format!("`rustfmt` exited with status {}", status), - )); - } - - Ok(String::from_utf8(data).expect("rustfmt always writs utf-8 to stdout")) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/perf-config.json b/collector/compile-benchmarks/cranelift-codegen-0.82.1/perf-config.json deleted file mode 100644 index f5204c638..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/perf-config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "category": "primary", - "artifact": "library" -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/mod.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/mod.rs deleted file mode 100644 index 750eaa2a2..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/mod.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! Binary machine code emission. -//! -//! The `binemit` module contains code for translating Cranelift's intermediate representation into -//! binary machine code. - -mod stack_map; - -pub use self::stack_map::StackMap; -use core::fmt; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Offset in bytes from the beginning of the function. -/// -/// Cranelift can be used as a cross compiler, so we don't want to use a type like `usize` which -/// depends on the *host* platform, not the *target* platform. -pub type CodeOffset = u32; - -/// Addend to add to the symbol value. -pub type Addend = i64; - -/// Relocation kinds for every ISA -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum Reloc { - /// absolute 4-byte - Abs4, - /// absolute 8-byte - Abs8, - /// x86 PC-relative 4-byte - X86PCRel4, - /// x86 call to PC-relative 4-byte - X86CallPCRel4, - /// x86 call to PLT-relative 4-byte - X86CallPLTRel4, - /// x86 GOT PC-relative 4-byte - X86GOTPCRel4, - /// Arm32 call target - Arm32Call, - /// Arm64 call target. Encoded as bottom 26 bits of instruction. This - /// value is sign-extended, multiplied by 4, and added to the PC of - /// the call instruction to form the destination address. - Arm64Call, - /// s390x PC-relative 4-byte offset - S390xPCRel32Dbl, - - /// Elf x86_64 32 bit signed PC relative offset to two GOT entries for GD symbol. - ElfX86_64TlsGd, - - /// Mach-O x86_64 32 bit signed PC relative offset to a `__thread_vars` entry. - MachOX86_64Tlv, - - /// AArch64 TLS GD - /// Set an ADRP immediate field to the top 21 bits of the final address. Checks for overflow. - /// This is equivalent to `R_AARCH64_TLSGD_ADR_PAGE21` in the [aaelf64](https://github.com/ARM-software/abi-aa/blob/2bcab1e3b22d55170c563c3c7940134089176746/aaelf64/aaelf64.rst#relocations-for-thread-local-storage) - Aarch64TlsGdAdrPage21, - - /// AArch64 TLS GD - /// Set the add immediate field to the low 12 bits of the final address. Does not check for overflow. 
- /// This is equivalent to `R_AARCH64_TLSGD_ADD_LO12_NC` in the [aaelf64](https://github.com/ARM-software/abi-aa/blob/2bcab1e3b22d55170c563c3c7940134089176746/aaelf64/aaelf64.rst#relocations-for-thread-local-storage) - Aarch64TlsGdAddLo12Nc, -} - -impl fmt::Display for Reloc { - /// Display trait implementation drops the arch, since its used in contexts where the arch is - /// already unambiguous, e.g. clif syntax with isa specified. In other contexts, use Debug. - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::Abs4 => write!(f, "Abs4"), - Self::Abs8 => write!(f, "Abs8"), - Self::S390xPCRel32Dbl => write!(f, "PCRel32Dbl"), - Self::X86PCRel4 => write!(f, "PCRel4"), - Self::X86CallPCRel4 => write!(f, "CallPCRel4"), - Self::X86CallPLTRel4 => write!(f, "CallPLTRel4"), - Self::X86GOTPCRel4 => write!(f, "GOTPCRel4"), - Self::Arm32Call | Self::Arm64Call => write!(f, "Call"), - - Self::ElfX86_64TlsGd => write!(f, "ElfX86_64TlsGd"), - Self::MachOX86_64Tlv => write!(f, "MachOX86_64Tlv"), - Self::Aarch64TlsGdAdrPage21 => write!(f, "Aarch64TlsGdAdrPage21"), - Self::Aarch64TlsGdAddLo12Nc => write!(f, "Aarch64TlsGdAddLo12Nc"), - } - } -} - -/// Container for information about a vector of compiled code and its supporting read-only data. -/// -/// The code starts at offset 0 and is followed optionally by relocatable jump tables and copyable -/// (raw binary) read-only data. Any padding between sections is always part of the section that -/// precedes the boundary between the sections. -#[derive(PartialEq)] -pub struct CodeInfo { - /// Number of bytes in total. - pub total_size: CodeOffset, -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/stack_map.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/stack_map.rs deleted file mode 100644 index f9b99269a..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/binemit/stack_map.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::bitset::BitSet; -use alloc::vec::Vec; - -type Num = u32; -const NUM_BITS: usize = core::mem::size_of::() * 8; - -/// Stack maps record which words in a stack frame contain live GC references at -/// a given instruction pointer. -/// -/// Logically, a set of stack maps for a function record a table of the form: -/// -/// ```text -/// +---------------------+-------------------------------------------+ -/// | Instruction Pointer | SP-Relative Offsets of Live GC References | -/// +---------------------+-------------------------------------------+ -/// | 0x12345678 | 2, 6, 12 | -/// | 0x1234abcd | 2, 6 | -/// | ... | ... | -/// +---------------------+-------------------------------------------+ -/// ``` -/// -/// Where "instruction pointer" is an instruction pointer within the function, -/// and "offsets of live GC references" contains the offsets (in units of words) -/// from the frame's stack pointer where live GC references are stored on the -/// stack. Instruction pointers within the function that do not have an entry in -/// this table are not GC safepoints. -/// -/// Because -/// -/// * offsets of live GC references are relative from the stack pointer, and -/// * stack frames grow down from higher addresses to lower addresses, -/// -/// to get a pointer to a live reference at offset `x` within a stack frame, you -/// add `x` from the frame's stack pointer. 
-/// -/// For example, to calculate the pointer to the live GC reference inside "frame -/// 1" below, you would do `frame_1_sp + x`: -/// -/// ```text -/// Stack -/// +-------------------+ -/// | Frame 0 | -/// | | -/// | | | -/// | +-------------------+ <--- Frame 0's SP -/// | | Frame 1 | -/// Grows | | -/// down | | -/// | | Live GC reference | --+-- -/// | | | | -/// | | | | -/// V | | x = offset of live GC reference -/// | | | -/// | | | -/// +-------------------+ --+-- <--- Frame 1's SP -/// | Frame 2 | -/// | ... | -/// ``` -/// -/// An individual `StackMap` is associated with just one instruction pointer -/// within the function, contains the size of the stack frame, and represents -/// the stack frame as a bitmap. There is one bit per word in the stack frame, -/// and if the bit is set, then the word contains a live GC reference. -/// -/// Note that a caller's `OutgoingArg` stack slots and callee's `IncomingArg` -/// stack slots overlap, so we must choose which function's stack maps record -/// live GC references in these slots. We record the `IncomingArg`s in the -/// callee's stack map. -#[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(serde::Deserialize, serde::Serialize))] -pub struct StackMap { - bitmap: Vec>, - mapped_words: u32, -} - -impl StackMap { - /// Create a vec of Bitsets from a slice of bools. - pub fn from_slice(vec: &[bool]) -> Self { - let len = vec.len(); - let num_word = len / NUM_BITS + (len % NUM_BITS != 0) as usize; - let mut bitmap = Vec::with_capacity(num_word); - - for segment in vec.chunks(NUM_BITS) { - let mut curr_word = 0; - for (i, set) in segment.iter().enumerate() { - if *set { - curr_word |= 1 << i; - } - } - bitmap.push(BitSet(curr_word)); - } - Self { - mapped_words: len as u32, - bitmap, - } - } - - /// Returns a specified bit. - pub fn get_bit(&self, bit_index: usize) -> bool { - assert!(bit_index < NUM_BITS * self.bitmap.len()); - let word_index = bit_index / NUM_BITS; - let word_offset = (bit_index % NUM_BITS) as u8; - self.bitmap[word_index].contains(word_offset) - } - - /// Returns the raw bitmap that represents this stack map. - pub fn as_slice(&self) -> &[BitSet] { - &self.bitmap - } - - /// Returns the number of words represented by this stack map. - pub fn mapped_words(&self) -> u32 { - self.mapped_words - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn stack_maps() { - let vec: Vec = Vec::new(); - assert!(StackMap::from_slice(&vec).bitmap.is_empty()); - - let mut vec: [bool; NUM_BITS] = Default::default(); - let set_true_idx = [5, 7, 24, 31]; - - for &idx in &set_true_idx { - vec[idx] = true; - } - - let mut vec = vec.to_vec(); - assert_eq!( - vec![BitSet::(2164261024)], - StackMap::from_slice(&vec).bitmap - ); - - vec.push(false); - vec.push(true); - let res = StackMap::from_slice(&vec); - assert_eq!( - vec![BitSet::(2164261024), BitSet::(2)], - res.bitmap - ); - - assert!(res.get_bit(5)); - assert!(res.get_bit(31)); - assert!(res.get_bit(33)); - assert!(!res.get_bit(1)); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/bitset.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/bitset.rs deleted file mode 100644 index 2cb0194b5..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/bitset.rs +++ /dev/null @@ -1,163 +0,0 @@ -//! Small Bitset -//! -//! This module defines a struct `BitSet` encapsulating a bitset built over the type T. -//! T is intended to be a primitive unsigned type. 
Currently it can be any type between u8 and u32 -//! -//! If you would like to add support for larger bitsets in the future, you need to change the trait -//! bound Into and the u32 in the implementation of `max_bits()`. - -use core::convert::{From, Into}; -use core::mem::size_of; -use core::ops::{Add, BitOr, Shl, Sub}; - -/// A small bitset built on a single primitive integer type -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(serde::Serialize, serde::Deserialize))] -pub struct BitSet(pub T); - -impl BitSet -where - T: Into - + From - + BitOr - + Shl - + Sub - + Add - + PartialEq - + Copy, -{ - /// Maximum number of bits supported by this BitSet instance - pub fn bits() -> usize { - size_of::() * 8 - } - - /// Maximum number of bits supported by any bitset instance atm. - pub fn max_bits() -> usize { - size_of::() * 8 - } - - /// Check if this BitSet contains the number num - pub fn contains(&self, num: u8) -> bool { - debug_assert!((num as usize) < Self::bits()); - debug_assert!((num as usize) < Self::max_bits()); - self.0.into() & (1 << num) != 0 - } - - /// Return the smallest number contained in the bitset or None if empty - pub fn min(&self) -> Option { - if self.0.into() == 0 { - None - } else { - Some(self.0.into().trailing_zeros() as u8) - } - } - - /// Return the largest number contained in the bitset or None if empty - pub fn max(&self) -> Option { - if self.0.into() == 0 { - None - } else { - let leading_zeroes = self.0.into().leading_zeros() as usize; - Some((Self::max_bits() - leading_zeroes - 1) as u8) - } - } - - /// Construct a BitSet with the half-open range [lo,hi) filled in - pub fn from_range(lo: u8, hi: u8) -> Self { - debug_assert!(lo <= hi); - debug_assert!((hi as usize) <= Self::bits()); - let one: T = T::from(1); - // I can't just do (one << hi) - one here as the shift may overflow - let hi_rng = if hi >= 1 { - (one << (hi - 1)) + ((one << (hi - 1)) - one) - } else { - T::from(0) - }; - - let lo_rng = (one << lo) - one; - - Self(hi_rng - lo_rng) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn contains() { - let s = BitSet::(255); - for i in 0..7 { - assert!(s.contains(i)); - } - - let s1 = BitSet::(0); - for i in 0..7 { - assert!(!s1.contains(i)); - } - - let s2 = BitSet::(127); - for i in 0..6 { - assert!(s2.contains(i)); - } - assert!(!s2.contains(7)); - - let s3 = BitSet::(2 | 4 | 64); - assert!(!s3.contains(0) && !s3.contains(3) && !s3.contains(4)); - assert!(!s3.contains(5) && !s3.contains(7)); - assert!(s3.contains(1) && s3.contains(2) && s3.contains(6)); - - let s4 = BitSet::(4 | 8 | 256 | 1024); - assert!( - !s4.contains(0) - && !s4.contains(1) - && !s4.contains(4) - && !s4.contains(5) - && !s4.contains(6) - && !s4.contains(7) - && !s4.contains(9) - && !s4.contains(11) - ); - assert!(s4.contains(2) && s4.contains(3) && s4.contains(8) && s4.contains(10)); - } - - #[test] - fn minmax() { - let s = BitSet::(255); - assert_eq!(s.min(), Some(0)); - assert_eq!(s.max(), Some(7)); - assert!(s.min() == Some(0) && s.max() == Some(7)); - let s1 = BitSet::(0); - assert!(s1.min() == None && s1.max() == None); - let s2 = BitSet::(127); - assert!(s2.min() == Some(0) && s2.max() == Some(6)); - let s3 = BitSet::(2 | 4 | 64); - assert!(s3.min() == Some(1) && s3.max() == Some(6)); - let s4 = BitSet::(4 | 8 | 256 | 1024); - assert!(s4.min() == Some(2) && s4.max() == Some(10)); - } - - #[test] - fn from_range() { - let s = BitSet::::from_range(5, 5); - assert!(s.0 == 0); - - let s = BitSet::::from_range(0, 8); - 
assert!(s.0 == 255); - - let s = BitSet::::from_range(0, 8); - assert!(s.0 == 255u16); - - let s = BitSet::::from_range(0, 16); - assert!(s.0 == 65535u16); - - let s = BitSet::::from_range(5, 6); - assert!(s.0 == 32u8); - - let s = BitSet::::from_range(3, 7); - assert!(s.0 == 8 | 16 | 32 | 64); - - let s = BitSet::::from_range(5, 11); - assert!(s.0 == 32 | 64 | 128 | 256 | 512 | 1024); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cfg_printer.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cfg_printer.rs deleted file mode 100644 index 843b66f27..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cfg_printer.rs +++ /dev/null @@ -1,83 +0,0 @@ -//! The `CFGPrinter` utility. - -use alloc::vec::Vec; -use core::fmt::{Display, Formatter, Result, Write}; - -use crate::entity::SecondaryMap; -use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; -use crate::ir::Function; -use crate::write::{FuncWriter, PlainWriter}; - -/// A utility for pretty-printing the CFG of a `Function`. -pub struct CFGPrinter<'a> { - func: &'a Function, - cfg: ControlFlowGraph, -} - -/// A utility for pretty-printing the CFG of a `Function`. -impl<'a> CFGPrinter<'a> { - /// Create a new CFGPrinter. - pub fn new(func: &'a Function) -> Self { - Self { - func, - cfg: ControlFlowGraph::with_function(func), - } - } - - /// Write the CFG for this function to `w`. - pub fn write(&self, w: &mut dyn Write) -> Result { - self.header(w)?; - self.block_nodes(w)?; - self.cfg_connections(w)?; - writeln!(w, "}}") - } - - fn header(&self, w: &mut dyn Write) -> Result { - writeln!(w, "digraph \"{}\" {{", self.func.name)?; - if let Some(entry) = self.func.layout.entry_block() { - writeln!(w, " {{rank=min; {}}}", entry)?; - } - Ok(()) - } - - fn block_nodes(&self, w: &mut dyn Write) -> Result { - let mut aliases = SecondaryMap::<_, Vec<_>>::new(); - for v in self.func.dfg.values() { - // VADFS returns the immediate target of an alias - if let Some(k) = self.func.dfg.value_alias_dest_for_serialization(v) { - aliases[k].push(v); - } - } - - for block in &self.func.layout { - write!(w, " {} [shape=record, label=\"{{", block)?; - crate::write::write_block_header(w, self.func, block, 4)?; - // Add all outgoing branch instructions to the label. - for inst in self.func.layout.block_likely_branches(block) { - write!(w, " | <{}>", inst)?; - PlainWriter.write_instruction(w, self.func, &aliases, inst, 0)?; - } - writeln!(w, "}}\"]")? - } - Ok(()) - } - - fn cfg_connections(&self, w: &mut dyn Write) -> Result { - for block in &self.func.layout { - for BlockPredecessor { - block: parent, - inst, - } in self.cfg.pred_iter(block) - { - writeln!(w, " {}:{} -> {}", parent, inst, block)?; - } - } - Ok(()) - } -} - -impl<'a> Display for CFGPrinter<'a> { - fn fmt(&self, f: &mut Formatter) -> Result { - self.write(f) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/clif.isle b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/clif.isle deleted file mode 100644 index afeadf23c..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/clif.isle +++ /dev/null @@ -1,1700 +0,0 @@ -;; GENERATED BY `gen_isle`. DO NOT EDIT!!! -;; -;; This ISLE file defines all the external type declarations for Cranelift's -;; data structures that ISLE will process, such as `InstructionData` and -;; `Opcode`. 
- -;;;; Extern type declarations for immediates ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(type Block (primitive Block)) -(type Constant (primitive Constant)) -(type FuncRef (primitive FuncRef)) -(type GlobalValue (primitive GlobalValue)) -(type Heap (primitive Heap)) -(type Ieee32 (primitive Ieee32)) -(type Ieee64 (primitive Ieee64)) -(type Imm64 (primitive Imm64)) -(type Immediate (primitive Immediate)) -(type JumpTable (primitive JumpTable)) -(type MemFlags (primitive MemFlags)) -(type Offset32 (primitive Offset32)) -(type SigRef (primitive SigRef)) -(type StackSlot (primitive StackSlot)) -(type Table (primitive Table)) -(type Uimm32 (primitive Uimm32)) -(type Uimm8 (primitive Uimm8)) -(type bool (primitive bool)) - -;;;; Enumerated Immediate: AtomicRmwOp ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(type AtomicRmwOp extern - (enum - Add - And - Nand - Or - Smax - Smin - Sub - Umax - Umin - Xchg - Xor - ) -) - -;;;; Enumerated Immediate: FloatCC ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(type FloatCC extern - (enum - Equal - GreaterThan - GreaterThanOrEqual - LessThan - LessThanOrEqual - NotEqual - Ordered - OrderedNotEqual - Unordered - UnorderedOrEqual - UnorderedOrGreaterThan - UnorderedOrGreaterThanOrEqual - UnorderedOrLessThan - UnorderedOrLessThanOrEqual - ) -) - -;;;; Enumerated Immediate: IntCC ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(type IntCC extern - (enum - Equal - NotEqual - NotOverflow - Overflow - SignedGreaterThan - SignedGreaterThanOrEqual - SignedLessThan - SignedLessThanOrEqual - UnsignedGreaterThan - UnsignedGreaterThanOrEqual - UnsignedLessThan - UnsignedLessThanOrEqual - ) -) - -;;;; Enumerated Immediate: TrapCode ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(type TrapCode extern - (enum - HeapOutOfBounds - IntegerDivisionByZero - IntegerOverflow - StackOverflow - ) -) - -;;;; Value Arrays ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; ISLE representation of `[Value; 2]`. -(type ValueArray2 extern (enum)) - -(decl value_array_2 (Value Value) ValueArray2) -(extern constructor value_array_2 pack_value_array_2) -(extern extractor infallible value_array_2 unpack_value_array_2) - -;; ISLE representation of `[Value; 3]`. 
-(type ValueArray3 extern (enum)) - -(decl value_array_3 (Value Value Value) ValueArray3) -(extern constructor value_array_3 pack_value_array_3) -(extern extractor infallible value_array_3 unpack_value_array_3) - -;;;; `Opcode` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(type Opcode extern - (enum - Jump - Brz - Brnz - BrIcmp - Brif - Brff - BrTable - Debugtrap - Trap - Trapz - ResumableTrap - Trapnz - ResumableTrapnz - Trapif - Trapff - Return - FallthroughReturn - Call - CallIndirect - FuncAddr - Splat - Swizzle - Insertlane - Extractlane - Imin - Umin - Imax - Umax - AvgRound - UaddSat - SaddSat - UsubSat - SsubSat - Load - LoadComplex - Store - StoreComplex - Uload8 - Uload8Complex - Sload8 - Sload8Complex - Istore8 - Istore8Complex - Uload16 - Uload16Complex - Sload16 - Sload16Complex - Istore16 - Istore16Complex - Uload32 - Uload32Complex - Sload32 - Sload32Complex - Istore32 - Istore32Complex - Uload8x8 - Uload8x8Complex - Sload8x8 - Sload8x8Complex - Uload16x4 - Uload16x4Complex - Sload16x4 - Sload16x4Complex - Uload32x2 - Uload32x2Complex - Sload32x2 - Sload32x2Complex - StackLoad - StackStore - StackAddr - GlobalValue - SymbolValue - TlsValue - HeapAddr - GetPinnedReg - SetPinnedReg - TableAddr - Iconst - F32const - F64const - Bconst - Vconst - ConstAddr - Shuffle - Null - Nop - Select - Selectif - SelectifSpectreGuard - Bitselect - Copy - IfcmpSp - Vsplit - Vconcat - Vselect - VanyTrue - VallTrue - VhighBits - Icmp - IcmpImm - Ifcmp - IfcmpImm - Iadd - Isub - Ineg - Iabs - Imul - Umulhi - Smulhi - SqmulRoundSat - Udiv - Sdiv - Urem - Srem - IaddImm - ImulImm - UdivImm - SdivImm - UremImm - SremImm - IrsubImm - IaddCin - IaddIfcin - IaddCout - IaddIfcout - IaddCarry - IaddIfcarry - IsubBin - IsubIfbin - IsubBout - IsubIfbout - IsubBorrow - IsubIfborrow - Band - Bor - Bxor - Bnot - BandNot - BorNot - BxorNot - BandImm - BorImm - BxorImm - Rotl - Rotr - RotlImm - RotrImm - Ishl - Ushr - Sshr - IshlImm - UshrImm - SshrImm - Bitrev - Clz - Cls - Ctz - Popcnt - Fcmp - Ffcmp - Fadd - Fsub - Fmul - Fdiv - Sqrt - Fma - Fneg - Fabs - Fcopysign - Fmin - FminPseudo - Fmax - FmaxPseudo - Ceil - Floor - Trunc - Nearest - IsNull - IsInvalid - Trueif - Trueff - Bitcast - RawBitcast - ScalarToVector - Breduce - Bextend - Bint - Bmask - Ireduce - Snarrow - Unarrow - Uunarrow - SwidenLow - SwidenHigh - UwidenLow - UwidenHigh - IaddPairwise - WideningPairwiseDotProductS - Uextend - Sextend - Fpromote - Fdemote - Fvdemote - FvpromoteLow - FcvtToUint - FcvtToUintSat - FcvtToSint - FcvtToSintSat - FcvtFromUint - FcvtFromSint - FcvtLowFromSint - Isplit - Iconcat - AtomicRmw - AtomicCas - AtomicLoad - AtomicStore - Fence - ) -) - -;;;; `InstructionData` ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(type InstructionData extern - (enum - (AtomicCas (opcode Opcode) (args ValueArray3) (flags MemFlags)) - (AtomicRmw (opcode Opcode) (args ValueArray2) (flags MemFlags) (op AtomicRmwOp)) - (Binary (opcode Opcode) (args ValueArray2)) - (BinaryImm64 (opcode Opcode) (arg Value) (imm Imm64)) - (BinaryImm8 (opcode Opcode) (arg Value) (imm Uimm8)) - (Branch (opcode Opcode) (args ValueList) (destination Block)) - (BranchFloat (opcode Opcode) (args ValueList) (cond FloatCC) (destination Block)) - (BranchIcmp (opcode Opcode) (args ValueList) (cond IntCC) (destination Block)) - (BranchInt (opcode Opcode) (args ValueList) (cond IntCC) (destination Block)) - (BranchTable (opcode Opcode) (arg Value) (destination Block) (table JumpTable)) - (Call (opcode Opcode) (args ValueList) 
(func_ref FuncRef)) - (CallIndirect (opcode Opcode) (args ValueList) (sig_ref SigRef)) - (CondTrap (opcode Opcode) (arg Value) (code TrapCode)) - (FloatCompare (opcode Opcode) (args ValueArray2) (cond FloatCC)) - (FloatCond (opcode Opcode) (arg Value) (cond FloatCC)) - (FloatCondTrap (opcode Opcode) (arg Value) (cond FloatCC) (code TrapCode)) - (FuncAddr (opcode Opcode) (func_ref FuncRef)) - (HeapAddr (opcode Opcode) (arg Value) (heap Heap) (imm Uimm32)) - (IntCompare (opcode Opcode) (args ValueArray2) (cond IntCC)) - (IntCompareImm (opcode Opcode) (arg Value) (cond IntCC) (imm Imm64)) - (IntCond (opcode Opcode) (arg Value) (cond IntCC)) - (IntCondTrap (opcode Opcode) (arg Value) (cond IntCC) (code TrapCode)) - (IntSelect (opcode Opcode) (args ValueArray3) (cond IntCC)) - (Jump (opcode Opcode) (args ValueList) (destination Block)) - (Load (opcode Opcode) (arg Value) (flags MemFlags) (offset Offset32)) - (LoadComplex (opcode Opcode) (args ValueList) (flags MemFlags) (offset Offset32)) - (LoadNoOffset (opcode Opcode) (arg Value) (flags MemFlags)) - (MultiAry (opcode Opcode) (args ValueList)) - (NullAry (opcode Opcode)) - (Shuffle (opcode Opcode) (args ValueArray2) (imm Immediate)) - (StackLoad (opcode Opcode) (stack_slot StackSlot) (offset Offset32)) - (StackStore (opcode Opcode) (arg Value) (stack_slot StackSlot) (offset Offset32)) - (Store (opcode Opcode) (args ValueArray2) (flags MemFlags) (offset Offset32)) - (StoreComplex (opcode Opcode) (args ValueList) (flags MemFlags) (offset Offset32)) - (StoreNoOffset (opcode Opcode) (args ValueArray2) (flags MemFlags)) - (TableAddr (opcode Opcode) (arg Value) (table Table) (offset Offset32)) - (Ternary (opcode Opcode) (args ValueArray3)) - (TernaryImm8 (opcode Opcode) (args ValueArray2) (imm Uimm8)) - (Trap (opcode Opcode) (code TrapCode)) - (Unary (opcode Opcode) (arg Value)) - (UnaryBool (opcode Opcode) (imm bool)) - (UnaryConst (opcode Opcode) (constant_handle Constant)) - (UnaryGlobalValue (opcode Opcode) (global_value GlobalValue)) - (UnaryIeee32 (opcode Opcode) (imm Ieee32)) - (UnaryIeee64 (opcode Opcode) (imm Ieee64)) - (UnaryImm (opcode Opcode) (imm Imm64)) - ) -) - -;;;; Extracting Opcode, Operands, and Immediates from `InstructionData` ;;;;;;;; - -(decl jump (Block ValueSlice) Inst) -(extractor - (jump block args) - (inst_data (InstructionData.Jump (Opcode.Jump) (value_list_slice args) block)) -) - -(decl brz (Value Block ValueSlice) Inst) -(extractor - (brz c block args) - (inst_data (InstructionData.Branch (Opcode.Brz) (unwrap_head_value_list_1 c args) block)) -) - -(decl brnz (Value Block ValueSlice) Inst) -(extractor - (brnz c block args) - (inst_data (InstructionData.Branch (Opcode.Brnz) (unwrap_head_value_list_1 c args) block)) -) - -(decl br_icmp (IntCC Value Value Block ValueSlice) Inst) -(extractor - (br_icmp Cond x y block args) - (inst_data (InstructionData.BranchIcmp (Opcode.BrIcmp) (unwrap_head_value_list_2 x y args) Cond block)) -) - -(decl brif (IntCC Value Block ValueSlice) Inst) -(extractor - (brif Cond f block args) - (inst_data (InstructionData.BranchInt (Opcode.Brif) (unwrap_head_value_list_1 f args) Cond block)) -) - -(decl brff (FloatCC Value Block ValueSlice) Inst) -(extractor - (brff Cond f block args) - (inst_data (InstructionData.BranchFloat (Opcode.Brff) (unwrap_head_value_list_1 f args) Cond block)) -) - -(decl br_table (Value Block JumpTable) Inst) -(extractor - (br_table x block JT) - (inst_data (InstructionData.BranchTable (Opcode.BrTable) x block JT)) -) - -(decl debugtrap () Inst) -(extractor - (debugtrap 
) - (inst_data (InstructionData.NullAry (Opcode.Debugtrap))) -) - -(decl trap (TrapCode) Inst) -(extractor - (trap code) - (inst_data (InstructionData.Trap (Opcode.Trap) code)) -) - -(decl trapz (Value TrapCode) Inst) -(extractor - (trapz c code) - (inst_data (InstructionData.CondTrap (Opcode.Trapz) c code)) -) - -(decl resumable_trap (TrapCode) Inst) -(extractor - (resumable_trap code) - (inst_data (InstructionData.Trap (Opcode.ResumableTrap) code)) -) - -(decl trapnz (Value TrapCode) Inst) -(extractor - (trapnz c code) - (inst_data (InstructionData.CondTrap (Opcode.Trapnz) c code)) -) - -(decl resumable_trapnz (Value TrapCode) Inst) -(extractor - (resumable_trapnz c code) - (inst_data (InstructionData.CondTrap (Opcode.ResumableTrapnz) c code)) -) - -(decl trapif (IntCC Value TrapCode) Inst) -(extractor - (trapif Cond f code) - (inst_data (InstructionData.IntCondTrap (Opcode.Trapif) f Cond code)) -) - -(decl trapff (FloatCC Value TrapCode) Inst) -(extractor - (trapff Cond f code) - (inst_data (InstructionData.FloatCondTrap (Opcode.Trapff) f Cond code)) -) - -(decl return (ValueSlice) Inst) -(extractor - (return rvals) - (inst_data (InstructionData.MultiAry (Opcode.Return) (value_list_slice rvals))) -) - -(decl fallthrough_return (ValueSlice) Inst) -(extractor - (fallthrough_return rvals) - (inst_data (InstructionData.MultiAry (Opcode.FallthroughReturn) (value_list_slice rvals))) -) - -(decl call (FuncRef ValueSlice) Inst) -(extractor - (call FN args) - (inst_data (InstructionData.Call (Opcode.Call) (value_list_slice args) FN)) -) - -(decl call_indirect (SigRef Value ValueSlice) Inst) -(extractor - (call_indirect SIG callee args) - (inst_data (InstructionData.CallIndirect (Opcode.CallIndirect) (unwrap_head_value_list_1 callee args) SIG)) -) - -(decl func_addr (FuncRef) Inst) -(extractor - (func_addr FN) - (inst_data (InstructionData.FuncAddr (Opcode.FuncAddr) FN)) -) - -(decl splat (Value) Inst) -(extractor - (splat x) - (inst_data (InstructionData.Unary (Opcode.Splat) x)) -) - -(decl swizzle (Value Value) Inst) -(extractor - (swizzle x y) - (inst_data (InstructionData.Binary (Opcode.Swizzle) (value_array_2 x y))) -) - -(decl insertlane (Value Value Uimm8) Inst) -(extractor - (insertlane x y Idx) - (inst_data (InstructionData.TernaryImm8 (Opcode.Insertlane) (value_array_2 x y) Idx)) -) - -(decl extractlane (Value Uimm8) Inst) -(extractor - (extractlane x Idx) - (inst_data (InstructionData.BinaryImm8 (Opcode.Extractlane) x Idx)) -) - -(decl imin (Value Value) Inst) -(extractor - (imin x y) - (inst_data (InstructionData.Binary (Opcode.Imin) (value_array_2 x y))) -) - -(decl umin (Value Value) Inst) -(extractor - (umin x y) - (inst_data (InstructionData.Binary (Opcode.Umin) (value_array_2 x y))) -) - -(decl imax (Value Value) Inst) -(extractor - (imax x y) - (inst_data (InstructionData.Binary (Opcode.Imax) (value_array_2 x y))) -) - -(decl umax (Value Value) Inst) -(extractor - (umax x y) - (inst_data (InstructionData.Binary (Opcode.Umax) (value_array_2 x y))) -) - -(decl avg_round (Value Value) Inst) -(extractor - (avg_round x y) - (inst_data (InstructionData.Binary (Opcode.AvgRound) (value_array_2 x y))) -) - -(decl uadd_sat (Value Value) Inst) -(extractor - (uadd_sat x y) - (inst_data (InstructionData.Binary (Opcode.UaddSat) (value_array_2 x y))) -) - -(decl sadd_sat (Value Value) Inst) -(extractor - (sadd_sat x y) - (inst_data (InstructionData.Binary (Opcode.SaddSat) (value_array_2 x y))) -) - -(decl usub_sat (Value Value) Inst) -(extractor - (usub_sat x y) - (inst_data 
(InstructionData.Binary (Opcode.UsubSat) (value_array_2 x y))) -) - -(decl ssub_sat (Value Value) Inst) -(extractor - (ssub_sat x y) - (inst_data (InstructionData.Binary (Opcode.SsubSat) (value_array_2 x y))) -) - -(decl load (MemFlags Value Offset32) Inst) -(extractor - (load MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Load) p MemFlags Offset)) -) - -(decl load_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (load_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.LoadComplex) (value_list_slice args) MemFlags Offset)) -) - -(decl store (MemFlags Value Value Offset32) Inst) -(extractor - (store MemFlags x p Offset) - (inst_data (InstructionData.Store (Opcode.Store) (value_array_2 x p) MemFlags Offset)) -) - -(decl store_complex (MemFlags Value ValueSlice Offset32) Inst) -(extractor - (store_complex MemFlags x args Offset) - (inst_data (InstructionData.StoreComplex (Opcode.StoreComplex) (unwrap_head_value_list_1 x args) MemFlags Offset)) -) - -(decl uload8 (MemFlags Value Offset32) Inst) -(extractor - (uload8 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Uload8) p MemFlags Offset)) -) - -(decl uload8_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (uload8_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Uload8Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl sload8 (MemFlags Value Offset32) Inst) -(extractor - (sload8 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Sload8) p MemFlags Offset)) -) - -(decl sload8_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (sload8_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Sload8Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl istore8 (MemFlags Value Value Offset32) Inst) -(extractor - (istore8 MemFlags x p Offset) - (inst_data (InstructionData.Store (Opcode.Istore8) (value_array_2 x p) MemFlags Offset)) -) - -(decl istore8_complex (MemFlags Value ValueSlice Offset32) Inst) -(extractor - (istore8_complex MemFlags x args Offset) - (inst_data (InstructionData.StoreComplex (Opcode.Istore8Complex) (unwrap_head_value_list_1 x args) MemFlags Offset)) -) - -(decl uload16 (MemFlags Value Offset32) Inst) -(extractor - (uload16 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Uload16) p MemFlags Offset)) -) - -(decl uload16_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (uload16_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Uload16Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl sload16 (MemFlags Value Offset32) Inst) -(extractor - (sload16 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Sload16) p MemFlags Offset)) -) - -(decl sload16_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (sload16_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Sload16Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl istore16 (MemFlags Value Value Offset32) Inst) -(extractor - (istore16 MemFlags x p Offset) - (inst_data (InstructionData.Store (Opcode.Istore16) (value_array_2 x p) MemFlags Offset)) -) - -(decl istore16_complex (MemFlags Value ValueSlice Offset32) Inst) -(extractor - (istore16_complex MemFlags x args Offset) - (inst_data (InstructionData.StoreComplex (Opcode.Istore16Complex) (unwrap_head_value_list_1 x args) MemFlags Offset)) -) - -(decl uload32 (MemFlags Value Offset32) Inst) -(extractor - (uload32 MemFlags p Offset) - (inst_data 
(InstructionData.Load (Opcode.Uload32) p MemFlags Offset)) -) - -(decl uload32_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (uload32_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Uload32Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl sload32 (MemFlags Value Offset32) Inst) -(extractor - (sload32 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Sload32) p MemFlags Offset)) -) - -(decl sload32_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (sload32_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Sload32Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl istore32 (MemFlags Value Value Offset32) Inst) -(extractor - (istore32 MemFlags x p Offset) - (inst_data (InstructionData.Store (Opcode.Istore32) (value_array_2 x p) MemFlags Offset)) -) - -(decl istore32_complex (MemFlags Value ValueSlice Offset32) Inst) -(extractor - (istore32_complex MemFlags x args Offset) - (inst_data (InstructionData.StoreComplex (Opcode.Istore32Complex) (unwrap_head_value_list_1 x args) MemFlags Offset)) -) - -(decl uload8x8 (MemFlags Value Offset32) Inst) -(extractor - (uload8x8 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Uload8x8) p MemFlags Offset)) -) - -(decl uload8x8_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (uload8x8_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Uload8x8Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl sload8x8 (MemFlags Value Offset32) Inst) -(extractor - (sload8x8 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Sload8x8) p MemFlags Offset)) -) - -(decl sload8x8_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (sload8x8_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Sload8x8Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl uload16x4 (MemFlags Value Offset32) Inst) -(extractor - (uload16x4 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Uload16x4) p MemFlags Offset)) -) - -(decl uload16x4_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (uload16x4_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Uload16x4Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl sload16x4 (MemFlags Value Offset32) Inst) -(extractor - (sload16x4 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Sload16x4) p MemFlags Offset)) -) - -(decl sload16x4_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (sload16x4_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Sload16x4Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl uload32x2 (MemFlags Value Offset32) Inst) -(extractor - (uload32x2 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Uload32x2) p MemFlags Offset)) -) - -(decl uload32x2_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (uload32x2_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Uload32x2Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl sload32x2 (MemFlags Value Offset32) Inst) -(extractor - (sload32x2 MemFlags p Offset) - (inst_data (InstructionData.Load (Opcode.Sload32x2) p MemFlags Offset)) -) - -(decl sload32x2_complex (MemFlags ValueSlice Offset32) Inst) -(extractor - (sload32x2_complex MemFlags args Offset) - (inst_data (InstructionData.LoadComplex (Opcode.Sload32x2Complex) (value_list_slice args) MemFlags Offset)) -) - -(decl 
stack_load (StackSlot Offset32) Inst) -(extractor - (stack_load SS Offset) - (inst_data (InstructionData.StackLoad (Opcode.StackLoad) SS Offset)) -) - -(decl stack_store (Value StackSlot Offset32) Inst) -(extractor - (stack_store x SS Offset) - (inst_data (InstructionData.StackStore (Opcode.StackStore) x SS Offset)) -) - -(decl stack_addr (StackSlot Offset32) Inst) -(extractor - (stack_addr SS Offset) - (inst_data (InstructionData.StackLoad (Opcode.StackAddr) SS Offset)) -) - -(decl global_value (GlobalValue) Inst) -(extractor - (global_value GV) - (inst_data (InstructionData.UnaryGlobalValue (Opcode.GlobalValue) GV)) -) - -(decl symbol_value (GlobalValue) Inst) -(extractor - (symbol_value GV) - (inst_data (InstructionData.UnaryGlobalValue (Opcode.SymbolValue) GV)) -) - -(decl tls_value (GlobalValue) Inst) -(extractor - (tls_value GV) - (inst_data (InstructionData.UnaryGlobalValue (Opcode.TlsValue) GV)) -) - -(decl heap_addr (Heap Value Uimm32) Inst) -(extractor - (heap_addr H p Size) - (inst_data (InstructionData.HeapAddr (Opcode.HeapAddr) p H Size)) -) - -(decl get_pinned_reg () Inst) -(extractor - (get_pinned_reg ) - (inst_data (InstructionData.NullAry (Opcode.GetPinnedReg))) -) - -(decl set_pinned_reg (Value) Inst) -(extractor - (set_pinned_reg addr) - (inst_data (InstructionData.Unary (Opcode.SetPinnedReg) addr)) -) - -(decl table_addr (Table Value Offset32) Inst) -(extractor - (table_addr T p Offset) - (inst_data (InstructionData.TableAddr (Opcode.TableAddr) p T Offset)) -) - -(decl iconst (Imm64) Inst) -(extractor - (iconst N) - (inst_data (InstructionData.UnaryImm (Opcode.Iconst) N)) -) - -(decl f32const (Ieee32) Inst) -(extractor - (f32const N) - (inst_data (InstructionData.UnaryIeee32 (Opcode.F32const) N)) -) - -(decl f64const (Ieee64) Inst) -(extractor - (f64const N) - (inst_data (InstructionData.UnaryIeee64 (Opcode.F64const) N)) -) - -(decl bconst (bool) Inst) -(extractor - (bconst N) - (inst_data (InstructionData.UnaryBool (Opcode.Bconst) N)) -) - -(decl vconst (Constant) Inst) -(extractor - (vconst N) - (inst_data (InstructionData.UnaryConst (Opcode.Vconst) N)) -) - -(decl const_addr (Constant) Inst) -(extractor - (const_addr constant) - (inst_data (InstructionData.UnaryConst (Opcode.ConstAddr) constant)) -) - -(decl shuffle (Value Value Immediate) Inst) -(extractor - (shuffle a b mask) - (inst_data (InstructionData.Shuffle (Opcode.Shuffle) (value_array_2 a b) mask)) -) - -(decl null () Inst) -(extractor - (null ) - (inst_data (InstructionData.NullAry (Opcode.Null))) -) - -(decl nop () Inst) -(extractor - (nop ) - (inst_data (InstructionData.NullAry (Opcode.Nop))) -) - -(decl select (Value Value Value) Inst) -(extractor - (select c x y) - (inst_data (InstructionData.Ternary (Opcode.Select) (value_array_3 c x y))) -) - -(decl selectif (IntCC Value Value Value) Inst) -(extractor - (selectif cc flags x y) - (inst_data (InstructionData.IntSelect (Opcode.Selectif) (value_array_3 flags x y) cc)) -) - -(decl selectif_spectre_guard (IntCC Value Value Value) Inst) -(extractor - (selectif_spectre_guard cc flags x y) - (inst_data (InstructionData.IntSelect (Opcode.SelectifSpectreGuard) (value_array_3 flags x y) cc)) -) - -(decl bitselect (Value Value Value) Inst) -(extractor - (bitselect c x y) - (inst_data (InstructionData.Ternary (Opcode.Bitselect) (value_array_3 c x y))) -) - -(decl copy (Value) Inst) -(extractor - (copy x) - (inst_data (InstructionData.Unary (Opcode.Copy) x)) -) - -(decl ifcmp_sp (Value) Inst) -(extractor - (ifcmp_sp addr) - (inst_data (InstructionData.Unary 
(Opcode.IfcmpSp) addr)) -) - -(decl vsplit (Value) Inst) -(extractor - (vsplit x) - (inst_data (InstructionData.Unary (Opcode.Vsplit) x)) -) - -(decl vconcat (Value Value) Inst) -(extractor - (vconcat x y) - (inst_data (InstructionData.Binary (Opcode.Vconcat) (value_array_2 x y))) -) - -(decl vselect (Value Value Value) Inst) -(extractor - (vselect c x y) - (inst_data (InstructionData.Ternary (Opcode.Vselect) (value_array_3 c x y))) -) - -(decl vany_true (Value) Inst) -(extractor - (vany_true a) - (inst_data (InstructionData.Unary (Opcode.VanyTrue) a)) -) - -(decl vall_true (Value) Inst) -(extractor - (vall_true a) - (inst_data (InstructionData.Unary (Opcode.VallTrue) a)) -) - -(decl vhigh_bits (Value) Inst) -(extractor - (vhigh_bits a) - (inst_data (InstructionData.Unary (Opcode.VhighBits) a)) -) - -(decl icmp (IntCC Value Value) Inst) -(extractor - (icmp Cond x y) - (inst_data (InstructionData.IntCompare (Opcode.Icmp) (value_array_2 x y) Cond)) -) - -(decl icmp_imm (IntCC Value Imm64) Inst) -(extractor - (icmp_imm Cond x Y) - (inst_data (InstructionData.IntCompareImm (Opcode.IcmpImm) x Cond Y)) -) - -(decl ifcmp (Value Value) Inst) -(extractor - (ifcmp x y) - (inst_data (InstructionData.Binary (Opcode.Ifcmp) (value_array_2 x y))) -) - -(decl ifcmp_imm (Value Imm64) Inst) -(extractor - (ifcmp_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.IfcmpImm) x Y)) -) - -(decl iadd (Value Value) Inst) -(extractor - (iadd x y) - (inst_data (InstructionData.Binary (Opcode.Iadd) (value_array_2 x y))) -) - -(decl isub (Value Value) Inst) -(extractor - (isub x y) - (inst_data (InstructionData.Binary (Opcode.Isub) (value_array_2 x y))) -) - -(decl ineg (Value) Inst) -(extractor - (ineg x) - (inst_data (InstructionData.Unary (Opcode.Ineg) x)) -) - -(decl iabs (Value) Inst) -(extractor - (iabs x) - (inst_data (InstructionData.Unary (Opcode.Iabs) x)) -) - -(decl imul (Value Value) Inst) -(extractor - (imul x y) - (inst_data (InstructionData.Binary (Opcode.Imul) (value_array_2 x y))) -) - -(decl umulhi (Value Value) Inst) -(extractor - (umulhi x y) - (inst_data (InstructionData.Binary (Opcode.Umulhi) (value_array_2 x y))) -) - -(decl smulhi (Value Value) Inst) -(extractor - (smulhi x y) - (inst_data (InstructionData.Binary (Opcode.Smulhi) (value_array_2 x y))) -) - -(decl sqmul_round_sat (Value Value) Inst) -(extractor - (sqmul_round_sat x y) - (inst_data (InstructionData.Binary (Opcode.SqmulRoundSat) (value_array_2 x y))) -) - -(decl udiv (Value Value) Inst) -(extractor - (udiv x y) - (inst_data (InstructionData.Binary (Opcode.Udiv) (value_array_2 x y))) -) - -(decl sdiv (Value Value) Inst) -(extractor - (sdiv x y) - (inst_data (InstructionData.Binary (Opcode.Sdiv) (value_array_2 x y))) -) - -(decl urem (Value Value) Inst) -(extractor - (urem x y) - (inst_data (InstructionData.Binary (Opcode.Urem) (value_array_2 x y))) -) - -(decl srem (Value Value) Inst) -(extractor - (srem x y) - (inst_data (InstructionData.Binary (Opcode.Srem) (value_array_2 x y))) -) - -(decl iadd_imm (Value Imm64) Inst) -(extractor - (iadd_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.IaddImm) x Y)) -) - -(decl imul_imm (Value Imm64) Inst) -(extractor - (imul_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.ImulImm) x Y)) -) - -(decl udiv_imm (Value Imm64) Inst) -(extractor - (udiv_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.UdivImm) x Y)) -) - -(decl sdiv_imm (Value Imm64) Inst) -(extractor - (sdiv_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.SdivImm) x Y)) -) - -(decl 
urem_imm (Value Imm64) Inst) -(extractor - (urem_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.UremImm) x Y)) -) - -(decl srem_imm (Value Imm64) Inst) -(extractor - (srem_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.SremImm) x Y)) -) - -(decl irsub_imm (Value Imm64) Inst) -(extractor - (irsub_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.IrsubImm) x Y)) -) - -(decl iadd_cin (Value Value Value) Inst) -(extractor - (iadd_cin x y c_in) - (inst_data (InstructionData.Ternary (Opcode.IaddCin) (value_array_3 x y c_in))) -) - -(decl iadd_ifcin (Value Value Value) Inst) -(extractor - (iadd_ifcin x y c_in) - (inst_data (InstructionData.Ternary (Opcode.IaddIfcin) (value_array_3 x y c_in))) -) - -(decl iadd_cout (Value Value) Inst) -(extractor - (iadd_cout x y) - (inst_data (InstructionData.Binary (Opcode.IaddCout) (value_array_2 x y))) -) - -(decl iadd_ifcout (Value Value) Inst) -(extractor - (iadd_ifcout x y) - (inst_data (InstructionData.Binary (Opcode.IaddIfcout) (value_array_2 x y))) -) - -(decl iadd_carry (Value Value Value) Inst) -(extractor - (iadd_carry x y c_in) - (inst_data (InstructionData.Ternary (Opcode.IaddCarry) (value_array_3 x y c_in))) -) - -(decl iadd_ifcarry (Value Value Value) Inst) -(extractor - (iadd_ifcarry x y c_in) - (inst_data (InstructionData.Ternary (Opcode.IaddIfcarry) (value_array_3 x y c_in))) -) - -(decl isub_bin (Value Value Value) Inst) -(extractor - (isub_bin x y b_in) - (inst_data (InstructionData.Ternary (Opcode.IsubBin) (value_array_3 x y b_in))) -) - -(decl isub_ifbin (Value Value Value) Inst) -(extractor - (isub_ifbin x y b_in) - (inst_data (InstructionData.Ternary (Opcode.IsubIfbin) (value_array_3 x y b_in))) -) - -(decl isub_bout (Value Value) Inst) -(extractor - (isub_bout x y) - (inst_data (InstructionData.Binary (Opcode.IsubBout) (value_array_2 x y))) -) - -(decl isub_ifbout (Value Value) Inst) -(extractor - (isub_ifbout x y) - (inst_data (InstructionData.Binary (Opcode.IsubIfbout) (value_array_2 x y))) -) - -(decl isub_borrow (Value Value Value) Inst) -(extractor - (isub_borrow x y b_in) - (inst_data (InstructionData.Ternary (Opcode.IsubBorrow) (value_array_3 x y b_in))) -) - -(decl isub_ifborrow (Value Value Value) Inst) -(extractor - (isub_ifborrow x y b_in) - (inst_data (InstructionData.Ternary (Opcode.IsubIfborrow) (value_array_3 x y b_in))) -) - -(decl band (Value Value) Inst) -(extractor - (band x y) - (inst_data (InstructionData.Binary (Opcode.Band) (value_array_2 x y))) -) - -(decl bor (Value Value) Inst) -(extractor - (bor x y) - (inst_data (InstructionData.Binary (Opcode.Bor) (value_array_2 x y))) -) - -(decl bxor (Value Value) Inst) -(extractor - (bxor x y) - (inst_data (InstructionData.Binary (Opcode.Bxor) (value_array_2 x y))) -) - -(decl bnot (Value) Inst) -(extractor - (bnot x) - (inst_data (InstructionData.Unary (Opcode.Bnot) x)) -) - -(decl band_not (Value Value) Inst) -(extractor - (band_not x y) - (inst_data (InstructionData.Binary (Opcode.BandNot) (value_array_2 x y))) -) - -(decl bor_not (Value Value) Inst) -(extractor - (bor_not x y) - (inst_data (InstructionData.Binary (Opcode.BorNot) (value_array_2 x y))) -) - -(decl bxor_not (Value Value) Inst) -(extractor - (bxor_not x y) - (inst_data (InstructionData.Binary (Opcode.BxorNot) (value_array_2 x y))) -) - -(decl band_imm (Value Imm64) Inst) -(extractor - (band_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.BandImm) x Y)) -) - -(decl bor_imm (Value Imm64) Inst) -(extractor - (bor_imm x Y) - (inst_data (InstructionData.BinaryImm64 
(Opcode.BorImm) x Y)) -) - -(decl bxor_imm (Value Imm64) Inst) -(extractor - (bxor_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.BxorImm) x Y)) -) - -(decl rotl (Value Value) Inst) -(extractor - (rotl x y) - (inst_data (InstructionData.Binary (Opcode.Rotl) (value_array_2 x y))) -) - -(decl rotr (Value Value) Inst) -(extractor - (rotr x y) - (inst_data (InstructionData.Binary (Opcode.Rotr) (value_array_2 x y))) -) - -(decl rotl_imm (Value Imm64) Inst) -(extractor - (rotl_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.RotlImm) x Y)) -) - -(decl rotr_imm (Value Imm64) Inst) -(extractor - (rotr_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.RotrImm) x Y)) -) - -(decl ishl (Value Value) Inst) -(extractor - (ishl x y) - (inst_data (InstructionData.Binary (Opcode.Ishl) (value_array_2 x y))) -) - -(decl ushr (Value Value) Inst) -(extractor - (ushr x y) - (inst_data (InstructionData.Binary (Opcode.Ushr) (value_array_2 x y))) -) - -(decl sshr (Value Value) Inst) -(extractor - (sshr x y) - (inst_data (InstructionData.Binary (Opcode.Sshr) (value_array_2 x y))) -) - -(decl ishl_imm (Value Imm64) Inst) -(extractor - (ishl_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.IshlImm) x Y)) -) - -(decl ushr_imm (Value Imm64) Inst) -(extractor - (ushr_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.UshrImm) x Y)) -) - -(decl sshr_imm (Value Imm64) Inst) -(extractor - (sshr_imm x Y) - (inst_data (InstructionData.BinaryImm64 (Opcode.SshrImm) x Y)) -) - -(decl bitrev (Value) Inst) -(extractor - (bitrev x) - (inst_data (InstructionData.Unary (Opcode.Bitrev) x)) -) - -(decl clz (Value) Inst) -(extractor - (clz x) - (inst_data (InstructionData.Unary (Opcode.Clz) x)) -) - -(decl cls (Value) Inst) -(extractor - (cls x) - (inst_data (InstructionData.Unary (Opcode.Cls) x)) -) - -(decl ctz (Value) Inst) -(extractor - (ctz x) - (inst_data (InstructionData.Unary (Opcode.Ctz) x)) -) - -(decl popcnt (Value) Inst) -(extractor - (popcnt x) - (inst_data (InstructionData.Unary (Opcode.Popcnt) x)) -) - -(decl fcmp (FloatCC Value Value) Inst) -(extractor - (fcmp Cond x y) - (inst_data (InstructionData.FloatCompare (Opcode.Fcmp) (value_array_2 x y) Cond)) -) - -(decl ffcmp (Value Value) Inst) -(extractor - (ffcmp x y) - (inst_data (InstructionData.Binary (Opcode.Ffcmp) (value_array_2 x y))) -) - -(decl fadd (Value Value) Inst) -(extractor - (fadd x y) - (inst_data (InstructionData.Binary (Opcode.Fadd) (value_array_2 x y))) -) - -(decl fsub (Value Value) Inst) -(extractor - (fsub x y) - (inst_data (InstructionData.Binary (Opcode.Fsub) (value_array_2 x y))) -) - -(decl fmul (Value Value) Inst) -(extractor - (fmul x y) - (inst_data (InstructionData.Binary (Opcode.Fmul) (value_array_2 x y))) -) - -(decl fdiv (Value Value) Inst) -(extractor - (fdiv x y) - (inst_data (InstructionData.Binary (Opcode.Fdiv) (value_array_2 x y))) -) - -(decl sqrt (Value) Inst) -(extractor - (sqrt x) - (inst_data (InstructionData.Unary (Opcode.Sqrt) x)) -) - -(decl fma (Value Value Value) Inst) -(extractor - (fma x y z) - (inst_data (InstructionData.Ternary (Opcode.Fma) (value_array_3 x y z))) -) - -(decl fneg (Value) Inst) -(extractor - (fneg x) - (inst_data (InstructionData.Unary (Opcode.Fneg) x)) -) - -(decl fabs (Value) Inst) -(extractor - (fabs x) - (inst_data (InstructionData.Unary (Opcode.Fabs) x)) -) - -(decl fcopysign (Value Value) Inst) -(extractor - (fcopysign x y) - (inst_data (InstructionData.Binary (Opcode.Fcopysign) (value_array_2 x y))) -) - -(decl fmin (Value Value) Inst) -(extractor - 
(fmin x y) - (inst_data (InstructionData.Binary (Opcode.Fmin) (value_array_2 x y))) -) - -(decl fmin_pseudo (Value Value) Inst) -(extractor - (fmin_pseudo x y) - (inst_data (InstructionData.Binary (Opcode.FminPseudo) (value_array_2 x y))) -) - -(decl fmax (Value Value) Inst) -(extractor - (fmax x y) - (inst_data (InstructionData.Binary (Opcode.Fmax) (value_array_2 x y))) -) - -(decl fmax_pseudo (Value Value) Inst) -(extractor - (fmax_pseudo x y) - (inst_data (InstructionData.Binary (Opcode.FmaxPseudo) (value_array_2 x y))) -) - -(decl ceil (Value) Inst) -(extractor - (ceil x) - (inst_data (InstructionData.Unary (Opcode.Ceil) x)) -) - -(decl floor (Value) Inst) -(extractor - (floor x) - (inst_data (InstructionData.Unary (Opcode.Floor) x)) -) - -(decl trunc (Value) Inst) -(extractor - (trunc x) - (inst_data (InstructionData.Unary (Opcode.Trunc) x)) -) - -(decl nearest (Value) Inst) -(extractor - (nearest x) - (inst_data (InstructionData.Unary (Opcode.Nearest) x)) -) - -(decl is_null (Value) Inst) -(extractor - (is_null x) - (inst_data (InstructionData.Unary (Opcode.IsNull) x)) -) - -(decl is_invalid (Value) Inst) -(extractor - (is_invalid x) - (inst_data (InstructionData.Unary (Opcode.IsInvalid) x)) -) - -(decl trueif (IntCC Value) Inst) -(extractor - (trueif Cond f) - (inst_data (InstructionData.IntCond (Opcode.Trueif) f Cond)) -) - -(decl trueff (FloatCC Value) Inst) -(extractor - (trueff Cond f) - (inst_data (InstructionData.FloatCond (Opcode.Trueff) f Cond)) -) - -(decl bitcast (Value) Inst) -(extractor - (bitcast x) - (inst_data (InstructionData.Unary (Opcode.Bitcast) x)) -) - -(decl raw_bitcast (Value) Inst) -(extractor - (raw_bitcast x) - (inst_data (InstructionData.Unary (Opcode.RawBitcast) x)) -) - -(decl scalar_to_vector (Value) Inst) -(extractor - (scalar_to_vector s) - (inst_data (InstructionData.Unary (Opcode.ScalarToVector) s)) -) - -(decl breduce (Value) Inst) -(extractor - (breduce x) - (inst_data (InstructionData.Unary (Opcode.Breduce) x)) -) - -(decl bextend (Value) Inst) -(extractor - (bextend x) - (inst_data (InstructionData.Unary (Opcode.Bextend) x)) -) - -(decl bint (Value) Inst) -(extractor - (bint x) - (inst_data (InstructionData.Unary (Opcode.Bint) x)) -) - -(decl bmask (Value) Inst) -(extractor - (bmask x) - (inst_data (InstructionData.Unary (Opcode.Bmask) x)) -) - -(decl ireduce (Value) Inst) -(extractor - (ireduce x) - (inst_data (InstructionData.Unary (Opcode.Ireduce) x)) -) - -(decl snarrow (Value Value) Inst) -(extractor - (snarrow x y) - (inst_data (InstructionData.Binary (Opcode.Snarrow) (value_array_2 x y))) -) - -(decl unarrow (Value Value) Inst) -(extractor - (unarrow x y) - (inst_data (InstructionData.Binary (Opcode.Unarrow) (value_array_2 x y))) -) - -(decl uunarrow (Value Value) Inst) -(extractor - (uunarrow x y) - (inst_data (InstructionData.Binary (Opcode.Uunarrow) (value_array_2 x y))) -) - -(decl swiden_low (Value) Inst) -(extractor - (swiden_low x) - (inst_data (InstructionData.Unary (Opcode.SwidenLow) x)) -) - -(decl swiden_high (Value) Inst) -(extractor - (swiden_high x) - (inst_data (InstructionData.Unary (Opcode.SwidenHigh) x)) -) - -(decl uwiden_low (Value) Inst) -(extractor - (uwiden_low x) - (inst_data (InstructionData.Unary (Opcode.UwidenLow) x)) -) - -(decl uwiden_high (Value) Inst) -(extractor - (uwiden_high x) - (inst_data (InstructionData.Unary (Opcode.UwidenHigh) x)) -) - -(decl iadd_pairwise (Value Value) Inst) -(extractor - (iadd_pairwise x y) - (inst_data (InstructionData.Binary (Opcode.IaddPairwise) (value_array_2 x y))) -) - 
-(decl widening_pairwise_dot_product_s (Value Value) Inst) -(extractor - (widening_pairwise_dot_product_s x y) - (inst_data (InstructionData.Binary (Opcode.WideningPairwiseDotProductS) (value_array_2 x y))) -) - -(decl uextend (Value) Inst) -(extractor - (uextend x) - (inst_data (InstructionData.Unary (Opcode.Uextend) x)) -) - -(decl sextend (Value) Inst) -(extractor - (sextend x) - (inst_data (InstructionData.Unary (Opcode.Sextend) x)) -) - -(decl fpromote (Value) Inst) -(extractor - (fpromote x) - (inst_data (InstructionData.Unary (Opcode.Fpromote) x)) -) - -(decl fdemote (Value) Inst) -(extractor - (fdemote x) - (inst_data (InstructionData.Unary (Opcode.Fdemote) x)) -) - -(decl fvdemote (Value) Inst) -(extractor - (fvdemote x) - (inst_data (InstructionData.Unary (Opcode.Fvdemote) x)) -) - -(decl fvpromote_low (Value) Inst) -(extractor - (fvpromote_low a) - (inst_data (InstructionData.Unary (Opcode.FvpromoteLow) a)) -) - -(decl fcvt_to_uint (Value) Inst) -(extractor - (fcvt_to_uint x) - (inst_data (InstructionData.Unary (Opcode.FcvtToUint) x)) -) - -(decl fcvt_to_uint_sat (Value) Inst) -(extractor - (fcvt_to_uint_sat x) - (inst_data (InstructionData.Unary (Opcode.FcvtToUintSat) x)) -) - -(decl fcvt_to_sint (Value) Inst) -(extractor - (fcvt_to_sint x) - (inst_data (InstructionData.Unary (Opcode.FcvtToSint) x)) -) - -(decl fcvt_to_sint_sat (Value) Inst) -(extractor - (fcvt_to_sint_sat x) - (inst_data (InstructionData.Unary (Opcode.FcvtToSintSat) x)) -) - -(decl fcvt_from_uint (Value) Inst) -(extractor - (fcvt_from_uint x) - (inst_data (InstructionData.Unary (Opcode.FcvtFromUint) x)) -) - -(decl fcvt_from_sint (Value) Inst) -(extractor - (fcvt_from_sint x) - (inst_data (InstructionData.Unary (Opcode.FcvtFromSint) x)) -) - -(decl fcvt_low_from_sint (Value) Inst) -(extractor - (fcvt_low_from_sint x) - (inst_data (InstructionData.Unary (Opcode.FcvtLowFromSint) x)) -) - -(decl isplit (Value) Inst) -(extractor - (isplit x) - (inst_data (InstructionData.Unary (Opcode.Isplit) x)) -) - -(decl iconcat (Value Value) Inst) -(extractor - (iconcat lo hi) - (inst_data (InstructionData.Binary (Opcode.Iconcat) (value_array_2 lo hi))) -) - -(decl atomic_rmw (MemFlags AtomicRmwOp Value Value) Inst) -(extractor - (atomic_rmw MemFlags AtomicRmwOp p x) - (inst_data (InstructionData.AtomicRmw (Opcode.AtomicRmw) (value_array_2 p x) MemFlags AtomicRmwOp)) -) - -(decl atomic_cas (MemFlags Value Value Value) Inst) -(extractor - (atomic_cas MemFlags p e x) - (inst_data (InstructionData.AtomicCas (Opcode.AtomicCas) (value_array_3 p e x) MemFlags)) -) - -(decl atomic_load (MemFlags Value) Inst) -(extractor - (atomic_load MemFlags p) - (inst_data (InstructionData.LoadNoOffset (Opcode.AtomicLoad) p MemFlags)) -) - -(decl atomic_store (MemFlags Value Value) Inst) -(extractor - (atomic_store MemFlags x p) - (inst_data (InstructionData.StoreNoOffset (Opcode.AtomicStore) (value_array_2 x p) MemFlags)) -) - -(decl fence () Inst) -(extractor - (fence ) - (inst_data (InstructionData.NullAry (Opcode.Fence))) -) - diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/constant_hash.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/constant_hash.rs deleted file mode 100644 index 1de2a2edb..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/constant_hash.rs +++ /dev/null @@ -1,62 +0,0 @@ -//! Runtime support for precomputed constant hash tables. -//! -//! The shared module with the same name can generate constant hash tables using open addressing -//! and quadratic probing. -//! 
-//! The hash tables are arrays that are guaranteed to: -//! -//! - Have a power-of-two size. -//! - Contain at least one empty slot. -//! -//! This module provides runtime support for lookups in these tables. - -// Re-export entities from constant_hash for simplicity of use. -pub use cranelift_codegen_shared::constant_hash::*; - -/// Trait that must be implemented by the entries in a constant hash table. -pub trait Table<K: Copy + Eq> { - /// Get the number of entries in this table which must be a power of two. - fn len(&self) -> usize; - - /// Get the key corresponding to the entry at `idx`, or `None` if the entry is empty. - /// The `idx` must be in range. - fn key(&self, idx: usize) -> Option<K>; -} - -/// Look for `key` in `table`. -/// -/// The provided `hash` value must have been computed from `key` using the same hash function that -/// was used to construct the table. -/// -/// Returns `Ok(idx)` with the table index containing the found entry, or `Err(idx)` with the empty -/// sentinel entry if no entry could be found. -pub fn probe<K: Copy + Eq, T: Table<K> + ?Sized>( - table: &T, - key: K, - hash: usize, -) -> Result<usize, usize> { - debug_assert!(table.len().is_power_of_two()); - let mask = table.len() - 1; - - let mut idx = hash; - let mut step = 0; - - loop { - idx &= mask; - - match table.key(idx) { - None => return Err(idx), - Some(k) if k == key => return Ok(idx), - _ => {} - } - - // Quadratic probing. - step += 1; - - // When `table.len()` is a power of two, it can be proven that `idx` will visit all - // entries. This means that this loop will always terminate if the hash table has even - // one unused entry. - debug_assert!(step < table.len()); - idx += step; - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/context.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/context.rs deleted file mode 100644 index 91781d189..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/context.rs +++ /dev/null @@ -1,352 +0,0 @@ -//! Cranelift compilation context and main entry point. -//! -//! When compiling many small functions, it is important to avoid repeatedly allocating and -//! deallocating the data structures needed for compilation. The `Context` struct is used to hold -//! on to memory allocations between function compilations. -//! -//! The context does not hold a `TargetIsa` instance which has to be provided as an argument -//! instead. This is because an ISA instance is immutable and can be used by multiple compilation -//! contexts concurrently. Typically, you would have one context per compilation thread and only a -//! single ISA instance.
- -use crate::binemit::CodeInfo; -use crate::dce::do_dce; -use crate::dominator_tree::DominatorTree; -use crate::flowgraph::ControlFlowGraph; -use crate::ir::Function; -use crate::isa::TargetIsa; -use crate::legalizer::simple_legalize; -use crate::licm::do_licm; -use crate::loop_analysis::LoopAnalysis; -use crate::machinst::MachCompileResult; -use crate::nan_canonicalization::do_nan_canonicalization; -use crate::remove_constant_phis::do_remove_constant_phis; -use crate::result::CodegenResult; -use crate::settings::{FlagsOrIsa, OptLevel}; -use crate::simple_gvn::do_simple_gvn; -use crate::simple_preopt::do_preopt; -use crate::timing; -use crate::unreachable_code::eliminate_unreachable_code; -use crate::verifier::{verify_context, VerifierErrors, VerifierResult}; -#[cfg(feature = "souper-harvest")] -use alloc::string::String; -use alloc::vec::Vec; - -#[cfg(feature = "souper-harvest")] -use crate::souper_harvest::do_souper_harvest; - -/// Persistent data structures and compilation pipeline. -pub struct Context { - /// The function we're compiling. - pub func: Function, - - /// The control flow graph of `func`. - pub cfg: ControlFlowGraph, - - /// Dominator tree for `func`. - pub domtree: DominatorTree, - - /// Loop analysis of `func`. - pub loop_analysis: LoopAnalysis, - - /// Result of MachBackend compilation, if computed. - pub mach_compile_result: Option<MachCompileResult>, - - /// Flag: do we want a disassembly with the MachCompileResult? - pub want_disasm: bool, -} - -impl Context { - /// Allocate a new compilation context. - /// - /// The returned instance should be reused for compiling multiple functions in order to avoid - /// needless allocator thrashing. - pub fn new() -> Self { - Self::for_function(Function::new()) - } - - /// Allocate a new compilation context with an existing Function. - /// - /// The returned instance should be reused for compiling multiple functions in order to avoid - /// needless allocator thrashing. - pub fn for_function(func: Function) -> Self { - Self { - func, - cfg: ControlFlowGraph::new(), - domtree: DominatorTree::new(), - loop_analysis: LoopAnalysis::new(), - mach_compile_result: None, - want_disasm: false, - } - } - - /// Clear all data structures in this context. - pub fn clear(&mut self) { - self.func.clear(); - self.cfg.clear(); - self.domtree.clear(); - self.loop_analysis.clear(); - self.mach_compile_result = None; - self.want_disasm = false; - } - - /// Set the flag to request a disassembly when compiling with a - /// `MachBackend` backend. - pub fn set_disasm(&mut self, val: bool) { - self.want_disasm = val; - } - - /// Compile the function, and emit machine code into a `Vec<u8>`. - /// - /// Run the function through all the passes necessary to generate code for the target ISA - /// represented by `isa`, as well as the final step of emitting machine code into a - /// `Vec<u8>`. The machine code is not relocated. Instead, any relocations are emitted - /// into `relocs`. - /// - /// This function calls `compile` and `emit_to_memory`, taking care to resize `mem` as - /// needed, so it provides a safe interface. - /// - /// Returns information about the function's code and read-only data. - pub fn compile_and_emit( - &mut self, - isa: &dyn TargetIsa, - mem: &mut Vec<u8>, - ) -> CodegenResult<()> { - let info = self.compile(isa)?; - let old_len = mem.len(); - mem.resize(old_len + info.total_size as usize, 0); - let new_info = unsafe { self.emit_to_memory(mem.as_mut_ptr().add(old_len)) }; - debug_assert!(new_info == info); - Ok(()) - } - - /// Compile the function.
- /// - /// Run the function through all the passes necessary to generate code for the target ISA - /// represented by `isa`. This does not include the final step of emitting machine code into a - /// code sink. - /// - /// Returns information about the function's code and read-only data. - pub fn compile(&mut self, isa: &dyn TargetIsa) -> CodegenResult<CodeInfo> { - let _tt = timing::compile(); - self.verify_if(isa)?; - - let opt_level = isa.flags().opt_level(); - log::debug!( - "Compiling (opt level {:?}):\n{}", - opt_level, - self.func.display() - ); - - self.compute_cfg(); - if opt_level != OptLevel::None { - self.preopt(isa)?; - } - if isa.flags().enable_nan_canonicalization() { - self.canonicalize_nans(isa)?; - } - - self.legalize(isa)?; - if opt_level != OptLevel::None { - self.compute_domtree(); - self.compute_loop_analysis(); - self.licm(isa)?; - self.simple_gvn(isa)?; - } - - self.compute_domtree(); - self.eliminate_unreachable_code(isa)?; - if opt_level != OptLevel::None { - self.dce(isa)?; - } - - self.remove_constant_phis(isa)?; - - let result = isa.compile_function(&self.func, self.want_disasm)?; - let info = result.code_info(); - self.mach_compile_result = Some(result); - Ok(info) - } - - /// Emit machine code directly into raw memory. - /// - /// Write all of the function's machine code to the memory at `mem`. The size of the machine - /// code is returned by `compile` above. - /// - /// The machine code is not relocated. Instead, any relocations are emitted into `relocs`. - /// - /// # Safety - /// - /// This function is unsafe since it does not perform bounds checking on the memory buffer, - /// and it can't guarantee that the `mem` pointer is valid. - /// - /// Returns information about the emitted code and data. - #[deny(unsafe_op_in_unsafe_fn)] - pub unsafe fn emit_to_memory(&self, mem: *mut u8) -> CodeInfo { - let _tt = timing::binemit(); - let result = self - .mach_compile_result - .as_ref() - .expect("only using mach backend now"); - let info = result.code_info(); - - let mem = unsafe { std::slice::from_raw_parts_mut(mem, info.total_size as usize) }; - mem.copy_from_slice(result.buffer.data()); - - info - } - - /// If available, return information about the code layout in the - /// final machine code: the offsets (in bytes) of each basic-block - /// start, and all basic-block edges. - pub fn get_code_bb_layout(&self) -> Option<(Vec<usize>, Vec<(usize, usize)>)> { - if let Some(result) = self.mach_compile_result.as_ref() { - Some(( - result.bb_starts.iter().map(|&off| off as usize).collect(), - result - .bb_edges - .iter() - .map(|&(from, to)| (from as usize, to as usize)) - .collect(), - )) - } else { - None - } - } - - /// Creates unwind information for the function. - /// - /// Returns `None` if the function has no unwind information. - #[cfg(feature = "unwind")] - pub fn create_unwind_info( - &self, - isa: &dyn TargetIsa, - ) -> CodegenResult<Option<crate::isa::unwind::UnwindInfo>> { - let unwind_info_kind = isa.unwind_info_kind(); - let result = self.mach_compile_result.as_ref().unwrap(); - isa.emit_unwind_info(result, unwind_info_kind) - } - - /// Run the verifier on the function. - /// - /// Also check that the dominator tree and control flow graph are consistent with the function.
- pub fn verify<'a, FOI: Into<FlagsOrIsa<'a>>>(&self, fisa: FOI) -> VerifierResult<()> { - let mut errors = VerifierErrors::default(); - let _ = verify_context(&self.func, &self.cfg, &self.domtree, fisa, &mut errors); - - if errors.is_empty() { - Ok(()) - } else { - Err(errors) - } - } - - /// Run the verifier only if the `enable_verifier` setting is true. - pub fn verify_if<'a, FOI: Into<FlagsOrIsa<'a>>>(&self, fisa: FOI) -> CodegenResult<()> { - let fisa = fisa.into(); - if fisa.flags.enable_verifier() { - self.verify(fisa)?; - } - Ok(()) - } - - /// Perform dead-code elimination on the function. - pub fn dce<'a, FOI: Into<FlagsOrIsa<'a>>>(&mut self, fisa: FOI) -> CodegenResult<()> { - do_dce(&mut self.func, &mut self.domtree); - self.verify_if(fisa)?; - Ok(()) - } - - /// Perform constant-phi removal on the function. - pub fn remove_constant_phis<'a, FOI: Into<FlagsOrIsa<'a>>>( - &mut self, - fisa: FOI, - ) -> CodegenResult<()> { - do_remove_constant_phis(&mut self.func, &mut self.domtree); - self.verify_if(fisa)?; - Ok(()) - } - - /// Perform pre-legalization rewrites on the function. - pub fn preopt(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> { - do_preopt(&mut self.func, &mut self.cfg, isa); - self.verify_if(isa)?; - Ok(()) - } - - /// Perform NaN canonicalizing rewrites on the function. - pub fn canonicalize_nans(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> { - do_nan_canonicalization(&mut self.func); - self.verify_if(isa) - } - - /// Run the legalizer for `isa` on the function. - pub fn legalize(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> { - // Legalization invalidates the domtree and loop_analysis by mutating the CFG. - // TODO: Avoid doing this when legalization doesn't actually mutate the CFG. - self.domtree.clear(); - self.loop_analysis.clear(); - - // Run some specific legalizations only. - simple_legalize(&mut self.func, &mut self.cfg, isa); - self.verify_if(isa) - } - - /// Compute the control flow graph. - pub fn compute_cfg(&mut self) { - self.cfg.compute(&self.func) - } - - /// Compute dominator tree. - pub fn compute_domtree(&mut self) { - self.domtree.compute(&self.func, &self.cfg) - } - - /// Compute the loop analysis. - pub fn compute_loop_analysis(&mut self) { - self.loop_analysis - .compute(&self.func, &self.cfg, &self.domtree) - } - - /// Compute the control flow graph and dominator tree. - pub fn flowgraph(&mut self) { - self.compute_cfg(); - self.compute_domtree() - } - - /// Perform simple GVN on the function. - pub fn simple_gvn<'a, FOI: Into<FlagsOrIsa<'a>>>(&mut self, fisa: FOI) -> CodegenResult<()> { - do_simple_gvn(&mut self.func, &mut self.domtree); - self.verify_if(fisa) - } - - /// Perform LICM on the function. - pub fn licm(&mut self, isa: &dyn TargetIsa) -> CodegenResult<()> { - do_licm( - &mut self.func, - &mut self.cfg, - &mut self.domtree, - &mut self.loop_analysis, - ); - self.verify_if(isa) - } - - /// Perform unreachable code elimination. - pub fn eliminate_unreachable_code<'a, FOI>(&mut self, fisa: FOI) -> CodegenResult<()> - where - FOI: Into<FlagsOrIsa<'a>>, - { - eliminate_unreachable_code(&mut self.func, &mut self.cfg, &self.domtree); - self.verify_if(fisa) - } - - /// Harvest candidate left-hand sides for superoptimization with Souper.
- #[cfg(feature = "souper-harvest")] - pub fn souper_harvest( - &mut self, - out: &mut std::sync::mpsc::Sender, - ) -> CodegenResult<()> { - do_souper_harvest(&self.func, out); - Ok(()) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cursor.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cursor.rs deleted file mode 100644 index 2dc8ce7a2..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/cursor.rs +++ /dev/null @@ -1,665 +0,0 @@ -//! Cursor library. -//! -//! This module defines cursor data types that can be used for inserting instructions. - -use crate::ir; - -/// The possible positions of a cursor. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum CursorPosition { - /// Cursor is not pointing anywhere. No instructions can be inserted. - Nowhere, - /// Cursor is pointing at an existing instruction. - /// New instructions will be inserted *before* the current instruction. - At(ir::Inst), - /// Cursor is before the beginning of a block. No instructions can be inserted. Calling - /// `next_inst()` will move to the first instruction in the block. - Before(ir::Block), - /// Cursor is pointing after the end of a block. - /// New instructions will be appended to the block. - After(ir::Block), -} - -/// All cursor types implement the `Cursor` which provides common navigation operations. -pub trait Cursor { - /// Get the current cursor position. - fn position(&self) -> CursorPosition; - - /// Set the current position. - fn set_position(&mut self, pos: CursorPosition); - - /// Get the source location that should be assigned to new instructions. - fn srcloc(&self) -> ir::SourceLoc; - - /// Set the source location that should be assigned to new instructions. - fn set_srcloc(&mut self, srcloc: ir::SourceLoc); - - /// Borrow a reference to the function layout that this cursor is navigating. - fn layout(&self) -> &ir::Layout; - - /// Borrow a mutable reference to the function layout that this cursor is navigating. - fn layout_mut(&mut self) -> &mut ir::Layout; - - /// Exchange this cursor for one with a set source location. - /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, SourceLoc}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, srcloc: SourceLoc) { - /// let mut pos = FuncCursor::new(func).with_srcloc(srcloc); - /// - /// // Use `pos`... - /// } - /// ``` - fn with_srcloc(mut self, srcloc: ir::SourceLoc) -> Self - where - Self: Sized, - { - self.set_srcloc(srcloc); - self - } - - /// Rebuild this cursor positioned at `pos`. - fn at_position(mut self, pos: CursorPosition) -> Self - where - Self: Sized, - { - self.set_position(pos); - self - } - - /// Rebuild this cursor positioned at `inst`. - /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, Inst}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, inst: Inst) { - /// let mut pos = FuncCursor::new(func).at_inst(inst); - /// - /// // Use `pos`... - /// } - /// ``` - fn at_inst(mut self, inst: ir::Inst) -> Self - where - Self: Sized, - { - self.goto_inst(inst); - self - } - - /// Rebuild this cursor positioned at the first insertion point for `block`. - /// This differs from `at_first_inst` in that it doesn't assume that any - /// instructions have been inserted into `block` yet. 
- /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, Inst}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, block: Block) { - /// let mut pos = FuncCursor::new(func).at_first_insertion_point(block); - /// - /// // Use `pos`... - /// } - /// ``` - fn at_first_insertion_point(mut self, block: ir::Block) -> Self - where - Self: Sized, - { - self.goto_first_insertion_point(block); - self - } - - /// Rebuild this cursor positioned at the first instruction in `block`. - /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, Inst}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, block: Block) { - /// let mut pos = FuncCursor::new(func).at_first_inst(block); - /// - /// // Use `pos`... - /// } - /// ``` - fn at_first_inst(mut self, block: ir::Block) -> Self - where - Self: Sized, - { - self.goto_first_inst(block); - self - } - - /// Rebuild this cursor positioned at the last instruction in `block`. - /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, Inst}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, block: Block) { - /// let mut pos = FuncCursor::new(func).at_last_inst(block); - /// - /// // Use `pos`... - /// } - /// ``` - fn at_last_inst(mut self, block: ir::Block) -> Self - where - Self: Sized, - { - self.goto_last_inst(block); - self - } - - /// Rebuild this cursor positioned after `inst`. - /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, Inst}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, inst: Inst) { - /// let mut pos = FuncCursor::new(func).after_inst(inst); - /// - /// // Use `pos`... - /// } - /// ``` - fn after_inst(mut self, inst: ir::Inst) -> Self - where - Self: Sized, - { - self.goto_after_inst(inst); - self - } - - /// Rebuild this cursor positioned at the top of `block`. - /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, Inst}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, block: Block) { - /// let mut pos = FuncCursor::new(func).at_top(block); - /// - /// // Use `pos`... - /// } - /// ``` - fn at_top(mut self, block: ir::Block) -> Self - where - Self: Sized, - { - self.goto_top(block); - self - } - - /// Rebuild this cursor positioned at the bottom of `block`. - /// - /// This is intended to be used as a builder method: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block, Inst}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function, block: Block) { - /// let mut pos = FuncCursor::new(func).at_bottom(block); - /// - /// // Use `pos`... - /// } - /// ``` - fn at_bottom(mut self, block: ir::Block) -> Self - where - Self: Sized, - { - self.goto_bottom(block); - self - } - - /// Get the block corresponding to the current position. 
- fn current_block(&self) -> Option { - use self::CursorPosition::*; - match self.position() { - Nowhere => None, - At(inst) => self.layout().inst_block(inst), - Before(block) | After(block) => Some(block), - } - } - - /// Get the instruction corresponding to the current position, if any. - fn current_inst(&self) -> Option { - use self::CursorPosition::*; - match self.position() { - At(inst) => Some(inst), - _ => None, - } - } - - /// Go to the position after a specific instruction, which must be inserted - /// in the layout. New instructions will be inserted after `inst`. - fn goto_after_inst(&mut self, inst: ir::Inst) { - debug_assert!(self.layout().inst_block(inst).is_some()); - let new_pos = if let Some(next) = self.layout().next_inst(inst) { - CursorPosition::At(next) - } else { - CursorPosition::After( - self.layout() - .inst_block(inst) - .expect("current instruction removed?"), - ) - }; - self.set_position(new_pos); - } - - /// Go to a specific instruction which must be inserted in the layout. - /// New instructions will be inserted before `inst`. - fn goto_inst(&mut self, inst: ir::Inst) { - debug_assert!(self.layout().inst_block(inst).is_some()); - self.set_position(CursorPosition::At(inst)); - } - - /// Go to the position for inserting instructions at the beginning of `block`, - /// which unlike `goto_first_inst` doesn't assume that any instructions have - /// been inserted into `block` yet. - fn goto_first_insertion_point(&mut self, block: ir::Block) { - if let Some(inst) = self.layout().first_inst(block) { - self.goto_inst(inst); - } else { - self.goto_bottom(block); - } - } - - /// Go to the first instruction in `block`. - fn goto_first_inst(&mut self, block: ir::Block) { - let inst = self.layout().first_inst(block).expect("Empty block"); - self.goto_inst(inst); - } - - /// Go to the last instruction in `block`. - fn goto_last_inst(&mut self, block: ir::Block) { - let inst = self.layout().last_inst(block).expect("Empty block"); - self.goto_inst(inst); - } - - /// Go to the top of `block` which must be inserted into the layout. - /// At this position, instructions cannot be inserted, but `next_inst()` will move to the first - /// instruction in `block`. - fn goto_top(&mut self, block: ir::Block) { - debug_assert!(self.layout().is_block_inserted(block)); - self.set_position(CursorPosition::Before(block)); - } - - /// Go to the bottom of `block` which must be inserted into the layout. - /// At this position, inserted instructions will be appended to `block`. - fn goto_bottom(&mut self, block: ir::Block) { - debug_assert!(self.layout().is_block_inserted(block)); - self.set_position(CursorPosition::After(block)); - } - - /// Go to the top of the next block in layout order and return it. - /// - /// - If the cursor wasn't pointing at anything, go to the top of the first block in the - /// function. - /// - If there are no more blocks, leave the cursor pointing at nothing and return `None`. - /// - /// # Examples - /// - /// The `next_block()` method is intended for iterating over the blocks in layout order: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function) { - /// let mut cursor = FuncCursor::new(func); - /// while let Some(block) = cursor.next_block() { - /// // Edit block. 
- /// } - /// } - /// ``` - fn next_block(&mut self) -> Option { - let next = if let Some(block) = self.current_block() { - self.layout().next_block(block) - } else { - self.layout().entry_block() - }; - self.set_position(match next { - Some(block) => CursorPosition::Before(block), - None => CursorPosition::Nowhere, - }); - next - } - - /// Go to the bottom of the previous block in layout order and return it. - /// - /// - If the cursor wasn't pointing at anything, go to the bottom of the last block in the - /// function. - /// - If there are no more blocks, leave the cursor pointing at nothing and return `None`. - /// - /// # Examples - /// - /// The `prev_block()` method is intended for iterating over the blocks in backwards layout order: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function) { - /// let mut cursor = FuncCursor::new(func); - /// while let Some(block) = cursor.prev_block() { - /// // Edit block. - /// } - /// } - /// ``` - fn prev_block(&mut self) -> Option { - let prev = if let Some(block) = self.current_block() { - self.layout().prev_block(block) - } else { - self.layout().last_block() - }; - self.set_position(match prev { - Some(block) => CursorPosition::After(block), - None => CursorPosition::Nowhere, - }); - prev - } - - /// Move to the next instruction in the same block and return it. - /// - /// - If the cursor was positioned before a block, go to the first instruction in that block. - /// - If there are no more instructions in the block, go to the `After(block)` position and return - /// `None`. - /// - If the cursor wasn't pointing anywhere, keep doing that. - /// - /// This method will never move the cursor to a different block. - /// - /// # Examples - /// - /// The `next_inst()` method is intended for iterating over the instructions in a block like - /// this: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_block(func: &mut Function, block: Block) { - /// let mut cursor = FuncCursor::new(func).at_top(block); - /// while let Some(inst) = cursor.next_inst() { - /// // Edit instructions... - /// } - /// } - /// ``` - /// The loop body can insert and remove instructions via the cursor. - /// - /// Iterating over all the instructions in a function looks like this: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_func(func: &mut Function) { - /// let mut cursor = FuncCursor::new(func); - /// while let Some(block) = cursor.next_block() { - /// while let Some(inst) = cursor.next_inst() { - /// // Edit instructions... - /// } - /// } - /// } - /// ``` - fn next_inst(&mut self) -> Option { - use self::CursorPosition::*; - match self.position() { - Nowhere | After(..) => None, - At(inst) => { - if let Some(next) = self.layout().next_inst(inst) { - self.set_position(At(next)); - Some(next) - } else { - let pos = After( - self.layout() - .inst_block(inst) - .expect("current instruction removed?"), - ); - self.set_position(pos); - None - } - } - Before(block) => { - if let Some(next) = self.layout().first_inst(block) { - self.set_position(At(next)); - Some(next) - } else { - self.set_position(After(block)); - None - } - } - } - } - - /// Move to the previous instruction in the same block and return it. 
- /// - /// - If the cursor was positioned after a block, go to the last instruction in that block. - /// - If there are no more instructions in the block, go to the `Before(block)` position and return - /// `None`. - /// - If the cursor wasn't pointing anywhere, keep doing that. - /// - /// This method will never move the cursor to a different block. - /// - /// # Examples - /// - /// The `prev_inst()` method is intended for iterating backwards over the instructions in an - /// block like this: - /// - /// ``` - /// # use cranelift_codegen::ir::{Function, Block}; - /// # use cranelift_codegen::cursor::{Cursor, FuncCursor}; - /// fn edit_block(func: &mut Function, block: Block) { - /// let mut cursor = FuncCursor::new(func).at_bottom(block); - /// while let Some(inst) = cursor.prev_inst() { - /// // Edit instructions... - /// } - /// } - /// ``` - fn prev_inst(&mut self) -> Option { - use self::CursorPosition::*; - match self.position() { - Nowhere | Before(..) => None, - At(inst) => { - if let Some(prev) = self.layout().prev_inst(inst) { - self.set_position(At(prev)); - Some(prev) - } else { - let pos = Before( - self.layout() - .inst_block(inst) - .expect("current instruction removed?"), - ); - self.set_position(pos); - None - } - } - After(block) => { - if let Some(prev) = self.layout().last_inst(block) { - self.set_position(At(prev)); - Some(prev) - } else { - self.set_position(Before(block)); - None - } - } - } - } - - /// Insert an instruction at the current position. - /// - /// - If pointing at an instruction, the new instruction is inserted before the current - /// instruction. - /// - If pointing at the bottom of a block, the new instruction is appended to the block. - /// - Otherwise panic. - /// - /// In either case, the cursor is not moved, such that repeated calls to `insert_inst()` causes - /// instructions to appear in insertion order in the block. - fn insert_inst(&mut self, inst: ir::Inst) { - use self::CursorPosition::*; - match self.position() { - Nowhere | Before(..) => panic!("Invalid insert_inst position"), - At(cur) => self.layout_mut().insert_inst(inst, cur), - After(block) => self.layout_mut().append_inst(inst, block), - } - } - - /// Remove the instruction under the cursor. - /// - /// The cursor is left pointing at the position following the current instruction. - /// - /// Return the instruction that was removed. - fn remove_inst(&mut self) -> ir::Inst { - let inst = self.current_inst().expect("No instruction to remove"); - self.next_inst(); - self.layout_mut().remove_inst(inst); - inst - } - - /// Remove the instruction under the cursor. - /// - /// The cursor is left pointing at the position preceding the current instruction. - /// - /// Return the instruction that was removed. - fn remove_inst_and_step_back(&mut self) -> ir::Inst { - let inst = self.current_inst().expect("No instruction to remove"); - self.prev_inst(); - self.layout_mut().remove_inst(inst); - inst - } - - /// Insert a block at the current position and switch to it. - /// - /// As far as possible, this method behaves as if the block header were an instruction inserted - /// at the current position. - /// - /// - If the cursor is pointing at an existing instruction, *the current block is split in two* - /// and the current instruction becomes the first instruction in the inserted block. - /// - If the cursor points at the bottom of a block, the new block is inserted after the current - /// one, and moved to the bottom of the new block where instructions can be appended. 
- /// - If the cursor points to the top of a block, the new block is inserted above the current one. - /// - If the cursor is not pointing at anything, the new block is placed last in the layout. - /// - /// This means that it is always valid to call this method, and it always leaves the cursor in - /// a state that will insert instructions into the new block. - fn insert_block(&mut self, new_block: ir::Block) { - use self::CursorPosition::*; - match self.position() { - At(inst) => { - self.layout_mut().split_block(new_block, inst); - // All other cases move to `After(block)`, but in this case we'll stay `At(inst)`. - return; - } - Nowhere => self.layout_mut().append_block(new_block), - Before(block) => self.layout_mut().insert_block(new_block, block), - After(block) => self.layout_mut().insert_block_after(new_block, block), - } - // For everything but `At(inst)` we end up appending to the new block. - self.set_position(After(new_block)); - } -} - -/// Function cursor. -/// -/// A `FuncCursor` holds a mutable reference to a whole `ir::Function` while keeping a position -/// too. The function can be re-borrowed by accessing the public `cur.func` member. -/// -/// This cursor is for use before legalization. The inserted instructions are not given an -/// encoding. -pub struct FuncCursor<'f> { - pos: CursorPosition, - srcloc: ir::SourceLoc, - - /// The referenced function. - pub func: &'f mut ir::Function, -} - -impl<'f> FuncCursor<'f> { - /// Create a new `FuncCursor` pointing nowhere. - pub fn new(func: &'f mut ir::Function) -> Self { - Self { - pos: CursorPosition::Nowhere, - srcloc: Default::default(), - func, - } - } - - /// Use the source location of `inst` for future instructions. - pub fn use_srcloc(&mut self, inst: ir::Inst) { - self.srcloc = self.func.srclocs[inst]; - } - - /// Create an instruction builder that inserts an instruction at the current position. - pub fn ins(&mut self) -> ir::InsertBuilder<&mut FuncCursor<'f>> { - ir::InsertBuilder::new(self) - } -} - -impl<'f> Cursor for FuncCursor<'f> { - fn position(&self) -> CursorPosition { - self.pos - } - - fn set_position(&mut self, pos: CursorPosition) { - self.pos = pos - } - - fn srcloc(&self) -> ir::SourceLoc { - self.srcloc - } - - fn set_srcloc(&mut self, srcloc: ir::SourceLoc) { - self.srcloc = srcloc; - } - - fn layout(&self) -> &ir::Layout { - &self.func.layout - } - - fn layout_mut(&mut self) -> &mut ir::Layout { - &mut self.func.layout - } -} - -impl<'c, 'f> ir::InstInserterBase<'c> for &'c mut FuncCursor<'f> { - fn data_flow_graph(&self) -> &ir::DataFlowGraph { - &self.func.dfg - } - - fn data_flow_graph_mut(&mut self) -> &mut ir::DataFlowGraph { - &mut self.func.dfg - } - - fn insert_built_inst(self, inst: ir::Inst) -> &'c mut ir::DataFlowGraph { - // TODO: Remove this assertion once #796 is fixed. 
- #[cfg(debug_assertions)] - { - if let CursorPosition::At(_) = self.position() { - if let Some(curr) = self.current_inst() { - if let Some(prev) = self.layout().prev_inst(curr) { - let prev_op = self.data_flow_graph()[prev].opcode(); - let inst_op = self.data_flow_graph()[inst].opcode(); - let curr_op = self.data_flow_graph()[curr].opcode(); - if prev_op.is_branch() - && !prev_op.is_terminator() - && !inst_op.is_terminator() - { - panic!( - "Inserting instruction {} after {}, and before {}", - inst_op, prev_op, curr_op - ) - } - }; - }; - }; - } - self.insert_inst(inst); - if !self.srcloc.is_default() { - self.func.srclocs[inst] = self.srcloc; - } - &mut self.func.dfg - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/data_value.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/data_value.rs deleted file mode 100644 index 13aa23767..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/data_value.rs +++ /dev/null @@ -1,309 +0,0 @@ -//! This module gives users to instantiate values that Cranelift understands. These values are used, -//! for example, during interpretation and for wrapping immediates. -use crate::ir::immediates::{Ieee32, Ieee64, Offset32}; -use crate::ir::{types, ConstantData, Type}; -use core::convert::TryInto; -use core::fmt::{self, Display, Formatter}; - -/// Represent a data value. Where [Value] is an SSA reference, [DataValue] is the type + value -/// that would be referred to by a [Value]. -/// -/// [Value]: crate::ir::Value -#[allow(missing_docs)] -#[derive(Clone, Debug, PartialEq, PartialOrd)] -pub enum DataValue { - B(bool), - I8(i8), - I16(i16), - I32(i32), - I64(i64), - I128(i128), - U8(u8), - U16(u16), - U32(u32), - U64(u64), - U128(u128), - F32(Ieee32), - F64(Ieee64), - V128([u8; 16]), -} - -impl DataValue { - /// Try to cast an immediate integer (a wrapped `i64` on most Cranelift instructions) to the - /// given Cranelift [Type]. - pub fn from_integer(imm: i128, ty: Type) -> Result { - match ty { - types::I8 => Ok(DataValue::I8(imm as i8)), - types::I16 => Ok(DataValue::I16(imm as i16)), - types::I32 => Ok(DataValue::I32(imm as i32)), - types::I64 => Ok(DataValue::I64(imm as i64)), - types::I128 => Ok(DataValue::I128(imm)), - _ => Err(DataValueCastFailure::FromInteger(imm, ty)), - } - } - - /// Return the Cranelift IR [Type] for this [DataValue]. - pub fn ty(&self) -> Type { - match self { - DataValue::B(_) => types::B8, // A default type. - DataValue::I8(_) | DataValue::U8(_) => types::I8, - DataValue::I16(_) | DataValue::U16(_) => types::I16, - DataValue::I32(_) | DataValue::U32(_) => types::I32, - DataValue::I64(_) | DataValue::U64(_) => types::I64, - DataValue::I128(_) | DataValue::U128(_) => types::I128, - DataValue::F32(_) => types::F32, - DataValue::F64(_) => types::F64, - DataValue::V128(_) => types::I8X16, // A default type. - } - } - - /// Return true if the value is a vector (i.e. `DataValue::V128`). - pub fn is_vector(&self) -> bool { - match self { - DataValue::V128(_) => true, - _ => false, - } - } - - /// Return true if the value is a bool (i.e. `DataValue::B`). - pub fn is_bool(&self) -> bool { - match self { - DataValue::B(_) => true, - _ => false, - } - } - - /// Write a [DataValue] to a slice. 
- /// - /// # Panics: - /// - /// Panics if the slice does not have enough space to accommodate the [DataValue] - pub fn write_to_slice(&self, dst: &mut [u8]) { - match self { - DataValue::B(true) => dst[..16].copy_from_slice(&[u8::MAX; 16][..]), - DataValue::B(false) => dst[..16].copy_from_slice(&[0; 16][..]), - DataValue::I8(i) => dst[..1].copy_from_slice(&i.to_ne_bytes()[..]), - DataValue::I16(i) => dst[..2].copy_from_slice(&i.to_ne_bytes()[..]), - DataValue::I32(i) => dst[..4].copy_from_slice(&i.to_ne_bytes()[..]), - DataValue::I64(i) => dst[..8].copy_from_slice(&i.to_ne_bytes()[..]), - DataValue::I128(i) => dst[..16].copy_from_slice(&i.to_ne_bytes()[..]), - DataValue::F32(f) => dst[..4].copy_from_slice(&f.bits().to_ne_bytes()[..]), - DataValue::F64(f) => dst[..8].copy_from_slice(&f.bits().to_ne_bytes()[..]), - DataValue::V128(v) => dst[..16].copy_from_slice(&v[..]), - _ => unimplemented!(), - }; - } - - /// Read a [DataValue] from a slice using a given [Type]. - /// - /// # Panics: - /// - /// Panics if the slice does not have enough space to accommodate the [DataValue] - pub fn read_from_slice(src: &[u8], ty: Type) -> Self { - match ty { - types::I8 => DataValue::I8(i8::from_ne_bytes(src[..1].try_into().unwrap())), - types::I16 => DataValue::I16(i16::from_ne_bytes(src[..2].try_into().unwrap())), - types::I32 => DataValue::I32(i32::from_ne_bytes(src[..4].try_into().unwrap())), - types::I64 => DataValue::I64(i64::from_ne_bytes(src[..8].try_into().unwrap())), - types::I128 => DataValue::I128(i128::from_ne_bytes(src[..16].try_into().unwrap())), - types::F32 => DataValue::F32(Ieee32::with_bits(u32::from_ne_bytes( - src[..4].try_into().unwrap(), - ))), - types::F64 => DataValue::F64(Ieee64::with_bits(u64::from_ne_bytes( - src[..8].try_into().unwrap(), - ))), - _ if ty.is_bool() => { - // Only `ty.bytes()` are guaranteed to be written - // so we can only test the first n bytes of `src` - - let size = ty.bytes() as usize; - DataValue::B(src[..size].iter().any(|&i| i != 0)) - } - _ if ty.is_vector() && ty.bytes() == 16 => { - DataValue::V128(src[..16].try_into().unwrap()) - } - _ => unimplemented!(), - } - } - - /// Write a [DataValue] to a memory location. - pub unsafe fn write_value_to(&self, p: *mut u128) { - // Since `DataValue` does not have type info for bools we always - // write out a full 16 byte slot. - let size = match self.ty() { - ty if ty.is_bool() => 16, - ty => ty.bytes() as usize, - }; - - self.write_to_slice(std::slice::from_raw_parts_mut(p as *mut u8, size)); - } - - /// Read a [DataValue] from a memory location using a given [Type]. - pub unsafe fn read_value_from(p: *const u128, ty: Type) -> Self { - DataValue::read_from_slice( - std::slice::from_raw_parts(p as *const u8, ty.bytes() as usize), - ty, - ) - } -} - -/// Record failures to cast [DataValue]. -#[derive(Debug, PartialEq)] -#[allow(missing_docs)] -pub enum DataValueCastFailure { - TryInto(Type, Type), - FromInteger(i128, Type), -} - -// This is manually implementing Error and Display instead of using thiserror to reduce the amount -// of dependencies used by Cranelift. 
-impl std::error::Error for DataValueCastFailure {} - -impl Display for DataValueCastFailure { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - match self { - DataValueCastFailure::TryInto(from, to) => { - write!( - f, - "unable to cast data value of type {} to type {}", - from, to - ) - } - DataValueCastFailure::FromInteger(val, to) => { - write!( - f, - "unable to cast i64({}) to a data value of type {}", - val, to - ) - } - } - } -} - -/// Helper for creating conversion implementations for [DataValue]. -macro_rules! build_conversion_impl { - ( $rust_ty:ty, $data_value_ty:ident, $cranelift_ty:ident ) => { - impl From<$rust_ty> for DataValue { - fn from(data: $rust_ty) -> Self { - DataValue::$data_value_ty(data) - } - } - - impl TryInto<$rust_ty> for DataValue { - type Error = DataValueCastFailure; - fn try_into(self) -> Result<$rust_ty, Self::Error> { - if let DataValue::$data_value_ty(v) = self { - Ok(v) - } else { - Err(DataValueCastFailure::TryInto( - self.ty(), - types::$cranelift_ty, - )) - } - } - } - }; -} -build_conversion_impl!(bool, B, B8); -build_conversion_impl!(i8, I8, I8); -build_conversion_impl!(i16, I16, I16); -build_conversion_impl!(i32, I32, I32); -build_conversion_impl!(i64, I64, I64); -build_conversion_impl!(i128, I128, I128); -build_conversion_impl!(u8, U8, I8); -build_conversion_impl!(u16, U16, I16); -build_conversion_impl!(u32, U32, I32); -build_conversion_impl!(u64, U64, I64); -build_conversion_impl!(u128, U128, I128); -build_conversion_impl!(Ieee32, F32, F32); -build_conversion_impl!(Ieee64, F64, F64); -build_conversion_impl!([u8; 16], V128, I8X16); -impl From for DataValue { - fn from(o: Offset32) -> Self { - DataValue::from(Into::::into(o)) - } -} - -impl Display for DataValue { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - DataValue::B(dv) => write!(f, "{}", dv), - DataValue::I8(dv) => write!(f, "{}", dv), - DataValue::I16(dv) => write!(f, "{}", dv), - DataValue::I32(dv) => write!(f, "{}", dv), - DataValue::I64(dv) => write!(f, "{}", dv), - DataValue::I128(dv) => write!(f, "{}", dv), - DataValue::U8(dv) => write!(f, "{}", dv), - DataValue::U16(dv) => write!(f, "{}", dv), - DataValue::U32(dv) => write!(f, "{}", dv), - DataValue::U64(dv) => write!(f, "{}", dv), - DataValue::U128(dv) => write!(f, "{}", dv), - // The Ieee* wrappers here print the expected syntax. - DataValue::F32(dv) => write!(f, "{}", dv), - DataValue::F64(dv) => write!(f, "{}", dv), - // Again, for syntax consistency, use ConstantData, which in this case displays as hex. - DataValue::V128(dv) => write!(f, "{}", ConstantData::from(&dv[..])), - } - } -} - -/// Helper structure for printing bracket-enclosed vectors of [DataValue]s. -/// - for empty vectors, display `[]` -/// - for single item vectors, display `42`, e.g. -/// - for multiple item vectors, display `[42, 43, 44]`, e.g. -pub struct DisplayDataValues<'a>(pub &'a [DataValue]); - -impl<'a> Display for DisplayDataValues<'a> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - if self.0.len() == 1 { - write!(f, "{}", self.0[0]) - } else { - write!(f, "[")?; - write_data_value_list(f, &self.0)?; - write!(f, "]") - } - } -} - -/// Helper function for displaying `Vec`. 
-pub fn write_data_value_list(f: &mut Formatter<'_>, list: &[DataValue]) -> fmt::Result { - match list.len() { - 0 => Ok(()), - 1 => write!(f, "{}", list[0]), - _ => { - write!(f, "{}", list[0])?; - for dv in list.iter().skip(1) { - write!(f, ", {}", dv)?; - } - Ok(()) - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn type_conversions() { - assert_eq!(DataValue::B(true).ty(), types::B8); - assert_eq!( - TryInto::::try_into(DataValue::B(false)).unwrap(), - false - ); - assert_eq!( - TryInto::::try_into(DataValue::B(false)).unwrap_err(), - DataValueCastFailure::TryInto(types::B8, types::I32) - ); - - assert_eq!(DataValue::V128([0; 16]).ty(), types::I8X16); - assert_eq!( - TryInto::<[u8; 16]>::try_into(DataValue::V128([0; 16])).unwrap(), - [0; 16] - ); - assert_eq!( - TryInto::::try_into(DataValue::V128([0; 16])).unwrap_err(), - DataValueCastFailure::TryInto(types::I8X16, types::I32) - ); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dbg.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dbg.rs deleted file mode 100644 index 1d814ceed..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dbg.rs +++ /dev/null @@ -1,28 +0,0 @@ -//! Debug tracing helpers. -use core::fmt; - -/// Prefix added to the log file names, just before the thread name or id. -pub static LOG_FILENAME_PREFIX: &str = "cranelift.dbg."; - -/// Helper for printing lists. -pub struct DisplayList<'a, T>(pub &'a [T]) -where - T: 'a + fmt::Display; - -impl<'a, T> fmt::Display for DisplayList<'a, T> -where - T: 'a + fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.0.split_first() { - None => write!(f, "[]"), - Some((first, rest)) => { - write!(f, "[{}", first)?; - for x in rest { - write!(f, ", {}", x)?; - } - write!(f, "]") - } - } - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dce.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dce.rs deleted file mode 100644 index e3e855806..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dce.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! A Dead-Code Elimination (DCE) pass. -//! -//! Dead code here means instructions that have no side effects and have no -//! result values used by other instructions. - -use crate::cursor::{Cursor, FuncCursor}; -use crate::dominator_tree::DominatorTree; -use crate::entity::EntityRef; -use crate::inst_predicates::{any_inst_results_used, has_side_effect}; -use crate::ir::Function; -use crate::timing; - -/// Perform DCE on `func`. -pub fn do_dce(func: &mut Function, domtree: &mut DominatorTree) { - let _tt = timing::dce(); - debug_assert!(domtree.is_valid()); - - let mut live = vec![false; func.dfg.num_values()]; - for &block in domtree.cfg_postorder() { - let mut pos = FuncCursor::new(func).at_bottom(block); - while let Some(inst) = pos.prev_inst() { - { - if has_side_effect(pos.func, inst) - || any_inst_results_used(inst, &live, &pos.func.dfg) - { - for arg in pos.func.dfg.inst_args(inst) { - let v = pos.func.dfg.resolve_aliases(*arg); - live[v.index()] = true; - } - continue; - } - } - pos.remove_inst(); - } - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/divconst_magic_numbers.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/divconst_magic_numbers.rs deleted file mode 100644 index af45444c4..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/divconst_magic_numbers.rs +++ /dev/null @@ -1,1083 +0,0 @@ -//! 
Compute "magic numbers" for division-by-constants transformations. -//! -//! Math helpers for division by (non-power-of-2) constants. This is based -//! on the presentation in "Hacker's Delight" by Henry Warren, 2003. There -//! are four cases: {unsigned, signed} x {32 bit, 64 bit}. The word size -//! makes little difference, but the signed-vs-unsigned aspect has a large -//! effect. Therefore everything is presented in the order U32 U64 S32 S64 -//! so as to emphasise the similarity of the U32 and U64 cases and the S32 -//! and S64 cases. - -// Structures to hold the "magic numbers" computed. - -#[derive(PartialEq, Debug)] -pub struct MU32 { - pub mul_by: u32, - pub do_add: bool, - pub shift_by: i32, -} - -#[derive(PartialEq, Debug)] -pub struct MU64 { - pub mul_by: u64, - pub do_add: bool, - pub shift_by: i32, -} - -#[derive(PartialEq, Debug)] -pub struct MS32 { - pub mul_by: i32, - pub shift_by: i32, -} - -#[derive(PartialEq, Debug)] -pub struct MS64 { - pub mul_by: i64, - pub shift_by: i32, -} - -// The actual "magic number" generators follow. - -pub fn magic_u32(d: u32) -> MU32 { - debug_assert_ne!(d, 0); - debug_assert_ne!(d, 1); // d==1 generates out of range shifts. - - let mut do_add: bool = false; - let mut p: i32 = 31; - let nc: u32 = 0xFFFFFFFFu32 - u32::wrapping_neg(d) % d; - let mut q1: u32 = 0x80000000u32 / nc; - let mut r1: u32 = 0x80000000u32 - q1 * nc; - let mut q2: u32 = 0x7FFFFFFFu32 / d; - let mut r2: u32 = 0x7FFFFFFFu32 - q2 * d; - loop { - p = p + 1; - if r1 >= nc - r1 { - q1 = u32::wrapping_add(u32::wrapping_mul(2, q1), 1); - r1 = u32::wrapping_sub(u32::wrapping_mul(2, r1), nc); - } else { - q1 = u32::wrapping_mul(2, q1); - r1 = 2 * r1; - } - if r2 + 1 >= d - r2 { - if q2 >= 0x7FFFFFFFu32 { - do_add = true; - } - q2 = 2 * q2 + 1; - r2 = u32::wrapping_sub(u32::wrapping_add(u32::wrapping_mul(2, r2), 1), d); - } else { - if q2 >= 0x80000000u32 { - do_add = true; - } - q2 = u32::wrapping_mul(2, q2); - r2 = 2 * r2 + 1; - } - let delta: u32 = d - 1 - r2; - if !(p < 64 && (q1 < delta || (q1 == delta && r1 == 0))) { - break; - } - } - - MU32 { - mul_by: q2 + 1, - do_add, - shift_by: p - 32, - } -} - -pub fn magic_u64(d: u64) -> MU64 { - debug_assert_ne!(d, 0); - debug_assert_ne!(d, 1); // d==1 generates out of range shifts. 
- - let mut do_add: bool = false; - let mut p: i32 = 63; - let nc: u64 = 0xFFFFFFFFFFFFFFFFu64 - u64::wrapping_neg(d) % d; - let mut q1: u64 = 0x8000000000000000u64 / nc; - let mut r1: u64 = 0x8000000000000000u64 - q1 * nc; - let mut q2: u64 = 0x7FFFFFFFFFFFFFFFu64 / d; - let mut r2: u64 = 0x7FFFFFFFFFFFFFFFu64 - q2 * d; - loop { - p = p + 1; - if r1 >= nc - r1 { - q1 = u64::wrapping_add(u64::wrapping_mul(2, q1), 1); - r1 = u64::wrapping_sub(u64::wrapping_mul(2, r1), nc); - } else { - q1 = u64::wrapping_mul(2, q1); - r1 = 2 * r1; - } - if r2 + 1 >= d - r2 { - if q2 >= 0x7FFFFFFFFFFFFFFFu64 { - do_add = true; - } - q2 = 2 * q2 + 1; - r2 = u64::wrapping_sub(u64::wrapping_add(u64::wrapping_mul(2, r2), 1), d); - } else { - if q2 >= 0x8000000000000000u64 { - do_add = true; - } - q2 = u64::wrapping_mul(2, q2); - r2 = 2 * r2 + 1; - } - let delta: u64 = d - 1 - r2; - if !(p < 128 && (q1 < delta || (q1 == delta && r1 == 0))) { - break; - } - } - - MU64 { - mul_by: q2 + 1, - do_add, - shift_by: p - 64, - } -} - -pub fn magic_s32(d: i32) -> MS32 { - debug_assert_ne!(d, -1); - debug_assert_ne!(d, 0); - debug_assert_ne!(d, 1); - let two31: u32 = 0x80000000u32; - let mut p: i32 = 31; - let ad: u32 = i32::wrapping_abs(d) as u32; - let t: u32 = two31 + ((d as u32) >> 31); - let anc: u32 = u32::wrapping_sub(t - 1, t % ad); - let mut q1: u32 = two31 / anc; - let mut r1: u32 = two31 - q1 * anc; - let mut q2: u32 = two31 / ad; - let mut r2: u32 = two31 - q2 * ad; - loop { - p = p + 1; - q1 = 2 * q1; - r1 = 2 * r1; - if r1 >= anc { - q1 = q1 + 1; - r1 = r1 - anc; - } - q2 = 2 * q2; - r2 = 2 * r2; - if r2 >= ad { - q2 = q2 + 1; - r2 = r2 - ad; - } - let delta: u32 = ad - r2; - if !(q1 < delta || (q1 == delta && r1 == 0)) { - break; - } - } - - MS32 { - mul_by: (if d < 0 { - u32::wrapping_neg(q2 + 1) - } else { - q2 + 1 - }) as i32, - shift_by: p - 32, - } -} - -pub fn magic_s64(d: i64) -> MS64 { - debug_assert_ne!(d, -1); - debug_assert_ne!(d, 0); - debug_assert_ne!(d, 1); - let two63: u64 = 0x8000000000000000u64; - let mut p: i32 = 63; - let ad: u64 = i64::wrapping_abs(d) as u64; - let t: u64 = two63 + ((d as u64) >> 63); - let anc: u64 = u64::wrapping_sub(t - 1, t % ad); - let mut q1: u64 = two63 / anc; - let mut r1: u64 = two63 - q1 * anc; - let mut q2: u64 = two63 / ad; - let mut r2: u64 = two63 - q2 * ad; - loop { - p = p + 1; - q1 = 2 * q1; - r1 = 2 * r1; - if r1 >= anc { - q1 = q1 + 1; - r1 = r1 - anc; - } - q2 = 2 * q2; - r2 = 2 * r2; - if r2 >= ad { - q2 = q2 + 1; - r2 = r2 - ad; - } - let delta: u64 = ad - r2; - if !(q1 < delta || (q1 == delta && r1 == 0)) { - break; - } - } - - MS64 { - mul_by: (if d < 0 { - u64::wrapping_neg(q2 + 1) - } else { - q2 + 1 - }) as i64, - shift_by: p - 64, - } -} - -#[cfg(test)] -mod tests { - use super::{magic_s32, magic_s64, magic_u32, magic_u64}; - use super::{MS32, MS64, MU32, MU64}; - - fn make_mu32(mul_by: u32, do_add: bool, shift_by: i32) -> MU32 { - MU32 { - mul_by, - do_add, - shift_by, - } - } - - fn make_mu64(mul_by: u64, do_add: bool, shift_by: i32) -> MU64 { - MU64 { - mul_by, - do_add, - shift_by, - } - } - - fn make_ms32(mul_by: i32, shift_by: i32) -> MS32 { - MS32 { mul_by, shift_by } - } - - fn make_ms64(mul_by: i64, shift_by: i32) -> MS64 { - MS64 { mul_by, shift_by } - } - - #[test] - fn test_magic_u32() { - assert_eq!(magic_u32(2u32), make_mu32(0x80000000u32, false, 0)); - assert_eq!(magic_u32(3u32), make_mu32(0xaaaaaaabu32, false, 1)); - assert_eq!(magic_u32(4u32), make_mu32(0x40000000u32, false, 0)); - assert_eq!(magic_u32(5u32), 
make_mu32(0xcccccccdu32, false, 2)); - assert_eq!(magic_u32(6u32), make_mu32(0xaaaaaaabu32, false, 2)); - assert_eq!(magic_u32(7u32), make_mu32(0x24924925u32, true, 3)); - assert_eq!(magic_u32(9u32), make_mu32(0x38e38e39u32, false, 1)); - assert_eq!(magic_u32(10u32), make_mu32(0xcccccccdu32, false, 3)); - assert_eq!(magic_u32(11u32), make_mu32(0xba2e8ba3u32, false, 3)); - assert_eq!(magic_u32(12u32), make_mu32(0xaaaaaaabu32, false, 3)); - assert_eq!(magic_u32(25u32), make_mu32(0x51eb851fu32, false, 3)); - assert_eq!(magic_u32(125u32), make_mu32(0x10624dd3u32, false, 3)); - assert_eq!(magic_u32(625u32), make_mu32(0xd1b71759u32, false, 9)); - assert_eq!(magic_u32(1337u32), make_mu32(0x88233b2bu32, true, 11)); - assert_eq!(magic_u32(65535u32), make_mu32(0x80008001u32, false, 15)); - assert_eq!(magic_u32(65536u32), make_mu32(0x00010000u32, false, 0)); - assert_eq!(magic_u32(65537u32), make_mu32(0xffff0001u32, false, 16)); - assert_eq!(magic_u32(31415927u32), make_mu32(0x445b4553u32, false, 23)); - assert_eq!( - magic_u32(0xdeadbeefu32), - make_mu32(0x93275ab3u32, false, 31) - ); - assert_eq!( - magic_u32(0xfffffffdu32), - make_mu32(0x40000001u32, false, 30) - ); - assert_eq!(magic_u32(0xfffffffeu32), make_mu32(0x00000003u32, true, 32)); - assert_eq!( - magic_u32(0xffffffffu32), - make_mu32(0x80000001u32, false, 31) - ); - } - - #[test] - fn test_magic_u64() { - assert_eq!(magic_u64(2u64), make_mu64(0x8000000000000000u64, false, 0)); - assert_eq!(magic_u64(3u64), make_mu64(0xaaaaaaaaaaaaaaabu64, false, 1)); - assert_eq!(magic_u64(4u64), make_mu64(0x4000000000000000u64, false, 0)); - assert_eq!(magic_u64(5u64), make_mu64(0xcccccccccccccccdu64, false, 2)); - assert_eq!(magic_u64(6u64), make_mu64(0xaaaaaaaaaaaaaaabu64, false, 2)); - assert_eq!(magic_u64(7u64), make_mu64(0x2492492492492493u64, true, 3)); - assert_eq!(magic_u64(9u64), make_mu64(0xe38e38e38e38e38fu64, false, 3)); - assert_eq!(magic_u64(10u64), make_mu64(0xcccccccccccccccdu64, false, 3)); - assert_eq!(magic_u64(11u64), make_mu64(0x2e8ba2e8ba2e8ba3u64, false, 1)); - assert_eq!(magic_u64(12u64), make_mu64(0xaaaaaaaaaaaaaaabu64, false, 3)); - assert_eq!(magic_u64(25u64), make_mu64(0x47ae147ae147ae15u64, true, 5)); - assert_eq!(magic_u64(125u64), make_mu64(0x0624dd2f1a9fbe77u64, true, 7)); - assert_eq!( - magic_u64(625u64), - make_mu64(0x346dc5d63886594bu64, false, 7) - ); - assert_eq!( - magic_u64(1337u64), - make_mu64(0xc4119d952866a139u64, false, 10) - ); - assert_eq!( - magic_u64(31415927u64), - make_mu64(0x116d154b9c3d2f85u64, true, 25) - ); - assert_eq!( - magic_u64(0x00000000deadbeefu64), - make_mu64(0x93275ab2dfc9094bu64, false, 31) - ); - assert_eq!( - magic_u64(0x00000000fffffffdu64), - make_mu64(0x8000000180000005u64, false, 31) - ); - assert_eq!( - magic_u64(0x00000000fffffffeu64), - make_mu64(0x0000000200000005u64, true, 32) - ); - assert_eq!( - magic_u64(0x00000000ffffffffu64), - make_mu64(0x8000000080000001u64, false, 31) - ); - assert_eq!( - magic_u64(0x0000000100000000u64), - make_mu64(0x0000000100000000u64, false, 0) - ); - assert_eq!( - magic_u64(0x0000000100000001u64), - make_mu64(0xffffffff00000001u64, false, 32) - ); - assert_eq!( - magic_u64(0x0ddc0ffeebadf00du64), - make_mu64(0x2788e9d394b77da1u64, true, 60) - ); - assert_eq!( - magic_u64(0xfffffffffffffffdu64), - make_mu64(0x4000000000000001u64, false, 62) - ); - assert_eq!( - magic_u64(0xfffffffffffffffeu64), - make_mu64(0x0000000000000003u64, true, 64) - ); - assert_eq!( - magic_u64(0xffffffffffffffffu64), - make_mu64(0x8000000000000001u64, false, 63) - ); - } - 
- #[test] - fn test_magic_s32() { - assert_eq!( - magic_s32(-0x80000000i32), - make_ms32(0x7fffffffu32 as i32, 30) - ); - assert_eq!( - magic_s32(-0x7FFFFFFFi32), - make_ms32(0xbfffffffu32 as i32, 29) - ); - assert_eq!( - magic_s32(-0x7FFFFFFEi32), - make_ms32(0x7ffffffdu32 as i32, 30) - ); - assert_eq!(magic_s32(-31415927i32), make_ms32(0xbba4baadu32 as i32, 23)); - assert_eq!(magic_s32(-1337i32), make_ms32(0x9df73135u32 as i32, 9)); - assert_eq!(magic_s32(-256i32), make_ms32(0x7fffffffu32 as i32, 7)); - assert_eq!(magic_s32(-5i32), make_ms32(0x99999999u32 as i32, 1)); - assert_eq!(magic_s32(-3i32), make_ms32(0x55555555u32 as i32, 1)); - assert_eq!(magic_s32(-2i32), make_ms32(0x7fffffffu32 as i32, 0)); - assert_eq!(magic_s32(2i32), make_ms32(0x80000001u32 as i32, 0)); - assert_eq!(magic_s32(3i32), make_ms32(0x55555556u32 as i32, 0)); - assert_eq!(magic_s32(4i32), make_ms32(0x80000001u32 as i32, 1)); - assert_eq!(magic_s32(5i32), make_ms32(0x66666667u32 as i32, 1)); - assert_eq!(magic_s32(6i32), make_ms32(0x2aaaaaabu32 as i32, 0)); - assert_eq!(magic_s32(7i32), make_ms32(0x92492493u32 as i32, 2)); - assert_eq!(magic_s32(9i32), make_ms32(0x38e38e39u32 as i32, 1)); - assert_eq!(magic_s32(10i32), make_ms32(0x66666667u32 as i32, 2)); - assert_eq!(magic_s32(11i32), make_ms32(0x2e8ba2e9u32 as i32, 1)); - assert_eq!(magic_s32(12i32), make_ms32(0x2aaaaaabu32 as i32, 1)); - assert_eq!(magic_s32(25i32), make_ms32(0x51eb851fu32 as i32, 3)); - assert_eq!(magic_s32(125i32), make_ms32(0x10624dd3u32 as i32, 3)); - assert_eq!(magic_s32(625i32), make_ms32(0x68db8badu32 as i32, 8)); - assert_eq!(magic_s32(1337i32), make_ms32(0x6208cecbu32 as i32, 9)); - assert_eq!(magic_s32(31415927i32), make_ms32(0x445b4553u32 as i32, 23)); - assert_eq!( - magic_s32(0x7ffffffei32), - make_ms32(0x80000003u32 as i32, 30) - ); - assert_eq!( - magic_s32(0x7fffffffi32), - make_ms32(0x40000001u32 as i32, 29) - ); - } - - #[test] - fn test_magic_s64() { - assert_eq!( - magic_s64(-0x8000000000000000i64), - make_ms64(0x7fffffffffffffffu64 as i64, 62) - ); - assert_eq!( - magic_s64(-0x7FFFFFFFFFFFFFFFi64), - make_ms64(0xbfffffffffffffffu64 as i64, 61) - ); - assert_eq!( - magic_s64(-0x7FFFFFFFFFFFFFFEi64), - make_ms64(0x7ffffffffffffffdu64 as i64, 62) - ); - assert_eq!( - magic_s64(-0x0ddC0ffeeBadF00di64), - make_ms64(0x6c3b8b1635a4412fu64 as i64, 59) - ); - assert_eq!( - magic_s64(-0x100000001i64), - make_ms64(0x800000007fffffffu64 as i64, 31) - ); - assert_eq!( - magic_s64(-0x100000000i64), - make_ms64(0x7fffffffffffffffu64 as i64, 31) - ); - assert_eq!( - magic_s64(-0xFFFFFFFFi64), - make_ms64(0x7fffffff7fffffffu64 as i64, 31) - ); - assert_eq!( - magic_s64(-0xFFFFFFFEi64), - make_ms64(0x7ffffffefffffffdu64 as i64, 31) - ); - assert_eq!( - magic_s64(-0xFFFFFFFDi64), - make_ms64(0x7ffffffe7ffffffbu64 as i64, 31) - ); - assert_eq!( - magic_s64(-0xDeadBeefi64), - make_ms64(0x6cd8a54d2036f6b5u64 as i64, 31) - ); - assert_eq!( - magic_s64(-31415927i64), - make_ms64(0x7749755a31e1683du64 as i64, 24) - ); - assert_eq!( - magic_s64(-1337i64), - make_ms64(0x9df731356bccaf63u64 as i64, 9) - ); - assert_eq!( - magic_s64(-256i64), - make_ms64(0x7fffffffffffffffu64 as i64, 7) - ); - assert_eq!(magic_s64(-5i64), make_ms64(0x9999999999999999u64 as i64, 1)); - assert_eq!(magic_s64(-3i64), make_ms64(0x5555555555555555u64 as i64, 1)); - assert_eq!(magic_s64(-2i64), make_ms64(0x7fffffffffffffffu64 as i64, 0)); - assert_eq!(magic_s64(2i64), make_ms64(0x8000000000000001u64 as i64, 0)); - assert_eq!(magic_s64(3i64), make_ms64(0x5555555555555556u64 as 
i64, 0)); - assert_eq!(magic_s64(4i64), make_ms64(0x8000000000000001u64 as i64, 1)); - assert_eq!(magic_s64(5i64), make_ms64(0x6666666666666667u64 as i64, 1)); - assert_eq!(magic_s64(6i64), make_ms64(0x2aaaaaaaaaaaaaabu64 as i64, 0)); - assert_eq!(magic_s64(7i64), make_ms64(0x4924924924924925u64 as i64, 1)); - assert_eq!(magic_s64(9i64), make_ms64(0x1c71c71c71c71c72u64 as i64, 0)); - assert_eq!(magic_s64(10i64), make_ms64(0x6666666666666667u64 as i64, 2)); - assert_eq!(magic_s64(11i64), make_ms64(0x2e8ba2e8ba2e8ba3u64 as i64, 1)); - assert_eq!(magic_s64(12i64), make_ms64(0x2aaaaaaaaaaaaaabu64 as i64, 1)); - assert_eq!(magic_s64(25i64), make_ms64(0xa3d70a3d70a3d70bu64 as i64, 4)); - assert_eq!( - magic_s64(125i64), - make_ms64(0x20c49ba5e353f7cfu64 as i64, 4) - ); - assert_eq!( - magic_s64(625i64), - make_ms64(0x346dc5d63886594bu64 as i64, 7) - ); - assert_eq!( - magic_s64(1337i64), - make_ms64(0x6208ceca9433509du64 as i64, 9) - ); - assert_eq!( - magic_s64(31415927i64), - make_ms64(0x88b68aa5ce1e97c3u64 as i64, 24) - ); - assert_eq!( - magic_s64(0x00000000deadbeefi64), - make_ms64(0x93275ab2dfc9094bu64 as i64, 31) - ); - assert_eq!( - magic_s64(0x00000000fffffffdi64), - make_ms64(0x8000000180000005u64 as i64, 31) - ); - assert_eq!( - magic_s64(0x00000000fffffffei64), - make_ms64(0x8000000100000003u64 as i64, 31) - ); - assert_eq!( - magic_s64(0x00000000ffffffffi64), - make_ms64(0x8000000080000001u64 as i64, 31) - ); - assert_eq!( - magic_s64(0x0000000100000000i64), - make_ms64(0x8000000000000001u64 as i64, 31) - ); - assert_eq!( - magic_s64(0x0000000100000001i64), - make_ms64(0x7fffffff80000001u64 as i64, 31) - ); - assert_eq!( - magic_s64(0x0ddc0ffeebadf00di64), - make_ms64(0x93c474e9ca5bbed1u64 as i64, 59) - ); - assert_eq!( - magic_s64(0x7ffffffffffffffdi64), - make_ms64(0x2000000000000001u64 as i64, 60) - ); - assert_eq!( - magic_s64(0x7ffffffffffffffei64), - make_ms64(0x8000000000000003u64 as i64, 62) - ); - assert_eq!( - magic_s64(0x7fffffffffffffffi64), - make_ms64(0x4000000000000001u64 as i64, 61) - ); - } - - #[test] - fn test_magic_generators_dont_panic() { - // The point of this is to check that the magic number generators - // don't panic with integer wraparounds, especially at boundary cases - // for their arguments. The actual results are thrown away, although - // we force `total` to be used, so that rustc can't optimise the - // entire computation away. 
- - // Testing UP magic_u32 - let mut total: u64 = 0; - for x in 2..(200 * 1000u32) { - let m = magic_u32(x); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - total = total + (if m.do_add { 123 } else { 456 }); - } - assert_eq!(total, 2481999609); - - total = 0; - // Testing MIDPOINT magic_u32 - for x in 0x8000_0000u32 - 10 * 1000u32..0x8000_0000u32 + 10 * 1000u32 { - let m = magic_u32(x); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - total = total + (if m.do_add { 123 } else { 456 }); - } - assert_eq!(total, 2399809723); - - total = 0; - // Testing DOWN magic_u32 - for x in 0..(200 * 1000u32) { - let m = magic_u32(0xFFFF_FFFFu32 - x); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - total = total + (if m.do_add { 123 } else { 456 }); - } - assert_eq!(total, 271138267); - - // Testing UP magic_u64 - total = 0; - for x in 2..(200 * 1000u64) { - let m = magic_u64(x); - total = total ^ m.mul_by; - total = total + (m.shift_by as u64); - total = total + (if m.do_add { 123 } else { 456 }); - } - assert_eq!(total, 7430004086976261161); - - total = 0; - // Testing MIDPOINT magic_u64 - for x in 0x8000_0000_0000_0000u64 - 10 * 1000u64..0x8000_0000_0000_0000u64 + 10 * 1000u64 { - let m = magic_u64(x); - total = total ^ m.mul_by; - total = total + (m.shift_by as u64); - total = total + (if m.do_add { 123 } else { 456 }); - } - assert_eq!(total, 10312117246769520603); - - // Testing DOWN magic_u64 - total = 0; - for x in 0..(200 * 1000u64) { - let m = magic_u64(0xFFFF_FFFF_FFFF_FFFFu64 - x); - total = total ^ m.mul_by; - total = total + (m.shift_by as u64); - total = total + (if m.do_add { 123 } else { 456 }); - } - assert_eq!(total, 1126603594357269734); - - // Testing UP magic_s32 - total = 0; - for x in 0..(200 * 1000i32) { - let m = magic_s32(-0x8000_0000i32 + x); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - } - assert_eq!(total, 18446744069953376812); - - total = 0; - // Testing MIDPOINT magic_s32 - for x in 0..(200 * 1000i32) { - let x2 = -100 * 1000i32 + x; - if x2 != -1 && x2 != 0 && x2 != 1 { - let m = magic_s32(x2); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - } - } - assert_eq!(total, 351839350); - - // Testing DOWN magic_s32 - total = 0; - for x in 0..(200 * 1000i32) { - let m = magic_s32(0x7FFF_FFFFi32 - x); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - } - assert_eq!(total, 18446744072916880714); - - // Testing UP magic_s64 - total = 0; - for x in 0..(200 * 1000i64) { - let m = magic_s64(-0x8000_0000_0000_0000i64 + x); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - } - assert_eq!(total, 17929885647724831014); - - total = 0; - // Testing MIDPOINT magic_s64 - for x in 0..(200 * 1000i64) { - let x2 = -100 * 1000i64 + x; - if x2 != -1 && x2 != 0 && x2 != 1 { - let m = magic_s64(x2); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - } - } - assert_eq!(total, 18106042338125661964); - - // Testing DOWN magic_s64 - total = 0; - for x in 0..(200 * 1000i64) { - let m = magic_s64(0x7FFF_FFFF_FFFF_FFFFi64 - x); - total = total ^ (m.mul_by as u64); - total = total + (m.shift_by as u64); - } - assert_eq!(total, 563301797155560970); - } - - #[test] - fn test_magic_generators_give_correct_numbers() { - // For a variety of values for both `n` and `d`, compute the magic - // numbers for `d`, and in effect interpret them so as to compute - // `n / d`. 
Check that that equals the value of `n / d` computed - // directly by the hardware. This serves to check that the magic - // number generates work properly. In total, 50,148,000 tests are - // done. - - // Some constants - const MIN_U32: u32 = 0; - const MAX_U32: u32 = 0xFFFF_FFFFu32; - const MAX_U32_HALF: u32 = 0x8000_0000u32; // more or less - - const MIN_S32: i32 = 0x8000_0000u32 as i32; - const MAX_S32: i32 = 0x7FFF_FFFFu32 as i32; - - const MIN_U64: u64 = 0; - const MAX_U64: u64 = 0xFFFF_FFFF_FFFF_FFFFu64; - const MAX_U64_HALF: u64 = 0x8000_0000_0000_0000u64; // ditto - - const MIN_S64: i64 = 0x8000_0000_0000_0000u64 as i64; - const MAX_S64: i64 = 0x7FFF_FFFF_FFFF_FFFFu64 as i64; - - // These generate reference results for signed/unsigned 32/64 bit - // division, rounding towards zero. - fn div_u32(x: u32, y: u32) -> u32 { - return x / y; - } - fn div_s32(x: i32, y: i32) -> i32 { - return x / y; - } - fn div_u64(x: u64, y: u64) -> u64 { - return x / y; - } - fn div_s64(x: i64, y: i64) -> i64 { - return x / y; - } - - // Returns the high half of a 32 bit unsigned widening multiply. - fn mulhw_u32(x: u32, y: u32) -> u32 { - let x64: u64 = x as u64; - let y64: u64 = y as u64; - let r64: u64 = x64 * y64; - (r64 >> 32) as u32 - } - - // Returns the high half of a 32 bit signed widening multiply. - fn mulhw_s32(x: i32, y: i32) -> i32 { - let x64: i64 = x as i64; - let y64: i64 = y as i64; - let r64: i64 = x64 * y64; - (r64 >> 32) as i32 - } - - // Returns the high half of a 64 bit unsigned widening multiply. - fn mulhw_u64(x: u64, y: u64) -> u64 { - let t0: u64 = x & 0xffffffffu64; - let t1: u64 = x >> 32; - let t2: u64 = y & 0xffffffffu64; - let t3: u64 = y >> 32; - let t4: u64 = t0 * t2; - let t5: u64 = t1 * t2 + (t4 >> 32); - let t6: u64 = t5 & 0xffffffffu64; - let t7: u64 = t5 >> 32; - let t8: u64 = t0 * t3 + t6; - let t9: u64 = t1 * t3 + t7 + (t8 >> 32); - t9 - } - - // Returns the high half of a 64 bit signed widening multiply. - fn mulhw_s64(x: i64, y: i64) -> i64 { - let t0: u64 = x as u64 & 0xffffffffu64; - let t1: i64 = x >> 32; - let t2: u64 = y as u64 & 0xffffffffu64; - let t3: i64 = y >> 32; - let t4: u64 = t0 * t2; - let t5: i64 = t1 * t2 as i64 + (t4 >> 32) as i64; - let t6: u64 = t5 as u64 & 0xffffffffu64; - let t7: i64 = t5 >> 32; - let t8: i64 = t0 as i64 * t3 + t6 as i64; - let t9: i64 = t1 * t3 + t7 + (t8 >> 32); - t9 - } - - // Compute the magic numbers for `d` and then use them to compute and - // check `n / d` for around 1000 values of `n`, using unsigned 32-bit - // division. - fn test_magic_u32_inner(d: u32, n_tests_done: &mut i32) { - // Advance the numerator (the `n` in `n / d`) so as to test - // densely near the range ends (and, in the signed variants, near - // zero) but not so densely away from those regions. - fn advance_n_u32(x: u32) -> u32 { - if x < MIN_U32 + 110 { - return x + 1; - } - if x < MIN_U32 + 1700 { - return x + 23; - } - if x < MAX_U32 - 1700 { - let xd: f64 = (x as f64) * 1.06415927; - return if xd >= (MAX_U32 - 1700) as f64 { - MAX_U32 - 1700 - } else { - xd as u32 - }; - } - if x < MAX_U32 - 110 { - return x + 23; - } - u32::wrapping_add(x, 1) - } - - let magic: MU32 = magic_u32(d); - let mut n: u32 = MIN_U32; - loop { - *n_tests_done += 1; - // Compute and check `q = n / d` using `magic`. 
- let mut q: u32 = mulhw_u32(n, magic.mul_by); - if magic.do_add { - assert!(magic.shift_by >= 1 && magic.shift_by <= 32); - let mut t: u32 = n - q; - t >>= 1; - t = t + q; - q = t >> (magic.shift_by - 1); - } else { - assert!(magic.shift_by >= 0 && magic.shift_by <= 31); - q >>= magic.shift_by; - } - - assert_eq!(q, div_u32(n, d)); - - n = advance_n_u32(n); - if n == MIN_U32 { - break; - } - } - } - - // Compute the magic numbers for `d` and then use them to compute and - // check `n / d` for around 1000 values of `n`, using signed 32-bit - // division. - fn test_magic_s32_inner(d: i32, n_tests_done: &mut i32) { - // See comment on advance_n_u32 above. - fn advance_n_s32(x: i32) -> i32 { - if x >= 0 && x <= 29 { - return x + 1; - } - if x < MIN_S32 + 110 { - return x + 1; - } - if x < MIN_S32 + 1700 { - return x + 23; - } - if x < MAX_S32 - 1700 { - let mut xd: f64 = x as f64; - xd = if xd < 0.0 { - xd / 1.06415927 - } else { - xd * 1.06415927 - }; - return if xd >= (MAX_S32 - 1700) as f64 { - MAX_S32 - 1700 - } else { - xd as i32 - }; - } - if x < MAX_S32 - 110 { - return x + 23; - } - if x == MAX_S32 { - return MIN_S32; - } - x + 1 - } - - let magic: MS32 = magic_s32(d); - let mut n: i32 = MIN_S32; - loop { - *n_tests_done += 1; - // Compute and check `q = n / d` using `magic`. - let mut q: i32 = mulhw_s32(n, magic.mul_by); - if d > 0 && magic.mul_by < 0 { - q = q + n; - } else if d < 0 && magic.mul_by > 0 { - q = q - n; - } - assert!(magic.shift_by >= 0 && magic.shift_by <= 31); - q = q >> magic.shift_by; - let mut t: u32 = q as u32; - t = t >> 31; - q = q + (t as i32); - - assert_eq!(q, div_s32(n, d)); - - n = advance_n_s32(n); - if n == MIN_S32 { - break; - } - } - } - - // Compute the magic numbers for `d` and then use them to compute and - // check `n / d` for around 1000 values of `n`, using unsigned 64-bit - // division. - fn test_magic_u64_inner(d: u64, n_tests_done: &mut i32) { - // See comment on advance_n_u32 above. - fn advance_n_u64(x: u64) -> u64 { - if x < MIN_U64 + 110 { - return x + 1; - } - if x < MIN_U64 + 1700 { - return x + 23; - } - if x < MAX_U64 - 1700 { - let xd: f64 = (x as f64) * 1.06415927; - return if xd >= (MAX_U64 - 1700) as f64 { - MAX_U64 - 1700 - } else { - xd as u64 - }; - } - if x < MAX_U64 - 110 { - return x + 23; - } - u64::wrapping_add(x, 1) - } - - let magic: MU64 = magic_u64(d); - let mut n: u64 = MIN_U64; - loop { - *n_tests_done += 1; - // Compute and check `q = n / d` using `magic`. - let mut q = mulhw_u64(n, magic.mul_by); - if magic.do_add { - assert!(magic.shift_by >= 1 && magic.shift_by <= 64); - let mut t: u64 = n - q; - t >>= 1; - t = t + q; - q = t >> (magic.shift_by - 1); - } else { - assert!(magic.shift_by >= 0 && magic.shift_by <= 63); - q >>= magic.shift_by; - } - - assert_eq!(q, div_u64(n, d)); - - n = advance_n_u64(n); - if n == MIN_U64 { - break; - } - } - } - - // Compute the magic numbers for `d` and then use them to compute and - // check `n / d` for around 1000 values of `n`, using signed 64-bit - // division. - fn test_magic_s64_inner(d: i64, n_tests_done: &mut i32) { - // See comment on advance_n_u32 above. 
- fn advance_n_s64(x: i64) -> i64 { - if x >= 0 && x <= 29 { - return x + 1; - } - if x < MIN_S64 + 110 { - return x + 1; - } - if x < MIN_S64 + 1700 { - return x + 23; - } - if x < MAX_S64 - 1700 { - let mut xd: f64 = x as f64; - xd = if xd < 0.0 { - xd / 1.06415927 - } else { - xd * 1.06415927 - }; - return if xd >= (MAX_S64 - 1700) as f64 { - MAX_S64 - 1700 - } else { - xd as i64 - }; - } - if x < MAX_S64 - 110 { - return x + 23; - } - if x == MAX_S64 { - return MIN_S64; - } - x + 1 - } - - let magic: MS64 = magic_s64(d); - let mut n: i64 = MIN_S64; - loop { - *n_tests_done += 1; - // Compute and check `q = n / d` using `magic`. */ - let mut q: i64 = mulhw_s64(n, magic.mul_by); - if d > 0 && magic.mul_by < 0 { - q = q + n; - } else if d < 0 && magic.mul_by > 0 { - q = q - n; - } - assert!(magic.shift_by >= 0 && magic.shift_by <= 63); - q = q >> magic.shift_by; - let mut t: u64 = q as u64; - t = t >> 63; - q = q + (t as i64); - - assert_eq!(q, div_s64(n, d)); - - n = advance_n_s64(n); - if n == MIN_S64 { - break; - } - } - } - - // Using all the above support machinery, actually run the tests. - - let mut n_tests_done: i32 = 0; - - // u32 division tests - { - // 2 .. 3k - let mut d: u32 = 2; - for _ in 0..3 * 1000 { - test_magic_u32_inner(d, &mut n_tests_done); - d += 1; - } - - // across the midpoint: midpoint - 3k .. midpoint + 3k - d = MAX_U32_HALF - 3 * 1000; - for _ in 0..2 * 3 * 1000 { - test_magic_u32_inner(d, &mut n_tests_done); - d += 1; - } - - // MAX_U32 - 3k .. MAX_U32 (in reverse order) - d = MAX_U32; - for _ in 0..3 * 1000 { - test_magic_u32_inner(d, &mut n_tests_done); - d -= 1; - } - } - - // s32 division tests - { - // MIN_S32 .. MIN_S32 + 3k - let mut d: i32 = MIN_S32; - for _ in 0..3 * 1000 { - test_magic_s32_inner(d, &mut n_tests_done); - d += 1; - } - - // -3k .. -2 (in reverse order) - d = -2; - for _ in 0..3 * 1000 { - test_magic_s32_inner(d, &mut n_tests_done); - d -= 1; - } - - // 2 .. 3k - d = 2; - for _ in 0..3 * 1000 { - test_magic_s32_inner(d, &mut n_tests_done); - d += 1; - } - - // MAX_S32 - 3k .. MAX_S32 (in reverse order) - d = MAX_S32; - for _ in 0..3 * 1000 { - test_magic_s32_inner(d, &mut n_tests_done); - d -= 1; - } - } - - // u64 division tests - { - // 2 .. 3k - let mut d: u64 = 2; - for _ in 0..3 * 1000 { - test_magic_u64_inner(d, &mut n_tests_done); - d += 1; - } - - // across the midpoint: midpoint - 3k .. midpoint + 3k - d = MAX_U64_HALF - 3 * 1000; - for _ in 0..2 * 3 * 1000 { - test_magic_u64_inner(d, &mut n_tests_done); - d += 1; - } - - // mAX_U64 - 3000 .. mAX_U64 (in reverse order) - d = MAX_U64; - for _ in 0..3 * 1000 { - test_magic_u64_inner(d, &mut n_tests_done); - d -= 1; - } - } - - // s64 division tests - { - // MIN_S64 .. MIN_S64 + 3k - let mut d: i64 = MIN_S64; - for _ in 0..3 * 1000 { - test_magic_s64_inner(d, &mut n_tests_done); - d += 1; - } - - // -3k .. -2 (in reverse order) - d = -2; - for _ in 0..3 * 1000 { - test_magic_s64_inner(d, &mut n_tests_done); - d -= 1; - } - - // 2 .. 3k - d = 2; - for _ in 0..3 * 1000 { - test_magic_s64_inner(d, &mut n_tests_done); - d += 1; - } - - // MAX_S64 - 3k .. 
MAX_S64 (in reverse order) - d = MAX_S64; - for _ in 0..3 * 1000 { - test_magic_s64_inner(d, &mut n_tests_done); - d -= 1; - } - } - assert_eq!(n_tests_done, 50_148_000); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dominator_tree.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dominator_tree.rs deleted file mode 100644 index 5077354f7..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/dominator_tree.rs +++ /dev/null @@ -1,837 +0,0 @@ -//! A Dominator Tree represented as mappings of Blocks to their immediate dominator. - -use crate::entity::SecondaryMap; -use crate::flowgraph::{BlockPredecessor, ControlFlowGraph}; -use crate::ir::instructions::BranchInfo; -use crate::ir::{Block, ExpandedProgramPoint, Function, Inst, Layout, ProgramOrder, Value}; -use crate::packed_option::PackedOption; -use crate::timing; -use alloc::vec::Vec; -use core::cmp; -use core::cmp::Ordering; -use core::mem; - -/// RPO numbers are not first assigned in a contiguous way but as multiples of STRIDE, to leave -/// room for modifications of the dominator tree. -const STRIDE: u32 = 4; - -/// Special RPO numbers used during `compute_postorder`. -const DONE: u32 = 1; -const SEEN: u32 = 2; - -/// Dominator tree node. We keep one of these per block. -#[derive(Clone, Default)] -struct DomNode { - /// Number of this node in a reverse post-order traversal of the CFG, starting from 1. - /// This number is monotonic in the reverse postorder but not contiguous, since we leave - /// holes for later localized modifications of the dominator tree. - /// Unreachable nodes get number 0, all others are positive. - rpo_number: u32, - - /// The immediate dominator of this block, represented as the branch or jump instruction at the - /// end of the dominating basic block. - /// - /// This is `None` for unreachable blocks and the entry block which doesn't have an immediate - /// dominator. - idom: PackedOption, -} - -/// The dominator tree for a single function. -pub struct DominatorTree { - nodes: SecondaryMap, - - /// CFG post-order of all reachable blocks. - postorder: Vec, - - /// Scratch memory used by `compute_postorder()`. - stack: Vec, - - valid: bool, -} - -/// Methods for querying the dominator tree. -impl DominatorTree { - /// Is `block` reachable from the entry block? - pub fn is_reachable(&self, block: Block) -> bool { - self.nodes[block].rpo_number != 0 - } - - /// Get the CFG post-order of blocks that was used to compute the dominator tree. - /// - /// Note that this post-order is not updated automatically when the CFG is modified. It is - /// computed from scratch and cached by `compute()`. - pub fn cfg_postorder(&self) -> &[Block] { - debug_assert!(self.is_valid()); - &self.postorder - } - - /// Returns the immediate dominator of `block`. - /// - /// The immediate dominator of a basic block is a basic block which we represent by - /// the branch or jump instruction at the end of the basic block. This does not have to be the - /// terminator of its block. - /// - /// A branch or jump is said to *dominate* `block` if all control flow paths from the function - /// entry to `block` must go through the branch. - /// - /// The *immediate dominator* is the dominator that is closest to `block`. All other dominators - /// also dominate the immediate dominator. - /// - /// This returns `None` if `block` is not reachable from the entry block, or if it is the entry block - /// which has no dominators. 
- pub fn idom(&self, block: Block) -> Option { - self.nodes[block].idom.into() - } - - /// Compare two blocks relative to the reverse post-order. - fn rpo_cmp_block(&self, a: Block, b: Block) -> Ordering { - self.nodes[a].rpo_number.cmp(&self.nodes[b].rpo_number) - } - - /// Compare two program points relative to a reverse post-order traversal of the control-flow - /// graph. - /// - /// Return `Ordering::Less` if `a` comes before `b` in the RPO. - /// - /// If `a` and `b` belong to the same block, compare their relative position in the block. - pub fn rpo_cmp(&self, a: A, b: B, layout: &Layout) -> Ordering - where - A: Into, - B: Into, - { - let a = a.into(); - let b = b.into(); - self.rpo_cmp_block(layout.pp_block(a), layout.pp_block(b)) - .then(layout.cmp(a, b)) - } - - /// Returns `true` if `a` dominates `b`. - /// - /// This means that every control-flow path from the function entry to `b` must go through `a`. - /// - /// Dominance is ill defined for unreachable blocks. This function can always determine - /// dominance for instructions in the same block, but otherwise returns `false` if either block - /// is unreachable. - /// - /// An instruction is considered to dominate itself. - pub fn dominates(&self, a: A, b: B, layout: &Layout) -> bool - where - A: Into, - B: Into, - { - let a = a.into(); - let b = b.into(); - match a { - ExpandedProgramPoint::Block(block_a) => { - a == b || self.last_dominator(block_a, b, layout).is_some() - } - ExpandedProgramPoint::Inst(inst_a) => { - let block_a = layout - .inst_block(inst_a) - .expect("Instruction not in layout."); - match self.last_dominator(block_a, b, layout) { - Some(last) => layout.cmp(inst_a, last) != Ordering::Greater, - None => false, - } - } - } - } - - /// Find the last instruction in `a` that dominates `b`. - /// If no instructions in `a` dominate `b`, return `None`. - pub fn last_dominator(&self, a: Block, b: B, layout: &Layout) -> Option - where - B: Into, - { - let (mut block_b, mut inst_b) = match b.into() { - ExpandedProgramPoint::Block(block) => (block, None), - ExpandedProgramPoint::Inst(inst) => ( - layout.inst_block(inst).expect("Instruction not in layout."), - Some(inst), - ), - }; - let rpo_a = self.nodes[a].rpo_number; - - // Run a finger up the dominator tree from b until we see a. - // Do nothing if b is unreachable. - while rpo_a < self.nodes[block_b].rpo_number { - let idom = match self.idom(block_b) { - Some(idom) => idom, - None => return None, // a is unreachable, so we climbed past the entry - }; - block_b = layout.inst_block(idom).expect("Dominator got removed."); - inst_b = Some(idom); - } - if a == block_b { - inst_b - } else { - None - } - } - - /// Compute the common dominator of two basic blocks. - /// - /// Both basic blocks are assumed to be reachable. - pub fn common_dominator( - &self, - mut a: BlockPredecessor, - mut b: BlockPredecessor, - layout: &Layout, - ) -> BlockPredecessor { - loop { - match self.rpo_cmp_block(a.block, b.block) { - Ordering::Less => { - // `a` comes before `b` in the RPO. Move `b` up. - let idom = self.nodes[b.block].idom.expect("Unreachable basic block?"); - b = BlockPredecessor::new( - layout.inst_block(idom).expect("Dangling idom instruction"), - idom, - ); - } - Ordering::Greater => { - // `b` comes before `a` in the RPO. Move `a` up. 
- let idom = self.nodes[a.block].idom.expect("Unreachable basic block?"); - a = BlockPredecessor::new( - layout.inst_block(idom).expect("Dangling idom instruction"), - idom, - ); - } - Ordering::Equal => break, - } - } - - debug_assert_eq!( - a.block, b.block, - "Unreachable block passed to common_dominator?" - ); - - // We're in the same block. The common dominator is the earlier instruction. - if layout.cmp(a.inst, b.inst) == Ordering::Less { - a - } else { - b - } - } -} - -impl DominatorTree { - /// Allocate a new blank dominator tree. Use `compute` to compute the dominator tree for a - /// function. - pub fn new() -> Self { - Self { - nodes: SecondaryMap::new(), - postorder: Vec::new(), - stack: Vec::new(), - valid: false, - } - } - - /// Allocate and compute a dominator tree. - pub fn with_function(func: &Function, cfg: &ControlFlowGraph) -> Self { - let block_capacity = func.layout.block_capacity(); - let mut domtree = Self { - nodes: SecondaryMap::with_capacity(block_capacity), - postorder: Vec::with_capacity(block_capacity), - stack: Vec::new(), - valid: false, - }; - domtree.compute(func, cfg); - domtree - } - - /// Reset and compute a CFG post-order and dominator tree. - pub fn compute(&mut self, func: &Function, cfg: &ControlFlowGraph) { - let _tt = timing::domtree(); - debug_assert!(cfg.is_valid()); - self.compute_postorder(func); - self.compute_domtree(func, cfg); - self.valid = true; - } - - /// Clear the data structures used to represent the dominator tree. This will leave the tree in - /// a state where `is_valid()` returns false. - pub fn clear(&mut self) { - self.nodes.clear(); - self.postorder.clear(); - debug_assert!(self.stack.is_empty()); - self.valid = false; - } - - /// Check if the dominator tree is in a valid state. - /// - /// Note that this doesn't perform any kind of validity checks. It simply checks if the - /// `compute()` method has been called since the last `clear()`. It does not check that the - /// dominator tree is consistent with the CFG. - pub fn is_valid(&self) -> bool { - self.valid - } - - /// Reset all internal data structures and compute a post-order of the control flow graph. - /// - /// This leaves `rpo_number == 1` for all reachable blocks, 0 for unreachable ones. - fn compute_postorder(&mut self, func: &Function) { - self.clear(); - self.nodes.resize(func.dfg.num_blocks()); - - // This algorithm is a depth first traversal (DFT) of the control flow graph, computing a - // post-order of the blocks that are reachable form the entry block. A DFT post-order is not - // unique. The specific order we get is controlled by two factors: - // - // 1. The order each node's children are visited, and - // 2. The method used for pruning graph edges to get a tree. - // - // There are two ways of viewing the CFG as a graph: - // - // 1. Each block is a node, with outgoing edges for all the branches in the block. - // 2. Each basic block is a node, with outgoing edges for the single branch at the end of - // the BB. (A block is a linear sequence of basic blocks). - // - // The first graph is a contraction of the second one. We want to compute a block post-order - // that is compatible both graph interpretations. That is, if you compute a BB post-order - // and then remove those BBs that do not correspond to block headers, you get a post-order of - // the block graph. - // - // Node child order: - // - // In the BB graph, we always go down the fall-through path first and follow the branch - // destination second. 
- // - // In the block graph, this is equivalent to visiting block successors in a bottom-up - // order, starting from the destination of the block's terminating jump, ending at the - // destination of the first branch in the block. - // - // Edge pruning: - // - // In the BB graph, we keep an edge to a block the first time we visit the *source* side - // of the edge. Any subsequent edges to the same block are pruned. - // - // The equivalent tree is reached in the block graph by keeping the first edge to a block - // in a top-down traversal of the successors. (And then visiting edges in a bottom-up - // order). - // - // This pruning method makes it possible to compute the DFT without storing lots of - // information about the progress through a block. - - // During this algorithm only, use `rpo_number` to hold the following state: - // - // 0: block has not yet been reached in the pre-order. - // SEEN: block has been pushed on the stack but successors not yet pushed. - // DONE: Successors pushed. - - match func.layout.entry_block() { - Some(block) => { - self.stack.push(block); - self.nodes[block].rpo_number = SEEN; - } - None => return, - } - - while let Some(block) = self.stack.pop() { - match self.nodes[block].rpo_number { - SEEN => { - // This is the first time we pop the block, so we need to scan its successors and - // then revisit it. - self.nodes[block].rpo_number = DONE; - self.stack.push(block); - self.push_successors(func, block); - } - DONE => { - // This is the second time we pop the block, so all successors have been - // processed. - self.postorder.push(block); - } - _ => unreachable!(), - } - } - } - - /// Push `block` successors onto `self.stack`, filtering out those that have already been seen. - /// - /// The successors are pushed in program order which is important to get a split-invariant - /// post-order. Split-invariant means that if a block is split in two, we get the same - /// post-order except for the insertion of the new block header at the split point. - fn push_successors(&mut self, func: &Function, block: Block) { - for inst in func.layout.block_likely_branches(block) { - match func.dfg.analyze_branch(inst) { - BranchInfo::SingleDest(succ, _) => self.push_if_unseen(succ), - BranchInfo::Table(jt, dest) => { - for succ in func.jump_tables[jt].iter() { - self.push_if_unseen(*succ); - } - if let Some(dest) = dest { - self.push_if_unseen(dest); - } - } - BranchInfo::NotABranch => {} - } - } - } - - /// Push `block` onto `self.stack` if it has not already been seen. - fn push_if_unseen(&mut self, block: Block) { - if self.nodes[block].rpo_number == 0 { - self.nodes[block].rpo_number = SEEN; - self.stack.push(block); - } - } - - /// Build a dominator tree from a control flow graph using Keith D. Cooper's - /// "Simple, Fast Dominator Algorithm." - fn compute_domtree(&mut self, func: &Function, cfg: &ControlFlowGraph) { - // During this algorithm, `rpo_number` has the following values: - // - // 0: block is not reachable. - // 1: block is reachable, but has not yet been visited during the first pass. This is set by - // `compute_postorder`. - // 2+: block is reachable and has an assigned RPO number. - - // We'll be iterating over a reverse post-order of the CFG, skipping the entry block. - let (entry_block, postorder) = match self.postorder.as_slice().split_last() { - Some((&eb, rest)) => (eb, rest), - None => return, - }; - debug_assert_eq!(Some(entry_block), func.layout.entry_block()); - - // Do a first pass where we assign RPO numbers to all reachable nodes. 
- self.nodes[entry_block].rpo_number = 2 * STRIDE; - for (rpo_idx, &block) in postorder.iter().rev().enumerate() { - // Update the current node and give it an RPO number. - // The entry block got 2, the rest start at 3 by multiples of STRIDE to leave - // room for future dominator tree modifications. - // - // Since `compute_idom` will only look at nodes with an assigned RPO number, the - // function will never see an uninitialized predecessor. - // - // Due to the nature of the post-order traversal, every node we visit will have at - // least one predecessor that has previously been visited during this RPO. - self.nodes[block] = DomNode { - idom: self.compute_idom(block, cfg, &func.layout).into(), - rpo_number: (rpo_idx as u32 + 3) * STRIDE, - } - } - - // Now that we have RPO numbers for everything and initial immediate dominator estimates, - // iterate until convergence. - // - // If the function is free of irreducible control flow, this will exit after one iteration. - let mut changed = true; - while changed { - changed = false; - for &block in postorder.iter().rev() { - let idom = self.compute_idom(block, cfg, &func.layout).into(); - if self.nodes[block].idom != idom { - self.nodes[block].idom = idom; - changed = true; - } - } - } - } - - // Compute the immediate dominator for `block` using the current `idom` states for the reachable - // nodes. - fn compute_idom(&self, block: Block, cfg: &ControlFlowGraph, layout: &Layout) -> Inst { - // Get an iterator with just the reachable, already visited predecessors to `block`. - // Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't - // been visited yet, 0 for unreachable blocks. - let mut reachable_preds = cfg - .pred_iter(block) - .filter(|&BlockPredecessor { block: pred, .. }| self.nodes[pred].rpo_number > 1); - - // The RPO must visit at least one predecessor before this node. - let mut idom = reachable_preds - .next() - .expect("block node must have one reachable predecessor"); - - for pred in reachable_preds { - idom = self.common_dominator(idom, pred, layout); - } - - idom.inst - } -} - -/// Optional pre-order information that can be computed for a dominator tree. -/// -/// This data structure is computed from a `DominatorTree` and provides: -/// -/// - A forward traversable dominator tree through the `children()` iterator. -/// - An ordering of blocks according to a dominator tree pre-order. -/// - Constant time dominance checks at the block granularity. -/// -/// The information in this auxiliary data structure is not easy to update when the control flow -/// graph changes, which is why it is kept separate. -pub struct DominatorTreePreorder { - nodes: SecondaryMap, - - // Scratch memory used by `compute_postorder()`. - stack: Vec, -} - -#[derive(Default, Clone)] -struct ExtraNode { - /// First child node in the domtree. - child: PackedOption, - - /// Next sibling node in the domtree. This linked list is ordered according to the CFG RPO. - sibling: PackedOption, - - /// Sequence number for this node in a pre-order traversal of the dominator tree. - /// Unreachable blocks have number 0, the entry block is 1. - pre_number: u32, - - /// Maximum `pre_number` for the sub-tree of the dominator tree that is rooted at this node. - /// This is always >= `pre_number`. - pre_max: u32, -} - -/// Creating and computing the dominator tree pre-order. -impl DominatorTreePreorder { - /// Create a new blank `DominatorTreePreorder`. 
- pub fn new() -> Self { - Self { - nodes: SecondaryMap::new(), - stack: Vec::new(), - } - } - - /// Recompute this data structure to match `domtree`. - pub fn compute(&mut self, domtree: &DominatorTree, layout: &Layout) { - self.nodes.clear(); - debug_assert_eq!(self.stack.len(), 0); - - // Step 1: Populate the child and sibling links. - // - // By following the CFG post-order and pushing to the front of the lists, we make sure that - // sibling lists are ordered according to the CFG reverse post-order. - for &block in domtree.cfg_postorder() { - if let Some(idom_inst) = domtree.idom(block) { - let idom = layout.pp_block(idom_inst); - let sib = mem::replace(&mut self.nodes[idom].child, block.into()); - self.nodes[block].sibling = sib; - } else { - // The only block without an immediate dominator is the entry. - self.stack.push(block); - } - } - - // Step 2. Assign pre-order numbers from a DFS of the dominator tree. - debug_assert!(self.stack.len() <= 1); - let mut n = 0; - while let Some(block) = self.stack.pop() { - n += 1; - let node = &mut self.nodes[block]; - node.pre_number = n; - node.pre_max = n; - if let Some(n) = node.sibling.expand() { - self.stack.push(n); - } - if let Some(n) = node.child.expand() { - self.stack.push(n); - } - } - - // Step 3. Propagate the `pre_max` numbers up the tree. - // The CFG post-order is topologically ordered w.r.t. dominance so a node comes after all - // its dominator tree children. - for &block in domtree.cfg_postorder() { - if let Some(idom_inst) = domtree.idom(block) { - let idom = layout.pp_block(idom_inst); - let pre_max = cmp::max(self.nodes[block].pre_max, self.nodes[idom].pre_max); - self.nodes[idom].pre_max = pre_max; - } - } - } -} - -/// An iterator that enumerates the direct children of a block in the dominator tree. -pub struct ChildIter<'a> { - dtpo: &'a DominatorTreePreorder, - next: PackedOption, -} - -impl<'a> Iterator for ChildIter<'a> { - type Item = Block; - - fn next(&mut self) -> Option { - let n = self.next.expand(); - if let Some(block) = n { - self.next = self.dtpo.nodes[block].sibling; - } - n - } -} - -/// Query interface for the dominator tree pre-order. -impl DominatorTreePreorder { - /// Get an iterator over the direct children of `block` in the dominator tree. - /// - /// These are the block's whose immediate dominator is an instruction in `block`, ordered according - /// to the CFG reverse post-order. - pub fn children(&self, block: Block) -> ChildIter { - ChildIter { - dtpo: self, - next: self.nodes[block].child, - } - } - - /// Fast, constant time dominance check with block granularity. - /// - /// This computes the same result as `domtree.dominates(a, b)`, but in guaranteed fast constant - /// time. This is less general than the `DominatorTree` method because it only works with block - /// program points. - /// - /// A block is considered to dominate itself. - pub fn dominates(&self, a: Block, b: Block) -> bool { - let na = &self.nodes[a]; - let nb = &self.nodes[b]; - na.pre_number <= nb.pre_number && na.pre_max >= nb.pre_max - } - - /// Compare two blocks according to the dominator pre-order. - pub fn pre_cmp_block(&self, a: Block, b: Block) -> Ordering { - self.nodes[a].pre_number.cmp(&self.nodes[b].pre_number) - } - - /// Compare two program points according to the dominator tree pre-order. - /// - /// This ordering of program points have the property that given a program point, pp, all the - /// program points dominated by pp follow immediately and contiguously after pp in the order. 
- pub fn pre_cmp(&self, a: A, b: B, layout: &Layout) -> Ordering - where - A: Into, - B: Into, - { - let a = a.into(); - let b = b.into(); - self.pre_cmp_block(layout.pp_block(a), layout.pp_block(b)) - .then(layout.cmp(a, b)) - } - - /// Compare two value defs according to the dominator tree pre-order. - /// - /// Two values defined at the same program point are compared according to their parameter or - /// result order. - /// - /// This is a total ordering of the values in the function. - pub fn pre_cmp_def(&self, a: Value, b: Value, func: &Function) -> Ordering { - let da = func.dfg.value_def(a); - let db = func.dfg.value_def(b); - self.pre_cmp(da, db, &func.layout) - .then_with(|| da.num().cmp(&db.num())) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cursor::{Cursor, FuncCursor}; - use crate::flowgraph::ControlFlowGraph; - use crate::ir::types::*; - use crate::ir::{Function, InstBuilder, TrapCode}; - - #[test] - fn empty() { - let func = Function::new(); - let cfg = ControlFlowGraph::with_function(&func); - debug_assert!(cfg.is_valid()); - let dtree = DominatorTree::with_function(&func, &cfg); - assert_eq!(0, dtree.nodes.keys().count()); - assert_eq!(dtree.cfg_postorder(), &[]); - - let mut dtpo = DominatorTreePreorder::new(); - dtpo.compute(&dtree, &func.layout); - } - - #[test] - fn unreachable_node() { - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let v0 = func.dfg.append_block_param(block0, I32); - let block1 = func.dfg.make_block(); - let block2 = func.dfg.make_block(); - - let mut cur = FuncCursor::new(&mut func); - - cur.insert_block(block0); - cur.ins().brnz(v0, block2, &[]); - cur.ins().trap(TrapCode::User(0)); - - cur.insert_block(block1); - let v1 = cur.ins().iconst(I32, 1); - let v2 = cur.ins().iadd(v0, v1); - cur.ins().jump(block0, &[v2]); - - cur.insert_block(block2); - cur.ins().return_(&[v0]); - - let cfg = ControlFlowGraph::with_function(cur.func); - let dt = DominatorTree::with_function(cur.func, &cfg); - - // Fall-through-first, prune-at-source DFT: - // - // block0 { - // brnz block2 { - // trap - // block2 { - // return - // } block2 - // } block0 - assert_eq!(dt.cfg_postorder(), &[block2, block0]); - - let v2_def = cur.func.dfg.value_def(v2).unwrap_inst(); - assert!(!dt.dominates(v2_def, block0, &cur.func.layout)); - assert!(!dt.dominates(block0, v2_def, &cur.func.layout)); - - let mut dtpo = DominatorTreePreorder::new(); - dtpo.compute(&dt, &cur.func.layout); - assert!(dtpo.dominates(block0, block0)); - assert!(!dtpo.dominates(block0, block1)); - assert!(dtpo.dominates(block0, block2)); - assert!(!dtpo.dominates(block1, block0)); - assert!(dtpo.dominates(block1, block1)); - assert!(!dtpo.dominates(block1, block2)); - assert!(!dtpo.dominates(block2, block0)); - assert!(!dtpo.dominates(block2, block1)); - assert!(dtpo.dominates(block2, block2)); - } - - #[test] - fn non_zero_entry_block() { - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let block1 = func.dfg.make_block(); - let block2 = func.dfg.make_block(); - let block3 = func.dfg.make_block(); - let cond = func.dfg.append_block_param(block3, I32); - - let mut cur = FuncCursor::new(&mut func); - - cur.insert_block(block3); - let jmp_block3_block1 = cur.ins().jump(block1, &[]); - - cur.insert_block(block1); - let br_block1_block0 = cur.ins().brnz(cond, block0, &[]); - let jmp_block1_block2 = cur.ins().jump(block2, &[]); - - cur.insert_block(block2); - cur.ins().jump(block0, &[]); - - cur.insert_block(block0); - - let cfg = 
ControlFlowGraph::with_function(cur.func); - let dt = DominatorTree::with_function(cur.func, &cfg); - - // Fall-through-first, prune-at-source DFT: - // - // block3 { - // block3:jump block1 { - // block1 { - // block1:brnz block0 { - // block1:jump block2 { - // block2 { - // block2:jump block0 (seen) - // } block2 - // } block1:jump block2 - // block0 { - // } block0 - // } block1:brnz block0 - // } block1 - // } block3:jump block1 - // } block3 - - assert_eq!(dt.cfg_postorder(), &[block2, block0, block1, block3]); - - assert_eq!(cur.func.layout.entry_block().unwrap(), block3); - assert_eq!(dt.idom(block3), None); - assert_eq!(dt.idom(block1).unwrap(), jmp_block3_block1); - assert_eq!(dt.idom(block2).unwrap(), jmp_block1_block2); - assert_eq!(dt.idom(block0).unwrap(), br_block1_block0); - - assert!(dt.dominates(br_block1_block0, br_block1_block0, &cur.func.layout)); - assert!(!dt.dominates(br_block1_block0, jmp_block3_block1, &cur.func.layout)); - assert!(dt.dominates(jmp_block3_block1, br_block1_block0, &cur.func.layout)); - - assert_eq!( - dt.rpo_cmp(block3, block3, &cur.func.layout), - Ordering::Equal - ); - assert_eq!(dt.rpo_cmp(block3, block1, &cur.func.layout), Ordering::Less); - assert_eq!( - dt.rpo_cmp(block3, jmp_block3_block1, &cur.func.layout), - Ordering::Less - ); - assert_eq!( - dt.rpo_cmp(jmp_block3_block1, jmp_block1_block2, &cur.func.layout), - Ordering::Less - ); - } - - #[test] - fn backwards_layout() { - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let block1 = func.dfg.make_block(); - let block2 = func.dfg.make_block(); - - let mut cur = FuncCursor::new(&mut func); - - cur.insert_block(block0); - let jmp02 = cur.ins().jump(block2, &[]); - - cur.insert_block(block1); - let trap = cur.ins().trap(TrapCode::User(5)); - - cur.insert_block(block2); - let jmp21 = cur.ins().jump(block1, &[]); - - let cfg = ControlFlowGraph::with_function(cur.func); - let dt = DominatorTree::with_function(cur.func, &cfg); - - assert_eq!(cur.func.layout.entry_block(), Some(block0)); - assert_eq!(dt.idom(block0), None); - assert_eq!(dt.idom(block1), Some(jmp21)); - assert_eq!(dt.idom(block2), Some(jmp02)); - - assert!(dt.dominates(block0, block0, &cur.func.layout)); - assert!(dt.dominates(block0, jmp02, &cur.func.layout)); - assert!(dt.dominates(block0, block1, &cur.func.layout)); - assert!(dt.dominates(block0, trap, &cur.func.layout)); - assert!(dt.dominates(block0, block2, &cur.func.layout)); - assert!(dt.dominates(block0, jmp21, &cur.func.layout)); - - assert!(!dt.dominates(jmp02, block0, &cur.func.layout)); - assert!(dt.dominates(jmp02, jmp02, &cur.func.layout)); - assert!(dt.dominates(jmp02, block1, &cur.func.layout)); - assert!(dt.dominates(jmp02, trap, &cur.func.layout)); - assert!(dt.dominates(jmp02, block2, &cur.func.layout)); - assert!(dt.dominates(jmp02, jmp21, &cur.func.layout)); - - assert!(!dt.dominates(block1, block0, &cur.func.layout)); - assert!(!dt.dominates(block1, jmp02, &cur.func.layout)); - assert!(dt.dominates(block1, block1, &cur.func.layout)); - assert!(dt.dominates(block1, trap, &cur.func.layout)); - assert!(!dt.dominates(block1, block2, &cur.func.layout)); - assert!(!dt.dominates(block1, jmp21, &cur.func.layout)); - - assert!(!dt.dominates(trap, block0, &cur.func.layout)); - assert!(!dt.dominates(trap, jmp02, &cur.func.layout)); - assert!(!dt.dominates(trap, block1, &cur.func.layout)); - assert!(dt.dominates(trap, trap, &cur.func.layout)); - assert!(!dt.dominates(trap, block2, &cur.func.layout)); - assert!(!dt.dominates(trap, jmp21, 
&cur.func.layout)); - - assert!(!dt.dominates(block2, block0, &cur.func.layout)); - assert!(!dt.dominates(block2, jmp02, &cur.func.layout)); - assert!(dt.dominates(block2, block1, &cur.func.layout)); - assert!(dt.dominates(block2, trap, &cur.func.layout)); - assert!(dt.dominates(block2, block2, &cur.func.layout)); - assert!(dt.dominates(block2, jmp21, &cur.func.layout)); - - assert!(!dt.dominates(jmp21, block0, &cur.func.layout)); - assert!(!dt.dominates(jmp21, jmp02, &cur.func.layout)); - assert!(dt.dominates(jmp21, block1, &cur.func.layout)); - assert!(dt.dominates(jmp21, trap, &cur.func.layout)); - assert!(!dt.dominates(jmp21, block2, &cur.func.layout)); - assert!(dt.dominates(jmp21, jmp21, &cur.func.layout)); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/flowgraph.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/flowgraph.rs deleted file mode 100644 index 9c6ccbaea..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/flowgraph.rs +++ /dev/null @@ -1,350 +0,0 @@ -//! A control flow graph represented as mappings of basic blocks to their predecessors -//! and successors. -//! -//! Successors are represented as basic blocks while predecessors are represented by basic -//! blocks. Basic blocks are denoted by tuples of block and branch/jump instructions. Each -//! predecessor tuple corresponds to the end of a basic block. -//! -//! ```c -//! Block0: -//! ... ; beginning of basic block -//! -//! ... -//! -//! brz vx, Block1 ; end of basic block -//! -//! ... ; beginning of basic block -//! -//! ... -//! -//! jmp Block2 ; end of basic block -//! ``` -//! -//! Here `Block1` and `Block2` would each have a single predecessor denoted as `(Block0, brz)` -//! and `(Block0, jmp Block2)` respectively. - -use crate::bforest; -use crate::entity::SecondaryMap; -use crate::ir::instructions::BranchInfo; -use crate::ir::{Block, Function, Inst}; -use crate::timing; -use core::mem; - -/// A basic block denoted by its enclosing Block and last instruction. -#[derive(Debug, PartialEq, Eq)] -pub struct BlockPredecessor { - /// Enclosing Block key. - pub block: Block, - /// Last instruction in the basic block. - pub inst: Inst, -} - -impl BlockPredecessor { - /// Convenient method to construct new BlockPredecessor. - pub fn new(block: Block, inst: Inst) -> Self { - Self { block, inst } - } -} - -/// A container for the successors and predecessors of some Block. -#[derive(Clone, Default)] -struct CFGNode { - /// Instructions that can branch or jump to this block. - /// - /// This maps branch instruction -> predecessor block which is redundant since the block containing - /// the branch instruction is available from the `layout.inst_block()` method. We store the - /// redundant information because: - /// - /// 1. Many `pred_iter()` consumers want the block anyway, so it is handily available. - /// 2. The `invalidate_block_successors()` may be called *after* branches have been removed from - /// their block, but we still need to remove them form the old block predecessor map. - /// - /// The redundant block stored here is always consistent with the CFG successor lists, even after - /// the IR has been edited. - pub predecessors: bforest::Map, - - /// Set of blocks that are the targets of branches and jumps in this block. - /// The set is ordered by block number, indicated by the `()` comparator type. 
- pub successors: bforest::Set, -} - -/// The Control Flow Graph maintains a mapping of blocks to their predecessors -/// and successors where predecessors are basic blocks and successors are -/// basic blocks. -pub struct ControlFlowGraph { - data: SecondaryMap, - pred_forest: bforest::MapForest, - succ_forest: bforest::SetForest, - valid: bool, -} - -impl ControlFlowGraph { - /// Allocate a new blank control flow graph. - pub fn new() -> Self { - Self { - data: SecondaryMap::new(), - valid: false, - pred_forest: bforest::MapForest::new(), - succ_forest: bforest::SetForest::new(), - } - } - - /// Clear all data structures in this control flow graph. - pub fn clear(&mut self) { - self.data.clear(); - self.pred_forest.clear(); - self.succ_forest.clear(); - self.valid = false; - } - - /// Allocate and compute the control flow graph for `func`. - pub fn with_function(func: &Function) -> Self { - let mut cfg = Self::new(); - cfg.compute(func); - cfg - } - - /// Compute the control flow graph of `func`. - /// - /// This will clear and overwrite any information already stored in this data structure. - pub fn compute(&mut self, func: &Function) { - let _tt = timing::flowgraph(); - self.clear(); - self.data.resize(func.dfg.num_blocks()); - - for block in &func.layout { - self.compute_block(func, block); - } - - self.valid = true; - } - - fn compute_block(&mut self, func: &Function, block: Block) { - for inst in func.layout.block_likely_branches(block) { - match func.dfg.analyze_branch(inst) { - BranchInfo::SingleDest(dest, _) => { - self.add_edge(block, inst, dest); - } - BranchInfo::Table(jt, dest) => { - if let Some(dest) = dest { - self.add_edge(block, inst, dest); - } - for dest in func.jump_tables[jt].iter() { - self.add_edge(block, inst, *dest); - } - } - BranchInfo::NotABranch => {} - } - } - } - - fn invalidate_block_successors(&mut self, block: Block) { - // Temporarily take ownership because we need mutable access to self.data inside the loop. - // Unfortunately borrowck cannot see that our mut accesses to predecessors don't alias - // our iteration over successors. - let mut successors = mem::replace(&mut self.data[block].successors, Default::default()); - for succ in successors.iter(&self.succ_forest) { - self.data[succ] - .predecessors - .retain(&mut self.pred_forest, |_, &mut e| e != block); - } - successors.clear(&mut self.succ_forest); - } - - /// Recompute the control flow graph of `block`. - /// - /// This is for use after modifying instructions within a specific block. It recomputes all edges - /// from `block` while leaving edges to `block` intact. Its functionality a subset of that of the - /// more expensive `compute`, and should be used when we know we don't need to recompute the CFG - /// from scratch, but rather that our changes have been restricted to specific blocks. - pub fn recompute_block(&mut self, func: &Function, block: Block) { - debug_assert!(self.is_valid()); - self.invalidate_block_successors(block); - self.compute_block(func, block); - } - - fn add_edge(&mut self, from: Block, from_inst: Inst, to: Block) { - self.data[from] - .successors - .insert(to, &mut self.succ_forest, &()); - self.data[to] - .predecessors - .insert(from_inst, from, &mut self.pred_forest, &()); - } - - /// Get an iterator over the CFG predecessors to `block`. - pub fn pred_iter(&self, block: Block) -> PredIter { - PredIter(self.data[block].predecessors.iter(&self.pred_forest)) - } - - /// Get an iterator over the CFG successors to `block`. 
- pub fn succ_iter(&self, block: Block) -> SuccIter { - debug_assert!(self.is_valid()); - self.data[block].successors.iter(&self.succ_forest) - } - - /// Check if the CFG is in a valid state. - /// - /// Note that this doesn't perform any kind of validity checks. It simply checks if the - /// `compute()` method has been called since the last `clear()`. It does not check that the - /// CFG is consistent with the function. - pub fn is_valid(&self) -> bool { - self.valid - } -} - -/// An iterator over block predecessors. The iterator type is `BlockPredecessor`. -/// -/// Each predecessor is an instruction that branches to the block. -pub struct PredIter<'a>(bforest::MapIter<'a, Inst, Block>); - -impl<'a> Iterator for PredIter<'a> { - type Item = BlockPredecessor; - - fn next(&mut self) -> Option { - self.0.next().map(|(i, e)| BlockPredecessor::new(e, i)) - } -} - -/// An iterator over block successors. The iterator type is `Block`. -pub type SuccIter<'a> = bforest::SetIter<'a, Block>; - -#[cfg(test)] -mod tests { - use super::*; - use crate::cursor::{Cursor, FuncCursor}; - use crate::ir::{types, Function, InstBuilder}; - use alloc::vec::Vec; - - #[test] - fn empty() { - let func = Function::new(); - ControlFlowGraph::with_function(&func); - } - - #[test] - fn no_predecessors() { - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let block1 = func.dfg.make_block(); - let block2 = func.dfg.make_block(); - func.layout.append_block(block0); - func.layout.append_block(block1); - func.layout.append_block(block2); - - let cfg = ControlFlowGraph::with_function(&func); - - let mut fun_blocks = func.layout.blocks(); - for block in func.layout.blocks() { - assert_eq!(block, fun_blocks.next().unwrap()); - assert_eq!(cfg.pred_iter(block).count(), 0); - assert_eq!(cfg.succ_iter(block).count(), 0); - } - } - - #[test] - fn branches_and_jumps() { - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let cond = func.dfg.append_block_param(block0, types::I32); - let block1 = func.dfg.make_block(); - let block2 = func.dfg.make_block(); - - let br_block0_block2; - let br_block1_block1; - let jmp_block0_block1; - let jmp_block1_block2; - - { - let mut cur = FuncCursor::new(&mut func); - - cur.insert_block(block0); - br_block0_block2 = cur.ins().brnz(cond, block2, &[]); - jmp_block0_block1 = cur.ins().jump(block1, &[]); - - cur.insert_block(block1); - br_block1_block1 = cur.ins().brnz(cond, block1, &[]); - jmp_block1_block2 = cur.ins().jump(block2, &[]); - - cur.insert_block(block2); - } - - let mut cfg = ControlFlowGraph::with_function(&func); - - { - let block0_predecessors = cfg.pred_iter(block0).collect::>(); - let block1_predecessors = cfg.pred_iter(block1).collect::>(); - let block2_predecessors = cfg.pred_iter(block2).collect::>(); - - let block0_successors = cfg.succ_iter(block0).collect::>(); - let block1_successors = cfg.succ_iter(block1).collect::>(); - let block2_successors = cfg.succ_iter(block2).collect::>(); - - assert_eq!(block0_predecessors.len(), 0); - assert_eq!(block1_predecessors.len(), 2); - assert_eq!(block2_predecessors.len(), 2); - - assert_eq!( - block1_predecessors.contains(&BlockPredecessor::new(block0, jmp_block0_block1)), - true - ); - assert_eq!( - block1_predecessors.contains(&BlockPredecessor::new(block1, br_block1_block1)), - true - ); - assert_eq!( - block2_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block2)), - true - ); - assert_eq!( - block2_predecessors.contains(&BlockPredecessor::new(block1, jmp_block1_block2)), - 
true - ); - - assert_eq!(block0_successors, [block1, block2]); - assert_eq!(block1_successors, [block1, block2]); - assert_eq!(block2_successors, []); - } - - // Change some instructions and recompute block0 - func.dfg.replace(br_block0_block2).brnz(cond, block1, &[]); - func.dfg.replace(jmp_block0_block1).return_(&[]); - cfg.recompute_block(&mut func, block0); - let br_block0_block1 = br_block0_block2; - - { - let block0_predecessors = cfg.pred_iter(block0).collect::>(); - let block1_predecessors = cfg.pred_iter(block1).collect::>(); - let block2_predecessors = cfg.pred_iter(block2).collect::>(); - - let block0_successors = cfg.succ_iter(block0); - let block1_successors = cfg.succ_iter(block1); - let block2_successors = cfg.succ_iter(block2); - - assert_eq!(block0_predecessors.len(), 0); - assert_eq!(block1_predecessors.len(), 2); - assert_eq!(block2_predecessors.len(), 1); - - assert_eq!( - block1_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block1)), - true - ); - assert_eq!( - block1_predecessors.contains(&BlockPredecessor::new(block1, br_block1_block1)), - true - ); - assert_eq!( - block2_predecessors.contains(&BlockPredecessor::new(block0, br_block0_block2)), - false - ); - assert_eq!( - block2_predecessors.contains(&BlockPredecessor::new(block1, jmp_block1_block2)), - true - ); - - assert_eq!(block0_successors.collect::>(), [block1]); - assert_eq!(block1_successors.collect::>(), [block1, block2]); - assert_eq!(block2_successors.collect::>(), []); - } - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/fx.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/fx.rs deleted file mode 100644 index 36eb62df9..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/fx.rs +++ /dev/null @@ -1,111 +0,0 @@ -// This file is taken from the Rust compiler: src/librustc_data_structures/fx.rs - -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::{HashMap, HashSet}; -use core::default::Default; -use core::hash::{BuildHasherDefault, Hash, Hasher}; -use core::ops::BitXor; - -pub type FxHashMap = HashMap>; -pub type FxHashSet = HashSet>; - -#[allow(non_snake_case)] -pub fn FxHashMap() -> FxHashMap { - HashMap::default() -} - -#[allow(non_snake_case)] -pub fn FxHashSet() -> FxHashSet { - HashSet::default() -} - -/// A speedy hash algorithm for use within rustc. The hashmap in liballoc -/// by default uses SipHash which isn't quite as speedy as we want. In the -/// compiler we're not really worried about DOS attempts, so we use a fast -/// non-cryptographic hash. -/// -/// This is the same as the algorithm used by Firefox -- which is a homespun -/// one not based on any widely-known algorithm -- though modified to produce -/// 64-bit hash values instead of 32-bit hash values. It consistently -/// out-performs an FNV-based hash within rustc itself -- the collision rate is -/// similar or slightly worse than FNV, but the speed of the hash function -/// itself is much higher because it works on up to 8 bytes at a time. 
-pub struct FxHasher { - hash: usize, -} - -#[cfg(target_pointer_width = "32")] -const K: usize = 0x9e3779b9; -#[cfg(target_pointer_width = "64")] -const K: usize = 0x517cc1b727220a95; - -impl Default for FxHasher { - #[inline] - fn default() -> Self { - Self { hash: 0 } - } -} - -impl FxHasher { - #[inline] - fn add_to_hash(&mut self, i: usize) { - self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K); - } -} - -impl Hasher for FxHasher { - #[inline] - fn write(&mut self, bytes: &[u8]) { - for byte in bytes { - let i = *byte; - self.add_to_hash(i as usize); - } - } - - #[inline] - fn write_u8(&mut self, i: u8) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_u16(&mut self, i: u16) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_u32(&mut self, i: u32) { - self.add_to_hash(i as usize); - } - - #[cfg(target_pointer_width = "32")] - #[inline] - fn write_u64(&mut self, i: u64) { - self.add_to_hash(i as usize); - self.add_to_hash((i >> 32) as usize); - } - - #[cfg(target_pointer_width = "64")] - #[inline] - fn write_u64(&mut self, i: u64) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_usize(&mut self, i: usize) { - self.add_to_hash(i); - } - - #[inline] - fn finish(&self) -> u64 { - self.hash as u64 - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/inst_predicates.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/inst_predicates.rs deleted file mode 100644 index 518487af2..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/inst_predicates.rs +++ /dev/null @@ -1,80 +0,0 @@ -//! Instruction predicates/properties, shared by various analyses. - -use crate::ir::{DataFlowGraph, Function, Inst, InstructionData, Opcode}; -use crate::machinst::ty_bits; -use cranelift_entity::EntityRef; - -/// Preserve instructions with used result values. -pub fn any_inst_results_used(inst: Inst, live: &[bool], dfg: &DataFlowGraph) -> bool { - dfg.inst_results(inst).iter().any(|v| live[v.index()]) -} - -/// Test whether the given opcode is unsafe to even consider as side-effect-free. -fn trivially_has_side_effects(opcode: Opcode) -> bool { - opcode.is_call() - || opcode.is_branch() - || opcode.is_terminator() - || opcode.is_return() - || opcode.can_trap() - || opcode.other_side_effects() - || opcode.can_store() -} - -/// Load instructions without the `notrap` flag are defined to trap when -/// operating on inaccessible memory, so we can't treat them as side-effect-free even if the loaded -/// value is unused. -fn is_load_with_defined_trapping(opcode: Opcode, data: &InstructionData) -> bool { - if !opcode.can_load() { - return false; - } - match *data { - InstructionData::StackLoad { .. } => false, - InstructionData::Load { flags, .. } => !flags.notrap(), - _ => true, - } -} - -/// Does the given instruction have any side-effect that would preclude it from being removed when -/// its value is unused? -pub fn has_side_effect(func: &Function, inst: Inst) -> bool { - let data = &func.dfg[inst]; - let opcode = data.opcode(); - trivially_has_side_effects(opcode) || is_load_with_defined_trapping(opcode, data) -} - -/// Does the given instruction have any side-effect as per [has_side_effect], or else is a load, -/// but not the get_pinned_reg opcode? 
-pub fn has_lowering_side_effect(func: &Function, inst: Inst) -> bool { - let op = func.dfg[inst].opcode(); - op != Opcode::GetPinnedReg && (has_side_effect(func, inst) || op.can_load()) -} - -/// Is the given instruction a constant value (`iconst`, `fconst`, `bconst`) that can be -/// represented in 64 bits? -pub fn is_constant_64bit(func: &Function, inst: Inst) -> Option { - let data = &func.dfg[inst]; - if data.opcode() == Opcode::Null { - return Some(0); - } - match data { - &InstructionData::UnaryImm { imm, .. } => Some(imm.bits() as u64), - &InstructionData::UnaryIeee32 { imm, .. } => Some(imm.bits() as u64), - &InstructionData::UnaryIeee64 { imm, .. } => Some(imm.bits()), - &InstructionData::UnaryBool { imm, .. } => { - let imm = if imm { - let bits = ty_bits(func.dfg.value_type(func.dfg.inst_results(inst)[0])); - - if bits < 64 { - (1u64 << bits) - 1 - } else { - u64::MAX - } - } else { - 0 - }; - - Some(imm) - } - _ => None, - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/atomic_rmw_op.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/atomic_rmw_op.rs deleted file mode 100644 index e9873e55d..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/atomic_rmw_op.rs +++ /dev/null @@ -1,72 +0,0 @@ -/// Describes the arithmetic operation in an atomic memory read-modify-write operation. -use core::fmt::{self, Display, Formatter}; -use core::str::FromStr; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -/// Describes the arithmetic operation in an atomic memory read-modify-write operation. -pub enum AtomicRmwOp { - /// Add - Add, - /// Sub - Sub, - /// And - And, - /// Nand - Nand, - /// Or - Or, - /// Xor - Xor, - /// Exchange - Xchg, - /// Unsigned min - Umin, - /// Unsigned max - Umax, - /// Signed min - Smin, - /// Signed max - Smax, -} - -impl Display for AtomicRmwOp { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let s = match self { - AtomicRmwOp::Add => "add", - AtomicRmwOp::Sub => "sub", - AtomicRmwOp::And => "and", - AtomicRmwOp::Nand => "nand", - AtomicRmwOp::Or => "or", - AtomicRmwOp::Xor => "xor", - AtomicRmwOp::Xchg => "xchg", - AtomicRmwOp::Umin => "umin", - AtomicRmwOp::Umax => "umax", - AtomicRmwOp::Smin => "smin", - AtomicRmwOp::Smax => "smax", - }; - f.write_str(s) - } -} - -impl FromStr for AtomicRmwOp { - type Err = (); - fn from_str(s: &str) -> Result { - match s { - "add" => Ok(AtomicRmwOp::Add), - "sub" => Ok(AtomicRmwOp::Sub), - "and" => Ok(AtomicRmwOp::And), - "nand" => Ok(AtomicRmwOp::Nand), - "or" => Ok(AtomicRmwOp::Or), - "xor" => Ok(AtomicRmwOp::Xor), - "xchg" => Ok(AtomicRmwOp::Xchg), - "umin" => Ok(AtomicRmwOp::Umin), - "umax" => Ok(AtomicRmwOp::Umax), - "smin" => Ok(AtomicRmwOp::Smin), - "smax" => Ok(AtomicRmwOp::Smax), - _ => Err(()), - } - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/builder.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/builder.rs deleted file mode 100644 index 3191f9dae..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/builder.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! Cranelift instruction builder. -//! -//! A `Builder` provides a convenient interface for inserting instructions into a Cranelift -//! function. Many of its methods are generated from the meta language instruction definitions. 
- -use crate::ir; -use crate::ir::types; -use crate::ir::{DataFlowGraph, InstructionData}; -use crate::ir::{Inst, Opcode, Type, Value}; - -/// Base trait for instruction builders. -/// -/// The `InstBuilderBase` trait provides the basic functionality required by the methods of the -/// generated `InstBuilder` trait. These methods should not normally be used directly. Use the -/// methods in the `InstBuilder` trait instead. -/// -/// Any data type that implements `InstBuilderBase` also gets all the methods of the `InstBuilder` -/// trait. -pub trait InstBuilderBase<'f>: Sized { - /// Get an immutable reference to the data flow graph that will hold the constructed - /// instructions. - fn data_flow_graph(&self) -> &DataFlowGraph; - /// Get a mutable reference to the data flow graph that will hold the constructed - /// instructions. - fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph; - - /// Insert an instruction and return a reference to it, consuming the builder. - /// - /// The result types may depend on a controlling type variable. For non-polymorphic - /// instructions with multiple results, pass `INVALID` for the `ctrl_typevar` argument. - fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph); -} - -// Include trait code generated by `cranelift-codegen/meta/src/gen_inst.rs`. -// -// This file defines the `InstBuilder` trait as an extension of `InstBuilderBase` with methods per -// instruction format and per opcode. -include!(concat!(env!("OUT_DIR"), "/inst_builder.rs")); - -/// Any type implementing `InstBuilderBase` gets all the `InstBuilder` methods for free. -impl<'f, T: InstBuilderBase<'f>> InstBuilder<'f> for T {} - -/// Base trait for instruction inserters. -/// -/// This is an alternative base trait for an instruction builder to implement. -/// -/// An instruction inserter can be adapted into an instruction builder by wrapping it in an -/// `InsertBuilder`. This provides some common functionality for instruction builders that insert -/// new instructions, as opposed to the `ReplaceBuilder` which overwrites existing instructions. -pub trait InstInserterBase<'f>: Sized { - /// Get an immutable reference to the data flow graph. - fn data_flow_graph(&self) -> &DataFlowGraph; - - /// Get a mutable reference to the data flow graph. - fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph; - - /// Insert a new instruction which belongs to the DFG. - fn insert_built_inst(self, inst: Inst) -> &'f mut DataFlowGraph; -} - -use core::marker::PhantomData; - -/// Builder that inserts an instruction at the current position. -/// -/// An `InsertBuilder` is a wrapper for an `InstInserterBase` that turns it into an instruction -/// builder with some additional facilities for creating instructions that reuse existing values as -/// their results. -pub struct InsertBuilder<'f, IIB: InstInserterBase<'f>> { - inserter: IIB, - unused: PhantomData<&'f u32>, -} - -impl<'f, IIB: InstInserterBase<'f>> InsertBuilder<'f, IIB> { - /// Create a new builder which inserts instructions at `pos`. - /// The `dfg` and `pos.layout` references should be from the same `Function`. - pub fn new(inserter: IIB) -> Self { - Self { - inserter, - unused: PhantomData, - } - } - - /// Reuse result values in `reuse`. - /// - /// Convert this builder into one that will reuse the provided result values instead of - /// allocating new ones. The provided values for reuse must not be attached to anything. Any - /// missing result values will be allocated as normal. 
- /// - /// The `reuse` argument is expected to be an array of `Option`. - pub fn with_results(self, reuse: Array) -> InsertReuseBuilder<'f, IIB, Array> - where - Array: AsRef<[Option]>, - { - InsertReuseBuilder { - inserter: self.inserter, - reuse, - unused: PhantomData, - } - } - - /// Reuse a single result value. - /// - /// Convert this into a builder that will reuse `v` as the single result value. The reused - /// result value `v` must not be attached to anything. - /// - /// This method should only be used when building an instruction with exactly one result. Use - /// `with_results()` for the more general case. - pub fn with_result(self, v: Value) -> InsertReuseBuilder<'f, IIB, [Option; 1]> { - // TODO: Specialize this to return a different builder that just attaches `v` instead of - // calling `make_inst_results_reusing()`. - self.with_results([Some(v)]) - } -} - -impl<'f, IIB: InstInserterBase<'f>> InstBuilderBase<'f> for InsertBuilder<'f, IIB> { - fn data_flow_graph(&self) -> &DataFlowGraph { - self.inserter.data_flow_graph() - } - - fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph { - self.inserter.data_flow_graph_mut() - } - - fn build(mut self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) { - let inst; - { - let dfg = self.inserter.data_flow_graph_mut(); - inst = dfg.make_inst(data); - dfg.make_inst_results(inst, ctrl_typevar); - } - (inst, self.inserter.insert_built_inst(inst)) - } -} - -/// Builder that inserts a new instruction like `InsertBuilder`, but reusing result values. -pub struct InsertReuseBuilder<'f, IIB, Array> -where - IIB: InstInserterBase<'f>, - Array: AsRef<[Option]>, -{ - inserter: IIB, - reuse: Array, - unused: PhantomData<&'f u32>, -} - -impl<'f, IIB, Array> InstBuilderBase<'f> for InsertReuseBuilder<'f, IIB, Array> -where - IIB: InstInserterBase<'f>, - Array: AsRef<[Option]>, -{ - fn data_flow_graph(&self) -> &DataFlowGraph { - self.inserter.data_flow_graph() - } - - fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph { - self.inserter.data_flow_graph_mut() - } - - fn build(mut self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) { - let inst; - { - let dfg = self.inserter.data_flow_graph_mut(); - inst = dfg.make_inst(data); - // Make an `Iterator>`. - let ru = self.reuse.as_ref().iter().cloned(); - dfg.make_inst_results_reusing(inst, ctrl_typevar, ru); - } - (inst, self.inserter.insert_built_inst(inst)) - } -} - -/// Instruction builder that replaces an existing instruction. -/// -/// The inserted instruction will have the same `Inst` number as the old one. -/// -/// If the old instruction still has result values attached, it is assumed that the new instruction -/// produces the same number and types of results. The old result values are preserved. If the -/// replacement instruction format does not support multiple results, the builder panics. It is a -/// bug to leave result values dangling. -pub struct ReplaceBuilder<'f> { - dfg: &'f mut DataFlowGraph, - inst: Inst, -} - -impl<'f> ReplaceBuilder<'f> { - /// Create a `ReplaceBuilder` that will overwrite `inst`. 
- pub fn new(dfg: &'f mut DataFlowGraph, inst: Inst) -> Self { - Self { dfg, inst } - } -} - -impl<'f> InstBuilderBase<'f> for ReplaceBuilder<'f> { - fn data_flow_graph(&self) -> &DataFlowGraph { - self.dfg - } - - fn data_flow_graph_mut(&mut self) -> &mut DataFlowGraph { - self.dfg - } - - fn build(self, data: InstructionData, ctrl_typevar: Type) -> (Inst, &'f mut DataFlowGraph) { - // Splat the new instruction on top of the old one. - self.dfg[self.inst] = data; - - if !self.dfg.has_results(self.inst) { - // The old result values were either detached or non-existent. - // Construct new ones. - self.dfg.make_inst_results(self.inst, ctrl_typevar); - } - - (self.inst, self.dfg) - } -} - -#[cfg(test)] -mod tests { - use crate::cursor::{Cursor, FuncCursor}; - use crate::ir::condcodes::*; - use crate::ir::types::*; - use crate::ir::{Function, InstBuilder, ValueDef}; - - #[test] - fn types() { - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let arg0 = func.dfg.append_block_param(block0, I32); - let mut pos = FuncCursor::new(&mut func); - pos.insert_block(block0); - - // Explicit types. - let v0 = pos.ins().iconst(I32, 3); - assert_eq!(pos.func.dfg.value_type(v0), I32); - - // Inferred from inputs. - let v1 = pos.ins().iadd(arg0, v0); - assert_eq!(pos.func.dfg.value_type(v1), I32); - - // Formula. - let cmp = pos.ins().icmp(IntCC::Equal, arg0, v0); - assert_eq!(pos.func.dfg.value_type(cmp), B1); - } - - #[test] - fn reuse_results() { - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let arg0 = func.dfg.append_block_param(block0, I32); - let mut pos = FuncCursor::new(&mut func); - pos.insert_block(block0); - - let v0 = pos.ins().iadd_imm(arg0, 17); - assert_eq!(pos.func.dfg.value_type(v0), I32); - let iadd = pos.prev_inst().unwrap(); - assert_eq!(pos.func.dfg.value_def(v0), ValueDef::Result(iadd, 0)); - - // Detach v0 and reuse it for a different instruction. - pos.func.dfg.clear_results(iadd); - let v0b = pos.ins().with_result(v0).iconst(I32, 3); - assert_eq!(v0, v0b); - assert_eq!(pos.current_inst(), Some(iadd)); - let iconst = pos.prev_inst().unwrap(); - assert!(iadd != iconst); - assert_eq!(pos.func.dfg.value_def(v0), ValueDef::Result(iconst, 0)); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/condcodes.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/condcodes.rs deleted file mode 100644 index 00e9717ca..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/condcodes.rs +++ /dev/null @@ -1,410 +0,0 @@ -//! Condition codes for the Cranelift code generator. -//! -//! A condition code here is an enumerated type that determined how to compare two numbers. There -//! are different rules for comparing integers and floating point numbers, so they use different -//! condition codes. - -use core::fmt::{self, Display, Formatter}; -use core::str::FromStr; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Common traits of condition codes. -pub trait CondCode: Copy { - /// Get the inverse condition code of `self`. - /// - /// The inverse condition code produces the opposite result for all comparisons. - /// That is, `cmp CC, x, y` is true if and only if `cmp CC.inverse(), x, y` is false. - #[must_use] - fn inverse(self) -> Self; - - /// Get the reversed condition code for `self`. - /// - /// The reversed condition code produces the same result as swapping `x` and `y` in the - /// comparison. 
That is, `cmp CC, x, y` is the same as `cmp CC.reverse(), y, x`. - #[must_use] - fn reverse(self) -> Self; -} - -/// Condition code for comparing integers. -/// -/// This condition code is used by the `icmp` instruction to compare integer values. There are -/// separate codes for comparing the integers as signed or unsigned numbers where it makes a -/// difference. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum IntCC { - /// `==`. - Equal, - /// `!=`. - NotEqual, - /// Signed `<`. - SignedLessThan, - /// Signed `>=`. - SignedGreaterThanOrEqual, - /// Signed `>`. - SignedGreaterThan, - /// Signed `<=`. - SignedLessThanOrEqual, - /// Unsigned `<`. - UnsignedLessThan, - /// Unsigned `>=`. - UnsignedGreaterThanOrEqual, - /// Unsigned `>`. - UnsignedGreaterThan, - /// Unsigned `<=`. - UnsignedLessThanOrEqual, - /// Signed Overflow. - Overflow, - /// Signed No Overflow. - NotOverflow, -} - -impl CondCode for IntCC { - fn inverse(self) -> Self { - use self::IntCC::*; - match self { - Equal => NotEqual, - NotEqual => Equal, - SignedLessThan => SignedGreaterThanOrEqual, - SignedGreaterThanOrEqual => SignedLessThan, - SignedGreaterThan => SignedLessThanOrEqual, - SignedLessThanOrEqual => SignedGreaterThan, - UnsignedLessThan => UnsignedGreaterThanOrEqual, - UnsignedGreaterThanOrEqual => UnsignedLessThan, - UnsignedGreaterThan => UnsignedLessThanOrEqual, - UnsignedLessThanOrEqual => UnsignedGreaterThan, - Overflow => NotOverflow, - NotOverflow => Overflow, - } - } - - fn reverse(self) -> Self { - use self::IntCC::*; - match self { - Equal => Equal, - NotEqual => NotEqual, - SignedGreaterThan => SignedLessThan, - SignedGreaterThanOrEqual => SignedLessThanOrEqual, - SignedLessThan => SignedGreaterThan, - SignedLessThanOrEqual => SignedGreaterThanOrEqual, - UnsignedGreaterThan => UnsignedLessThan, - UnsignedGreaterThanOrEqual => UnsignedLessThanOrEqual, - UnsignedLessThan => UnsignedGreaterThan, - UnsignedLessThanOrEqual => UnsignedGreaterThanOrEqual, - Overflow => Overflow, - NotOverflow => NotOverflow, - } - } -} - -impl IntCC { - /// Get the corresponding IntCC with the equal component removed. - /// For conditions without a zero component, this is a no-op. - pub fn without_equal(self) -> Self { - use self::IntCC::*; - match self { - SignedGreaterThan | SignedGreaterThanOrEqual => SignedGreaterThan, - SignedLessThan | SignedLessThanOrEqual => SignedLessThan, - UnsignedGreaterThan | UnsignedGreaterThanOrEqual => UnsignedGreaterThan, - UnsignedLessThan | UnsignedLessThanOrEqual => UnsignedLessThan, - _ => self, - } - } - - /// Get the corresponding IntCC with the signed component removed. - /// For conditions without a signed component, this is a no-op. - pub fn unsigned(self) -> Self { - use self::IntCC::*; - match self { - SignedGreaterThan | UnsignedGreaterThan => UnsignedGreaterThan, - SignedGreaterThanOrEqual | UnsignedGreaterThanOrEqual => UnsignedGreaterThanOrEqual, - SignedLessThan | UnsignedLessThan => UnsignedLessThan, - SignedLessThanOrEqual | UnsignedLessThanOrEqual => UnsignedLessThanOrEqual, - _ => self, - } - } - - /// Get the corresponding string condition code for the IntCC object. 
- pub fn to_static_str(self) -> &'static str { - use self::IntCC::*; - match self { - Equal => "eq", - NotEqual => "ne", - SignedGreaterThan => "sgt", - SignedGreaterThanOrEqual => "sge", - SignedLessThan => "slt", - SignedLessThanOrEqual => "sle", - UnsignedGreaterThan => "ugt", - UnsignedGreaterThanOrEqual => "uge", - UnsignedLessThan => "ult", - UnsignedLessThanOrEqual => "ule", - Overflow => "of", - NotOverflow => "nof", - } - } -} - -impl Display for IntCC { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - f.write_str(self.to_static_str()) - } -} - -impl FromStr for IntCC { - type Err = (); - - fn from_str(s: &str) -> Result { - use self::IntCC::*; - match s { - "eq" => Ok(Equal), - "ne" => Ok(NotEqual), - "sge" => Ok(SignedGreaterThanOrEqual), - "sgt" => Ok(SignedGreaterThan), - "sle" => Ok(SignedLessThanOrEqual), - "slt" => Ok(SignedLessThan), - "uge" => Ok(UnsignedGreaterThanOrEqual), - "ugt" => Ok(UnsignedGreaterThan), - "ule" => Ok(UnsignedLessThanOrEqual), - "ult" => Ok(UnsignedLessThan), - "of" => Ok(Overflow), - "nof" => Ok(NotOverflow), - _ => Err(()), - } - } -} - -/// Condition code for comparing floating point numbers. -/// -/// This condition code is used by the `fcmp` instruction to compare floating point values. Two -/// IEEE floating point values relate in exactly one of four ways: -/// -/// 1. `UN` - unordered when either value is NaN. -/// 2. `EQ` - equal numerical value. -/// 3. `LT` - `x` is less than `y`. -/// 4. `GT` - `x` is greater than `y`. -/// -/// Note that `0.0` and `-0.0` relate as `EQ` because they both represent the number 0. -/// -/// The condition codes described here are used to produce a single boolean value from the -/// comparison. The 14 condition codes here cover every possible combination of the relation above -/// except the impossible `!UN & !EQ & !LT & !GT` and the always true `UN | EQ | LT | GT`. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum FloatCC { - /// EQ | LT | GT - Ordered, - /// UN - Unordered, - - /// EQ - Equal, - /// The C '!=' operator is the inverse of '==': `NotEqual`. 
- /// UN | LT | GT - NotEqual, - /// LT | GT - OrderedNotEqual, - /// UN | EQ - UnorderedOrEqual, - - /// LT - LessThan, - /// LT | EQ - LessThanOrEqual, - /// GT - GreaterThan, - /// GT | EQ - GreaterThanOrEqual, - - /// UN | LT - UnorderedOrLessThan, - /// UN | LT | EQ - UnorderedOrLessThanOrEqual, - /// UN | GT - UnorderedOrGreaterThan, - /// UN | GT | EQ - UnorderedOrGreaterThanOrEqual, -} - -impl CondCode for FloatCC { - fn inverse(self) -> Self { - use self::FloatCC::*; - match self { - Ordered => Unordered, - Unordered => Ordered, - Equal => NotEqual, - NotEqual => Equal, - OrderedNotEqual => UnorderedOrEqual, - UnorderedOrEqual => OrderedNotEqual, - LessThan => UnorderedOrGreaterThanOrEqual, - LessThanOrEqual => UnorderedOrGreaterThan, - GreaterThan => UnorderedOrLessThanOrEqual, - GreaterThanOrEqual => UnorderedOrLessThan, - UnorderedOrLessThan => GreaterThanOrEqual, - UnorderedOrLessThanOrEqual => GreaterThan, - UnorderedOrGreaterThan => LessThanOrEqual, - UnorderedOrGreaterThanOrEqual => LessThan, - } - } - fn reverse(self) -> Self { - use self::FloatCC::*; - match self { - Ordered => Ordered, - Unordered => Unordered, - Equal => Equal, - NotEqual => NotEqual, - OrderedNotEqual => OrderedNotEqual, - UnorderedOrEqual => UnorderedOrEqual, - LessThan => GreaterThan, - LessThanOrEqual => GreaterThanOrEqual, - GreaterThan => LessThan, - GreaterThanOrEqual => LessThanOrEqual, - UnorderedOrLessThan => UnorderedOrGreaterThan, - UnorderedOrLessThanOrEqual => UnorderedOrGreaterThanOrEqual, - UnorderedOrGreaterThan => UnorderedOrLessThan, - UnorderedOrGreaterThanOrEqual => UnorderedOrLessThanOrEqual, - } - } -} - -impl Display for FloatCC { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - use self::FloatCC::*; - f.write_str(match *self { - Ordered => "ord", - Unordered => "uno", - Equal => "eq", - NotEqual => "ne", - OrderedNotEqual => "one", - UnorderedOrEqual => "ueq", - LessThan => "lt", - LessThanOrEqual => "le", - GreaterThan => "gt", - GreaterThanOrEqual => "ge", - UnorderedOrLessThan => "ult", - UnorderedOrLessThanOrEqual => "ule", - UnorderedOrGreaterThan => "ugt", - UnorderedOrGreaterThanOrEqual => "uge", - }) - } -} - -impl FromStr for FloatCC { - type Err = (); - - fn from_str(s: &str) -> Result { - use self::FloatCC::*; - match s { - "ord" => Ok(Ordered), - "uno" => Ok(Unordered), - "eq" => Ok(Equal), - "ne" => Ok(NotEqual), - "one" => Ok(OrderedNotEqual), - "ueq" => Ok(UnorderedOrEqual), - "lt" => Ok(LessThan), - "le" => Ok(LessThanOrEqual), - "gt" => Ok(GreaterThan), - "ge" => Ok(GreaterThanOrEqual), - "ult" => Ok(UnorderedOrLessThan), - "ule" => Ok(UnorderedOrLessThanOrEqual), - "ugt" => Ok(UnorderedOrGreaterThan), - "uge" => Ok(UnorderedOrGreaterThanOrEqual), - _ => Err(()), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::string::ToString; - - static INT_ALL: [IntCC; 12] = [ - IntCC::Equal, - IntCC::NotEqual, - IntCC::SignedLessThan, - IntCC::SignedGreaterThanOrEqual, - IntCC::SignedGreaterThan, - IntCC::SignedLessThanOrEqual, - IntCC::UnsignedLessThan, - IntCC::UnsignedGreaterThanOrEqual, - IntCC::UnsignedGreaterThan, - IntCC::UnsignedLessThanOrEqual, - IntCC::Overflow, - IntCC::NotOverflow, - ]; - - #[test] - fn int_inverse() { - for r in &INT_ALL { - let cc = *r; - let inv = cc.inverse(); - assert!(cc != inv); - assert_eq!(inv.inverse(), cc); - } - } - - #[test] - fn int_reverse() { - for r in &INT_ALL { - let cc = *r; - let rev = cc.reverse(); - assert_eq!(rev.reverse(), cc); - } - } - - #[test] - fn int_display() { - for r in &INT_ALL { - let cc 
= *r; - assert_eq!(cc.to_string().parse(), Ok(cc)); - } - assert_eq!("bogus".parse::(), Err(())); - } - - static FLOAT_ALL: [FloatCC; 14] = [ - FloatCC::Ordered, - FloatCC::Unordered, - FloatCC::Equal, - FloatCC::NotEqual, - FloatCC::OrderedNotEqual, - FloatCC::UnorderedOrEqual, - FloatCC::LessThan, - FloatCC::LessThanOrEqual, - FloatCC::GreaterThan, - FloatCC::GreaterThanOrEqual, - FloatCC::UnorderedOrLessThan, - FloatCC::UnorderedOrLessThanOrEqual, - FloatCC::UnorderedOrGreaterThan, - FloatCC::UnorderedOrGreaterThanOrEqual, - ]; - - #[test] - fn float_inverse() { - for r in &FLOAT_ALL { - let cc = *r; - let inv = cc.inverse(); - assert!(cc != inv); - assert_eq!(inv.inverse(), cc); - } - } - - #[test] - fn float_reverse() { - for r in &FLOAT_ALL { - let cc = *r; - let rev = cc.reverse(); - assert_eq!(rev.reverse(), cc); - } - } - - #[test] - fn float_display() { - for r in &FLOAT_ALL { - let cc = *r; - assert_eq!(cc.to_string().parse(), Ok(cc)); - } - assert_eq!("bogus".parse::(), Err(())); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/constant.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/constant.rs deleted file mode 100644 index 3cd88d554..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/constant.rs +++ /dev/null @@ -1,460 +0,0 @@ -//! Constants -//! -//! The constant pool defined here allows Cranelift to avoid emitting the same constant multiple -//! times. As constants are inserted in the pool, a handle is returned; the handle is a Cranelift -//! Entity. Inserting the same data multiple times will always return the same handle. -//! -//! Future work could include: -//! - ensuring alignment of constants within the pool, -//! - bucketing constants by size. - -use crate::ir::immediates::{IntoBytes, V128Imm}; -use crate::ir::Constant; -use crate::HashMap; -use alloc::collections::BTreeMap; -use alloc::vec::Vec; -use core::fmt; -use core::iter::FromIterator; -use core::slice::Iter; -use core::str::{from_utf8, FromStr}; -use cranelift_entity::EntityRef; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// This type describes the actual constant data. Note that the bytes stored in this structure are -/// expected to be in little-endian order; this is due to ease-of-use when interacting with -/// WebAssembly values, which are [little-endian by design]. -/// -/// [little-endian by design]: https://github.com/WebAssembly/design/blob/master/Portability.md -#[derive(Clone, Hash, Eq, PartialEq, Debug, Default)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct ConstantData(Vec); - -impl FromIterator for ConstantData { - fn from_iter>(iter: T) -> Self { - let v = iter.into_iter().collect(); - Self(v) - } -} - -impl From> for ConstantData { - fn from(v: Vec) -> Self { - Self(v) - } -} - -impl From<&[u8]> for ConstantData { - fn from(v: &[u8]) -> Self { - Self(v.to_vec()) - } -} - -impl From for ConstantData { - fn from(v: V128Imm) -> Self { - Self(v.to_vec()) - } -} - -impl ConstantData { - /// Return the number of bytes in the constant. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Check if the constant contains any bytes. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Return the data as a slice. - pub fn as_slice(&self) -> &[u8] { - self.0.as_slice() - } - - /// Convert the data to a vector. - pub fn into_vec(self) -> Vec { - self.0 - } - - /// Iterate over the constant's bytes. 
- pub fn iter(&self) -> Iter { - self.0.iter() - } - - /// Add new bytes to the constant data. - pub fn append(mut self, bytes: impl IntoBytes) -> Self { - let mut to_add = bytes.into_bytes(); - self.0.append(&mut to_add); - self - } - - /// Expand the size of the constant data to `expected_size` number of bytes by adding zeroes - /// in the high-order byte slots. - pub fn expand_to(mut self, expected_size: usize) -> Self { - if self.len() > expected_size { - panic!( - "The constant data is already expanded beyond {} bytes", - expected_size - ) - } - self.0.resize(expected_size, 0); - self - } -} - -impl fmt::Display for ConstantData { - /// Print the constant data in hexadecimal format, e.g. 0x000102030405060708090a0b0c0d0e0f. - /// This function will flip the stored order of bytes--little-endian--to the more readable - /// big-endian ordering. - /// - /// ``` - /// use cranelift_codegen::ir::ConstantData; - /// let data = ConstantData::from([3, 2, 1, 0, 0].as_ref()); // note the little-endian order - /// assert_eq!(data.to_string(), "0x0000010203"); - /// ``` - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if !self.is_empty() { - write!(f, "0x")?; - for b in self.0.iter().rev() { - write!(f, "{:02x}", b)?; - } - } - Ok(()) - } -} - -impl FromStr for ConstantData { - type Err = &'static str; - - /// Parse a hexadecimal string to `ConstantData`. This is the inverse of `Display::fmt`. - /// - /// ``` - /// use cranelift_codegen::ir::ConstantData; - /// let c: ConstantData = "0x000102".parse().unwrap(); - /// assert_eq!(c.into_vec(), [2, 1, 0]); - /// ``` - fn from_str(s: &str) -> Result { - if s.len() <= 2 || &s[0..2] != "0x" { - return Err("Expected a hexadecimal string, e.g. 0x1234"); - } - - // clean and check the string - let cleaned: Vec = s[2..] - .as_bytes() - .iter() - .filter(|&&b| b as char != '_') - .cloned() - .collect(); // remove 0x prefix and any intervening _ characters - - if cleaned.is_empty() { - Err("Hexadecimal string must have some digits") - } else if cleaned.len() % 2 != 0 { - Err("Hexadecimal string must have an even number of digits") - } else if cleaned.len() > 32 { - Err("Hexadecimal string has too many digits to fit in a 128-bit vector") - } else { - let mut buffer = Vec::with_capacity((s.len() - 2) / 2); - for i in (0..cleaned.len()).step_by(2) { - let pair = from_utf8(&cleaned[i..i + 2]) - .or_else(|_| Err("Unable to parse hexadecimal pair as UTF-8"))?; - let byte = u8::from_str_radix(pair, 16) - .or_else(|_| Err("Unable to parse as hexadecimal"))?; - buffer.insert(0, byte); - } - Ok(Self(buffer)) - } - } -} - -/// Maintains the mapping between a constant handle (i.e. [`Constant`](crate::ir::Constant)) and -/// its constant data (i.e. [`ConstantData`](crate::ir::ConstantData)). -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct ConstantPool { - /// This mapping maintains the insertion order as long as Constants are created with - /// sequentially increasing integers. - handles_to_values: BTreeMap, - - /// This mapping is unordered (no need for lexicographic ordering) but allows us to map - /// constant data back to handles. - values_to_handles: HashMap, -} - -impl ConstantPool { - /// Create a new constant pool instance. - pub fn new() -> Self { - Self { - handles_to_values: BTreeMap::new(), - values_to_handles: HashMap::new(), - } - } - - /// Empty the constant pool of all data. 
- pub fn clear(&mut self) { - self.handles_to_values.clear(); - self.values_to_handles.clear(); - } - - /// Insert constant data into the pool, returning a handle for later referencing; when constant - /// data is inserted that is a duplicate of previous constant data, the existing handle will be - /// returned. - pub fn insert(&mut self, constant_value: ConstantData) -> Constant { - if self.values_to_handles.contains_key(&constant_value) { - *self.values_to_handles.get(&constant_value).unwrap() - } else { - let constant_handle = Constant::new(self.len()); - self.set(constant_handle, constant_value); - constant_handle - } - } - - /// Retrieve the constant data given a handle. - pub fn get(&self, constant_handle: Constant) -> &ConstantData { - assert!(self.handles_to_values.contains_key(&constant_handle)); - self.handles_to_values.get(&constant_handle).unwrap() - } - - /// Link a constant handle to its value. This does not de-duplicate data but does avoid - /// replacing any existing constant values. use `set` to tie a specific `const42` to its value; - /// use `insert` to add a value and return the next available `const` entity. - pub fn set(&mut self, constant_handle: Constant, constant_value: ConstantData) { - let replaced = self - .handles_to_values - .insert(constant_handle, constant_value.clone()); - assert!( - replaced.is_none(), - "attempted to overwrite an existing constant {:?}: {:?} => {:?}", - constant_handle, - &constant_value, - replaced.unwrap() - ); - self.values_to_handles - .insert(constant_value, constant_handle); - } - - /// Iterate over the constants in insertion order. - pub fn iter(&self) -> impl Iterator { - self.handles_to_values.iter() - } - - /// Iterate over mutable entries in the constant pool in insertion order. - pub fn entries_mut(&mut self) -> impl Iterator { - self.handles_to_values.values_mut() - } - - /// Return the number of constants in the pool. - pub fn len(&self) -> usize { - self.handles_to_values.len() - } - - /// Return the combined size of all of the constant values in the pool. 
- pub fn byte_size(&self) -> usize { - self.values_to_handles.keys().map(|c| c.len()).sum() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::string::ToString; - - #[test] - fn empty() { - let sut = ConstantPool::new(); - assert_eq!(sut.len(), 0); - } - - #[test] - fn insert() { - let mut sut = ConstantPool::new(); - sut.insert(vec![1, 2, 3].into()); - sut.insert(vec![4, 5, 6].into()); - assert_eq!(sut.len(), 2); - } - - #[test] - fn insert_duplicate() { - let mut sut = ConstantPool::new(); - let a = sut.insert(vec![1, 2, 3].into()); - sut.insert(vec![4, 5, 6].into()); - let b = sut.insert(vec![1, 2, 3].into()); - assert_eq!(a, b); - } - - #[test] - fn clear() { - let mut sut = ConstantPool::new(); - sut.insert(vec![1, 2, 3].into()); - assert_eq!(sut.len(), 1); - - sut.clear(); - assert_eq!(sut.len(), 0); - } - - #[test] - fn iteration_order() { - let mut sut = ConstantPool::new(); - sut.insert(vec![1, 2, 3].into()); - sut.insert(vec![4, 5, 6].into()); - sut.insert(vec![1, 2, 3].into()); - let data = sut.iter().map(|(_, v)| v).collect::>(); - assert_eq!(data, vec![&vec![1, 2, 3].into(), &vec![4, 5, 6].into()]); - } - - #[test] - fn get() { - let mut sut = ConstantPool::new(); - let data = vec![1, 2, 3]; - let handle = sut.insert(data.clone().into()); - assert_eq!(sut.get(handle), &data.into()); - } - - #[test] - fn set() { - let mut sut = ConstantPool::new(); - let handle = Constant::with_number(42).unwrap(); - let data = vec![1, 2, 3]; - sut.set(handle, data.clone().into()); - assert_eq!(sut.get(handle), &data.into()); - } - - #[test] - #[should_panic] - fn disallow_overwriting_constant() { - let mut sut = ConstantPool::new(); - let handle = Constant::with_number(42).unwrap(); - sut.set(handle, vec![].into()); - sut.set(handle, vec![1].into()); - } - - #[test] - #[should_panic] - fn get_nonexistent_constant() { - let sut = ConstantPool::new(); - let a = Constant::with_number(42).unwrap(); - sut.get(a); // panics, only use constants returned by ConstantPool - } - - #[test] - fn display_constant_data() { - assert_eq!(ConstantData::from([0].as_ref()).to_string(), "0x00"); - assert_eq!(ConstantData::from([42].as_ref()).to_string(), "0x2a"); - assert_eq!( - ConstantData::from([3, 2, 1, 0].as_ref()).to_string(), - "0x00010203" - ); - assert_eq!( - ConstantData::from(3735928559u32.to_le_bytes().as_ref()).to_string(), - "0xdeadbeef" - ); - assert_eq!( - ConstantData::from(0x0102030405060708u64.to_le_bytes().as_ref()).to_string(), - "0x0102030405060708" - ); - } - - #[test] - fn iterate_over_constant_data() { - let c = ConstantData::from([1, 2, 3].as_ref()); - let mut iter = c.iter(); - assert_eq!(iter.next(), Some(&1)); - assert_eq!(iter.next(), Some(&2)); - assert_eq!(iter.next(), Some(&3)); - assert_eq!(iter.next(), None); - } - - #[test] - fn add_to_constant_data() { - let d = ConstantData::from([1, 2].as_ref()); - let e = d.append(i16::from(3u8)); - assert_eq!(e.into_vec(), vec![1, 2, 3, 0]) - } - - #[test] - fn extend_constant_data() { - let d = ConstantData::from([1, 2].as_ref()); - assert_eq!(d.expand_to(4).into_vec(), vec![1, 2, 0, 0]) - } - - #[test] - #[should_panic] - fn extend_constant_data_to_invalid_length() { - ConstantData::from([1, 2].as_ref()).expand_to(1); - } - - #[test] - fn parse_constant_data_and_restringify() { - // Verify that parsing of `from` succeeds and stringifies to `to`. - fn parse_ok(from: &str, to: &str) { - let parsed = from.parse::().unwrap(); - assert_eq!(parsed.to_string(), to); - } - - // Verify that parsing of `from` fails with `error_msg`. 
- fn parse_err(from: &str, error_msg: &str) { - let parsed = from.parse::(); - assert!( - parsed.is_err(), - "Expected a parse error but parsing succeeded: {}", - from - ); - assert_eq!(parsed.err().unwrap(), error_msg); - } - - parse_ok("0x00", "0x00"); - parse_ok("0x00000042", "0x00000042"); - parse_ok( - "0x0102030405060708090a0b0c0d0e0f00", - "0x0102030405060708090a0b0c0d0e0f00", - ); - parse_ok("0x_0000_0043_21", "0x0000004321"); - - parse_err("", "Expected a hexadecimal string, e.g. 0x1234"); - parse_err("0x", "Expected a hexadecimal string, e.g. 0x1234"); - parse_err( - "0x042", - "Hexadecimal string must have an even number of digits", - ); - parse_err( - "0x00000000000000000000000000000000000000000000000000", - "Hexadecimal string has too many digits to fit in a 128-bit vector", - ); - parse_err("0xrstu", "Unable to parse as hexadecimal"); - parse_err("0x__", "Hexadecimal string must have some digits"); - } - - #[test] - fn verify_stored_bytes_in_constant_data() { - assert_eq!("0x01".parse::().unwrap().into_vec(), [1]); - assert_eq!(ConstantData::from([1, 0].as_ref()).0, [1, 0]); - assert_eq!(ConstantData::from(vec![1, 0, 0, 0]).0, [1, 0, 0, 0]); - } - - #[test] - fn check_constant_data_endianness_as_uimm128() { - fn parse_to_uimm128(from: &str) -> Vec { - from.parse::() - .unwrap() - .expand_to(16) - .into_vec() - } - - assert_eq!( - parse_to_uimm128("0x42"), - [0x42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - assert_eq!( - parse_to_uimm128("0x00"), - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - assert_eq!( - parse_to_uimm128("0x12345678"), - [0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - assert_eq!( - parse_to_uimm128("0x1234_5678"), - [0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/dfg.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/dfg.rs deleted file mode 100644 index 7c424e2d7..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/dfg.rs +++ /dev/null @@ -1,1324 +0,0 @@ -//! Data flow graph tracking Instructions, Values, and blocks. - -use crate::entity::{self, PrimaryMap, SecondaryMap}; -use crate::ir; -use crate::ir::builder::ReplaceBuilder; -use crate::ir::extfunc::ExtFuncData; -use crate::ir::instructions::{BranchInfo, CallInfo, InstructionData}; -use crate::ir::{types, ConstantData, ConstantPool, Immediate}; -use crate::ir::{ - Block, FuncRef, Inst, SigRef, Signature, SourceLoc, Type, Value, ValueLabelAssignments, - ValueList, ValueListPool, -}; -use crate::packed_option::ReservedValue; -use crate::write::write_operands; -use crate::HashMap; -use alloc::vec::Vec; -use core::fmt; -use core::iter; -use core::mem; -use core::ops::{Index, IndexMut}; -use core::u16; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// A data flow graph defines all instructions and basic blocks in a function as well as -/// the data flow dependencies between them. The DFG also tracks values which can be either -/// instruction results or block parameters. -/// -/// The layout of blocks in the function and of instructions in each block is recorded by the -/// `Layout` data structure which forms the other half of the function representation. -/// -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct DataFlowGraph { - /// Data about all of the instructions in the function, including opcodes and operands. 
- /// The instructions in this map are not in program order. That is tracked by `Layout`, along - /// with the block containing each instruction. - insts: PrimaryMap, - - /// List of result values for each instruction. - /// - /// This map gets resized automatically by `make_inst()` so it is always in sync with the - /// primary `insts` map. - results: SecondaryMap, - - /// basic blocks in the function and their parameters. - /// - /// This map is not in program order. That is handled by `Layout`, and so is the sequence of - /// instructions contained in each block. - blocks: PrimaryMap, - - /// Memory pool of value lists. - /// - /// The `ValueList` references into this pool appear in many places: - /// - /// - Instructions in `insts` that don't have room for their entire argument list inline. - /// - Instruction result values in `results`. - /// - block parameters in `blocks`. - pub value_lists: ValueListPool, - - /// Primary value table with entries for all values. - values: PrimaryMap, - - /// Function signature table. These signatures are referenced by indirect call instructions as - /// well as the external function references. - pub signatures: PrimaryMap, - - /// The pre-legalization signature for each entry in `signatures`, if any. - pub old_signatures: SecondaryMap>, - - /// External function references. These are functions that can be called directly. - pub ext_funcs: PrimaryMap, - - /// Saves Value labels. - pub values_labels: Option>, - - /// Constants used within the function - pub constants: ConstantPool, - - /// Stores large immediates that otherwise will not fit on InstructionData - pub immediates: PrimaryMap, -} - -impl DataFlowGraph { - /// Create a new empty `DataFlowGraph`. - pub fn new() -> Self { - Self { - insts: PrimaryMap::new(), - results: SecondaryMap::new(), - blocks: PrimaryMap::new(), - value_lists: ValueListPool::new(), - values: PrimaryMap::new(), - signatures: PrimaryMap::new(), - old_signatures: SecondaryMap::new(), - ext_funcs: PrimaryMap::new(), - values_labels: None, - constants: ConstantPool::new(), - immediates: PrimaryMap::new(), - } - } - - /// Clear everything. - pub fn clear(&mut self) { - self.insts.clear(); - self.results.clear(); - self.blocks.clear(); - self.value_lists.clear(); - self.values.clear(); - self.signatures.clear(); - self.old_signatures.clear(); - self.ext_funcs.clear(); - self.values_labels = None; - self.constants.clear(); - self.immediates.clear(); - } - - /// Get the total number of instructions created in this function, whether they are currently - /// inserted in the layout or not. - /// - /// This is intended for use with `SecondaryMap::with_capacity`. - pub fn num_insts(&self) -> usize { - self.insts.len() - } - - /// Returns `true` if the given instruction reference is valid. - pub fn inst_is_valid(&self, inst: Inst) -> bool { - self.insts.is_valid(inst) - } - - /// Get the total number of basic blocks created in this function, whether they are - /// currently inserted in the layout or not. - /// - /// This is intended for use with `SecondaryMap::with_capacity`. - pub fn num_blocks(&self) -> usize { - self.blocks.len() - } - - /// Returns `true` if the given block reference is valid. - pub fn block_is_valid(&self, block: Block) -> bool { - self.blocks.is_valid(block) - } - - /// Get the total number of values. - pub fn num_values(&self) -> usize { - self.values.len() - } - - /// Starts collection of debug information. 
- pub fn collect_debug_info(&mut self) { - if self.values_labels.is_none() { - self.values_labels = Some(HashMap::new()); - } - } - - /// Inserts a `ValueLabelAssignments::Alias` for `to_alias` if debug info - /// collection is enabled. - pub fn add_value_label_alias(&mut self, to_alias: Value, from: SourceLoc, value: Value) { - if let Some(values_labels) = self.values_labels.as_mut() { - values_labels.insert(to_alias, ir::ValueLabelAssignments::Alias { from, value }); - } - } -} - -/// Resolve value aliases. -/// -/// Find the original SSA value that `value` aliases, or None if an -/// alias cycle is detected. -fn maybe_resolve_aliases(values: &PrimaryMap, value: Value) -> Option { - let mut v = value; - - // Note that values may be empty here. - for _ in 0..=values.len() { - if let ValueData::Alias { original, .. } = values[v] { - v = original; - } else { - return Some(v); - } - } - - None -} - -/// Resolve value aliases. -/// -/// Find the original SSA value that `value` aliases. -fn resolve_aliases(values: &PrimaryMap, value: Value) -> Value { - if let Some(v) = maybe_resolve_aliases(values, value) { - v - } else { - panic!("Value alias loop detected for {}", value); - } -} - -/// Iterator over all Values in a DFG -pub struct Values<'a> { - inner: entity::Iter<'a, Value, ValueData>, -} - -/// Check for non-values -fn valid_valuedata(data: &ValueData) -> bool { - if let ValueData::Alias { - ty: types::INVALID, - original, - } = *data - { - if original == Value::reserved_value() { - return false; - } - } - true -} - -impl<'a> Iterator for Values<'a> { - type Item = Value; - - fn next(&mut self) -> Option { - self.inner - .by_ref() - .find(|kv| valid_valuedata(kv.1)) - .map(|kv| kv.0) - } -} - -/// Handling values. -/// -/// Values are either block parameters or instruction results. -impl DataFlowGraph { - /// Allocate an extended value entry. - fn make_value(&mut self, data: ValueData) -> Value { - self.values.push(data) - } - - /// Get an iterator over all values. - pub fn values<'a>(&'a self) -> Values { - Values { - inner: self.values.iter(), - } - } - - /// Check if a value reference is valid. - pub fn value_is_valid(&self, v: Value) -> bool { - self.values.is_valid(v) - } - - /// Get the type of a value. - pub fn value_type(&self, v: Value) -> Type { - self.values[v].ty() - } - - /// Get the definition of a value. - /// - /// This is either the instruction that defined it or the Block that has the value as an - /// parameter. - pub fn value_def(&self, v: Value) -> ValueDef { - match self.values[v] { - ValueData::Inst { inst, num, .. } => ValueDef::Result(inst, num as usize), - ValueData::Param { block, num, .. } => ValueDef::Param(block, num as usize), - ValueData::Alias { original, .. } => { - // Make sure we only recurse one level. `resolve_aliases` has safeguards to - // detect alias loops without overrunning the stack. - self.value_def(self.resolve_aliases(original)) - } - } - } - - /// Determine if `v` is an attached instruction result / block parameter. - /// - /// An attached value can't be attached to something else without first being detached. - /// - /// Value aliases are not considered to be attached to anything. Use `resolve_aliases()` to - /// determine if the original aliased value is attached. - pub fn value_is_attached(&self, v: Value) -> bool { - use self::ValueData::*; - match self.values[v] { - Inst { inst, num, .. } => Some(&v) == self.inst_results(inst).get(num as usize), - Param { block, num, .. 
} => Some(&v) == self.block_params(block).get(num as usize), - Alias { .. } => false, - } - } - - /// Resolve value aliases. - /// - /// Find the original SSA value that `value` aliases. - pub fn resolve_aliases(&self, value: Value) -> Value { - resolve_aliases(&self.values, value) - } - - /// Resolve all aliases among inst's arguments. - /// - /// For each argument of inst which is defined by an alias, replace the - /// alias with the aliased value. - pub fn resolve_aliases_in_arguments(&mut self, inst: Inst) { - for arg in self.insts[inst].arguments_mut(&mut self.value_lists) { - let resolved = resolve_aliases(&self.values, *arg); - if resolved != *arg { - *arg = resolved; - } - } - } - - /// Turn a value into an alias of another. - /// - /// Change the `dest` value to behave as an alias of `src`. This means that all uses of `dest` - /// will behave as if they used that value `src`. - /// - /// The `dest` value can't be attached to an instruction or block. - pub fn change_to_alias(&mut self, dest: Value, src: Value) { - debug_assert!(!self.value_is_attached(dest)); - // Try to create short alias chains by finding the original source value. - // This also avoids the creation of loops. - let original = self.resolve_aliases(src); - debug_assert_ne!( - dest, original, - "Aliasing {} to {} would create a loop", - dest, src - ); - let ty = self.value_type(original); - debug_assert_eq!( - self.value_type(dest), - ty, - "Aliasing {} to {} would change its type {} to {}", - dest, - src, - self.value_type(dest), - ty - ); - debug_assert_ne!(ty, types::INVALID); - - self.values[dest] = ValueData::Alias { ty, original }; - } - - /// Replace the results of one instruction with aliases to the results of another. - /// - /// Change all the results of `dest_inst` to behave as aliases of - /// corresponding results of `src_inst`, as if calling change_to_alias for - /// each. - /// - /// After calling this instruction, `dest_inst` will have had its results - /// cleared, so it likely needs to be removed from the graph. - /// - pub fn replace_with_aliases(&mut self, dest_inst: Inst, src_inst: Inst) { - debug_assert_ne!( - dest_inst, src_inst, - "Replacing {} with itself would create a loop", - dest_inst - ); - debug_assert_eq!( - self.results[dest_inst].len(&self.value_lists), - self.results[src_inst].len(&self.value_lists), - "Replacing {} with {} would produce a different number of results.", - dest_inst, - src_inst - ); - - for (&dest, &src) in self.results[dest_inst] - .as_slice(&self.value_lists) - .iter() - .zip(self.results[src_inst].as_slice(&self.value_lists)) - { - let original = src; - let ty = self.value_type(original); - debug_assert_eq!( - self.value_type(dest), - ty, - "Aliasing {} to {} would change its type {} to {}", - dest, - src, - self.value_type(dest), - ty - ); - debug_assert_ne!(ty, types::INVALID); - - self.values[dest] = ValueData::Alias { ty, original }; - } - - self.clear_results(dest_inst); - } -} - -/// Where did a value come from? -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum ValueDef { - /// Value is the n'th result of an instruction. - Result(Inst, usize), - /// Value is the n'th parameter to a block. - Param(Block, usize), -} - -impl ValueDef { - /// Unwrap the instruction where the value was defined, or panic. - pub fn unwrap_inst(&self) -> Inst { - self.inst().expect("Value is not an instruction result") - } - - /// Get the instruction where the value was defined, if any. 
- pub fn inst(&self) -> Option { - match *self { - Self::Result(inst, _) => Some(inst), - _ => None, - } - } - - /// Unwrap the block there the parameter is defined, or panic. - pub fn unwrap_block(&self) -> Block { - match *self { - Self::Param(block, _) => block, - _ => panic!("Value is not a block parameter"), - } - } - - /// Get the program point where the value was defined. - pub fn pp(self) -> ir::ExpandedProgramPoint { - self.into() - } - - /// Get the number component of this definition. - /// - /// When multiple values are defined at the same program point, this indicates the index of - /// this value. - pub fn num(self) -> usize { - match self { - Self::Result(_, n) | Self::Param(_, n) => n, - } - } -} - -/// Internal table storage for extended values. -#[derive(Clone, Debug)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -enum ValueData { - /// Value is defined by an instruction. - Inst { ty: Type, num: u16, inst: Inst }, - - /// Value is a block parameter. - Param { ty: Type, num: u16, block: Block }, - - /// Value is an alias of another value. - /// An alias value can't be linked as an instruction result or block parameter. It is used as a - /// placeholder when the original instruction or block has been rewritten or modified. - Alias { ty: Type, original: Value }, -} - -impl ValueData { - fn ty(&self) -> Type { - match *self { - ValueData::Inst { ty, .. } - | ValueData::Param { ty, .. } - | ValueData::Alias { ty, .. } => ty, - } - } -} - -/// Instructions. -/// -impl DataFlowGraph { - /// Create a new instruction. - /// - /// The type of the first result is indicated by `data.ty`. If the instruction produces - /// multiple results, also call `make_inst_results` to allocate value table entries. - pub fn make_inst(&mut self, data: InstructionData) -> Inst { - let n = self.num_insts() + 1; - self.results.resize(n); - self.insts.push(data) - } - - /// Returns an object that displays `inst`. - pub fn display_inst<'a>(&'a self, inst: Inst) -> DisplayInst<'a> { - DisplayInst(self, inst) - } - - /// Get all value arguments on `inst` as a slice. - pub fn inst_args(&self, inst: Inst) -> &[Value] { - self.insts[inst].arguments(&self.value_lists) - } - - /// Get all value arguments on `inst` as a mutable slice. - pub fn inst_args_mut(&mut self, inst: Inst) -> &mut [Value] { - self.insts[inst].arguments_mut(&mut self.value_lists) - } - - /// Get the fixed value arguments on `inst` as a slice. - pub fn inst_fixed_args(&self, inst: Inst) -> &[Value] { - let num_fixed_args = self[inst] - .opcode() - .constraints() - .num_fixed_value_arguments(); - &self.inst_args(inst)[..num_fixed_args] - } - - /// Get the fixed value arguments on `inst` as a mutable slice. - pub fn inst_fixed_args_mut(&mut self, inst: Inst) -> &mut [Value] { - let num_fixed_args = self[inst] - .opcode() - .constraints() - .num_fixed_value_arguments(); - &mut self.inst_args_mut(inst)[..num_fixed_args] - } - - /// Get the variable value arguments on `inst` as a slice. - pub fn inst_variable_args(&self, inst: Inst) -> &[Value] { - let num_fixed_args = self[inst] - .opcode() - .constraints() - .num_fixed_value_arguments(); - &self.inst_args(inst)[num_fixed_args..] - } - - /// Get the variable value arguments on `inst` as a mutable slice. - pub fn inst_variable_args_mut(&mut self, inst: Inst) -> &mut [Value] { - let num_fixed_args = self[inst] - .opcode() - .constraints() - .num_fixed_value_arguments(); - &mut self.inst_args_mut(inst)[num_fixed_args..] 
- } - - /// Create result values for an instruction that produces multiple results. - /// - /// Instructions that produce no result values only need to be created with `make_inst`, - /// otherwise call `make_inst_results` to allocate value table entries for the results. - /// - /// The result value types are determined from the instruction's value type constraints and the - /// provided `ctrl_typevar` type for polymorphic instructions. For non-polymorphic - /// instructions, `ctrl_typevar` is ignored, and `INVALID` can be used. - /// - /// The type of the first result value is also set, even if it was already set in the - /// `InstructionData` passed to `make_inst`. If this function is called with a single-result - /// instruction, that is the only effect. - pub fn make_inst_results(&mut self, inst: Inst, ctrl_typevar: Type) -> usize { - self.make_inst_results_reusing(inst, ctrl_typevar, iter::empty()) - } - - /// Create result values for `inst`, reusing the provided detached values. - /// - /// Create a new set of result values for `inst` using `ctrl_typevar` to determine the result - /// types. Any values provided by `reuse` will be reused. When `reuse` is exhausted or when it - /// produces `None`, a new value is created. - pub fn make_inst_results_reusing( - &mut self, - inst: Inst, - ctrl_typevar: Type, - reuse: I, - ) -> usize - where - I: Iterator>, - { - let mut reuse = reuse.fuse(); - - self.results[inst].clear(&mut self.value_lists); - - // Get the call signature if this is a function call. - if let Some(sig) = self.call_signature(inst) { - // Create result values corresponding to the call return types. - debug_assert_eq!( - self.insts[inst].opcode().constraints().num_fixed_results(), - 0 - ); - let num_results = self.signatures[sig].returns.len(); - for res_idx in 0..num_results { - let ty = self.signatures[sig].returns[res_idx].value_type; - if let Some(Some(v)) = reuse.next() { - debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty); - self.attach_result(inst, v); - } else { - self.append_result(inst, ty); - } - } - num_results - } else { - // Create result values corresponding to the opcode's constraints. - let constraints = self.insts[inst].opcode().constraints(); - let num_results = constraints.num_fixed_results(); - for res_idx in 0..num_results { - let ty = constraints.result_type(res_idx, ctrl_typevar); - if let Some(Some(v)) = reuse.next() { - debug_assert_eq!(self.value_type(v), ty, "Reused {} is wrong type", ty); - self.attach_result(inst, v); - } else { - self.append_result(inst, ty); - } - } - num_results - } - } - - /// Create a `ReplaceBuilder` that will replace `inst` with a new instruction in place. - pub fn replace(&mut self, inst: Inst) -> ReplaceBuilder { - ReplaceBuilder::new(self, inst) - } - - /// Detach the list of result values from `inst` and return it. - /// - /// This leaves `inst` without any result values. New result values can be created by calling - /// `make_inst_results` or by using a `replace(inst)` builder. - pub fn detach_results(&mut self, inst: Inst) -> ValueList { - self.results[inst].take() - } - - /// Clear the list of result values from `inst`. - /// - /// This leaves `inst` without any result values. New result values can be created by calling - /// `make_inst_results` or by using a `replace(inst)` builder. - pub fn clear_results(&mut self, inst: Inst) { - self.results[inst].clear(&mut self.value_lists) - } - - /// Attach an existing value to the result value list for `inst`. 
- /// - /// The `res` value is appended to the end of the result list. - /// - /// This is a very low-level operation. Usually, instruction results with the correct types are - /// created automatically. The `res` value must not be attached to anything else. - pub fn attach_result(&mut self, inst: Inst, res: Value) { - debug_assert!(!self.value_is_attached(res)); - let num = self.results[inst].push(res, &mut self.value_lists); - debug_assert!(num <= u16::MAX as usize, "Too many result values"); - let ty = self.value_type(res); - self.values[res] = ValueData::Inst { - ty, - num: num as u16, - inst, - }; - } - - /// Replace an instruction result with a new value of type `new_type`. - /// - /// The `old_value` must be an attached instruction result. - /// - /// The old value is left detached, so it should probably be changed into something else. - /// - /// Returns the new value. - pub fn replace_result(&mut self, old_value: Value, new_type: Type) -> Value { - let (num, inst) = match self.values[old_value] { - ValueData::Inst { num, inst, .. } => (num, inst), - _ => panic!("{} is not an instruction result value", old_value), - }; - let new_value = self.make_value(ValueData::Inst { - ty: new_type, - num, - inst, - }); - let num = num as usize; - let attached = mem::replace( - self.results[inst] - .get_mut(num, &mut self.value_lists) - .expect("Replacing detached result"), - new_value, - ); - debug_assert_eq!( - attached, - old_value, - "{} wasn't detached from {}", - old_value, - self.display_inst(inst) - ); - new_value - } - - /// Append a new instruction result value to `inst`. - pub fn append_result(&mut self, inst: Inst, ty: Type) -> Value { - let res = self.values.next_key(); - let num = self.results[inst].push(res, &mut self.value_lists); - debug_assert!(num <= u16::MAX as usize, "Too many result values"); - self.make_value(ValueData::Inst { - ty, - inst, - num: num as u16, - }) - } - - /// Append a new value argument to an instruction. - /// - /// Panics if the instruction doesn't support arguments. - pub fn append_inst_arg(&mut self, inst: Inst, new_arg: Value) { - let mut branch_values = self.insts[inst] - .take_value_list() - .expect("the instruction doesn't have value arguments"); - branch_values.push(new_arg, &mut self.value_lists); - self.insts[inst].put_value_list(branch_values) - } - - /// Get the first result of an instruction. - /// - /// This function panics if the instruction doesn't have any result. - pub fn first_result(&self, inst: Inst) -> Value { - self.results[inst] - .first(&self.value_lists) - .expect("Instruction has no results") - } - - /// Test if `inst` has any result values currently. - pub fn has_results(&self, inst: Inst) -> bool { - !self.results[inst].is_empty() - } - - /// Return all the results of an instruction. - pub fn inst_results(&self, inst: Inst) -> &[Value] { - self.results[inst].as_slice(&self.value_lists) - } - - /// Return all the results of an instruction as ValueList. - pub fn inst_results_list(&self, inst: Inst) -> ValueList { - self.results[inst] - } - - /// Get the call signature of a direct or indirect call instruction. - /// Returns `None` if `inst` is not a call instruction. - pub fn call_signature(&self, inst: Inst) -> Option { - match self.insts[inst].analyze_call(&self.value_lists) { - CallInfo::NotACall => None, - CallInfo::Direct(f, _) => Some(self.ext_funcs[f].signature), - CallInfo::Indirect(s, _) => Some(s), - } - } - - /// Check if `inst` is a branch. 
- pub fn analyze_branch(&self, inst: Inst) -> BranchInfo { - self.insts[inst].analyze_branch(&self.value_lists) - } - - /// Compute the type of an instruction result from opcode constraints and call signatures. - /// - /// This computes the same sequence of result types that `make_inst_results()` above would - /// assign to the created result values, but it does not depend on `make_inst_results()` being - /// called first. - /// - /// Returns `None` if asked about a result index that is too large. - pub fn compute_result_type( - &self, - inst: Inst, - result_idx: usize, - ctrl_typevar: Type, - ) -> Option { - let constraints = self.insts[inst].opcode().constraints(); - let num_fixed_results = constraints.num_fixed_results(); - - if result_idx < num_fixed_results { - return Some(constraints.result_type(result_idx, ctrl_typevar)); - } - - // Not a fixed result, try to extract a return type from the call signature. - self.call_signature(inst).and_then(|sigref| { - self.signatures[sigref] - .returns - .get(result_idx - num_fixed_results) - .map(|&arg| arg.value_type) - }) - } - - /// Get the controlling type variable, or `INVALID` if `inst` isn't polymorphic. - pub fn ctrl_typevar(&self, inst: Inst) -> Type { - let constraints = self[inst].opcode().constraints(); - - if !constraints.is_polymorphic() { - types::INVALID - } else if constraints.requires_typevar_operand() { - // Not all instruction formats have a designated operand, but in that case - // `requires_typevar_operand()` should never be true. - self.value_type( - self[inst] - .typevar_operand(&self.value_lists) - .expect("Instruction format doesn't have a designated operand, bad opcode."), - ) - } else { - self.value_type(self.first_result(inst)) - } - } -} - -/// Allow immutable access to instructions via indexing. -impl Index for DataFlowGraph { - type Output = InstructionData; - - fn index(&self, inst: Inst) -> &InstructionData { - &self.insts[inst] - } -} - -/// Allow mutable access to instructions via indexing. -impl IndexMut for DataFlowGraph { - fn index_mut(&mut self, inst: Inst) -> &mut InstructionData { - &mut self.insts[inst] - } -} - -/// basic blocks. -impl DataFlowGraph { - /// Create a new basic block. - pub fn make_block(&mut self) -> Block { - self.blocks.push(BlockData::new()) - } - - /// Get the number of parameters on `block`. - pub fn num_block_params(&self, block: Block) -> usize { - self.blocks[block].params.len(&self.value_lists) - } - - /// Get the parameters on `block`. - pub fn block_params(&self, block: Block) -> &[Value] { - self.blocks[block].params.as_slice(&self.value_lists) - } - - /// Get the types of the parameters on `block`. - pub fn block_param_types(&self, block: Block) -> Vec { - self.block_params(block) - .iter() - .map(|&v| self.value_type(v)) - .collect() - } - - /// Append a parameter with type `ty` to `block`. - pub fn append_block_param(&mut self, block: Block, ty: Type) -> Value { - let param = self.values.next_key(); - let num = self.blocks[block].params.push(param, &mut self.value_lists); - debug_assert!(num <= u16::MAX as usize, "Too many parameters on block"); - self.make_value(ValueData::Param { - ty, - num: num as u16, - block, - }) - } - - /// Removes `val` from `block`'s parameters by swapping it with the last parameter on `block`. - /// Returns the position of `val` before removal. - /// - /// *Important*: to ensure O(1) deletion, this method swaps the removed parameter with the - /// last `block` parameter. 
This can disrupt all the branch instructions jumping to this - /// `block` for which you have to change the branch argument order if necessary. - /// - /// Panics if `val` is not a block parameter. - pub fn swap_remove_block_param(&mut self, val: Value) -> usize { - let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] { - (block, num) - } else { - panic!("{} must be a block parameter", val); - }; - self.blocks[block] - .params - .swap_remove(num as usize, &mut self.value_lists); - if let Some(last_arg_val) = self.blocks[block] - .params - .get(num as usize, &self.value_lists) - { - // We update the position of the old last arg. - if let ValueData::Param { - num: ref mut old_num, - .. - } = self.values[last_arg_val] - { - *old_num = num; - } else { - panic!("{} should be a Block parameter", last_arg_val); - } - } - num as usize - } - - /// Removes `val` from `block`'s parameters by a standard linear time list removal which - /// preserves ordering. Also updates the values' data. - pub fn remove_block_param(&mut self, val: Value) { - let (block, num) = if let ValueData::Param { num, block, .. } = self.values[val] { - (block, num) - } else { - panic!("{} must be a block parameter", val); - }; - self.blocks[block] - .params - .remove(num as usize, &mut self.value_lists); - for index in num..(self.num_block_params(block) as u16) { - match self.values[self.blocks[block] - .params - .get(index as usize, &self.value_lists) - .unwrap()] - { - ValueData::Param { ref mut num, .. } => { - *num -= 1; - } - _ => panic!( - "{} must be a block parameter", - self.blocks[block] - .params - .get(index as usize, &self.value_lists) - .unwrap() - ), - } - } - } - - /// Append an existing value to `block`'s parameters. - /// - /// The appended value can't already be attached to something else. - /// - /// In almost all cases, you should be using `append_block_param()` instead of this method. - pub fn attach_block_param(&mut self, block: Block, param: Value) { - debug_assert!(!self.value_is_attached(param)); - let num = self.blocks[block].params.push(param, &mut self.value_lists); - debug_assert!(num <= u16::MAX as usize, "Too many parameters on block"); - let ty = self.value_type(param); - self.values[param] = ValueData::Param { - ty, - num: num as u16, - block, - }; - } - - /// Replace a block parameter with a new value of type `ty`. - /// - /// The `old_value` must be an attached block parameter. It is removed from its place in the list - /// of parameters and replaced by a new value of type `new_type`. The new value gets the same - /// position in the list, and other parameters are not disturbed. - /// - /// The old value is left detached, so it should probably be changed into something else. - /// - /// Returns the new value. - pub fn replace_block_param(&mut self, old_value: Value, new_type: Type) -> Value { - // Create new value identical to the old one except for the type. - let (block, num) = if let ValueData::Param { num, block, .. } = self.values[old_value] { - (block, num) - } else { - panic!("{} must be a block parameter", old_value); - }; - let new_arg = self.make_value(ValueData::Param { - ty: new_type, - num, - block, - }); - - self.blocks[block] - .params - .as_mut_slice(&mut self.value_lists)[num as usize] = new_arg; - new_arg - } - - /// Detach all the parameters from `block` and return them as a `ValueList`. - /// - /// This is a quite low-level operation. 
Sensible things to do with the detached block parameters - /// is to put them back on the same block with `attach_block_param()` or change them into aliases - /// with `change_to_alias()`. - pub fn detach_block_params(&mut self, block: Block) -> ValueList { - self.blocks[block].params.take() - } -} - -/// Contents of a basic block. -/// -/// Parameters on a basic block are values that dominate everything in the block. All -/// branches to this block must provide matching arguments, and the arguments to the entry block must -/// match the function arguments. -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -struct BlockData { - /// List of parameters to this block. - params: ValueList, -} - -impl BlockData { - fn new() -> Self { - Self { - params: ValueList::new(), - } - } -} - -/// Object that can display an instruction. -pub struct DisplayInst<'a>(&'a DataFlowGraph, Inst); - -impl<'a> fmt::Display for DisplayInst<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let dfg = self.0; - let inst = self.1; - - if let Some((first, rest)) = dfg.inst_results(inst).split_first() { - write!(f, "{}", first)?; - for v in rest { - write!(f, ", {}", v)?; - } - write!(f, " = ")?; - } - - let typevar = dfg.ctrl_typevar(inst); - if typevar.is_invalid() { - write!(f, "{}", dfg[inst].opcode())?; - } else { - write!(f, "{}.{}", dfg[inst].opcode(), typevar)?; - } - write_operands(f, dfg, inst) - } -} - -/// Parser routines. These routines should not be used outside the parser. -impl DataFlowGraph { - /// Set the type of a value. This is only for use in the parser, which needs - /// to create invalid values for index padding which may be reassigned later. - #[cold] - fn set_value_type_for_parser(&mut self, v: Value, t: Type) { - assert_eq!( - self.value_type(v), - types::INVALID, - "this function is only for assigning types to previously invalid values" - ); - match self.values[v] { - ValueData::Inst { ref mut ty, .. } - | ValueData::Param { ref mut ty, .. } - | ValueData::Alias { ref mut ty, .. } => *ty = t, - } - } - - /// Create result values for `inst`, reusing the provided detached values. - /// This is similar to `make_inst_results_reusing` except it's only for use - /// in the parser, which needs to reuse previously invalid values. - #[cold] - pub fn make_inst_results_for_parser( - &mut self, - inst: Inst, - ctrl_typevar: Type, - reuse: &[Value], - ) -> usize { - // Get the call signature if this is a function call. - if let Some(sig) = self.call_signature(inst) { - assert_eq!( - self.insts[inst].opcode().constraints().num_fixed_results(), - 0 - ); - for res_idx in 0..self.signatures[sig].returns.len() { - let ty = self.signatures[sig].returns[res_idx].value_type; - if let Some(v) = reuse.get(res_idx) { - self.set_value_type_for_parser(*v, ty); - } - } - } else { - let constraints = self.insts[inst].opcode().constraints(); - for res_idx in 0..constraints.num_fixed_results() { - let ty = constraints.result_type(res_idx, ctrl_typevar); - if let Some(v) = reuse.get(res_idx) { - self.set_value_type_for_parser(*v, ty); - } - } - } - - self.make_inst_results_reusing(inst, ctrl_typevar, reuse.iter().map(|x| Some(*x))) - } - - /// Similar to `append_block_param`, append a parameter with type `ty` to - /// `block`, but using value `val`. This is only for use by the parser to - /// create parameters with specific values. 
- #[cold] - pub fn append_block_param_for_parser(&mut self, block: Block, ty: Type, val: Value) { - let num = self.blocks[block].params.push(val, &mut self.value_lists); - assert!(num <= u16::MAX as usize, "Too many parameters on block"); - self.values[val] = ValueData::Param { - ty, - num: num as u16, - block, - }; - } - - /// Create a new value alias. This is only for use by the parser to create - /// aliases with specific values, and the printer for testing. - #[cold] - pub fn make_value_alias_for_serialization(&mut self, src: Value, dest: Value) { - assert_ne!(src, Value::reserved_value()); - assert_ne!(dest, Value::reserved_value()); - - let ty = if self.values.is_valid(src) { - self.value_type(src) - } else { - // As a special case, if we can't resolve the aliasee yet, use INVALID - // temporarily. It will be resolved later in parsing. - types::INVALID - }; - let data = ValueData::Alias { ty, original: src }; - self.values[dest] = data; - } - - /// If `v` is already defined as an alias, return its destination value. - /// Otherwise return None. This allows the parser to coalesce identical - /// alias definitions, and the printer to identify an alias's immediate target. - #[cold] - pub fn value_alias_dest_for_serialization(&self, v: Value) -> Option { - if let ValueData::Alias { original, .. } = self.values[v] { - Some(original) - } else { - None - } - } - - /// Compute the type of an alias. This is only for use in the parser. - /// Returns false if an alias cycle was encountered. - #[cold] - pub fn set_alias_type_for_parser(&mut self, v: Value) -> bool { - if let Some(resolved) = maybe_resolve_aliases(&self.values, v) { - let old_ty = self.value_type(v); - let new_ty = self.value_type(resolved); - if old_ty == types::INVALID { - self.set_value_type_for_parser(v, new_ty); - } else { - assert_eq!(old_ty, new_ty); - } - true - } else { - false - } - } - - /// Create an invalid value, to pad the index space. This is only for use by - /// the parser to pad out the value index space. - #[cold] - pub fn make_invalid_value_for_parser(&mut self) { - let data = ValueData::Alias { - ty: types::INVALID, - original: Value::reserved_value(), - }; - self.make_value(data); - } - - /// Check if a value reference is valid, while being aware of aliases which - /// may be unresolved while parsing. - #[cold] - pub fn value_is_valid_for_parser(&self, v: Value) -> bool { - if !self.value_is_valid(v) { - return false; - } - if let ValueData::Alias { ty, .. } = self.values[v] { - ty != types::INVALID - } else { - true - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cursor::{Cursor, FuncCursor}; - use crate::ir::types; - use crate::ir::{Function, InstructionData, Opcode, TrapCode}; - use alloc::string::ToString; - - #[test] - fn make_inst() { - let mut dfg = DataFlowGraph::new(); - - let idata = InstructionData::UnaryImm { - opcode: Opcode::Iconst, - imm: 0.into(), - }; - let inst = dfg.make_inst(idata); - - dfg.make_inst_results(inst, types::I32); - assert_eq!(inst.to_string(), "inst0"); - assert_eq!(dfg.display_inst(inst).to_string(), "v0 = iconst.i32 0"); - - // Immutable reference resolution. - { - let immdfg = &dfg; - let ins = &immdfg[inst]; - assert_eq!(ins.opcode(), Opcode::Iconst); - } - - // Results. - let val = dfg.first_result(inst); - assert_eq!(dfg.inst_results(inst), &[val]); - - assert_eq!(dfg.value_def(val), ValueDef::Result(inst, 0)); - assert_eq!(dfg.value_type(val), types::I32); - - // Replacing results. 
- assert!(dfg.value_is_attached(val)); - let v2 = dfg.replace_result(val, types::F64); - assert!(!dfg.value_is_attached(val)); - assert!(dfg.value_is_attached(v2)); - assert_eq!(dfg.inst_results(inst), &[v2]); - assert_eq!(dfg.value_def(v2), ValueDef::Result(inst, 0)); - assert_eq!(dfg.value_type(v2), types::F64); - } - - #[test] - fn no_results() { - let mut dfg = DataFlowGraph::new(); - - let idata = InstructionData::Trap { - opcode: Opcode::Trap, - code: TrapCode::User(0), - }; - let inst = dfg.make_inst(idata); - assert_eq!(dfg.display_inst(inst).to_string(), "trap user0"); - - // Result slice should be empty. - assert_eq!(dfg.inst_results(inst), &[]); - } - - #[test] - fn block() { - let mut dfg = DataFlowGraph::new(); - - let block = dfg.make_block(); - assert_eq!(block.to_string(), "block0"); - assert_eq!(dfg.num_block_params(block), 0); - assert_eq!(dfg.block_params(block), &[]); - assert!(dfg.detach_block_params(block).is_empty()); - assert_eq!(dfg.num_block_params(block), 0); - assert_eq!(dfg.block_params(block), &[]); - - let arg1 = dfg.append_block_param(block, types::F32); - assert_eq!(arg1.to_string(), "v0"); - assert_eq!(dfg.num_block_params(block), 1); - assert_eq!(dfg.block_params(block), &[arg1]); - - let arg2 = dfg.append_block_param(block, types::I16); - assert_eq!(arg2.to_string(), "v1"); - assert_eq!(dfg.num_block_params(block), 2); - assert_eq!(dfg.block_params(block), &[arg1, arg2]); - - assert_eq!(dfg.value_def(arg1), ValueDef::Param(block, 0)); - assert_eq!(dfg.value_def(arg2), ValueDef::Param(block, 1)); - assert_eq!(dfg.value_type(arg1), types::F32); - assert_eq!(dfg.value_type(arg2), types::I16); - - // Swap the two block parameters. - let vlist = dfg.detach_block_params(block); - assert_eq!(dfg.num_block_params(block), 0); - assert_eq!(dfg.block_params(block), &[]); - assert_eq!(vlist.as_slice(&dfg.value_lists), &[arg1, arg2]); - dfg.attach_block_param(block, arg2); - let arg3 = dfg.append_block_param(block, types::I32); - dfg.attach_block_param(block, arg1); - assert_eq!(dfg.block_params(block), &[arg2, arg3, arg1]); - } - - #[test] - fn replace_block_params() { - let mut dfg = DataFlowGraph::new(); - - let block = dfg.make_block(); - let arg1 = dfg.append_block_param(block, types::F32); - - let new1 = dfg.replace_block_param(arg1, types::I64); - assert_eq!(dfg.value_type(arg1), types::F32); - assert_eq!(dfg.value_type(new1), types::I64); - assert_eq!(dfg.block_params(block), &[new1]); - - dfg.attach_block_param(block, arg1); - assert_eq!(dfg.block_params(block), &[new1, arg1]); - - let new2 = dfg.replace_block_param(arg1, types::I8); - assert_eq!(dfg.value_type(arg1), types::F32); - assert_eq!(dfg.value_type(new2), types::I8); - assert_eq!(dfg.block_params(block), &[new1, new2]); - - dfg.attach_block_param(block, arg1); - assert_eq!(dfg.block_params(block), &[new1, new2, arg1]); - - let new3 = dfg.replace_block_param(new2, types::I16); - assert_eq!(dfg.value_type(new1), types::I64); - assert_eq!(dfg.value_type(new2), types::I8); - assert_eq!(dfg.value_type(new3), types::I16); - assert_eq!(dfg.block_params(block), &[new1, new3, arg1]); - } - - #[test] - fn swap_remove_block_params() { - let mut dfg = DataFlowGraph::new(); - - let block = dfg.make_block(); - let arg1 = dfg.append_block_param(block, types::F32); - let arg2 = dfg.append_block_param(block, types::F32); - let arg3 = dfg.append_block_param(block, types::F32); - assert_eq!(dfg.block_params(block), &[arg1, arg2, arg3]); - - dfg.swap_remove_block_param(arg1); - assert_eq!(dfg.value_is_attached(arg1), 
false); - assert_eq!(dfg.value_is_attached(arg2), true); - assert_eq!(dfg.value_is_attached(arg3), true); - assert_eq!(dfg.block_params(block), &[arg3, arg2]); - dfg.swap_remove_block_param(arg2); - assert_eq!(dfg.value_is_attached(arg2), false); - assert_eq!(dfg.value_is_attached(arg3), true); - assert_eq!(dfg.block_params(block), &[arg3]); - dfg.swap_remove_block_param(arg3); - assert_eq!(dfg.value_is_attached(arg3), false); - assert_eq!(dfg.block_params(block), &[]); - } - - #[test] - fn aliases() { - use crate::ir::InstBuilder; - - let mut func = Function::new(); - let block0 = func.dfg.make_block(); - let mut pos = FuncCursor::new(&mut func); - pos.insert_block(block0); - - // Build a little test program. - let v1 = pos.ins().iconst(types::I32, 42); - - // Make sure we can resolve value aliases even when values is empty. - assert_eq!(pos.func.dfg.resolve_aliases(v1), v1); - - let arg0 = pos.func.dfg.append_block_param(block0, types::I32); - let (s, c) = pos.ins().iadd_ifcout(v1, arg0); - let iadd = match pos.func.dfg.value_def(s) { - ValueDef::Result(i, 0) => i, - _ => panic!(), - }; - - // Remove `c` from the result list. - pos.func.dfg.clear_results(iadd); - pos.func.dfg.attach_result(iadd, s); - - // Replace `iadd_ifcout` with a normal `iadd` and an `ifcmp`. - pos.func.dfg.replace(iadd).iadd(v1, arg0); - let c2 = pos.ins().ifcmp(s, v1); - pos.func.dfg.change_to_alias(c, c2); - - assert_eq!(pos.func.dfg.resolve_aliases(c2), c2); - assert_eq!(pos.func.dfg.resolve_aliases(c), c2); - - // Make a copy of the alias. - let c3 = pos.ins().copy(c); - // This does not see through copies. - assert_eq!(pos.func.dfg.resolve_aliases(c3), c3); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/entities.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/entities.rs deleted file mode 100644 index d8ca7cef3..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/entities.rs +++ /dev/null @@ -1,533 +0,0 @@ -//! Cranelift IR entity references. -//! -//! Instructions in Cranelift IR need to reference other entities in the function. This can be other -//! parts of the function like basic blocks or stack slots, or it can be external entities -//! that are declared in the function preamble in the text format. -//! -//! These entity references in instruction operands are not implemented as Rust references both -//! because Rust's ownership and mutability rules make it difficult, and because 64-bit pointers -//! take up a lot of space, and we want a compact in-memory representation. Instead, entity -//! references are structs wrapping a `u32` index into a table in the `Function` main data -//! structure. There is a separate index type for each entity type, so we don't lose type safety. -//! -//! The `entities` module defines public types for the entity references along with constants -//! representing an invalid reference. We prefer to use `Option` whenever possible, but -//! unfortunately that type is twice as large as the 32-bit index type on its own. Thus, compact -//! data structures use the `PackedOption` representation, while function arguments and -//! return values prefer the more Rust-like `Option` variant. -//! -//! The entity references all implement the `Display` trait in a way that matches the textual IR -//! format. 
- -use crate::entity::entity_impl; -use core::fmt; -use core::u32; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// An opaque reference to a [basic block](https://en.wikipedia.org/wiki/Basic_block) in a -/// [`Function`](super::function::Function). -/// -/// You can get a `Block` using -/// [`FunctionBuilder::create_block`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_block) -/// -/// While the order is stable, it is arbitrary and does not necessarily resemble the layout order. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Block(u32); -entity_impl!(Block, "block"); - -impl Block { - /// Create a new block reference from its number. This corresponds to the `blockNN` representation. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to an SSA value. -/// -/// You can get a constant `Value` from the following -/// [`InstBuilder`](super::InstBuilder) instructions: -/// -/// - [`iconst`](super::InstBuilder::iconst) for integer constants -/// - [`f32const`](super::InstBuilder::f32const) for 32-bit float constants -/// - [`f64const`](super::InstBuilder::f64const) for 64-bit float constants -/// - [`bconst`](super::InstBuilder::bconst) for boolean constants -/// - [`vconst`](super::InstBuilder::vconst) for vector constants -/// - [`null`](super::InstBuilder::null) for null reference constants -/// -/// Any `InstBuilder` instruction that has an output will also return a `Value`. -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Value(u32); -entity_impl!(Value, "v"); - -impl Value { - /// Create a value from its number representation. - /// This is the number in the `vNN` notation. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX / 2 { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to an instruction in a [`Function`](super::Function). -/// -/// Most usage of `Inst` is internal. `Inst`ructions are returned by -/// [`InstBuilder`](super::InstBuilder) instructions that do not return a -/// [`Value`], such as control flow and trap instructions. -/// -/// If you look around the API, you can find many inventive uses for `Inst`, -/// such as [annotating specific instructions with a comment][inst_comment] -/// or [performing reflection at compile time](super::DataFlowGraph::analyze_branch) -/// on the type of instruction. -/// -/// [inst_comment]: https://github.com/bjorn3/rustc_codegen_cranelift/blob/0f8814fd6da3d436a90549d4bb19b94034f2b19c/src/pretty_clif.rs -/// -/// While the order is stable, it is arbitrary and does not necessarily resemble the layout order. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Inst(u32); -entity_impl!(Inst, "inst"); - -/// An opaque reference to a stack slot. -/// -/// Stack slots represent an address on the -/// [call stack](https://en.wikipedia.org/wiki/Call_stack). 
-/// -/// `StackSlot`s can be created with -/// [`FunctionBuilder::create_stackslot`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_stack_slot). -/// -/// `StackSlot`s are most often used with -/// [`stack_addr`](super::InstBuilder::stack_addr), -/// [`stack_load`](super::InstBuilder::stack_load), and -/// [`stack_store`](super::InstBuilder::stack_store). -/// -/// While the order is stable, it is arbitrary and does not necessarily resemble the stack order. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct StackSlot(u32); -entity_impl!(StackSlot, "ss"); - -impl StackSlot { - /// Create a new stack slot reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to a global value. -/// -/// A `GlobalValue` is a [`Value`](Value) that will be live across the entire -/// function lifetime. It can be preloaded from other global values. -/// -/// You can create a `GlobalValue` in the following ways: -/// -/// - When compiling to WASM, you can use it to load values from a -/// [`VmContext`](super::GlobalValueData::VMContext) using -/// [`FuncEnvironment::make_global`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_global). -/// - When compiling to native code, you can use it for objects in static memory with -/// [`Module::declare_data_in_func`](https://docs.rs/cranelift-module/*/cranelift_module/trait.Module.html#method.declare_data_in_func). -/// - For any compilation target, it can be registered with -/// [`FunctionBuilder::create_global_value`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_global_value). -/// -/// `GlobalValue`s can be retrieved with -/// [`InstBuilder:global_value`](super::InstBuilder::global_value). -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct GlobalValue(u32); -entity_impl!(GlobalValue, "gv"); - -impl GlobalValue { - /// Create a new global value reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to a constant. -/// -/// You can store [`ConstantData`](super::ConstantData) in a -/// [`ConstantPool`](super::ConstantPool) for efficient storage and retrieval. -/// See [`ConstantPool::insert`](super::ConstantPool::insert). -/// -/// While the order is stable, it is arbitrary and does not necessarily resemble the order in which -/// the constants are written in the constant pool. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Constant(u32); -entity_impl!(Constant, "const"); - -impl Constant { - /// Create a const reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to an immediate. -/// -/// Some immediates (e.g. 
SIMD shuffle masks) are too large to store in the -/// [`InstructionData`](super::instructions::InstructionData) struct and therefore must be -/// tracked separately in [`DataFlowGraph::immediates`](super::dfg::DataFlowGraph). `Immediate` -/// provides a way to reference values stored there. -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Immediate(u32); -entity_impl!(Immediate, "imm"); - -impl Immediate { - /// Create an immediate reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to a [jump table](https://en.wikipedia.org/wiki/Branch_table). -/// -/// `JumpTable`s are used for indirect branching and are specialized for dense, -/// 0-based jump offsets. If you want a jump table which doesn't start at 0, -/// or is not contiguous, consider using a [`Switch`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.Switch.html) instead. -/// -/// `JumpTable` are used with [`br_table`](super::InstBuilder::br_table). -/// -/// `JumpTable`s can be created with -/// [`create_jump_table`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_jump_table). -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct JumpTable(u32); -entity_impl!(JumpTable, "jt"); - -impl JumpTable { - /// Create a new jump table reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to another [`Function`](super::Function). -/// -/// `FuncRef`s are used for [direct](super::InstBuilder::call) function calls -/// and by [`func_addr`](super::InstBuilder::func_addr) for use in -/// [indirect](super::InstBuilder::call_indirect) function calls. -/// -/// `FuncRef`s can be created with -/// -/// - [`FunctionBuilder::import_function`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.import_function) -/// for external functions -/// - [`Module::declare_func_in_func`](https://docs.rs/cranelift-module/*/cranelift_module/trait.Module.html#method.declare_func_in_func) -/// for functions declared elsewhere in the same native -/// [`Module`](https://docs.rs/cranelift-module/*/cranelift_module/trait.Module.html) -/// - [`FuncEnvironment::make_direct_func`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_direct_func) -/// for functions declared in the same WebAssembly -/// [`FuncEnvironment`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_direct_func) -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct FuncRef(u32); -entity_impl!(FuncRef, "fn"); - -impl FuncRef { - /// Create a new external function reference from its number. - /// - /// This method is for use by the parser. 
- pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to a function [`Signature`](super::Signature). -/// -/// `SigRef`s are used to declare a function with -/// [`FunctionBuiler::import_function`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.import_function) -/// as well as to make an [indirect function call](super::InstBuilder::call_indirect). -/// -/// `SigRef`s can be created with -/// [`FunctionBuilder::import_signature`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.import_signature). -/// -/// You can retrieve the [`Signature`](super::Signature) that was used to create a `SigRef` with -/// [`FunctionBuilder::signature`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.signature) or -/// [`func.dfg.signatures`](super::dfg::DataFlowGraph::signatures). -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct SigRef(u32); -entity_impl!(SigRef, "sig"); - -impl SigRef { - /// Create a new function signature reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to a [heap](https://en.wikipedia.org/wiki/Memory_management#DYNAMIC). -/// -/// Heaps are used to access dynamically allocated memory through -/// [`heap_addr`](super::InstBuilder::heap_addr). -/// -/// To create a heap, use [`FunctionBuilder::create_heap`](https://docs.rs/cranelift-frontend/*/cranelift_frontend/struct.FunctionBuilder.html#method.create_heap). -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Heap(u32); -entity_impl!(Heap, "heap"); - -impl Heap { - /// Create a new heap reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to a [WebAssembly -/// table](https://developer.mozilla.org/en-US/docs/WebAssembly/Understanding_the_text_format#WebAssembly_tables). -/// -/// `Table`s are used to store a list of function references. -/// They can be created with [`FuncEnvironment::make_table`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.make_table). -/// They can be used with -/// [`FuncEnvironment::translate_call_indirect`](https://docs.rs/cranelift-wasm/*/cranelift_wasm/trait.FuncEnvironment.html#tymethod.translate_call_indirect). -/// -/// While the order is stable, it is arbitrary. -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Table(u32); -entity_impl!(Table, "table"); - -impl Table { - /// Create a new table reference from its number. - /// - /// This method is for use by the parser. - pub fn with_number(n: u32) -> Option { - if n < u32::MAX { - Some(Self(n)) - } else { - None - } - } -} - -/// An opaque reference to any of the entities defined in this module that can appear in CLIF IR. 
-#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum AnyEntity { - /// The whole function. - Function, - /// a basic block. - Block(Block), - /// An instruction. - Inst(Inst), - /// An SSA value. - Value(Value), - /// A stack slot. - StackSlot(StackSlot), - /// A Global value. - GlobalValue(GlobalValue), - /// A jump table. - JumpTable(JumpTable), - /// A constant. - Constant(Constant), - /// An external function. - FuncRef(FuncRef), - /// A function call signature. - SigRef(SigRef), - /// A heap. - Heap(Heap), - /// A table. - Table(Table), - /// A function's stack limit - StackLimit, -} - -impl fmt::Display for AnyEntity { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::Function => write!(f, "function"), - Self::Block(r) => r.fmt(f), - Self::Inst(r) => r.fmt(f), - Self::Value(r) => r.fmt(f), - Self::StackSlot(r) => r.fmt(f), - Self::GlobalValue(r) => r.fmt(f), - Self::JumpTable(r) => r.fmt(f), - Self::Constant(r) => r.fmt(f), - Self::FuncRef(r) => r.fmt(f), - Self::SigRef(r) => r.fmt(f), - Self::Heap(r) => r.fmt(f), - Self::Table(r) => r.fmt(f), - Self::StackLimit => write!(f, "stack_limit"), - } - } -} - -impl fmt::Debug for AnyEntity { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - (self as &dyn fmt::Display).fmt(f) - } -} - -impl From for AnyEntity { - fn from(r: Block) -> Self { - Self::Block(r) - } -} - -impl From for AnyEntity { - fn from(r: Inst) -> Self { - Self::Inst(r) - } -} - -impl From for AnyEntity { - fn from(r: Value) -> Self { - Self::Value(r) - } -} - -impl From for AnyEntity { - fn from(r: StackSlot) -> Self { - Self::StackSlot(r) - } -} - -impl From for AnyEntity { - fn from(r: GlobalValue) -> Self { - Self::GlobalValue(r) - } -} - -impl From for AnyEntity { - fn from(r: JumpTable) -> Self { - Self::JumpTable(r) - } -} - -impl From for AnyEntity { - fn from(r: Constant) -> Self { - Self::Constant(r) - } -} - -impl From for AnyEntity { - fn from(r: FuncRef) -> Self { - Self::FuncRef(r) - } -} - -impl From for AnyEntity { - fn from(r: SigRef) -> Self { - Self::SigRef(r) - } -} - -impl From for AnyEntity { - fn from(r: Heap) -> Self { - Self::Heap(r) - } -} - -impl From for AnyEntity { - fn from(r: Table) -> Self { - Self::Table(r) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::string::ToString; - use core::u32; - - #[test] - fn value_with_number() { - assert_eq!(Value::with_number(0).unwrap().to_string(), "v0"); - assert_eq!(Value::with_number(1).unwrap().to_string(), "v1"); - - assert_eq!(Value::with_number(u32::MAX / 2), None); - assert!(Value::with_number(u32::MAX / 2 - 1).is_some()); - } - - #[test] - fn memory() { - use crate::packed_option::PackedOption; - use core::mem; - // This is the whole point of `PackedOption`. - assert_eq!( - mem::size_of::(), - mem::size_of::>() - ); - } - - #[test] - fn constant_with_number() { - assert_eq!(Constant::with_number(0).unwrap().to_string(), "const0"); - assert_eq!(Constant::with_number(1).unwrap().to_string(), "const1"); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extfunc.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extfunc.rs deleted file mode 100644 index 1a623095a..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extfunc.rs +++ /dev/null @@ -1,474 +0,0 @@ -//! External function calls. -//! -//! To a Cranelift function, all functions are "external". Directly called functions must be -//! 
declared in the preamble, and all function calls must have a signature. -//! -//! This module declares the data types used to represent external functions and call signatures. - -use crate::ir::{ExternalName, SigRef, Type}; -use crate::isa::CallConv; -use crate::machinst::RelocDistance; -use alloc::vec::Vec; -use core::fmt; -use core::str::FromStr; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Function signature. -/// -/// The function signature describes the types of formal parameters and return values along with -/// other details that are needed to call a function correctly. -/// -/// A signature can optionally include ISA-specific ABI information which specifies exactly how -/// arguments and return values are passed. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Signature { - /// The arguments passed to the function. - pub params: Vec, - /// Values returned from the function. - pub returns: Vec, - - /// Calling convention. - pub call_conv: CallConv, -} - -impl Signature { - /// Create a new blank signature. - pub fn new(call_conv: CallConv) -> Self { - Self { - params: Vec::new(), - returns: Vec::new(), - call_conv, - } - } - - /// Clear the signature so it is identical to a fresh one returned by `new()`. - pub fn clear(&mut self, call_conv: CallConv) { - self.params.clear(); - self.returns.clear(); - self.call_conv = call_conv; - } - - /// Find the index of a presumed unique special-purpose parameter. - pub fn special_param_index(&self, purpose: ArgumentPurpose) -> Option { - self.params.iter().rposition(|arg| arg.purpose == purpose) - } - - /// Find the index of a presumed unique special-purpose parameter. - pub fn special_return_index(&self, purpose: ArgumentPurpose) -> Option { - self.returns.iter().rposition(|arg| arg.purpose == purpose) - } - - /// Does this signature have a parameter whose `ArgumentPurpose` is - /// `purpose`? - pub fn uses_special_param(&self, purpose: ArgumentPurpose) -> bool { - self.special_param_index(purpose).is_some() - } - - /// Does this signature have a return whose `ArgumentPurpose` is `purpose`? - pub fn uses_special_return(&self, purpose: ArgumentPurpose) -> bool { - self.special_return_index(purpose).is_some() - } - - /// How many special parameters does this function have? - pub fn num_special_params(&self) -> usize { - self.params - .iter() - .filter(|p| p.purpose != ArgumentPurpose::Normal) - .count() - } - - /// How many special returns does this function have? - pub fn num_special_returns(&self) -> usize { - self.returns - .iter() - .filter(|r| r.purpose != ArgumentPurpose::Normal) - .count() - } - - /// Does this signature take an struct return pointer parameter? - pub fn uses_struct_return_param(&self) -> bool { - self.uses_special_param(ArgumentPurpose::StructReturn) - } - - /// Does this return more than one normal value? 
(Pre-struct return - /// legalization) - pub fn is_multi_return(&self) -> bool { - self.returns - .iter() - .filter(|r| r.purpose == ArgumentPurpose::Normal) - .count() - > 1 - } -} - -fn write_list(f: &mut fmt::Formatter, args: &[AbiParam]) -> fmt::Result { - match args.split_first() { - None => {} - Some((first, rest)) => { - write!(f, "{}", first)?; - for arg in rest { - write!(f, ", {}", arg)?; - } - } - } - Ok(()) -} - -impl fmt::Display for Signature { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(")?; - write_list(f, &self.params)?; - write!(f, ")")?; - if !self.returns.is_empty() { - write!(f, " -> ")?; - write_list(f, &self.returns)?; - } - write!(f, " {}", self.call_conv) - } -} - -/// Function parameter or return value descriptor. -/// -/// This describes the value type being passed to or from a function along with flags that affect -/// how the argument is passed. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct AbiParam { - /// Type of the argument value. - pub value_type: Type, - /// Special purpose of argument, or `Normal`. - pub purpose: ArgumentPurpose, - /// Method for extending argument to a full register. - pub extension: ArgumentExtension, - - /// Was the argument converted to pointer during legalization? - pub legalized_to_pointer: bool, -} - -impl AbiParam { - /// Create a parameter with default flags. - pub fn new(vt: Type) -> Self { - Self { - value_type: vt, - extension: ArgumentExtension::None, - purpose: ArgumentPurpose::Normal, - legalized_to_pointer: false, - } - } - - /// Create a special-purpose parameter that is not (yet) bound to a specific register. - pub fn special(vt: Type, purpose: ArgumentPurpose) -> Self { - Self { - value_type: vt, - extension: ArgumentExtension::None, - purpose, - legalized_to_pointer: false, - } - } - - /// Convert `self` to a parameter with the `uext` flag set. - pub fn uext(self) -> Self { - debug_assert!(self.value_type.is_int(), "uext on {} arg", self.value_type); - Self { - extension: ArgumentExtension::Uext, - ..self - } - } - - /// Convert `self` to a parameter type with the `sext` flag set. - pub fn sext(self) -> Self { - debug_assert!(self.value_type.is_int(), "sext on {} arg", self.value_type); - Self { - extension: ArgumentExtension::Sext, - ..self - } - } -} - -impl fmt::Display for AbiParam { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.value_type)?; - if self.legalized_to_pointer { - write!(f, " ptr")?; - } - match self.extension { - ArgumentExtension::None => {} - ArgumentExtension::Uext => write!(f, " uext")?, - ArgumentExtension::Sext => write!(f, " sext")?, - } - if self.purpose != ArgumentPurpose::Normal { - write!(f, " {}", self.purpose)?; - } - Ok(()) - } -} - -/// Function argument extension options. -/// -/// On some architectures, small integer function arguments and/or return values are extended to -/// the width of a general-purpose register. -/// -/// This attribute specifies how an argument or return value should be extended *if the platform -/// and ABI require it*. Because the frontend (CLIF generator) does not know anything about the -/// particulars of the target's ABI, and the CLIF should be platform-independent, these attributes -/// specify *how* to extend (according to the signedness of the original program) rather than -/// *whether* to extend. 
-/// -/// For example, on x86-64, the SystemV ABI does not require extensions of narrow values, so these -/// `ArgumentExtension` attributes are ignored; but in the Baldrdash (SpiderMonkey) ABI on the same -/// platform, all narrow values *are* extended, so these attributes may lead to extra -/// zero/sign-extend instructions in the generated machine code. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum ArgumentExtension { - /// No extension, high bits are indeterminate. - None, - /// Unsigned extension: high bits in register are 0. - Uext, - /// Signed extension: high bits in register replicate sign bit. - Sext, -} - -/// The special purpose of a function argument. -/// -/// Function arguments and return values are used to pass user program values between functions, -/// but they are also used to represent special registers with significance to the ABI such as -/// frame pointers and callee-saved registers. -/// -/// The argument purpose is used to indicate any special meaning of an argument or return value. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum ArgumentPurpose { - /// A normal user program value passed to or from a function. - Normal, - - /// A C struct passed as argument. - StructArgument(u32), - - /// Struct return pointer. - /// - /// When a function needs to return more data than will fit in registers, the caller passes a - /// pointer to a memory location where the return value can be written. In some ABIs, this - /// struct return pointer is passed in a specific register. - /// - /// This argument kind can also appear as a return value for ABIs that require a function with - /// a `StructReturn` pointer argument to also return that pointer in a register. - StructReturn, - - /// The link register. - /// - /// Most RISC architectures implement calls by saving the return address in a designated - /// register rather than pushing it on the stack. This is represented with a `Link` argument. - /// - /// Similarly, some return instructions expect the return address in a register represented as - /// a `Link` return value. - Link, - - /// The frame pointer. - /// - /// This indicates the frame pointer register which has a special meaning in some ABIs. - /// - /// The frame pointer appears as an argument and as a return value since it is a callee-saved - /// register. - FramePointer, - - /// A callee-saved register. - /// - /// Some calling conventions have registers that must be saved by the callee. These registers - /// are represented as `CalleeSaved` arguments and return values. - CalleeSaved, - - /// A VM context pointer. - /// - /// This is a pointer to a context struct containing details about the current sandbox. It is - /// used as a base pointer for `vmctx` global values. - VMContext, - - /// A signature identifier. - /// - /// This is a special-purpose argument used to identify the calling convention expected by the - /// caller in an indirect call. The callee can verify that the expected signature ID matches. - SignatureId, - - /// A stack limit pointer. - /// - /// This is a pointer to a stack limit. It is used to check the current stack pointer - /// against. Can only appear once in a signature. - StackLimit, - - /// A callee TLS value. - /// - /// In the Baldrdash-2020 calling convention, the stack upon entry to the callee contains the - /// TLS-register values for the caller and the callee. 
This argument is used to provide the - /// value for the callee. - CalleeTLS, - - /// A caller TLS value. - /// - /// In the Baldrdash-2020 calling convention, the stack upon entry to the callee contains the - /// TLS-register values for the caller and the callee. This argument is used to provide the - /// value for the caller. - CallerTLS, -} - -impl fmt::Display for ArgumentPurpose { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match self { - Self::Normal => "normal", - Self::StructArgument(size) => return write!(f, "sarg({})", size), - Self::StructReturn => "sret", - Self::Link => "link", - Self::FramePointer => "fp", - Self::CalleeSaved => "csr", - Self::VMContext => "vmctx", - Self::SignatureId => "sigid", - Self::StackLimit => "stack_limit", - Self::CalleeTLS => "callee_tls", - Self::CallerTLS => "caller_tls", - }) - } -} - -impl FromStr for ArgumentPurpose { - type Err = (); - fn from_str(s: &str) -> Result { - match s { - "normal" => Ok(Self::Normal), - "sret" => Ok(Self::StructReturn), - "link" => Ok(Self::Link), - "fp" => Ok(Self::FramePointer), - "csr" => Ok(Self::CalleeSaved), - "vmctx" => Ok(Self::VMContext), - "sigid" => Ok(Self::SignatureId), - "stack_limit" => Ok(Self::StackLimit), - _ if s.starts_with("sarg(") => { - if !s.ends_with(")") { - return Err(()); - } - // Parse 'sarg(size)' - let size: u32 = s["sarg(".len()..s.len() - 1].parse().map_err(|_| ())?; - Ok(Self::StructArgument(size)) - } - _ => Err(()), - } - } -} - -/// An external function. -/// -/// Information about a function that can be called directly with a direct `call` instruction. -#[derive(Clone, Debug)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct ExtFuncData { - /// Name of the external function. - pub name: ExternalName, - /// Call signature of function. - pub signature: SigRef, - /// Will this function be defined nearby, such that it will always be a certain distance away, - /// after linking? If so, references to it can avoid going through a GOT or PLT. Note that - /// symbols meant to be preemptible cannot be considered colocated. - /// - /// If `true`, some backends may use relocation forms that have limited range. The exact - /// distance depends on the code model in use. Currently on AArch64, for example, Cranelift - /// uses a custom code model supporting up to +/- 128MB displacements. If it is unknown how - /// far away the target will be, it is best not to set the `colocated` flag; in general, this - /// flag is best used when the target is known to be in the same unit of code generation, such - /// as a Wasm module. - /// - /// See the documentation for [`RelocDistance`](crate::machinst::RelocDistance) for more details. A - /// `colocated` flag value of `true` implies `RelocDistance::Near`. - pub colocated: bool, -} - -impl fmt::Display for ExtFuncData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.colocated { - write!(f, "colocated ")?; - } - write!(f, "{} {}", self.name, self.signature) - } -} - -impl ExtFuncData { - /// Return an estimate of the distance to the referred-to function symbol. 
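As an illustrative aside (not taken from the deleted file), a minimal sketch of the textual round-trip implemented by `Display`/`FromStr` for `ArgumentPurpose`, including the `sarg(size)` case parsed explicitly above, assuming the cranelift-codegen 0.82 API:

use cranelift_codegen::ir::ArgumentPurpose;

fn argument_purpose_roundtrip() {
    assert_eq!(ArgumentPurpose::StructReturn.to_string(), "sret");
    assert_eq!("sret".parse(), Ok(ArgumentPurpose::StructReturn));
    // `sarg(N)` carries the struct argument size in bytes.
    assert_eq!(ArgumentPurpose::StructArgument(16).to_string(), "sarg(16)");
    assert_eq!("sarg(16)".parse(), Ok(ArgumentPurpose::StructArgument(16)));
}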
- pub fn reloc_distance(&self) -> RelocDistance { - if self.colocated { - RelocDistance::Near - } else { - RelocDistance::Far - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::ir::types::{B8, F32, I32}; - use alloc::string::ToString; - - #[test] - fn argument_type() { - let t = AbiParam::new(I32); - assert_eq!(t.to_string(), "i32"); - let mut t = t.uext(); - assert_eq!(t.to_string(), "i32 uext"); - assert_eq!(t.sext().to_string(), "i32 sext"); - t.purpose = ArgumentPurpose::StructReturn; - assert_eq!(t.to_string(), "i32 uext sret"); - t.legalized_to_pointer = true; - assert_eq!(t.to_string(), "i32 ptr uext sret"); - } - - #[test] - fn argument_purpose() { - let all_purpose = [ - (ArgumentPurpose::Normal, "normal"), - (ArgumentPurpose::StructReturn, "sret"), - (ArgumentPurpose::Link, "link"), - (ArgumentPurpose::FramePointer, "fp"), - (ArgumentPurpose::CalleeSaved, "csr"), - (ArgumentPurpose::VMContext, "vmctx"), - (ArgumentPurpose::SignatureId, "sigid"), - (ArgumentPurpose::StackLimit, "stack_limit"), - (ArgumentPurpose::StructArgument(42), "sarg(42)"), - ]; - for &(e, n) in &all_purpose { - assert_eq!(e.to_string(), n); - assert_eq!(Ok(e), n.parse()); - } - } - - #[test] - fn call_conv() { - for &cc in &[ - CallConv::Fast, - CallConv::Cold, - CallConv::SystemV, - CallConv::WindowsFastcall, - CallConv::BaldrdashSystemV, - CallConv::BaldrdashWindows, - CallConv::Baldrdash2020, - ] { - assert_eq!(Ok(cc), cc.to_string().parse()) - } - } - - #[test] - fn signatures() { - let mut sig = Signature::new(CallConv::BaldrdashSystemV); - assert_eq!(sig.to_string(), "() baldrdash_system_v"); - sig.params.push(AbiParam::new(I32)); - assert_eq!(sig.to_string(), "(i32) baldrdash_system_v"); - sig.returns.push(AbiParam::new(F32)); - assert_eq!(sig.to_string(), "(i32) -> f32 baldrdash_system_v"); - sig.params.push(AbiParam::new(I32.by(4).unwrap())); - assert_eq!(sig.to_string(), "(i32, i32x4) -> f32 baldrdash_system_v"); - sig.returns.push(AbiParam::new(B8)); - assert_eq!( - sig.to_string(), - "(i32, i32x4) -> f32, b8 baldrdash_system_v" - ); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extname.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extname.rs deleted file mode 100644 index 362cf8c67..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/extname.rs +++ /dev/null @@ -1,167 +0,0 @@ -//! External names. -//! -//! These are identifiers for declaring entities defined outside the current -//! function. The name of an external declaration doesn't have any meaning to -//! Cranelift, which compiles functions independently. - -use crate::ir::LibCall; -use core::cmp; -use core::fmt::{self, Write}; -use core::str::FromStr; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -const TESTCASE_NAME_LENGTH: usize = 16; - -/// The name of an external is either a reference to a user-defined symbol -/// table, or a short sequence of ascii bytes so that test cases do not have -/// to keep track of a symbol table. -/// -/// External names are primarily used as keys by code using Cranelift to map -/// from a `cranelift_codegen::ir::FuncRef` or similar to additional associated -/// data. -/// -/// External names can also serve as a primitive testing and debugging tool. -/// In particular, many `.clif` test files use function names to identify -/// functions. 
-#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum ExternalName { - /// A name in a user-defined symbol table. Cranelift does not interpret - /// these numbers in any way. - User { - /// Arbitrary. - namespace: u32, - /// Arbitrary. - index: u32, - }, - /// A test case function name of up to a hardcoded amount of ascii - /// characters. This is not intended to be used outside test cases. - TestCase { - /// How many of the bytes in `ascii` are valid? - length: u8, - /// Ascii bytes of the name. - ascii: [u8; TESTCASE_NAME_LENGTH], - }, - /// A well-known runtime library function. - LibCall(LibCall), -} - -impl ExternalName { - /// Creates a new external name from a sequence of bytes. Caller is expected - /// to guarantee bytes are only ascii alphanumeric or `_`. - /// - /// # Examples - /// - /// ```rust - /// # use cranelift_codegen::ir::ExternalName; - /// // Create `ExternalName` from a string. - /// let name = ExternalName::testcase("hello"); - /// assert_eq!(name.to_string(), "%hello"); - /// ``` - pub fn testcase>(v: T) -> Self { - let vec = v.as_ref(); - let len = cmp::min(vec.len(), TESTCASE_NAME_LENGTH); - let mut bytes = [0u8; TESTCASE_NAME_LENGTH]; - bytes[0..len].copy_from_slice(&vec[0..len]); - - Self::TestCase { - length: len as u8, - ascii: bytes, - } - } - - /// Create a new external name from user-provided integer indices. - /// - /// # Examples - /// ```rust - /// # use cranelift_codegen::ir::ExternalName; - /// // Create `ExternalName` from integer indices - /// let name = ExternalName::user(123, 456); - /// assert_eq!(name.to_string(), "u123:456"); - /// ``` - pub fn user(namespace: u32, index: u32) -> Self { - Self::User { namespace, index } - } -} - -impl Default for ExternalName { - fn default() -> Self { - Self::user(0, 0) - } -} - -impl fmt::Display for ExternalName { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::User { namespace, index } => write!(f, "u{}:{}", namespace, index), - Self::TestCase { length, ascii } => { - f.write_char('%')?; - for byte in ascii.iter().take(length as usize) { - f.write_char(*byte as char)?; - } - Ok(()) - } - Self::LibCall(lc) => write!(f, "%{}", lc), - } - } -} - -impl FromStr for ExternalName { - type Err = (); - - fn from_str(s: &str) -> Result { - // Try to parse as a libcall name, otherwise it's a test case. 
- match s.parse() { - Ok(lc) => Ok(Self::LibCall(lc)), - Err(_) => Ok(Self::testcase(s.as_bytes())), - } - } -} - -#[cfg(test)] -mod tests { - use super::ExternalName; - use crate::ir::LibCall; - use alloc::string::ToString; - use core::u32; - - #[test] - fn display_testcase() { - assert_eq!(ExternalName::testcase("").to_string(), "%"); - assert_eq!(ExternalName::testcase("x").to_string(), "%x"); - assert_eq!(ExternalName::testcase("x_1").to_string(), "%x_1"); - assert_eq!( - ExternalName::testcase("longname12345678").to_string(), - "%longname12345678" - ); - // Constructor will silently drop bytes beyond the 16th - assert_eq!( - ExternalName::testcase("longname123456789").to_string(), - "%longname12345678" - ); - } - - #[test] - fn display_user() { - assert_eq!(ExternalName::user(0, 0).to_string(), "u0:0"); - assert_eq!(ExternalName::user(1, 1).to_string(), "u1:1"); - assert_eq!( - ExternalName::user(u32::MAX, u32::MAX).to_string(), - "u4294967295:4294967295" - ); - } - - #[test] - fn parsing() { - assert_eq!( - "FloorF32".parse(), - Ok(ExternalName::LibCall(LibCall::FloorF32)) - ); - assert_eq!( - ExternalName::LibCall(LibCall::FloorF32).to_string(), - "%FloorF32" - ); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/function.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/function.rs deleted file mode 100644 index 0092a3330..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/function.rs +++ /dev/null @@ -1,356 +0,0 @@ -//! Intermediate representation of a function. -//! -//! The `Function` struct defined in this module owns all of its basic blocks and -//! instructions. - -use crate::entity::{PrimaryMap, SecondaryMap}; -use crate::ir; -use crate::ir::JumpTables; -use crate::ir::{ - instructions::BranchInfo, Block, ExtFuncData, FuncRef, GlobalValue, GlobalValueData, Heap, - HeapData, Inst, InstructionData, JumpTable, JumpTableData, Opcode, SigRef, StackSlot, - StackSlotData, Table, TableData, -}; -use crate::ir::{DataFlowGraph, ExternalName, Layout, Signature}; -use crate::ir::{SourceLocs, StackSlots}; -use crate::isa::CallConv; -use crate::value_label::ValueLabelsRanges; -use crate::write::write_function; -#[cfg(feature = "enable-serde")] -use alloc::string::String; -use core::fmt; - -#[cfg(feature = "enable-serde")] -use serde::de::{Deserializer, Error}; -#[cfg(feature = "enable-serde")] -use serde::ser::Serializer; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// A version marker used to ensure that serialized clif ir is never deserialized with a -/// different version of Cranelift. -#[derive(Copy, Clone, Debug)] -pub struct VersionMarker; - -#[cfg(feature = "enable-serde")] -impl Serialize for VersionMarker { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - crate::VERSION.serialize(serializer) - } -} - -#[cfg(feature = "enable-serde")] -impl<'de> Deserialize<'de> for VersionMarker { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let version = String::deserialize(deserializer)?; - if version != crate::VERSION { - return Err(D::Error::custom(&format!( - "Expected a clif ir function for version {}, found one for version {}", - crate::VERSION, - version, - ))); - } - Ok(VersionMarker) - } -} - -/// -/// Functions can be cloned, but it is not a very fast operation. -/// The clone will have all the same entity numbers as the original. 
-#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Function { - /// A version marker used to ensure that serialized clif ir is never deserialized with a - /// different version of Cranelift. - // Note: This must be the first field to ensure that Serde will deserialize it before - // attempting to deserialize other fields that are potentially changed between versions. - pub version_marker: VersionMarker, - - /// Name of this function. Mostly used by `.clif` files. - pub name: ExternalName, - - /// Signature of this function. - pub signature: Signature, - - /// Stack slots allocated in this function. - pub stack_slots: StackSlots, - - /// Global values referenced. - pub global_values: PrimaryMap, - - /// Heaps referenced. - pub heaps: PrimaryMap, - - /// Tables referenced. - pub tables: PrimaryMap, - - /// Jump tables used in this function. - pub jump_tables: JumpTables, - - /// Data flow graph containing the primary definition of all instructions, blocks and values. - pub dfg: DataFlowGraph, - - /// Layout of blocks and instructions in the function body. - pub layout: Layout, - - /// Source locations. - /// - /// Track the original source location for each instruction. The source locations are not - /// interpreted by Cranelift, only preserved. - pub srclocs: SourceLocs, - - /// An optional global value which represents an expression evaluating to - /// the stack limit for this function. This `GlobalValue` will be - /// interpreted in the prologue, if necessary, to insert a stack check to - /// ensure that a trap happens if the stack pointer goes below the - /// threshold specified here. - pub stack_limit: Option, -} - -impl Function { - /// Create a function with the given name and signature. - pub fn with_name_signature(name: ExternalName, sig: Signature) -> Self { - Self { - version_marker: VersionMarker, - name, - signature: sig, - stack_slots: StackSlots::new(), - global_values: PrimaryMap::new(), - heaps: PrimaryMap::new(), - tables: PrimaryMap::new(), - jump_tables: PrimaryMap::new(), - dfg: DataFlowGraph::new(), - layout: Layout::new(), - srclocs: SecondaryMap::new(), - stack_limit: None, - } - } - - /// Clear all data structures in this function. - pub fn clear(&mut self) { - self.signature.clear(CallConv::Fast); - self.stack_slots.clear(); - self.global_values.clear(); - self.heaps.clear(); - self.tables.clear(); - self.jump_tables.clear(); - self.dfg.clear(); - self.layout.clear(); - self.srclocs.clear(); - self.stack_limit = None; - } - - /// Create a new empty, anonymous function with a Fast calling convention. - pub fn new() -> Self { - Self::with_name_signature(ExternalName::default(), Signature::new(CallConv::Fast)) - } - - /// Creates a jump table in the function, to be used by `br_table` instructions. - pub fn create_jump_table(&mut self, data: JumpTableData) -> JumpTable { - self.jump_tables.push(data) - } - - /// Creates a stack slot in the function, to be used by `stack_load`, `stack_store` and - /// `stack_addr` instructions. - pub fn create_stack_slot(&mut self, data: StackSlotData) -> StackSlot { - self.stack_slots.push(data) - } - - /// Adds a signature which can later be used to declare an external function import. - pub fn import_signature(&mut self, signature: Signature) -> SigRef { - self.dfg.signatures.push(signature) - } - - /// Declare an external function import. 
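As an illustrative aside (not taken from the deleted file), a minimal sketch of assembling an empty `Function` and registering a callee signature, using only constructors shown in this file and in `extfunc.rs` above, assuming the cranelift-codegen 0.82 API:

use cranelift_codegen::ir::{types::I32, AbiParam, ExternalName, Function, Signature};
use cranelift_codegen::isa::CallConv;

fn build_function_skeleton() -> Function {
    let mut sig = Signature::new(CallConv::SystemV);
    sig.params.push(AbiParam::new(I32));
    sig.returns.push(AbiParam::new(I32));

    let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);

    // Signatures for external callees live in the data flow graph.
    let callee_sig = Signature::new(CallConv::SystemV);
    let _sig_ref = func.import_signature(callee_sig);
    func
}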
- pub fn import_function(&mut self, data: ExtFuncData) -> FuncRef { - self.dfg.ext_funcs.push(data) - } - - /// Declares a global value accessible to the function. - pub fn create_global_value(&mut self, data: GlobalValueData) -> GlobalValue { - self.global_values.push(data) - } - - /// Declares a heap accessible to the function. - pub fn create_heap(&mut self, data: HeapData) -> Heap { - self.heaps.push(data) - } - - /// Declares a table accessible to the function. - pub fn create_table(&mut self, data: TableData) -> Table { - self.tables.push(data) - } - - /// Return an object that can display this function with correct ISA-specific annotations. - pub fn display(&self) -> DisplayFunction<'_> { - DisplayFunction(self, Default::default()) - } - - /// Return an object that can display this function with correct ISA-specific annotations. - pub fn display_with<'a>( - &'a self, - annotations: DisplayFunctionAnnotations<'a>, - ) -> DisplayFunction<'a> { - DisplayFunction(self, annotations) - } - - /// Find a presumed unique special-purpose function parameter value. - /// - /// Returns the value of the last `purpose` parameter, or `None` if no such parameter exists. - pub fn special_param(&self, purpose: ir::ArgumentPurpose) -> Option { - let entry = self.layout.entry_block().expect("Function is empty"); - self.signature - .special_param_index(purpose) - .map(|i| self.dfg.block_params(entry)[i]) - } - - /// Starts collection of debug information. - pub fn collect_debug_info(&mut self) { - self.dfg.collect_debug_info(); - } - - /// Changes the destination of a jump or branch instruction. - /// Does nothing if called with a non-jump or non-branch instruction. - /// - /// Note that this method ignores multi-destination branches like `br_table`. - pub fn change_branch_destination(&mut self, inst: Inst, new_dest: Block) { - match self.dfg[inst].branch_destination_mut() { - None => (), - Some(inst_dest) => *inst_dest = new_dest, - } - } - - /// Rewrite the branch destination to `new_dest` if the destination matches `old_dest`. - /// Does nothing if called with a non-jump or non-branch instruction. - /// - /// Unlike [change_branch_destination](Function::change_branch_destination), this method rewrite the destinations of - /// multi-destination branches like `br_table`. - pub fn rewrite_branch_destination(&mut self, inst: Inst, old_dest: Block, new_dest: Block) { - match self.dfg.analyze_branch(inst) { - BranchInfo::SingleDest(dest, ..) => { - if dest == old_dest { - self.change_branch_destination(inst, new_dest); - } - } - - BranchInfo::Table(table, default_dest) => { - self.jump_tables[table].iter_mut().for_each(|entry| { - if *entry == old_dest { - *entry = new_dest; - } - }); - - if default_dest == Some(old_dest) { - match &mut self.dfg[inst] { - InstructionData::BranchTable { destination, .. } => { - *destination = new_dest; - } - _ => panic!( - "Unexpected instruction {} having default destination", - self.dfg.display_inst(inst) - ), - } - } - } - - BranchInfo::NotABranch => {} - } - } - - /// Checks that the specified block can be encoded as a basic block. - /// - /// On error, returns the first invalid instruction and an error message. - pub fn is_block_basic(&self, block: Block) -> Result<(), (Inst, &'static str)> { - let dfg = &self.dfg; - let inst_iter = self.layout.block_insts(block); - - // Ignore all instructions prior to the first branch. 
- let mut inst_iter = inst_iter.skip_while(|&inst| !dfg[inst].opcode().is_branch()); - - // A conditional branch is permitted in a basic block only when followed - // by a terminal jump instruction. - if let Some(_branch) = inst_iter.next() { - if let Some(next) = inst_iter.next() { - match dfg[next].opcode() { - Opcode::Jump => (), - _ => return Err((next, "post-branch instruction not jump")), - } - } - } - - Ok(()) - } - - /// Returns true if the function is function that doesn't call any other functions. This is not - /// to be confused with a "leaf function" in Windows terminology. - pub fn is_leaf(&self) -> bool { - // Conservative result: if there's at least one function signature referenced in this - // function, assume it is not a leaf. - self.dfg.signatures.is_empty() - } - - /// Replace the `dst` instruction's data with the `src` instruction's data - /// and then remove `src`. - /// - /// `src` and its result values should not be used at all, as any uses would - /// be left dangling after calling this method. - /// - /// `src` and `dst` must have the same number of resulting values, and - /// `src`'s i^th value must have the same type as `dst`'s i^th value. - pub fn transplant_inst(&mut self, dst: Inst, src: Inst) { - debug_assert_eq!( - self.dfg.inst_results(dst).len(), - self.dfg.inst_results(src).len() - ); - debug_assert!(self - .dfg - .inst_results(dst) - .iter() - .zip(self.dfg.inst_results(src)) - .all(|(a, b)| self.dfg.value_type(*a) == self.dfg.value_type(*b))); - - self.dfg[dst] = self.dfg[src].clone(); - self.layout.remove_inst(src); - } - - /// Size occupied by all stack slots associated with this function. - /// - /// Does not include any padding necessary due to offsets - pub fn stack_size(&self) -> u32 { - self.stack_slots.values().map(|ss| ss.size).sum() - } -} - -/// Additional annotations for function display. -#[derive(Default)] -pub struct DisplayFunctionAnnotations<'a> { - /// Enable value labels annotations. - pub value_ranges: Option<&'a ValueLabelsRanges>, -} - -/// Wrapper type capable of displaying a `Function` with correct ISA annotations. -pub struct DisplayFunction<'a>(&'a Function, DisplayFunctionAnnotations<'a>); - -impl<'a> fmt::Display for DisplayFunction<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write_function(fmt, self.0) - } -} - -impl fmt::Display for Function { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write_function(fmt, self) - } -} - -impl fmt::Debug for Function { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write_function(fmt, self) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/globalvalue.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/globalvalue.rs deleted file mode 100644 index e70f8221f..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/globalvalue.rs +++ /dev/null @@ -1,159 +0,0 @@ -//! Global values. - -use crate::ir::immediates::{Imm64, Offset32}; -use crate::ir::{ExternalName, GlobalValue, Type}; -use crate::isa::TargetIsa; -use crate::machinst::RelocDistance; -use core::fmt; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Information about a global value declaration. -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum GlobalValueData { - /// Value is the address of the VM context struct. - VMContext, - - /// Value is pointed to by another global value. 
- /// - /// The `base` global value is assumed to contain a pointer. This global value is computed - /// by loading from memory at that pointer value. The memory must be accessible, and - /// naturally aligned to hold a value of the type. The data at this address is assumed - /// to never change while the current function is executing. - Load { - /// The base pointer global value. - base: GlobalValue, - - /// Offset added to the base pointer before doing the load. - offset: Offset32, - - /// Type of the loaded value. - global_type: Type, - - /// Specifies whether the memory that this refers to is readonly, allowing for the - /// elimination of redundant loads. - readonly: bool, - }, - - /// Value is an offset from another global value. - IAddImm { - /// The base pointer global value. - base: GlobalValue, - - /// Byte offset to be added to the value. - offset: Imm64, - - /// Type of the iadd. - global_type: Type, - }, - - /// Value is symbolic, meaning it's a name which will be resolved to an - /// actual value later (eg. by linking). Cranelift itself does not interpret - /// this name; it's used by embedders to link with other data structures. - /// - /// For now, symbolic values always have pointer type, and represent - /// addresses, however in the future they could be used to represent other - /// things as well. - Symbol { - /// The symbolic name. - name: ExternalName, - - /// Offset from the symbol. This can be used instead of IAddImm to represent folding an - /// offset into a symbol. - offset: Imm64, - - /// Will this symbol be defined nearby, such that it will always be a certain distance - /// away, after linking? If so, references to it can avoid going through a GOT. Note that - /// symbols meant to be preemptible cannot be colocated. - /// - /// If `true`, some backends may use relocation forms that have limited range: for example, - /// a +/- 2^27-byte range on AArch64. See the documentation for - /// [`RelocDistance`](crate::machinst::RelocDistance) for more details. - colocated: bool, - - /// Does this symbol refer to a thread local storage value? - tls: bool, - }, -} - -impl GlobalValueData { - /// Assume that `self` is an `GlobalValueData::Symbol` and return its name. - pub fn symbol_name(&self) -> &ExternalName { - match *self { - Self::Symbol { ref name, .. } => name, - _ => panic!("only symbols have names"), - } - } - - /// Return the type of this global. - pub fn global_type(&self, isa: &dyn TargetIsa) -> Type { - match *self { - Self::VMContext { .. } | Self::Symbol { .. } => isa.pointer_type(), - Self::IAddImm { global_type, .. } | Self::Load { global_type, .. } => global_type, - } - } - - /// If this global references a symbol, return an estimate of the relocation distance, - /// based on the `colocated` flag. - pub fn maybe_reloc_distance(&self) -> Option { - match self { - &GlobalValueData::Symbol { - colocated: true, .. - } => Some(RelocDistance::Near), - &GlobalValueData::Symbol { - colocated: false, .. 
- } => Some(RelocDistance::Far), - _ => None, - } - } -} - -impl fmt::Display for GlobalValueData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::VMContext => write!(f, "vmctx"), - Self::Load { - base, - offset, - global_type, - readonly, - } => write!( - f, - "load.{} notrap aligned {}{}{}", - global_type, - if readonly { "readonly " } else { "" }, - base, - offset - ), - Self::IAddImm { - global_type, - base, - offset, - } => write!(f, "iadd_imm.{} {}, {}", global_type, base, offset), - Self::Symbol { - ref name, - offset, - colocated, - tls, - } => { - write!( - f, - "symbol {}{}{}", - if colocated { "colocated " } else { "" }, - if tls { "tls " } else { "" }, - name - )?; - let offset_val: i64 = offset.into(); - if offset_val > 0 { - write!(f, "+")?; - } - if offset_val != 0 { - write!(f, "{}", offset)?; - } - Ok(()) - } - } - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/heap.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/heap.rs deleted file mode 100644 index 91aabccaa..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/heap.rs +++ /dev/null @@ -1,67 +0,0 @@ -//! Heaps. - -use crate::ir::immediates::Uimm64; -use crate::ir::{GlobalValue, Type}; -use core::fmt; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Information about a heap declaration. -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct HeapData { - /// The address of the start of the heap's storage. - pub base: GlobalValue, - - /// Guaranteed minimum heap size in bytes. Heap accesses before `min_size` don't need bounds - /// checking. - pub min_size: Uimm64, - - /// Size in bytes of the offset-guard pages following the heap. - pub offset_guard_size: Uimm64, - - /// Heap style, with additional style-specific info. - pub style: HeapStyle, - - /// The index type for the heap. - pub index_type: Type, -} - -/// Style of heap including style-specific information. -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum HeapStyle { - /// A dynamic heap can be relocated to a different base address when it is grown. - Dynamic { - /// Global value providing the current bound of the heap in bytes. - bound_gv: GlobalValue, - }, - - /// A static heap has a fixed base address and a number of not-yet-allocated pages before the - /// offset-guard pages. - Static { - /// Heap bound in bytes. The offset-guard pages are allocated after the bound. - bound: Uimm64, - }, -} - -impl fmt::Display for HeapData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match self.style { - HeapStyle::Dynamic { .. } => "dynamic", - HeapStyle::Static { .. } => "static", - })?; - - write!(f, " {}, min {}", self.base, self.min_size)?; - match self.style { - HeapStyle::Dynamic { bound_gv } => write!(f, ", bound {}", bound_gv)?, - HeapStyle::Static { bound } => write!(f, ", bound {}", bound)?, - } - write!( - f, - ", offset_guard {}, index_type {}", - self.offset_guard_size, self.index_type - ) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/immediates.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/immediates.rs deleted file mode 100644 index ca9f8a978..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/immediates.rs +++ /dev/null @@ -1,1330 +0,0 @@ -//! Immediate operands for Cranelift instructions -//! -//! 
This module defines the types of immediate operands that can appear on Cranelift instructions. -//! Each type here should have a corresponding definition in the -//! `cranelift-codegen/meta/src/shared/immediates` crate in the meta language. - -use alloc::vec::Vec; -use core::cmp::Ordering; -use core::convert::TryFrom; -use core::fmt::{self, Display, Formatter}; -use core::str::FromStr; -use core::{i32, u32}; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Convert a type into a vector of bytes; all implementors in this file must use little-endian -/// orderings of bytes to match WebAssembly's little-endianness. -pub trait IntoBytes { - /// Return the little-endian byte representation of the implementing type. - fn into_bytes(self) -> Vec; -} - -impl IntoBytes for u8 { - fn into_bytes(self) -> Vec { - vec![self] - } -} - -impl IntoBytes for i8 { - fn into_bytes(self) -> Vec { - vec![self as u8] - } -} - -impl IntoBytes for i16 { - fn into_bytes(self) -> Vec { - self.to_le_bytes().to_vec() - } -} - -impl IntoBytes for i32 { - fn into_bytes(self) -> Vec { - self.to_le_bytes().to_vec() - } -} - -impl IntoBytes for Vec { - fn into_bytes(self) -> Vec { - self - } -} - -/// 64-bit immediate signed integer operand. -/// -/// An `Imm64` operand can also be used to represent immediate values of smaller integer types by -/// sign-extending to `i64`. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Imm64(i64); - -impl Imm64 { - /// Create a new `Imm64` representing the signed number `x`. - pub fn new(x: i64) -> Self { - Self(x) - } - - /// Return self negated. - pub fn wrapping_neg(self) -> Self { - Self(self.0.wrapping_neg()) - } - - /// Returns the value of this immediate. - pub fn bits(&self) -> i64 { - self.0 - } - - /// Sign extend this immediate as if it were a signed integer of the given - /// power-of-two width. - pub fn sign_extend_from_width(&mut self, bit_width: u16) { - debug_assert!(bit_width.is_power_of_two()); - - if bit_width >= 64 { - return; - } - - let bit_width = i64::from(bit_width); - let delta = 64 - bit_width; - let sign_extended = (self.0 << delta) >> delta; - *self = Imm64(sign_extended); - } -} - -impl From for i64 { - fn from(val: Imm64) -> i64 { - val.0 - } -} - -impl IntoBytes for Imm64 { - fn into_bytes(self) -> Vec { - self.0.to_le_bytes().to_vec() - } -} - -impl From for Imm64 { - fn from(x: i64) -> Self { - Self(x) - } -} - -impl Display for Imm64 { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let x = self.0; - if -10_000 < x && x < 10_000 { - // Use decimal for small numbers. - write!(f, "{}", x) - } else { - write_hex(x as u64, f) - } - } -} - -/// Parse a 64-bit signed number. -fn parse_i64(s: &str) -> Result { - let negative = s.starts_with('-'); - let s2 = if negative || s.starts_with('+') { - &s[1..] - } else { - s - }; - - let mut value = parse_u64(s2)?; - - // We support the range-and-a-half from -2^63 .. 2^64-1. - if negative { - value = value.wrapping_neg(); - // Don't allow large negative values to wrap around and become positive. - if value as i64 > 0 { - return Err("Negative number too small"); - } - } - Ok(value as i64) -} - -impl FromStr for Imm64 { - type Err = &'static str; - - // Parse a decimal or hexadecimal `Imm64`, formatted as above. - fn from_str(s: &str) -> Result { - parse_i64(s).map(Self::new) - } -} - -/// 64-bit immediate unsigned integer operand. 
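As an illustrative aside (not taken from the deleted file), a minimal sketch of `Imm64` behavior as implemented above: small values print in decimal, large ones in grouped hexadecimal, and `sign_extend_from_width` reinterprets the low bits as a narrower signed integer, assuming the cranelift-codegen 0.82 API:

use cranelift_codegen::ir::immediates::Imm64;

fn imm64_examples() {
    assert_eq!(Imm64::new(-42).to_string(), "-42");
    assert_eq!(Imm64::new(0x0012_3456).to_string(), "0x0012_3456");

    // 0xff interpreted as an 8-bit signed value is -1.
    let mut imm = Imm64::new(0xff);
    imm.sign_extend_from_width(8);
    assert_eq!(imm.bits(), -1);
}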
-/// -/// A `Uimm64` operand can also be used to represent immediate values of smaller integer types by -/// zero-extending to `i64`. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Uimm64(u64); - -impl Uimm64 { - /// Create a new `Uimm64` representing the unsigned number `x`. - pub fn new(x: u64) -> Self { - Self(x) - } - - /// Return self negated. - pub fn wrapping_neg(self) -> Self { - Self(self.0.wrapping_neg()) - } -} - -impl From for u64 { - fn from(val: Uimm64) -> u64 { - val.0 - } -} - -impl From for Uimm64 { - fn from(x: u64) -> Self { - Self(x) - } -} - -/// Hexadecimal with a multiple of 4 digits and group separators: -/// -/// 0xfff0 -/// 0x0001_ffff -/// 0xffff_ffff_fff8_4400 -/// -fn write_hex(x: u64, f: &mut Formatter) -> fmt::Result { - let mut pos = (64 - x.leading_zeros() - 1) & 0xf0; - write!(f, "0x{:04x}", (x >> pos) & 0xffff)?; - while pos > 0 { - pos -= 16; - write!(f, "_{:04x}", (x >> pos) & 0xffff)?; - } - Ok(()) -} - -impl Display for Uimm64 { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let x = self.0; - if x < 10_000 { - // Use decimal for small numbers. - write!(f, "{}", x) - } else { - write_hex(x, f) - } - } -} - -/// Parse a 64-bit unsigned number. -fn parse_u64(s: &str) -> Result { - let mut value: u64 = 0; - let mut digits = 0; - - if s.starts_with("-0x") { - return Err("Invalid character in hexadecimal number"); - } else if s.starts_with("0x") { - // Hexadecimal. - for ch in s[2..].chars() { - match ch.to_digit(16) { - Some(digit) => { - digits += 1; - if digits > 16 { - return Err("Too many hexadecimal digits"); - } - // This can't overflow given the digit limit. - value = (value << 4) | u64::from(digit); - } - None => { - // Allow embedded underscores, but fail on anything else. - if ch != '_' { - return Err("Invalid character in hexadecimal number"); - } - } - } - } - } else { - // Decimal number, possibly negative. - for ch in s.chars() { - match ch.to_digit(16) { - Some(digit) => { - digits += 1; - match value.checked_mul(10) { - None => return Err("Too large decimal number"), - Some(v) => value = v, - } - match value.checked_add(u64::from(digit)) { - None => return Err("Too large decimal number"), - Some(v) => value = v, - } - } - None => { - // Allow embedded underscores, but fail on anything else. - if ch != '_' { - return Err("Invalid character in decimal number"); - } - } - } - } - } - - if digits == 0 { - return Err("No digits in number"); - } - - Ok(value) -} - -impl FromStr for Uimm64 { - type Err = &'static str; - - // Parse a decimal or hexadecimal `Uimm64`, formatted as above. - fn from_str(s: &str) -> Result { - parse_u64(s).map(Self::new) - } -} - -/// 8-bit unsigned integer immediate operand. -/// -/// This is used to indicate lane indexes typically. -pub type Uimm8 = u8; - -/// A 32-bit unsigned integer immediate operand. -/// -/// This is used to represent sizes of memory objects. 
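As an illustrative aside (not taken from the deleted file), a minimal sketch of `Uimm64` formatting and parsing as implemented by `write_hex`/`parse_u64` above: hexadecimal output is grouped into 4-digit chunks and underscores are accepted on input, assuming the cranelift-codegen 0.82 API:

use cranelift_codegen::ir::immediates::Uimm64;

fn uimm64_examples() {
    // Values below 10_000 print in decimal, larger ones in grouped hex.
    assert_eq!(Uimm64::new(9_999).to_string(), "9999");
    assert_eq!(Uimm64::new(0xffff_ffff).to_string(), "0xffff_ffff");

    // Embedded underscores are ignored when parsing hexadecimal input.
    let parsed: Uimm64 = "0x0001_ffff".parse().unwrap();
    assert_eq!(u64::from(parsed), 0x1_ffff);
}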
-#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Uimm32(u32); - -impl From for u32 { - fn from(val: Uimm32) -> u32 { - val.0 - } -} - -impl From for u64 { - fn from(val: Uimm32) -> u64 { - val.0.into() - } -} - -impl From for i64 { - fn from(val: Uimm32) -> i64 { - i64::from(val.0) - } -} - -impl From for Uimm32 { - fn from(x: u32) -> Self { - Self(x) - } -} - -impl Display for Uimm32 { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - if self.0 < 10_000 { - write!(f, "{}", self.0) - } else { - write_hex(u64::from(self.0), f) - } - } -} - -impl FromStr for Uimm32 { - type Err = &'static str; - - // Parse a decimal or hexadecimal `Uimm32`, formatted as above. - fn from_str(s: &str) -> Result { - parse_i64(s).and_then(|x| { - if 0 <= x && x <= i64::from(u32::MAX) { - Ok(Self(x as u32)) - } else { - Err("Uimm32 out of range") - } - }) - } -} - -/// A 128-bit immediate operand. -/// -/// This is used as an immediate value in SIMD instructions. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct V128Imm(pub [u8; 16]); - -impl V128Imm { - /// Iterate over the bytes in the constant. - pub fn bytes(&self) -> impl Iterator { - self.0.iter() - } - - /// Convert the immediate into a vector. - pub fn to_vec(self) -> Vec { - self.0.to_vec() - } - - /// Convert the immediate into a slice. - pub fn as_slice(&self) -> &[u8] { - &self.0[..] - } -} - -impl From<&[u8]> for V128Imm { - fn from(slice: &[u8]) -> Self { - assert_eq!(slice.len(), 16); - let mut buffer = [0; 16]; - buffer.copy_from_slice(slice); - Self(buffer) - } -} - -impl From for V128Imm { - fn from(val: u128) -> Self { - V128Imm(val.to_le_bytes()) - } -} - -/// 32-bit signed immediate offset. -/// -/// This is used to encode an immediate offset for load/store instructions. All supported ISAs have -/// a maximum load/store offset that fits in an `i32`. -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Offset32(i32); - -impl Offset32 { - /// Create a new `Offset32` representing the signed number `x`. - pub fn new(x: i32) -> Self { - Self(x) - } - - /// Create a new `Offset32` representing the signed number `x` if possible. - pub fn try_from_i64(x: i64) -> Option { - let x = i32::try_from(x).ok()?; - Some(Self::new(x)) - } - - /// Add in the signed number `x` if possible. - pub fn try_add_i64(self, x: i64) -> Option { - let x = i32::try_from(x).ok()?; - let ret = self.0.checked_add(x)?; - Some(Self::new(ret)) - } -} - -impl From for i32 { - fn from(val: Offset32) -> i32 { - val.0 - } -} - -impl From for i64 { - fn from(val: Offset32) -> i64 { - i64::from(val.0) - } -} - -impl From for Offset32 { - fn from(x: i32) -> Self { - Self(x) - } -} - -impl Display for Offset32 { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - // 0 displays as an empty offset. - if self.0 == 0 { - return Ok(()); - } - - // Always include a sign. - write!(f, "{}", if self.0 < 0 { '-' } else { '+' })?; - - let val = i64::from(self.0).abs(); - if val < 10_000 { - write!(f, "{}", val) - } else { - write_hex(val as u64, f) - } - } -} - -impl FromStr for Offset32 { - type Err = &'static str; - - // Parse a decimal or hexadecimal `Offset32`, formatted as above. 
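As an illustrative aside (not taken from the deleted file), a minimal sketch of `Offset32` display and parsing: zero renders as an empty string, non-zero offsets always carry an explicit sign, and the parser (continued just below) requires that leading sign, assuming the cranelift-codegen 0.82 API:

use cranelift_codegen::ir::immediates::Offset32;

fn offset32_examples() {
    assert_eq!(Offset32::new(0).to_string(), "");
    assert_eq!(Offset32::new(16).to_string(), "+16");
    assert_eq!(Offset32::new(-8).to_string(), "-8");

    let parsed: Offset32 = "+16".parse().unwrap();
    assert_eq!(i32::from(parsed), 16);
    // A bare "16" is rejected: offsets must begin with '+' or '-'.
    assert!("16".parse::<Offset32>().is_err());
}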
- fn from_str(s: &str) -> Result { - if !(s.starts_with('-') || s.starts_with('+')) { - return Err("Offset must begin with sign"); - } - parse_i64(s).and_then(|x| { - if i64::from(i32::MIN) <= x && x <= i64::from(i32::MAX) { - Ok(Self::new(x as i32)) - } else { - Err("Offset out of range") - } - }) - } -} - -/// An IEEE binary32 immediate floating point value, represented as a u32 -/// containing the bit pattern. -/// -/// All bit patterns are allowed. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -#[repr(C)] -pub struct Ieee32(u32); - -/// An IEEE binary64 immediate floating point value, represented as a u64 -/// containing the bit pattern. -/// -/// All bit patterns are allowed. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -#[repr(C)] -pub struct Ieee64(u64); - -/// Format a floating point number in a way that is reasonably human-readable, and that can be -/// converted back to binary without any rounding issues. The hexadecimal formatting of normal and -/// subnormal numbers is compatible with C99 and the `printf "%a"` format specifier. The NaN and Inf -/// formats are not supported by C99. -/// -/// The encoding parameters are: -/// -/// w - exponent field width in bits -/// t - trailing significand field width in bits -/// -fn format_float(bits: u64, w: u8, t: u8, f: &mut Formatter) -> fmt::Result { - debug_assert!(w > 0 && w <= 16, "Invalid exponent range"); - debug_assert!(1 + w + t <= 64, "Too large IEEE format for u64"); - debug_assert!((t + w + 1).is_power_of_two(), "Unexpected IEEE format size"); - - let max_e_bits = (1u64 << w) - 1; - let t_bits = bits & ((1u64 << t) - 1); // Trailing significand. - let e_bits = (bits >> t) & max_e_bits; // Biased exponent. - let sign_bit = (bits >> (w + t)) & 1; - - let bias: i32 = (1 << (w - 1)) - 1; - let e = e_bits as i32 - bias; // Unbiased exponent. - let emin = 1 - bias; // Minimum exponent. - - // How many hexadecimal digits are needed for the trailing significand? - let digits = (t + 3) / 4; - // Trailing significand left-aligned in `digits` hexadecimal digits. - let left_t_bits = t_bits << (4 * digits - t); - - // All formats share the leading sign. - if sign_bit != 0 { - write!(f, "-")?; - } - - if e_bits == 0 { - if t_bits == 0 { - // Zero. - write!(f, "0.0") - } else { - // Subnormal. - write!( - f, - "0x0.{0:01$x}p{2}", - left_t_bits, - usize::from(digits), - emin - ) - } - } else if e_bits == max_e_bits { - // Always print a `+` or `-` sign for these special values. - // This makes them easier to parse as they can't be confused as identifiers. - if sign_bit == 0 { - write!(f, "+")?; - } - if t_bits == 0 { - // Infinity. - write!(f, "Inf") - } else { - // NaN. - let payload = t_bits & ((1 << (t - 1)) - 1); - if t_bits & (1 << (t - 1)) != 0 { - // Quiet NaN. - if payload != 0 { - write!(f, "NaN:0x{:x}", payload) - } else { - write!(f, "NaN") - } - } else { - // Signaling NaN. - write!(f, "sNaN:0x{:x}", payload) - } - } - } else { - // Normal number. - write!(f, "0x1.{0:01$x}p{2}", left_t_bits, usize::from(digits), e) - } -} - -/// Parse a float using the same format as `format_float` above. 
-/// -/// The encoding parameters are: -/// -/// w - exponent field width in bits -/// t - trailing significand field width in bits -/// -fn parse_float(s: &str, w: u8, t: u8) -> Result { - debug_assert!(w > 0 && w <= 16, "Invalid exponent range"); - debug_assert!(1 + w + t <= 64, "Too large IEEE format for u64"); - debug_assert!((t + w + 1).is_power_of_two(), "Unexpected IEEE format size"); - - let (sign_bit, s2) = if s.starts_with('-') { - (1u64 << (t + w), &s[1..]) - } else if s.starts_with('+') { - (0, &s[1..]) - } else { - (0, s) - }; - - if !s2.starts_with("0x") { - let max_e_bits = ((1u64 << w) - 1) << t; - let quiet_bit = 1u64 << (t - 1); - - // The only decimal encoding allowed is 0. - if s2 == "0.0" { - return Ok(sign_bit); - } - - if s2 == "Inf" { - // +/- infinity: e = max, t = 0. - return Ok(sign_bit | max_e_bits); - } - if s2 == "NaN" { - // Canonical quiet NaN: e = max, t = quiet. - return Ok(sign_bit | max_e_bits | quiet_bit); - } - if s2.starts_with("NaN:0x") { - // Quiet NaN with payload. - return match u64::from_str_radix(&s2[6..], 16) { - Ok(payload) if payload < quiet_bit => { - Ok(sign_bit | max_e_bits | quiet_bit | payload) - } - _ => Err("Invalid NaN payload"), - }; - } - if s2.starts_with("sNaN:0x") { - // Signaling NaN with payload. - return match u64::from_str_radix(&s2[7..], 16) { - Ok(payload) if 0 < payload && payload < quiet_bit => { - Ok(sign_bit | max_e_bits | payload) - } - _ => Err("Invalid sNaN payload"), - }; - } - - return Err("Float must be hexadecimal"); - } - let s3 = &s2[2..]; - - let mut digits = 0u8; - let mut digits_before_period: Option = None; - let mut significand = 0u64; - let mut exponent = 0i32; - - for (idx, ch) in s3.char_indices() { - match ch { - '.' => { - // This is the radix point. There can only be one. - if digits_before_period != None { - return Err("Multiple radix points"); - } else { - digits_before_period = Some(digits); - } - } - 'p' => { - // The following exponent is a decimal number. - let exp_str = &s3[1 + idx..]; - match exp_str.parse::() { - Ok(e) => { - exponent = i32::from(e); - break; - } - Err(_) => return Err("Bad exponent"), - } - } - _ => match ch.to_digit(16) { - Some(digit) => { - digits += 1; - if digits > 16 { - return Err("Too many digits"); - } - significand = (significand << 4) | u64::from(digit); - } - None => return Err("Invalid character"), - }, - } - } - - if digits == 0 { - return Err("No digits"); - } - - if significand == 0 { - // This is +/- 0.0. - return Ok(sign_bit); - } - - // Number of bits appearing after the radix point. - match digits_before_period { - None => {} // No radix point present. - Some(d) => exponent -= 4 * i32::from(digits - d), - }; - - // Normalize the significand and exponent. - let significant_bits = (64 - significand.leading_zeros()) as u8; - if significant_bits > t + 1 { - let adjust = significant_bits - (t + 1); - if significand & ((1u64 << adjust) - 1) != 0 { - return Err("Too many significant bits"); - } - // Adjust significand down. - significand >>= adjust; - exponent += i32::from(adjust); - } else { - let adjust = t + 1 - significant_bits; - significand <<= adjust; - exponent -= i32::from(adjust); - } - debug_assert_eq!(significand >> t, 1); - - // Trailing significand excludes the high bit. - let t_bits = significand & ((1 << t) - 1); - - let max_exp = (1i32 << w) - 2; - let bias: i32 = (1 << (w - 1)) - 1; - exponent += bias + i32::from(t); - - if exponent > max_exp { - Err("Magnitude too large") - } else if exponent > 0 { - // This is a normal number. 
- let e_bits = (exponent as u64) << t; - Ok(sign_bit | e_bits | t_bits) - } else if 1 - exponent <= i32::from(t) { - // This is a subnormal number: e = 0, t = significand bits. - // Renormalize significand for exponent = 1. - let adjust = 1 - exponent; - if significand & ((1u64 << adjust) - 1) != 0 { - Err("Subnormal underflow") - } else { - significand >>= adjust; - Ok(sign_bit | significand) - } - } else { - Err("Magnitude too small") - } -} - -impl Ieee32 { - /// Create a new `Ieee32` containing the bits of `x`. - pub fn with_bits(x: u32) -> Self { - Self(x) - } - - /// Create an `Ieee32` number representing `2.0^n`. - pub fn pow2>(n: I) -> Self { - let n = n.into(); - let w = 8; - let t = 23; - let bias = (1 << (w - 1)) - 1; - let exponent = (n + bias) as u32; - assert!(exponent > 0, "Underflow n={}", n); - assert!(exponent < (1 << w) + 1, "Overflow n={}", n); - Self(exponent << t) - } - - /// Create an `Ieee32` number representing the greatest negative value - /// not convertable from f32 to a signed integer with width n. - pub fn fcvt_to_sint_negative_overflow>(n: I) -> Self { - let n = n.into(); - debug_assert!(n < 32); - debug_assert!(23 + 1 - n < 32); - Self::with_bits((1u32 << (32 - 1)) | Self::pow2(n - 1).0 | (1u32 << (23 + 1 - n))) - } - - /// Return self negated. - pub fn neg(self) -> Self { - Self(self.0 ^ (1 << 31)) - } - - /// Create a new `Ieee32` representing the number `x`. - pub fn with_float(x: f32) -> Self { - Self(x.to_bits()) - } - - /// Get the bitwise representation. - pub fn bits(self) -> u32 { - self.0 - } - - /// Check if the value is a NaN. - pub fn is_nan(&self) -> bool { - f32::from_bits(self.0).is_nan() - } -} - -impl PartialOrd for Ieee32 { - fn partial_cmp(&self, other: &Self) -> Option { - f32::from_bits(self.0).partial_cmp(&f32::from_bits(other.0)) - } -} - -impl Display for Ieee32 { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let bits: u32 = self.0; - format_float(u64::from(bits), 8, 23, f) - } -} - -impl FromStr for Ieee32 { - type Err = &'static str; - - fn from_str(s: &str) -> Result { - match parse_float(s, 8, 23) { - Ok(b) => Ok(Self(b as u32)), - Err(s) => Err(s), - } - } -} - -impl From for Ieee32 { - fn from(x: f32) -> Self { - Self::with_float(x) - } -} - -impl IntoBytes for Ieee32 { - fn into_bytes(self) -> Vec { - self.0.to_le_bytes().to_vec() - } -} - -impl Ieee64 { - /// Create a new `Ieee64` containing the bits of `x`. - pub fn with_bits(x: u64) -> Self { - Self(x) - } - - /// Create an `Ieee64` number representing `2.0^n`. - pub fn pow2>(n: I) -> Self { - let n = n.into(); - let w = 11; - let t = 52; - let bias = (1 << (w - 1)) - 1; - let exponent = (n + bias) as u64; - assert!(exponent > 0, "Underflow n={}", n); - assert!(exponent < (1 << w) + 1, "Overflow n={}", n); - Self(exponent << t) - } - - /// Create an `Ieee64` number representing the greatest negative value - /// not convertable from f64 to a signed integer with width n. - pub fn fcvt_to_sint_negative_overflow>(n: I) -> Self { - let n = n.into(); - debug_assert!(n < 64); - debug_assert!(52 + 1 - n < 64); - Self::with_bits((1u64 << (64 - 1)) | Self::pow2(n - 1).0 | (1u64 << (52 + 1 - n))) - } - - /// Return self negated. - pub fn neg(self) -> Self { - Self(self.0 ^ (1 << 63)) - } - - /// Create a new `Ieee64` representing the number `x`. - pub fn with_float(x: f64) -> Self { - Self(x.to_bits()) - } - - /// Get the bitwise representation. - pub fn bits(self) -> u64 { - self.0 - } - - /// Check if the value is a NaN. 
For [Ieee64], this means checking that the 11 exponent bits are - /// all set. - pub fn is_nan(&self) -> bool { - f64::from_bits(self.0).is_nan() - } -} - -impl PartialOrd for Ieee64 { - fn partial_cmp(&self, other: &Self) -> Option { - f64::from_bits(self.0).partial_cmp(&f64::from_bits(other.0)) - } -} - -impl Display for Ieee64 { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let bits: u64 = self.0; - format_float(bits, 11, 52, f) - } -} - -impl FromStr for Ieee64 { - type Err = &'static str; - - fn from_str(s: &str) -> Result { - match parse_float(s, 11, 52) { - Ok(b) => Ok(Self(b)), - Err(s) => Err(s), - } - } -} - -impl From for Ieee64 { - fn from(x: f64) -> Self { - Self::with_float(x) - } -} - -impl From for Ieee64 { - fn from(x: u64) -> Self { - Self::with_float(f64::from_bits(x)) - } -} - -impl IntoBytes for Ieee64 { - fn into_bytes(self) -> Vec { - self.0.to_le_bytes().to_vec() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::string::ToString; - use core::fmt::Display; - use core::mem; - use core::str::FromStr; - use core::{f32, f64}; - - #[test] - fn format_imm64() { - assert_eq!(Imm64(0).to_string(), "0"); - assert_eq!(Imm64(9999).to_string(), "9999"); - assert_eq!(Imm64(10000).to_string(), "0x2710"); - assert_eq!(Imm64(-9999).to_string(), "-9999"); - assert_eq!(Imm64(-10000).to_string(), "0xffff_ffff_ffff_d8f0"); - assert_eq!(Imm64(0xffff).to_string(), "0xffff"); - assert_eq!(Imm64(0x10000).to_string(), "0x0001_0000"); - } - - #[test] - fn format_uimm64() { - assert_eq!(Uimm64(0).to_string(), "0"); - assert_eq!(Uimm64(9999).to_string(), "9999"); - assert_eq!(Uimm64(10000).to_string(), "0x2710"); - assert_eq!(Uimm64(-9999i64 as u64).to_string(), "0xffff_ffff_ffff_d8f1"); - assert_eq!( - Uimm64(-10000i64 as u64).to_string(), - "0xffff_ffff_ffff_d8f0" - ); - assert_eq!(Uimm64(0xffff).to_string(), "0xffff"); - assert_eq!(Uimm64(0x10000).to_string(), "0x0001_0000"); - } - - // Verify that `text` can be parsed as a `T` into a value that displays as `want`. - fn parse_ok(text: &str, want: &str) - where - ::Err: Display, - { - match text.parse::() { - Err(s) => panic!("\"{}\".parse() error: {}", text, s), - Ok(x) => assert_eq!(x.to_string(), want), - } - } - - // Verify that `text` fails to parse as `T` with the error `msg`. - fn parse_err(text: &str, msg: &str) - where - ::Err: Display, - { - match text.parse::() { - Err(s) => assert_eq!(s.to_string(), msg), - Ok(x) => panic!("Wanted Err({}), but got {}", msg, x), - } - } - - #[test] - fn parse_imm64() { - parse_ok::("0", "0"); - parse_ok::("1", "1"); - parse_ok::("-0", "0"); - parse_ok::("-1", "-1"); - parse_ok::("0x0", "0"); - parse_ok::("0xf", "15"); - parse_ok::("-0x9", "-9"); - - // Probe limits. - parse_ok::("0xffffffff_ffffffff", "-1"); - parse_ok::("0x80000000_00000000", "0x8000_0000_0000_0000"); - parse_ok::("-0x80000000_00000000", "0x8000_0000_0000_0000"); - parse_err::("-0x80000000_00000001", "Negative number too small"); - parse_ok::("18446744073709551615", "-1"); - parse_ok::("-9223372036854775808", "0x8000_0000_0000_0000"); - // Overflow both the `checked_add` and `checked_mul`. - parse_err::("18446744073709551616", "Too large decimal number"); - parse_err::("184467440737095516100", "Too large decimal number"); - parse_err::("-9223372036854775809", "Negative number too small"); - - // Underscores are allowed where digits go. 
- parse_ok::("0_0", "0"); - parse_ok::("-_10_0", "-100"); - parse_ok::("_10_", "10"); - parse_ok::("0x97_88_bb", "0x0097_88bb"); - parse_ok::("0x_97_", "151"); - - parse_err::("", "No digits in number"); - parse_err::("-", "No digits in number"); - parse_err::("_", "No digits in number"); - parse_err::("0x", "No digits in number"); - parse_err::("0x_", "No digits in number"); - parse_err::("-0x", "No digits in number"); - parse_err::(" ", "Invalid character in decimal number"); - parse_err::("0 ", "Invalid character in decimal number"); - parse_err::(" 0", "Invalid character in decimal number"); - parse_err::("--", "Invalid character in decimal number"); - parse_err::("-0x-", "Invalid character in hexadecimal number"); - - // Hex count overflow. - parse_err::("0x0_0000_0000_0000_0000", "Too many hexadecimal digits"); - } - - #[test] - fn parse_uimm64() { - parse_ok::("0", "0"); - parse_ok::("1", "1"); - parse_ok::("0x0", "0"); - parse_ok::("0xf", "15"); - parse_ok::("0xffffffff_fffffff7", "0xffff_ffff_ffff_fff7"); - - // Probe limits. - parse_ok::("0xffffffff_ffffffff", "0xffff_ffff_ffff_ffff"); - parse_ok::("0x80000000_00000000", "0x8000_0000_0000_0000"); - parse_ok::("18446744073709551615", "0xffff_ffff_ffff_ffff"); - // Overflow both the `checked_add` and `checked_mul`. - parse_err::("18446744073709551616", "Too large decimal number"); - parse_err::("184467440737095516100", "Too large decimal number"); - - // Underscores are allowed where digits go. - parse_ok::("0_0", "0"); - parse_ok::("_10_", "10"); - parse_ok::("0x97_88_bb", "0x0097_88bb"); - parse_ok::("0x_97_", "151"); - - parse_err::("", "No digits in number"); - parse_err::("_", "No digits in number"); - parse_err::("0x", "No digits in number"); - parse_err::("0x_", "No digits in number"); - parse_err::("-", "Invalid character in decimal number"); - parse_err::("-0x", "Invalid character in hexadecimal number"); - parse_err::(" ", "Invalid character in decimal number"); - parse_err::("0 ", "Invalid character in decimal number"); - parse_err::(" 0", "Invalid character in decimal number"); - parse_err::("--", "Invalid character in decimal number"); - parse_err::("-0x-", "Invalid character in hexadecimal number"); - parse_err::("-0", "Invalid character in decimal number"); - parse_err::("-1", "Invalid character in decimal number"); - - // Hex count overflow. 
- parse_err::("0x0_0000_0000_0000_0000", "Too many hexadecimal digits"); - } - - #[test] - fn format_offset32() { - assert_eq!(Offset32(0).to_string(), ""); - assert_eq!(Offset32(1).to_string(), "+1"); - assert_eq!(Offset32(-1).to_string(), "-1"); - assert_eq!(Offset32(9999).to_string(), "+9999"); - assert_eq!(Offset32(10000).to_string(), "+0x2710"); - assert_eq!(Offset32(-9999).to_string(), "-9999"); - assert_eq!(Offset32(-10000).to_string(), "-0x2710"); - assert_eq!(Offset32(0xffff).to_string(), "+0xffff"); - assert_eq!(Offset32(0x10000).to_string(), "+0x0001_0000"); - } - - #[test] - fn parse_offset32() { - parse_ok::("+0", ""); - parse_ok::("+1", "+1"); - parse_ok::("-0", ""); - parse_ok::("-1", "-1"); - parse_ok::("+0x0", ""); - parse_ok::("+0xf", "+15"); - parse_ok::("-0x9", "-9"); - parse_ok::("-0x8000_0000", "-0x8000_0000"); - - parse_err::("+0x8000_0000", "Offset out of range"); - } - - #[test] - fn format_ieee32() { - assert_eq!(Ieee32::with_float(0.0).to_string(), "0.0"); - assert_eq!(Ieee32::with_float(-0.0).to_string(), "-0.0"); - assert_eq!(Ieee32::with_float(1.0).to_string(), "0x1.000000p0"); - assert_eq!(Ieee32::with_float(1.5).to_string(), "0x1.800000p0"); - assert_eq!(Ieee32::with_float(0.5).to_string(), "0x1.000000p-1"); - assert_eq!( - Ieee32::with_float(f32::EPSILON).to_string(), - "0x1.000000p-23" - ); - assert_eq!(Ieee32::with_float(f32::MIN).to_string(), "-0x1.fffffep127"); - assert_eq!(Ieee32::with_float(f32::MAX).to_string(), "0x1.fffffep127"); - // Smallest positive normal number. - assert_eq!( - Ieee32::with_float(f32::MIN_POSITIVE).to_string(), - "0x1.000000p-126" - ); - // Subnormals. - assert_eq!( - Ieee32::with_float(f32::MIN_POSITIVE / 2.0).to_string(), - "0x0.800000p-126" - ); - assert_eq!( - Ieee32::with_float(f32::MIN_POSITIVE * f32::EPSILON).to_string(), - "0x0.000002p-126" - ); - assert_eq!(Ieee32::with_float(f32::INFINITY).to_string(), "+Inf"); - assert_eq!(Ieee32::with_float(f32::NEG_INFINITY).to_string(), "-Inf"); - assert_eq!(Ieee32::with_float(f32::NAN).to_string(), "+NaN"); - assert_eq!(Ieee32::with_float(-f32::NAN).to_string(), "-NaN"); - // Construct some qNaNs with payloads. - assert_eq!(Ieee32(0x7fc00001).to_string(), "+NaN:0x1"); - assert_eq!(Ieee32(0x7ff00001).to_string(), "+NaN:0x300001"); - // Signaling NaNs. - assert_eq!(Ieee32(0x7f800001).to_string(), "+sNaN:0x1"); - assert_eq!(Ieee32(0x7fa00001).to_string(), "+sNaN:0x200001"); - } - - #[test] - fn parse_ieee32() { - parse_ok::("0.0", "0.0"); - parse_ok::("+0.0", "0.0"); - parse_ok::("-0.0", "-0.0"); - parse_ok::("0x0", "0.0"); - parse_ok::("0x0.0", "0.0"); - parse_ok::("0x.0", "0.0"); - parse_ok::("0x0.", "0.0"); - parse_ok::("0x1", "0x1.000000p0"); - parse_ok::("+0x1", "0x1.000000p0"); - parse_ok::("-0x1", "-0x1.000000p0"); - parse_ok::("0x10", "0x1.000000p4"); - parse_ok::("0x10.0", "0x1.000000p4"); - parse_err::("0.", "Float must be hexadecimal"); - parse_err::(".0", "Float must be hexadecimal"); - parse_err::("0", "Float must be hexadecimal"); - parse_err::("-0", "Float must be hexadecimal"); - parse_err::(".", "Float must be hexadecimal"); - parse_err::("", "Float must be hexadecimal"); - parse_err::("-", "Float must be hexadecimal"); - parse_err::("0x", "No digits"); - parse_err::("0x..", "Multiple radix points"); - - // Check significant bits. 
- parse_ok::("0x0.ffffff", "0x1.fffffep-1"); - parse_ok::("0x1.fffffe", "0x1.fffffep0"); - parse_ok::("0x3.fffffc", "0x1.fffffep1"); - parse_ok::("0x7.fffff8", "0x1.fffffep2"); - parse_ok::("0xf.fffff0", "0x1.fffffep3"); - parse_err::("0x1.ffffff", "Too many significant bits"); - parse_err::("0x1.fffffe0000000000", "Too many digits"); - - // Exponents. - parse_ok::("0x1p3", "0x1.000000p3"); - parse_ok::("0x1p-3", "0x1.000000p-3"); - parse_ok::("0x1.0p3", "0x1.000000p3"); - parse_ok::("0x2.0p3", "0x1.000000p4"); - parse_ok::("0x1.0p127", "0x1.000000p127"); - parse_ok::("0x1.0p-126", "0x1.000000p-126"); - parse_ok::("0x0.1p-122", "0x1.000000p-126"); - parse_err::("0x2.0p127", "Magnitude too large"); - - // Subnormals. - parse_ok::("0x1.0p-127", "0x0.800000p-126"); - parse_ok::("0x1.0p-149", "0x0.000002p-126"); - parse_ok::("0x0.000002p-126", "0x0.000002p-126"); - parse_err::("0x0.100001p-126", "Subnormal underflow"); - parse_err::("0x1.8p-149", "Subnormal underflow"); - parse_err::("0x1.0p-150", "Magnitude too small"); - - // NaNs and Infs. - parse_ok::("Inf", "+Inf"); - parse_ok::("+Inf", "+Inf"); - parse_ok::("-Inf", "-Inf"); - parse_ok::("NaN", "+NaN"); - parse_ok::("+NaN", "+NaN"); - parse_ok::("-NaN", "-NaN"); - parse_ok::("NaN:0x0", "+NaN"); - parse_err::("NaN:", "Float must be hexadecimal"); - parse_err::("NaN:0", "Float must be hexadecimal"); - parse_err::("NaN:0x", "Invalid NaN payload"); - parse_ok::("NaN:0x000001", "+NaN:0x1"); - parse_ok::("NaN:0x300001", "+NaN:0x300001"); - parse_err::("NaN:0x400001", "Invalid NaN payload"); - parse_ok::("sNaN:0x1", "+sNaN:0x1"); - parse_err::("sNaN:0x0", "Invalid sNaN payload"); - parse_ok::("sNaN:0x200001", "+sNaN:0x200001"); - parse_err::("sNaN:0x400001", "Invalid sNaN payload"); - } - - #[test] - fn pow2_ieee32() { - assert_eq!(Ieee32::pow2(0).to_string(), "0x1.000000p0"); - assert_eq!(Ieee32::pow2(1).to_string(), "0x1.000000p1"); - assert_eq!(Ieee32::pow2(-1).to_string(), "0x1.000000p-1"); - assert_eq!(Ieee32::pow2(127).to_string(), "0x1.000000p127"); - assert_eq!(Ieee32::pow2(-126).to_string(), "0x1.000000p-126"); - - assert_eq!(Ieee32::pow2(1).neg().to_string(), "-0x1.000000p1"); - } - - #[test] - fn fcvt_to_sint_negative_overflow_ieee32() { - for n in &[8, 16] { - assert_eq!(-((1u32 << (n - 1)) as f32) - 1.0, unsafe { - mem::transmute(Ieee32::fcvt_to_sint_negative_overflow(*n)) - }); - } - } - - #[test] - fn format_ieee64() { - assert_eq!(Ieee64::with_float(0.0).to_string(), "0.0"); - assert_eq!(Ieee64::with_float(-0.0).to_string(), "-0.0"); - assert_eq!(Ieee64::with_float(1.0).to_string(), "0x1.0000000000000p0"); - assert_eq!(Ieee64::with_float(1.5).to_string(), "0x1.8000000000000p0"); - assert_eq!(Ieee64::with_float(0.5).to_string(), "0x1.0000000000000p-1"); - assert_eq!( - Ieee64::with_float(f64::EPSILON).to_string(), - "0x1.0000000000000p-52" - ); - assert_eq!( - Ieee64::with_float(f64::MIN).to_string(), - "-0x1.fffffffffffffp1023" - ); - assert_eq!( - Ieee64::with_float(f64::MAX).to_string(), - "0x1.fffffffffffffp1023" - ); - // Smallest positive normal number. - assert_eq!( - Ieee64::with_float(f64::MIN_POSITIVE).to_string(), - "0x1.0000000000000p-1022" - ); - // Subnormals. 
- assert_eq!( - Ieee64::with_float(f64::MIN_POSITIVE / 2.0).to_string(), - "0x0.8000000000000p-1022" - ); - assert_eq!( - Ieee64::with_float(f64::MIN_POSITIVE * f64::EPSILON).to_string(), - "0x0.0000000000001p-1022" - ); - assert_eq!(Ieee64::with_float(f64::INFINITY).to_string(), "+Inf"); - assert_eq!(Ieee64::with_float(f64::NEG_INFINITY).to_string(), "-Inf"); - assert_eq!(Ieee64::with_float(f64::NAN).to_string(), "+NaN"); - assert_eq!(Ieee64::with_float(-f64::NAN).to_string(), "-NaN"); - // Construct some qNaNs with payloads. - assert_eq!(Ieee64(0x7ff8000000000001).to_string(), "+NaN:0x1"); - assert_eq!( - Ieee64(0x7ffc000000000001).to_string(), - "+NaN:0x4000000000001" - ); - // Signaling NaNs. - assert_eq!(Ieee64(0x7ff0000000000001).to_string(), "+sNaN:0x1"); - assert_eq!( - Ieee64(0x7ff4000000000001).to_string(), - "+sNaN:0x4000000000001" - ); - } - - #[test] - fn parse_ieee64() { - parse_ok::("0.0", "0.0"); - parse_ok::("-0.0", "-0.0"); - parse_ok::("0x0", "0.0"); - parse_ok::("0x0.0", "0.0"); - parse_ok::("0x.0", "0.0"); - parse_ok::("0x0.", "0.0"); - parse_ok::("0x1", "0x1.0000000000000p0"); - parse_ok::("-0x1", "-0x1.0000000000000p0"); - parse_ok::("0x10", "0x1.0000000000000p4"); - parse_ok::("0x10.0", "0x1.0000000000000p4"); - parse_err::("0.", "Float must be hexadecimal"); - parse_err::(".0", "Float must be hexadecimal"); - parse_err::("0", "Float must be hexadecimal"); - parse_err::("-0", "Float must be hexadecimal"); - parse_err::(".", "Float must be hexadecimal"); - parse_err::("", "Float must be hexadecimal"); - parse_err::("-", "Float must be hexadecimal"); - parse_err::("0x", "No digits"); - parse_err::("0x..", "Multiple radix points"); - - // Check significant bits. - parse_ok::("0x0.fffffffffffff8", "0x1.fffffffffffffp-1"); - parse_ok::("0x1.fffffffffffff", "0x1.fffffffffffffp0"); - parse_ok::("0x3.ffffffffffffe", "0x1.fffffffffffffp1"); - parse_ok::("0x7.ffffffffffffc", "0x1.fffffffffffffp2"); - parse_ok::("0xf.ffffffffffff8", "0x1.fffffffffffffp3"); - parse_err::("0x3.fffffffffffff", "Too many significant bits"); - parse_err::("0x001.fffffe00000000", "Too many digits"); - - // Exponents. - parse_ok::("0x1p3", "0x1.0000000000000p3"); - parse_ok::("0x1p-3", "0x1.0000000000000p-3"); - parse_ok::("0x1.0p3", "0x1.0000000000000p3"); - parse_ok::("0x2.0p3", "0x1.0000000000000p4"); - parse_ok::("0x1.0p1023", "0x1.0000000000000p1023"); - parse_ok::("0x1.0p-1022", "0x1.0000000000000p-1022"); - parse_ok::("0x0.1p-1018", "0x1.0000000000000p-1022"); - parse_err::("0x2.0p1023", "Magnitude too large"); - - // Subnormals. - parse_ok::("0x1.0p-1023", "0x0.8000000000000p-1022"); - parse_ok::("0x1.0p-1074", "0x0.0000000000001p-1022"); - parse_ok::("0x0.0000000000001p-1022", "0x0.0000000000001p-1022"); - parse_err::("0x0.10000000000008p-1022", "Subnormal underflow"); - parse_err::("0x1.8p-1074", "Subnormal underflow"); - parse_err::("0x1.0p-1075", "Magnitude too small"); - - // NaNs and Infs. 
- parse_ok::("Inf", "+Inf"); - parse_ok::("-Inf", "-Inf"); - parse_ok::("NaN", "+NaN"); - parse_ok::("-NaN", "-NaN"); - parse_ok::("NaN:0x0", "+NaN"); - parse_err::("NaN:", "Float must be hexadecimal"); - parse_err::("NaN:0", "Float must be hexadecimal"); - parse_err::("NaN:0x", "Invalid NaN payload"); - parse_ok::("NaN:0x000001", "+NaN:0x1"); - parse_ok::("NaN:0x4000000000001", "+NaN:0x4000000000001"); - parse_err::("NaN:0x8000000000001", "Invalid NaN payload"); - parse_ok::("sNaN:0x1", "+sNaN:0x1"); - parse_err::("sNaN:0x0", "Invalid sNaN payload"); - parse_ok::("sNaN:0x4000000000001", "+sNaN:0x4000000000001"); - parse_err::("sNaN:0x8000000000001", "Invalid sNaN payload"); - } - - #[test] - fn pow2_ieee64() { - assert_eq!(Ieee64::pow2(0).to_string(), "0x1.0000000000000p0"); - assert_eq!(Ieee64::pow2(1).to_string(), "0x1.0000000000000p1"); - assert_eq!(Ieee64::pow2(-1).to_string(), "0x1.0000000000000p-1"); - assert_eq!(Ieee64::pow2(1023).to_string(), "0x1.0000000000000p1023"); - assert_eq!(Ieee64::pow2(-1022).to_string(), "0x1.0000000000000p-1022"); - - assert_eq!(Ieee64::pow2(1).neg().to_string(), "-0x1.0000000000000p1"); - } - - #[test] - fn fcvt_to_sint_negative_overflow_ieee64() { - for n in &[8, 16, 32] { - assert_eq!(-((1u64 << (n - 1)) as f64) - 1.0, unsafe { - mem::transmute(Ieee64::fcvt_to_sint_negative_overflow(*n)) - }); - } - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/instructions.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/instructions.rs deleted file mode 100644 index 63a17bf9b..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/instructions.rs +++ /dev/null @@ -1,924 +0,0 @@ -//! Instruction formats and opcodes. -//! -//! The `instructions` module contains definitions for instruction formats, opcodes, and the -//! in-memory representation of IR instructions. -//! -//! A large part of this module is auto-generated from the instruction descriptions in the meta -//! directory. - -use alloc::vec::Vec; -use core::convert::{TryFrom, TryInto}; -use core::fmt::{self, Display, Formatter}; -use core::num::NonZeroU32; -use core::ops::{Deref, DerefMut}; -use core::str::FromStr; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -use crate::bitset::BitSet; -use crate::data_value::DataValue; -use crate::entity; -use crate::ir::{ - self, - condcodes::{FloatCC, IntCC}, - trapcode::TrapCode, - types, Block, FuncRef, JumpTable, MemFlags, SigRef, StackSlot, Type, Value, -}; - -/// Some instructions use an external list of argument values because there is not enough space in -/// the 16-byte `InstructionData` struct. These value lists are stored in a memory pool in -/// `dfg.value_lists`. -pub type ValueList = entity::EntityList; - -/// Memory pool for holding value lists. See `ValueList`. -pub type ValueListPool = entity::ListPool; - -// Include code generated by `cranelift-codegen/meta/src/gen_inst.rs`. This file contains: -// -// - The `pub enum InstructionFormat` enum with all the instruction formats. -// - The `pub enum InstructionData` enum with all the instruction data fields. -// - The `pub enum Opcode` definition with all known opcodes, -// - The `const OPCODE_FORMAT: [InstructionFormat; N]` table. -// - The private `fn opcode_name(Opcode) -> &'static str` function, and -// - The hash table `const OPCODE_HASH_TABLE: [Opcode; N]`. -// -// For value type constraints: -// -// - The `const OPCODE_CONSTRAINTS : [OpcodeConstraints; N]` table. 
-// - The `const TYPE_SETS : [ValueTypeSet; N]` table. -// - The `const OPERAND_CONSTRAINTS : [OperandConstraint; N]` table. -// -include!(concat!(env!("OUT_DIR"), "/opcodes.rs")); - -impl Display for Opcode { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{}", opcode_name(*self)) - } -} - -impl Opcode { - /// Get the instruction format for this opcode. - pub fn format(self) -> InstructionFormat { - OPCODE_FORMAT[self as usize - 1] - } - - /// Get the constraint descriptor for this opcode. - /// Panic if this is called on `NotAnOpcode`. - pub fn constraints(self) -> OpcodeConstraints { - OPCODE_CONSTRAINTS[self as usize - 1] - } - - /// Returns true if the instruction is a resumable trap. - pub fn is_resumable_trap(&self) -> bool { - match self { - Opcode::ResumableTrap | Opcode::ResumableTrapnz => true, - _ => false, - } - } -} - -impl TryFrom for Opcode { - type Error = (); - - #[inline] - fn try_from(x: NonZeroU32) -> Result { - let x: u16 = x.get().try_into().map_err(|_| ())?; - Self::try_from(x) - } -} - -impl From for NonZeroU32 { - #[inline] - fn from(op: Opcode) -> NonZeroU32 { - let x = op as u8; - NonZeroU32::new(x as u32).unwrap() - } -} - -// This trait really belongs in cranelift-reader where it is used by the `.clif` file parser, but since -// it critically depends on the `opcode_name()` function which is needed here anyway, it lives in -// this module. This also saves us from running the build script twice to generate code for the two -// separate crates. -impl FromStr for Opcode { - type Err = &'static str; - - /// Parse an Opcode name from a string. - fn from_str(s: &str) -> Result { - use crate::constant_hash::{probe, simple_hash, Table}; - - impl<'a> Table<&'a str> for [Option] { - fn len(&self) -> usize { - self.len() - } - - fn key(&self, idx: usize) -> Option<&'a str> { - self[idx].map(opcode_name) - } - } - - match probe::<&str, [Option]>(&OPCODE_HASH_TABLE, s, simple_hash(s)) { - Err(_) => Err("Unknown opcode"), - // We unwrap here because probe() should have ensured that the entry - // at this index is not None. - Ok(i) => Ok(OPCODE_HASH_TABLE[i].unwrap()), - } - } -} - -/// A variable list of `Value` operands used for function call arguments and passing arguments to -/// basic blocks. -#[derive(Clone, Debug)] -pub struct VariableArgs(Vec); - -impl VariableArgs { - /// Create an empty argument list. - pub fn new() -> Self { - Self(Vec::new()) - } - - /// Add an argument to the end. - pub fn push(&mut self, v: Value) { - self.0.push(v) - } - - /// Check if the list is empty. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Convert this to a value list in `pool` with `fixed` prepended. - pub fn into_value_list(self, fixed: &[Value], pool: &mut ValueListPool) -> ValueList { - let mut vlist = ValueList::default(); - vlist.extend(fixed.iter().cloned(), pool); - vlist.extend(self.0, pool); - vlist - } -} - -// Coerce `VariableArgs` into a `&[Value]` slice. -impl Deref for VariableArgs { - type Target = [Value]; - - fn deref(&self) -> &[Value] { - &self.0 - } -} - -impl DerefMut for VariableArgs { - fn deref_mut(&mut self) -> &mut [Value] { - &mut self.0 - } -} - -impl Display for VariableArgs { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - for (i, val) in self.0.iter().enumerate() { - if i == 0 { - write!(fmt, "{}", val)?; - } else { - write!(fmt, ", {}", val)?; - } - } - Ok(()) - } -} - -impl Default for VariableArgs { - fn default() -> Self { - Self::new() - } -} - -/// Analyzing an instruction. 
-/// -/// Avoid large matches on instruction formats by using the methods defined here to examine -/// instructions. -impl InstructionData { - /// Return information about the destination of a branch or jump instruction. - /// - /// Any instruction that can transfer control to another block reveals its possible destinations - /// here. - pub fn analyze_branch<'a>(&'a self, pool: &'a ValueListPool) -> BranchInfo<'a> { - match *self { - Self::Jump { - destination, - ref args, - .. - } => BranchInfo::SingleDest(destination, args.as_slice(pool)), - Self::BranchInt { - destination, - ref args, - .. - } - | Self::BranchFloat { - destination, - ref args, - .. - } - | Self::Branch { - destination, - ref args, - .. - } => BranchInfo::SingleDest(destination, &args.as_slice(pool)[1..]), - Self::BranchIcmp { - destination, - ref args, - .. - } => BranchInfo::SingleDest(destination, &args.as_slice(pool)[2..]), - Self::BranchTable { - table, destination, .. - } => BranchInfo::Table(table, Some(destination)), - _ => { - debug_assert!(!self.opcode().is_branch()); - BranchInfo::NotABranch - } - } - } - - /// Get the single destination of this branch instruction, if it is a single destination - /// branch or jump. - /// - /// Multi-destination branches like `br_table` return `None`. - pub fn branch_destination(&self) -> Option { - match *self { - Self::Jump { destination, .. } - | Self::Branch { destination, .. } - | Self::BranchInt { destination, .. } - | Self::BranchFloat { destination, .. } - | Self::BranchIcmp { destination, .. } => Some(destination), - Self::BranchTable { .. } => None, - _ => { - debug_assert!(!self.opcode().is_branch()); - None - } - } - } - - /// Get a mutable reference to the single destination of this branch instruction, if it is a - /// single destination branch or jump. - /// - /// Multi-destination branches like `br_table` return `None`. - pub fn branch_destination_mut(&mut self) -> Option<&mut Block> { - match *self { - Self::Jump { - ref mut destination, - .. - } - | Self::Branch { - ref mut destination, - .. - } - | Self::BranchInt { - ref mut destination, - .. - } - | Self::BranchFloat { - ref mut destination, - .. - } - | Self::BranchIcmp { - ref mut destination, - .. - } => Some(destination), - Self::BranchTable { .. } => None, - _ => { - debug_assert!(!self.opcode().is_branch()); - None - } - } - } - - /// Return the value of an immediate if the instruction has one or `None` otherwise. Only - /// immediate values are considered, not global values, constant handles, condition codes, etc. - pub fn imm_value(&self) -> Option { - match self { - &InstructionData::UnaryBool { imm, .. } => Some(DataValue::from(imm)), - // 8-bit. - &InstructionData::BinaryImm8 { imm, .. } - | &InstructionData::TernaryImm8 { imm, .. } => Some(DataValue::from(imm as i8)), // Note the switch from unsigned to signed. - // 32-bit - &InstructionData::UnaryIeee32 { imm, .. } => Some(DataValue::from(imm)), - &InstructionData::HeapAddr { imm, .. } => { - let imm: u32 = imm.into(); - Some(DataValue::from(imm as i32)) // Note the switch from unsigned to signed. - } - &InstructionData::Load { offset, .. } - | &InstructionData::LoadComplex { offset, .. } - | &InstructionData::Store { offset, .. } - | &InstructionData::StoreComplex { offset, .. } - | &InstructionData::StackLoad { offset, .. } - | &InstructionData::StackStore { offset, .. } - | &InstructionData::TableAddr { offset, .. } => Some(DataValue::from(offset)), - // 64-bit. - &InstructionData::UnaryImm { imm, .. 
} - | &InstructionData::BinaryImm64 { imm, .. } - | &InstructionData::IntCompareImm { imm, .. } => Some(DataValue::from(imm.bits())), - &InstructionData::UnaryIeee64 { imm, .. } => Some(DataValue::from(imm)), - // 128-bit; though these immediates are present logically in the IR they are not - // included in the `InstructionData` for memory-size reasons. This case, returning - // `None`, is left here to alert users of this method that they should retrieve the - // value using the `DataFlowGraph`. - &InstructionData::Shuffle { imm: _, .. } => None, - _ => None, - } - } - - /// If this is a trapping instruction, get its trap code. Otherwise, return - /// `None`. - pub fn trap_code(&self) -> Option { - match *self { - Self::CondTrap { code, .. } - | Self::FloatCondTrap { code, .. } - | Self::IntCondTrap { code, .. } - | Self::Trap { code, .. } => Some(code), - _ => None, - } - } - - /// If this is a control-flow instruction depending on an integer condition, gets its - /// condition. Otherwise, return `None`. - pub fn cond_code(&self) -> Option { - match self { - &InstructionData::IntCond { cond, .. } - | &InstructionData::BranchIcmp { cond, .. } - | &InstructionData::IntCompare { cond, .. } - | &InstructionData::IntCondTrap { cond, .. } - | &InstructionData::BranchInt { cond, .. } - | &InstructionData::IntSelect { cond, .. } - | &InstructionData::IntCompareImm { cond, .. } => Some(cond), - _ => None, - } - } - - /// If this is a control-flow instruction depending on a floating-point condition, gets its - /// condition. Otherwise, return `None`. - pub fn fp_cond_code(&self) -> Option { - match self { - &InstructionData::BranchFloat { cond, .. } - | &InstructionData::FloatCompare { cond, .. } - | &InstructionData::FloatCond { cond, .. } - | &InstructionData::FloatCondTrap { cond, .. } => Some(cond), - _ => None, - } - } - - /// If this is a trapping instruction, get an exclusive reference to its - /// trap code. Otherwise, return `None`. - pub fn trap_code_mut(&mut self) -> Option<&mut TrapCode> { - match self { - Self::CondTrap { code, .. } - | Self::FloatCondTrap { code, .. } - | Self::IntCondTrap { code, .. } - | Self::Trap { code, .. } => Some(code), - _ => None, - } - } - - /// If this is an atomic read/modify/write instruction, return its subopcode. - pub fn atomic_rmw_op(&self) -> Option { - match self { - &InstructionData::AtomicRmw { op, .. } => Some(op), - _ => None, - } - } - - /// If this is a load/store instruction, returns its immediate offset. - pub fn load_store_offset(&self) -> Option { - match self { - &InstructionData::Load { offset, .. } - | &InstructionData::StackLoad { offset, .. } - | &InstructionData::LoadComplex { offset, .. } - | &InstructionData::Store { offset, .. } - | &InstructionData::StackStore { offset, .. } - | &InstructionData::StoreComplex { offset, .. } => Some(offset.into()), - _ => None, - } - } - - /// If this is a load/store instruction, return its memory flags. - pub fn memflags(&self) -> Option { - match self { - &InstructionData::Load { flags, .. } - | &InstructionData::LoadComplex { flags, .. } - | &InstructionData::LoadNoOffset { flags, .. } - | &InstructionData::Store { flags, .. } - | &InstructionData::StoreComplex { flags, .. } - | &InstructionData::StoreNoOffset { flags, .. } => Some(flags), - _ => None, - } - } - - /// If this instruction references a stack slot, return it - pub fn stack_slot(&self) -> Option { - match self { - &InstructionData::StackStore { stack_slot, .. } - | &InstructionData::StackLoad { stack_slot, .. 
} => Some(stack_slot), - _ => None, - } - } - - /// Return information about a call instruction. - /// - /// Any instruction that can call another function reveals its call signature here. - pub fn analyze_call<'a>(&'a self, pool: &'a ValueListPool) -> CallInfo<'a> { - match *self { - Self::Call { - func_ref, ref args, .. - } => CallInfo::Direct(func_ref, args.as_slice(pool)), - Self::CallIndirect { - sig_ref, ref args, .. - } => CallInfo::Indirect(sig_ref, &args.as_slice(pool)[1..]), - _ => { - debug_assert!(!self.opcode().is_call()); - CallInfo::NotACall - } - } - } - - #[inline] - pub(crate) fn sign_extend_immediates(&mut self, ctrl_typevar: Type) { - if ctrl_typevar.is_invalid() { - return; - } - - let bit_width = ctrl_typevar.bits(); - - match self { - Self::BinaryImm64 { - opcode, - arg: _, - imm, - } => { - if *opcode == Opcode::SdivImm || *opcode == Opcode::SremImm { - imm.sign_extend_from_width(bit_width); - } - } - Self::IntCompareImm { - opcode, - arg: _, - cond, - imm, - } => { - debug_assert_eq!(*opcode, Opcode::IcmpImm); - if cond.unsigned() != *cond { - imm.sign_extend_from_width(bit_width); - } - } - _ => {} - } - } -} - -/// Information about branch and jump instructions. -pub enum BranchInfo<'a> { - /// This is not a branch or jump instruction. - /// This instruction will not transfer control to another block in the function, but it may still - /// affect control flow by returning or trapping. - NotABranch, - - /// This is a branch or jump to a single destination block, possibly taking value arguments. - SingleDest(Block, &'a [Value]), - - /// This is a jump table branch which can have many destination blocks and maybe one default block. - Table(JumpTable, Option), -} - -/// Information about call instructions. -pub enum CallInfo<'a> { - /// This is not a call instruction. - NotACall, - - /// This is a direct call to an external function declared in the preamble. See - /// `DataFlowGraph.ext_funcs`. - Direct(FuncRef, &'a [Value]), - - /// This is an indirect call with the specified signature. See `DataFlowGraph.signatures`. - Indirect(SigRef, &'a [Value]), -} - -/// Value type constraints for a given opcode. -/// -/// The `InstructionFormat` determines the constraints on most operands, but `Value` operands and -/// results are not determined by the format. Every `Opcode` has an associated -/// `OpcodeConstraints` object that provides the missing details. -#[derive(Clone, Copy)] -pub struct OpcodeConstraints { - /// Flags for this opcode encoded as a bit field: - /// - /// Bits 0-2: - /// Number of fixed result values. This does not include `variable_args` results as are - /// produced by call instructions. - /// - /// Bit 3: - /// This opcode is polymorphic and the controlling type variable can be inferred from the - /// designated input operand. This is the `typevar_operand` index given to the - /// `InstructionFormat` meta language object. When this bit is not set, the controlling - /// type variable must be the first output value instead. - /// - /// Bit 4: - /// This opcode is polymorphic and the controlling type variable does *not* appear as the - /// first result type. - /// - /// Bits 5-7: - /// Number of fixed value arguments. The minimum required number of value operands. - flags: u8, - - /// Permitted set of types for the controlling type variable as an index into `TYPE_SETS`. - typeset_offset: u8, - - /// Offset into `OPERAND_CONSTRAINT` table of the descriptors for this opcode. 
The first - /// `num_fixed_results()` entries describe the result constraints, then follows constraints for - /// the fixed `Value` input operands. (`num_fixed_value_arguments()` of them). - constraint_offset: u16, -} - -impl OpcodeConstraints { - /// Can the controlling type variable for this opcode be inferred from the designated value - /// input operand? - /// This also implies that this opcode is polymorphic. - pub fn use_typevar_operand(self) -> bool { - (self.flags & 0x8) != 0 - } - - /// Is it necessary to look at the designated value input operand in order to determine the - /// controlling type variable, or is it good enough to use the first return type? - /// - /// Most polymorphic instructions produce a single result with the type of the controlling type - /// variable. A few polymorphic instructions either don't produce any results, or produce - /// results with a fixed type. These instructions return `true`. - pub fn requires_typevar_operand(self) -> bool { - (self.flags & 0x10) != 0 - } - - /// Get the number of *fixed* result values produced by this opcode. - /// This does not include `variable_args` produced by calls. - pub fn num_fixed_results(self) -> usize { - (self.flags & 0x7) as usize - } - - /// Get the number of *fixed* input values required by this opcode. - /// - /// This does not include `variable_args` arguments on call and branch instructions. - /// - /// The number of fixed input values is usually implied by the instruction format, but - /// instruction formats that use a `ValueList` put both fixed and variable arguments in the - /// list. This method returns the *minimum* number of values required in the value list. - pub fn num_fixed_value_arguments(self) -> usize { - ((self.flags >> 5) & 0x7) as usize - } - - /// Get the offset into `TYPE_SETS` for the controlling type variable. - /// Returns `None` if the instruction is not polymorphic. - fn typeset_offset(self) -> Option { - let offset = usize::from(self.typeset_offset); - if offset < TYPE_SETS.len() { - Some(offset) - } else { - None - } - } - - /// Get the offset into OPERAND_CONSTRAINTS where the descriptors for this opcode begin. - fn constraint_offset(self) -> usize { - self.constraint_offset as usize - } - - /// Get the value type of result number `n`, having resolved the controlling type variable to - /// `ctrl_type`. - pub fn result_type(self, n: usize, ctrl_type: Type) -> Type { - debug_assert!(n < self.num_fixed_results(), "Invalid result index"); - if let ResolvedConstraint::Bound(t) = - OPERAND_CONSTRAINTS[self.constraint_offset() + n].resolve(ctrl_type) - { - t - } else { - panic!("Result constraints can't be free"); - } - } - - /// Get the value type of input value number `n`, having resolved the controlling type variable - /// to `ctrl_type`. - /// - /// Unlike results, it is possible for some input values to vary freely within a specific - /// `ValueTypeSet`. This is represented with the `ArgumentConstraint::Free` variant. - pub fn value_argument_constraint(self, n: usize, ctrl_type: Type) -> ResolvedConstraint { - debug_assert!( - n < self.num_fixed_value_arguments(), - "Invalid value argument index" - ); - let offset = self.constraint_offset() + self.num_fixed_results(); - OPERAND_CONSTRAINTS[offset + n].resolve(ctrl_type) - } - - /// Get the typeset of allowed types for the controlling type variable in a polymorphic - /// instruction. - pub fn ctrl_typeset(self) -> Option { - self.typeset_offset().map(|offset| TYPE_SETS[offset]) - } - - /// Is this instruction polymorphic? 
- pub fn is_polymorphic(self) -> bool { - self.ctrl_typeset().is_some() - } -} - -type BitSet8 = BitSet; -type BitSet16 = BitSet; - -/// A value type set describes the permitted set of types for a type variable. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct ValueTypeSet { - /// Allowed lane sizes - pub lanes: BitSet16, - /// Allowed int widths - pub ints: BitSet8, - /// Allowed float widths - pub floats: BitSet8, - /// Allowed bool widths - pub bools: BitSet8, - /// Allowed ref widths - pub refs: BitSet8, -} - -impl ValueTypeSet { - /// Is `scalar` part of the base type set? - /// - /// Note that the base type set does not have to be included in the type set proper. - fn is_base_type(self, scalar: Type) -> bool { - let l2b = scalar.log2_lane_bits(); - if scalar.is_int() { - self.ints.contains(l2b) - } else if scalar.is_float() { - self.floats.contains(l2b) - } else if scalar.is_bool() { - self.bools.contains(l2b) - } else if scalar.is_ref() { - self.refs.contains(l2b) - } else { - false - } - } - - /// Does `typ` belong to this set? - pub fn contains(self, typ: Type) -> bool { - let l2l = typ.log2_lane_count(); - self.lanes.contains(l2l) && self.is_base_type(typ.lane_type()) - } - - /// Get an example member of this type set. - /// - /// This is used for error messages to avoid suggesting invalid types. - pub fn example(self) -> Type { - let t = if self.ints.max().unwrap_or(0) > 5 { - types::I32 - } else if self.floats.max().unwrap_or(0) > 5 { - types::F32 - } else if self.bools.max().unwrap_or(0) > 5 { - types::B32 - } else { - types::B1 - }; - t.by(1 << self.lanes.min().unwrap()).unwrap() - } -} - -/// Operand constraints. This describes the value type constraints on a single `Value` operand. -enum OperandConstraint { - /// This operand has a concrete value type. - Concrete(Type), - - /// This operand can vary freely within the given type set. - /// The type set is identified by its index into the TYPE_SETS constant table. - Free(u8), - - /// This operand is the same type as the controlling type variable. - Same, - - /// This operand is `ctrlType.lane_of()`. - LaneOf, - - /// This operand is `ctrlType.as_bool()`. - AsBool, - - /// This operand is `ctrlType.half_width()`. - HalfWidth, - - /// This operand is `ctrlType.double_width()`. - DoubleWidth, - - /// This operand is `ctrlType.half_vector()`. - HalfVector, - - /// This operand is `ctrlType.double_vector()`. - DoubleVector, - - /// This operand is `ctrlType.split_lanes()`. - SplitLanes, - - /// This operand is `ctrlType.merge_lanes()`. - MergeLanes, -} - -impl OperandConstraint { - /// Resolve this operand constraint into a concrete value type, given the value of the - /// controlling type variable. 
- pub fn resolve(&self, ctrl_type: Type) -> ResolvedConstraint { - use self::OperandConstraint::*; - use self::ResolvedConstraint::Bound; - match *self { - Concrete(t) => Bound(t), - Free(vts) => ResolvedConstraint::Free(TYPE_SETS[vts as usize]), - Same => Bound(ctrl_type), - LaneOf => Bound(ctrl_type.lane_of()), - AsBool => Bound(ctrl_type.as_bool()), - HalfWidth => Bound(ctrl_type.half_width().expect("invalid type for half_width")), - DoubleWidth => Bound( - ctrl_type - .double_width() - .expect("invalid type for double_width"), - ), - HalfVector => Bound( - ctrl_type - .half_vector() - .expect("invalid type for half_vector"), - ), - DoubleVector => Bound(ctrl_type.by(2).expect("invalid type for double_vector")), - SplitLanes => Bound( - ctrl_type - .split_lanes() - .expect("invalid type for split_lanes"), - ), - MergeLanes => Bound( - ctrl_type - .merge_lanes() - .expect("invalid type for merge_lanes"), - ), - } - } -} - -/// The type constraint on a value argument once the controlling type variable is known. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum ResolvedConstraint { - /// The operand is bound to a known type. - Bound(Type), - /// The operand type can vary freely within the given set. - Free(ValueTypeSet), -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::string::ToString; - - #[test] - fn opcodes() { - use core::mem; - - let x = Opcode::Iadd; - let mut y = Opcode::Isub; - - assert!(x != y); - y = Opcode::Iadd; - assert_eq!(x, y); - assert_eq!(x.format(), InstructionFormat::Binary); - - assert_eq!(format!("{:?}", Opcode::IaddImm), "IaddImm"); - assert_eq!(Opcode::IaddImm.to_string(), "iadd_imm"); - - // Check the matcher. - assert_eq!("iadd".parse::(), Ok(Opcode::Iadd)); - assert_eq!("iadd_imm".parse::(), Ok(Opcode::IaddImm)); - assert_eq!("iadd\0".parse::(), Err("Unknown opcode")); - assert_eq!("".parse::(), Err("Unknown opcode")); - assert_eq!("\0".parse::(), Err("Unknown opcode")); - - // Opcode is a single byte, and because Option originally came to 2 bytes, early on - // Opcode included a variant NotAnOpcode to avoid the unnecessary bloat. Since then the Rust - // compiler has brought in NonZero optimization, meaning that an enum not using the 0 value - // can be optional for no size cost. We want to ensure Option remains small. - assert_eq!(mem::size_of::(), mem::size_of::>()); - } - - #[test] - fn instruction_data() { - use core::mem; - // The size of the `InstructionData` enum is important for performance. It should not - // exceed 16 bytes. Use `Box` out-of-line payloads for instruction formats that - // require more space than that. It would be fine with a data structure smaller than 16 - // bytes, but what are the odds of that? 
- assert_eq!(mem::size_of::(), 16); - } - - #[test] - fn constraints() { - let a = Opcode::Iadd.constraints(); - assert!(a.use_typevar_operand()); - assert!(!a.requires_typevar_operand()); - assert_eq!(a.num_fixed_results(), 1); - assert_eq!(a.num_fixed_value_arguments(), 2); - assert_eq!(a.result_type(0, types::I32), types::I32); - assert_eq!(a.result_type(0, types::I8), types::I8); - assert_eq!( - a.value_argument_constraint(0, types::I32), - ResolvedConstraint::Bound(types::I32) - ); - assert_eq!( - a.value_argument_constraint(1, types::I32), - ResolvedConstraint::Bound(types::I32) - ); - - let b = Opcode::Bitcast.constraints(); - assert!(!b.use_typevar_operand()); - assert!(!b.requires_typevar_operand()); - assert_eq!(b.num_fixed_results(), 1); - assert_eq!(b.num_fixed_value_arguments(), 1); - assert_eq!(b.result_type(0, types::I32), types::I32); - assert_eq!(b.result_type(0, types::I8), types::I8); - match b.value_argument_constraint(0, types::I32) { - ResolvedConstraint::Free(vts) => assert!(vts.contains(types::F32)), - _ => panic!("Unexpected constraint from value_argument_constraint"), - } - - let c = Opcode::Call.constraints(); - assert_eq!(c.num_fixed_results(), 0); - assert_eq!(c.num_fixed_value_arguments(), 0); - - let i = Opcode::CallIndirect.constraints(); - assert_eq!(i.num_fixed_results(), 0); - assert_eq!(i.num_fixed_value_arguments(), 1); - - let cmp = Opcode::Icmp.constraints(); - assert!(cmp.use_typevar_operand()); - assert!(cmp.requires_typevar_operand()); - assert_eq!(cmp.num_fixed_results(), 1); - assert_eq!(cmp.num_fixed_value_arguments(), 2); - } - - #[test] - fn value_set() { - use crate::ir::types::*; - - let vts = ValueTypeSet { - lanes: BitSet16::from_range(0, 8), - ints: BitSet8::from_range(4, 7), - floats: BitSet8::from_range(0, 0), - bools: BitSet8::from_range(3, 7), - refs: BitSet8::from_range(5, 7), - }; - assert!(!vts.contains(I8)); - assert!(vts.contains(I32)); - assert!(vts.contains(I64)); - assert!(vts.contains(I32X4)); - assert!(!vts.contains(F32)); - assert!(!vts.contains(B1)); - assert!(vts.contains(B8)); - assert!(vts.contains(B64)); - assert!(vts.contains(R32)); - assert!(vts.contains(R64)); - assert_eq!(vts.example().to_string(), "i32"); - - let vts = ValueTypeSet { - lanes: BitSet16::from_range(0, 8), - ints: BitSet8::from_range(0, 0), - floats: BitSet8::from_range(5, 7), - bools: BitSet8::from_range(3, 7), - refs: BitSet8::from_range(0, 0), - }; - assert_eq!(vts.example().to_string(), "f32"); - - let vts = ValueTypeSet { - lanes: BitSet16::from_range(1, 8), - ints: BitSet8::from_range(0, 0), - floats: BitSet8::from_range(5, 7), - bools: BitSet8::from_range(3, 7), - refs: BitSet8::from_range(0, 0), - }; - assert_eq!(vts.example().to_string(), "f32x2"); - - let vts = ValueTypeSet { - lanes: BitSet16::from_range(2, 8), - ints: BitSet8::from_range(0, 0), - floats: BitSet8::from_range(0, 0), - bools: BitSet8::from_range(3, 7), - refs: BitSet8::from_range(0, 0), - }; - assert!(!vts.contains(B32X2)); - assert!(vts.contains(B32X4)); - assert_eq!(vts.example().to_string(), "b32x4"); - - let vts = ValueTypeSet { - // TypeSet(lanes=(1, 256), ints=(8, 64)) - lanes: BitSet16::from_range(0, 9), - ints: BitSet8::from_range(3, 7), - floats: BitSet8::from_range(0, 0), - bools: BitSet8::from_range(0, 0), - refs: BitSet8::from_range(0, 0), - }; - assert!(vts.contains(I32)); - assert!(vts.contains(I32X4)); - assert!(!vts.contains(R32)); - assert!(!vts.contains(R64)); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/jumptable.rs 
b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/jumptable.rs deleted file mode 100644 index bf05169d3..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/jumptable.rs +++ /dev/null @@ -1,128 +0,0 @@ -//! Jump table representation. -//! -//! Jump tables are declared in the preamble and assigned an `ir::entities::JumpTable` reference. -//! The actual table of destinations is stored in a `JumpTableData` struct defined in this module. - -use crate::ir::entities::Block; -use alloc::vec::Vec; -use core::fmt::{self, Display, Formatter}; -use core::slice::{Iter, IterMut}; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Contents of a jump table. -/// -/// All jump tables use 0-based indexing and are densely populated. -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct JumpTableData { - // Table entries. - table: Vec, -} - -impl JumpTableData { - /// Create a new empty jump table. - pub fn new() -> Self { - Self { table: Vec::new() } - } - - /// Create a new empty jump table with the specified capacity. - pub fn with_capacity(capacity: usize) -> Self { - Self { - table: Vec::with_capacity(capacity), - } - } - - /// Get the number of table entries. - pub fn len(&self) -> usize { - self.table.len() - } - - /// Append a table entry. - pub fn push_entry(&mut self, dest: Block) { - self.table.push(dest) - } - - /// Checks if any of the entries branch to `block`. - pub fn branches_to(&self, block: Block) -> bool { - self.table.iter().any(|target_block| *target_block == block) - } - - /// Access the whole table as a slice. - pub fn as_slice(&self) -> &[Block] { - self.table.as_slice() - } - - /// Access the whole table as a mutable slice. - pub fn as_mut_slice(&mut self) -> &mut [Block] { - self.table.as_mut_slice() - } - - /// Returns an iterator over the table. - pub fn iter(&self) -> Iter { - self.table.iter() - } - - /// Returns an iterator that allows modifying each value. - pub fn iter_mut(&mut self) -> IterMut { - self.table.iter_mut() - } - - /// Clears all entries in this jump table. - pub fn clear(&mut self) { - self.table.clear(); - } -} - -impl Display for JumpTableData { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "jump_table [")?; - match self.table.first() { - None => (), - Some(first) => write!(fmt, "{}", first)?, - } - for block in self.table.iter().skip(1) { - write!(fmt, ", {}", block)?; - } - write!(fmt, "]") - } -} - -#[cfg(test)] -mod tests { - use super::JumpTableData; - use crate::entity::EntityRef; - use crate::ir::Block; - use alloc::string::ToString; - - #[test] - fn empty() { - let jt = JumpTableData::new(); - - assert_eq!(jt.as_slice().get(0), None); - assert_eq!(jt.as_slice().get(10), None); - - assert_eq!(jt.to_string(), "jump_table []"); - - let v = jt.as_slice(); - assert_eq!(v, []); - } - - #[test] - fn insert() { - let e1 = Block::new(1); - let e2 = Block::new(2); - - let mut jt = JumpTableData::new(); - - jt.push_entry(e1); - jt.push_entry(e2); - jt.push_entry(e1); - - assert_eq!(jt.to_string(), "jump_table [block1, block2, block1]"); - - let v = jt.as_slice(); - assert_eq!(v, [e1, e2, e1]); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/layout.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/layout.rs deleted file mode 100644 index 47ec6b1fd..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/layout.rs +++ /dev/null @@ -1,1324 +0,0 @@ -//! 
Function layout. -//! -//! The order of basic blocks in a function and the order of instructions in a block is -//! determined by the `Layout` data structure defined in this module. - -use crate::entity::SecondaryMap; -use crate::ir::dfg::DataFlowGraph; -use crate::ir::progpoint::{ExpandedProgramPoint, ProgramOrder}; -use crate::ir::{Block, Inst}; -use crate::packed_option::PackedOption; -use crate::timing; -use core::cmp; -use core::iter::{IntoIterator, Iterator}; - -/// The `Layout` struct determines the layout of blocks and instructions in a function. It does not -/// contain definitions of instructions or blocks, but depends on `Inst` and `Block` entity references -/// being defined elsewhere. -/// -/// This data structure determines: -/// -/// - The order of blocks in the function. -/// - Which block contains a given instruction. -/// - The order of instructions with a block. -/// -/// While data dependencies are not recorded, instruction ordering does affect control -/// dependencies, so part of the semantics of the program are determined by the layout. -/// -#[derive(Clone)] -pub struct Layout { - /// Linked list nodes for the layout order of blocks Forms a doubly linked list, terminated in - /// both ends by `None`. - blocks: SecondaryMap, - - /// Linked list nodes for the layout order of instructions. Forms a double linked list per block, - /// terminated in both ends by `None`. - insts: SecondaryMap, - - /// First block in the layout order, or `None` when no blocks have been laid out. - first_block: Option, - - /// Last block in the layout order, or `None` when no blocks have been laid out. - last_block: Option, -} - -impl Layout { - /// Create a new empty `Layout`. - pub fn new() -> Self { - Self { - blocks: SecondaryMap::new(), - insts: SecondaryMap::new(), - first_block: None, - last_block: None, - } - } - - /// Clear the layout. - pub fn clear(&mut self) { - self.blocks.clear(); - self.insts.clear(); - self.first_block = None; - self.last_block = None; - } - - /// Returns the capacity of the `BlockData` map. - pub fn block_capacity(&self) -> usize { - self.blocks.capacity() - } -} - -/// Sequence numbers. -/// -/// All instructions and blocks are given a sequence number that can be used to quickly determine -/// their relative position in the layout. The sequence numbers are not contiguous, but are assigned -/// like line numbers in BASIC: 10, 20, 30, ... -/// -/// The block sequence numbers are strictly increasing, and so are the instruction sequence numbers -/// within a block. The instruction sequence numbers are all between the sequence number of their -/// containing block and the following block. -/// -/// The result is that sequence numbers work like BASIC line numbers for the textual form of the IR. -type SequenceNumber = u32; - -/// Initial stride assigned to new sequence numbers. -const MAJOR_STRIDE: SequenceNumber = 10; - -/// Secondary stride used when renumbering locally. -const MINOR_STRIDE: SequenceNumber = 2; - -/// Limit on the sequence number range we'll renumber locally. If this limit is exceeded, we'll -/// switch to a full function renumbering. -const LOCAL_LIMIT: SequenceNumber = 100 * MINOR_STRIDE; - -/// Compute the midpoint between `a` and `b`. -/// Return `None` if the midpoint would be equal to either. -fn midpoint(a: SequenceNumber, b: SequenceNumber) -> Option { - debug_assert!(a < b); - // Avoid integer overflow. 
- let m = a + (b - a) / 2; - if m > a { - Some(m) - } else { - None - } -} - -#[test] -fn test_midpoint() { - assert_eq!(midpoint(0, 1), None); - assert_eq!(midpoint(0, 2), Some(1)); - assert_eq!(midpoint(0, 3), Some(1)); - assert_eq!(midpoint(0, 4), Some(2)); - assert_eq!(midpoint(1, 4), Some(2)); - assert_eq!(midpoint(2, 4), Some(3)); - assert_eq!(midpoint(3, 4), None); - assert_eq!(midpoint(3, 4), None); -} - -impl ProgramOrder for Layout { - fn cmp(&self, a: A, b: B) -> cmp::Ordering - where - A: Into, - B: Into, - { - let a_seq = self.seq(a); - let b_seq = self.seq(b); - a_seq.cmp(&b_seq) - } - - fn is_block_gap(&self, inst: Inst, block: Block) -> bool { - let i = &self.insts[inst]; - let e = &self.blocks[block]; - - i.next.is_none() && i.block == e.prev - } -} - -// Private methods for dealing with sequence numbers. -impl Layout { - /// Get the sequence number of a program point that must correspond to an entity in the layout. - fn seq>(&self, pp: PP) -> SequenceNumber { - // When `PP = Inst` or `PP = Block`, we expect this dynamic type check to be optimized out. - match pp.into() { - ExpandedProgramPoint::Block(block) => self.blocks[block].seq, - ExpandedProgramPoint::Inst(inst) => self.insts[inst].seq, - } - } - - /// Get the last sequence number in `block`. - fn last_block_seq(&self, block: Block) -> SequenceNumber { - // Get the seq of the last instruction if it exists, otherwise use the block header seq. - self.blocks[block] - .last_inst - .map(|inst| self.insts[inst].seq) - .unwrap_or(self.blocks[block].seq) - } - - /// Assign a valid sequence number to `block` such that the numbers are still monotonic. This may - /// require renumbering. - fn assign_block_seq(&mut self, block: Block) { - debug_assert!(self.is_block_inserted(block)); - - // Get the sequence number immediately before `block`, or 0. - let prev_seq = self.blocks[block] - .prev - .map(|prev_block| self.last_block_seq(prev_block)) - .unwrap_or(0); - - // Get the sequence number immediately following `block`. - let next_seq = if let Some(inst) = self.blocks[block].first_inst.expand() { - self.insts[inst].seq - } else if let Some(next_block) = self.blocks[block].next.expand() { - self.blocks[next_block].seq - } else { - // There is nothing after `block`. We can just use a major stride. - self.blocks[block].seq = prev_seq + MAJOR_STRIDE; - return; - }; - - // Check if there is room between these sequence numbers. - if let Some(seq) = midpoint(prev_seq, next_seq) { - self.blocks[block].seq = seq; - } else { - // No available integers between `prev_seq` and `next_seq`. We have to renumber. - self.renumber_from_block(block, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT); - } - } - - /// Assign a valid sequence number to `inst` such that the numbers are still monotonic. This may - /// require renumbering. - fn assign_inst_seq(&mut self, inst: Inst) { - let block = self - .inst_block(inst) - .expect("inst must be inserted before assigning an seq"); - - // Get the sequence number immediately before `inst`. - let prev_seq = match self.insts[inst].prev.expand() { - Some(prev_inst) => self.insts[prev_inst].seq, - None => self.blocks[block].seq, - }; - - // Get the sequence number immediately following `inst`. - let next_seq = if let Some(next_inst) = self.insts[inst].next.expand() { - self.insts[next_inst].seq - } else if let Some(next_block) = self.blocks[block].next.expand() { - self.blocks[next_block].seq - } else { - // There is nothing after `inst`. We can just use a major stride. 
- self.insts[inst].seq = prev_seq + MAJOR_STRIDE; - return; - }; - - // Check if there is room between these sequence numbers. - if let Some(seq) = midpoint(prev_seq, next_seq) { - self.insts[inst].seq = seq; - } else { - // No available integers between `prev_seq` and `next_seq`. We have to renumber. - self.renumber_from_inst(inst, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT); - } - } - - /// Renumber instructions starting from `inst` until the end of the block or until numbers catch - /// up. - /// - /// Return `None` if renumbering has caught up and the sequence is monotonic again. Otherwise - /// return the last used sequence number. - /// - /// If sequence numbers exceed `limit`, switch to a full function renumbering and return `None`. - fn renumber_insts( - &mut self, - inst: Inst, - seq: SequenceNumber, - limit: SequenceNumber, - ) -> Option { - let mut inst = inst; - let mut seq = seq; - - loop { - self.insts[inst].seq = seq; - - // Next instruction. - inst = match self.insts[inst].next.expand() { - None => return Some(seq), - Some(next) => next, - }; - - if seq < self.insts[inst].seq { - // Sequence caught up. - return None; - } - - if seq > limit { - // We're pushing too many instructions in front of us. - // Switch to a full function renumbering to make some space. - self.full_renumber(); - return None; - } - - seq += MINOR_STRIDE; - } - } - - /// Renumber starting from `block` to `seq` and continuing until the sequence numbers are - /// monotonic again. - fn renumber_from_block( - &mut self, - block: Block, - first_seq: SequenceNumber, - limit: SequenceNumber, - ) { - let mut block = block; - let mut seq = first_seq; - - loop { - self.blocks[block].seq = seq; - - // Renumber instructions in `block`. Stop when the numbers catch up. - if let Some(inst) = self.blocks[block].first_inst.expand() { - seq = match self.renumber_insts(inst, seq + MINOR_STRIDE, limit) { - Some(s) => s, - None => return, - } - } - - // Advance to the next block. - block = match self.blocks[block].next.expand() { - Some(next) => next, - None => return, - }; - - // Stop renumbering once the numbers catch up. - if seq < self.blocks[block].seq { - return; - } - - seq += MINOR_STRIDE; - } - } - - /// Renumber starting from `inst` to `seq` and continuing until the sequence numbers are - /// monotonic again. - fn renumber_from_inst(&mut self, inst: Inst, first_seq: SequenceNumber, limit: SequenceNumber) { - if let Some(seq) = self.renumber_insts(inst, first_seq, limit) { - // Renumbering spills over into next block. - if let Some(next_block) = self.blocks[self.inst_block(inst).unwrap()].next.expand() { - self.renumber_from_block(next_block, seq + MINOR_STRIDE, limit); - } - } - } - - /// Renumber all blocks and instructions in the layout. - /// - /// This doesn't affect the position of anything, but it gives more room in the internal - /// sequence numbers for inserting instructions later. - fn full_renumber(&mut self) { - let _tt = timing::layout_renumber(); - let mut seq = 0; - let mut next_block = self.first_block; - while let Some(block) = next_block { - self.blocks[block].seq = seq; - seq += MAJOR_STRIDE; - next_block = self.blocks[block].next.expand(); - - let mut next_inst = self.blocks[block].first_inst.expand(); - while let Some(inst) = next_inst { - self.insts[inst].seq = seq; - seq += MAJOR_STRIDE; - next_inst = self.insts[inst].next.expand(); - } - } - log::trace!("Renumbered {} program points", seq / MAJOR_STRIDE); - } -} - -/// Methods for laying out blocks. 
-/// -/// An unknown block starts out as *not inserted* in the block layout. The layout is a linear order of -/// inserted blocks. Once a block has been inserted in the layout, instructions can be added. A block -/// can only be removed from the layout when it is empty. -/// -/// Since every block must end with a terminator instruction which cannot fall through, the layout of -/// blocks do not affect the semantics of the program. -/// -impl Layout { - /// Is `block` currently part of the layout? - pub fn is_block_inserted(&self, block: Block) -> bool { - Some(block) == self.first_block || self.blocks[block].prev.is_some() - } - - /// Insert `block` as the last block in the layout. - pub fn append_block(&mut self, block: Block) { - debug_assert!( - !self.is_block_inserted(block), - "Cannot append block that is already in the layout" - ); - { - let node = &mut self.blocks[block]; - debug_assert!(node.first_inst.is_none() && node.last_inst.is_none()); - node.prev = self.last_block.into(); - node.next = None.into(); - } - if let Some(last) = self.last_block { - self.blocks[last].next = block.into(); - } else { - self.first_block = Some(block); - } - self.last_block = Some(block); - self.assign_block_seq(block); - } - - /// Insert `block` in the layout before the existing block `before`. - pub fn insert_block(&mut self, block: Block, before: Block) { - debug_assert!( - !self.is_block_inserted(block), - "Cannot insert block that is already in the layout" - ); - debug_assert!( - self.is_block_inserted(before), - "block Insertion point not in the layout" - ); - let after = self.blocks[before].prev; - { - let node = &mut self.blocks[block]; - node.next = before.into(); - node.prev = after; - } - self.blocks[before].prev = block.into(); - match after.expand() { - None => self.first_block = Some(block), - Some(a) => self.blocks[a].next = block.into(), - } - self.assign_block_seq(block); - } - - /// Insert `block` in the layout *after* the existing block `after`. - pub fn insert_block_after(&mut self, block: Block, after: Block) { - debug_assert!( - !self.is_block_inserted(block), - "Cannot insert block that is already in the layout" - ); - debug_assert!( - self.is_block_inserted(after), - "block Insertion point not in the layout" - ); - let before = self.blocks[after].next; - { - let node = &mut self.blocks[block]; - node.next = before; - node.prev = after.into(); - } - self.blocks[after].next = block.into(); - match before.expand() { - None => self.last_block = Some(block), - Some(b) => self.blocks[b].prev = block.into(), - } - self.assign_block_seq(block); - } - - /// Remove `block` from the layout. - pub fn remove_block(&mut self, block: Block) { - debug_assert!(self.is_block_inserted(block), "block not in the layout"); - debug_assert!(self.first_inst(block).is_none(), "block must be empty."); - - // Clear the `block` node and extract links. - let prev; - let next; - { - let n = &mut self.blocks[block]; - prev = n.prev; - next = n.next; - n.prev = None.into(); - n.next = None.into(); - } - // Fix up links to `block`. - match prev.expand() { - None => self.first_block = next.expand(), - Some(p) => self.blocks[p].next = next, - } - match next.expand() { - None => self.last_block = prev.expand(), - Some(n) => self.blocks[n].prev = prev, - } - } - - /// Return an iterator over all blocks in layout order. - pub fn blocks(&self) -> Blocks { - Blocks { - layout: self, - next: self.first_block, - } - } - - /// Get the function's entry block. - /// This is simply the first block in the layout order. 
- pub fn entry_block(&self) -> Option { - self.first_block - } - - /// Get the last block in the layout. - pub fn last_block(&self) -> Option { - self.last_block - } - - /// Get the block preceding `block` in the layout order. - pub fn prev_block(&self, block: Block) -> Option { - self.blocks[block].prev.expand() - } - - /// Get the block following `block` in the layout order. - pub fn next_block(&self, block: Block) -> Option { - self.blocks[block].next.expand() - } - - /// Mark a block as "cold". - /// - /// This will try to move it out of the ordinary path of execution - /// when lowered to machine code. - pub fn set_cold(&mut self, block: Block) { - self.blocks[block].cold = true; - } - - /// Is the given block cold? - pub fn is_cold(&self, block: Block) -> bool { - self.blocks[block].cold - } -} - -#[derive(Clone, Debug, Default)] -struct BlockNode { - prev: PackedOption, - next: PackedOption, - first_inst: PackedOption, - last_inst: PackedOption, - seq: SequenceNumber, - cold: bool, -} - -/// Iterate over blocks in layout order. See [crate::ir::layout::Layout::blocks]. -pub struct Blocks<'f> { - layout: &'f Layout, - next: Option, -} - -impl<'f> Iterator for Blocks<'f> { - type Item = Block; - - fn next(&mut self) -> Option { - match self.next { - Some(block) => { - self.next = self.layout.next_block(block); - Some(block) - } - None => None, - } - } -} - -/// Use a layout reference in a for loop. -impl<'f> IntoIterator for &'f Layout { - type Item = Block; - type IntoIter = Blocks<'f>; - - fn into_iter(self) -> Blocks<'f> { - self.blocks() - } -} - -/// Methods for arranging instructions. -/// -/// An instruction starts out as *not inserted* in the layout. An instruction can be inserted into -/// a block at a given position. -impl Layout { - /// Get the block containing `inst`, or `None` if `inst` is not inserted in the layout. - pub fn inst_block(&self, inst: Inst) -> Option { - self.insts[inst].block.into() - } - - /// Get the block containing the program point `pp`. Panic if `pp` is not in the layout. - pub fn pp_block(&self, pp: PP) -> Block - where - PP: Into, - { - match pp.into() { - ExpandedProgramPoint::Block(block) => block, - ExpandedProgramPoint::Inst(inst) => { - self.inst_block(inst).expect("Program point not in layout") - } - } - } - - /// Append `inst` to the end of `block`. - pub fn append_inst(&mut self, inst: Inst, block: Block) { - debug_assert_eq!(self.inst_block(inst), None); - debug_assert!( - self.is_block_inserted(block), - "Cannot append instructions to block not in layout" - ); - { - let block_node = &mut self.blocks[block]; - { - let inst_node = &mut self.insts[inst]; - inst_node.block = block.into(); - inst_node.prev = block_node.last_inst; - debug_assert!(inst_node.next.is_none()); - } - if block_node.first_inst.is_none() { - block_node.first_inst = inst.into(); - } else { - self.insts[block_node.last_inst.unwrap()].next = inst.into(); - } - block_node.last_inst = inst.into(); - } - self.assign_inst_seq(inst); - } - - /// Fetch a block's first instruction. - pub fn first_inst(&self, block: Block) -> Option { - self.blocks[block].first_inst.into() - } - - /// Fetch a block's last instruction. - pub fn last_inst(&self, block: Block) -> Option { - self.blocks[block].last_inst.into() - } - - /// Fetch the instruction following `inst`. - pub fn next_inst(&self, inst: Inst) -> Option { - self.insts[inst].next.expand() - } - - /// Fetch the instruction preceding `inst`. 
- pub fn prev_inst(&self, inst: Inst) -> Option { - self.insts[inst].prev.expand() - } - - /// Fetch the first instruction in a block's terminal branch group. - pub fn canonical_branch_inst(&self, dfg: &DataFlowGraph, block: Block) -> Option { - // Basic blocks permit at most two terminal branch instructions. - // If two, the former is conditional and the latter is unconditional. - let last = self.last_inst(block)?; - if let Some(prev) = self.prev_inst(last) { - if dfg[prev].opcode().is_branch() { - return Some(prev); - } - } - Some(last) - } - - /// Insert `inst` before the instruction `before` in the same block. - pub fn insert_inst(&mut self, inst: Inst, before: Inst) { - debug_assert_eq!(self.inst_block(inst), None); - let block = self - .inst_block(before) - .expect("Instruction before insertion point not in the layout"); - let after = self.insts[before].prev; - { - let inst_node = &mut self.insts[inst]; - inst_node.block = block.into(); - inst_node.next = before.into(); - inst_node.prev = after; - } - self.insts[before].prev = inst.into(); - match after.expand() { - None => self.blocks[block].first_inst = inst.into(), - Some(a) => self.insts[a].next = inst.into(), - } - self.assign_inst_seq(inst); - } - - /// Remove `inst` from the layout. - pub fn remove_inst(&mut self, inst: Inst) { - let block = self.inst_block(inst).expect("Instruction already removed."); - // Clear the `inst` node and extract links. - let prev; - let next; - { - let n = &mut self.insts[inst]; - prev = n.prev; - next = n.next; - n.block = None.into(); - n.prev = None.into(); - n.next = None.into(); - } - // Fix up links to `inst`. - match prev.expand() { - None => self.blocks[block].first_inst = next, - Some(p) => self.insts[p].next = next, - } - match next.expand() { - None => self.blocks[block].last_inst = prev, - Some(n) => self.insts[n].prev = prev, - } - } - - /// Iterate over the instructions in `block` in layout order. - pub fn block_insts(&self, block: Block) -> Insts { - Insts { - layout: self, - head: self.blocks[block].first_inst.into(), - tail: self.blocks[block].last_inst.into(), - } - } - - /// Iterate over a limited set of instruction which are likely the branches of `block` in layout - /// order. Any instruction not visited by this iterator is not a branch, but an instruction visited by this may not be a branch. - pub fn block_likely_branches(&self, block: Block) -> Insts { - // Note: Checking whether an instruction is a branch or not while walking backward might add - // extra overhead. However, we know that the number of branches is limited to 2 at the end of - // each block, and therefore we can just iterate over the last 2 instructions. - let mut iter = self.block_insts(block); - let head = iter.head; - let tail = iter.tail; - iter.next_back(); - let head = iter.next_back().or(head); - Insts { - layout: self, - head, - tail, - } - } - - /// Split the block containing `before` in two. - /// - /// Insert `new_block` after the old block and move `before` and the following instructions to - /// `new_block`: - /// - /// ```text - /// old_block: - /// i1 - /// i2 - /// i3 << before - /// i4 - /// ``` - /// becomes: - /// - /// ```text - /// old_block: - /// i1 - /// i2 - /// new_block: - /// i3 << before - /// i4 - /// ``` - pub fn split_block(&mut self, new_block: Block, before: Inst) { - let old_block = self - .inst_block(before) - .expect("The `before` instruction must be in the layout"); - debug_assert!(!self.is_block_inserted(new_block)); - - // Insert new_block after old_block. 
- let next_block = self.blocks[old_block].next; - let last_inst = self.blocks[old_block].last_inst; - { - let node = &mut self.blocks[new_block]; - node.prev = old_block.into(); - node.next = next_block; - node.first_inst = before.into(); - node.last_inst = last_inst; - } - self.blocks[old_block].next = new_block.into(); - - // Fix backwards link. - if Some(old_block) == self.last_block { - self.last_block = Some(new_block); - } else { - self.blocks[next_block.unwrap()].prev = new_block.into(); - } - - // Disconnect the instruction links. - let prev_inst = self.insts[before].prev; - self.insts[before].prev = None.into(); - self.blocks[old_block].last_inst = prev_inst; - match prev_inst.expand() { - None => self.blocks[old_block].first_inst = None.into(), - Some(pi) => self.insts[pi].next = None.into(), - } - - // Fix the instruction -> block pointers. - let mut opt_i = Some(before); - while let Some(i) = opt_i { - debug_assert_eq!(self.insts[i].block.expand(), Some(old_block)); - self.insts[i].block = new_block.into(); - opt_i = self.insts[i].next.into(); - } - - self.assign_block_seq(new_block); - } -} - -#[derive(Clone, Debug, Default)] -struct InstNode { - /// The Block containing this instruction, or `None` if the instruction is not yet inserted. - block: PackedOption, - prev: PackedOption, - next: PackedOption, - seq: SequenceNumber, -} - -/// Iterate over instructions in a block in layout order. See `Layout::block_insts()`. -pub struct Insts<'f> { - layout: &'f Layout, - head: Option, - tail: Option, -} - -impl<'f> Iterator for Insts<'f> { - type Item = Inst; - - fn next(&mut self) -> Option { - let rval = self.head; - if let Some(inst) = rval { - if self.head == self.tail { - self.head = None; - self.tail = None; - } else { - self.head = self.layout.insts[inst].next.into(); - } - } - rval - } -} - -impl<'f> DoubleEndedIterator for Insts<'f> { - fn next_back(&mut self) -> Option { - let rval = self.tail; - if let Some(inst) = rval { - if self.head == self.tail { - self.head = None; - self.tail = None; - } else { - self.tail = self.layout.insts[inst].prev.into(); - } - } - rval - } -} - -/// A custom serialize and deserialize implementation for [`Layout`]. -/// -/// This doesn't use a derived implementation as [`Layout`] is a manual implementation of a linked -/// list. Storing it directly as a regular list saves a lot of space. -/// -/// The following format is used. 
(notated in EBNF form) -/// -/// ```plain -/// data = block_data * ; -/// block_data = "block_id" , "inst_count" , ( "inst_id" * ) ; -/// ``` -#[cfg(feature = "enable-serde")] -mod serde { - use ::serde::de::{Deserializer, Error, SeqAccess, Visitor}; - use ::serde::ser::{SerializeSeq, Serializer}; - use ::serde::{Deserialize, Serialize}; - use core::convert::TryFrom; - use core::fmt; - use core::marker::PhantomData; - - use super::*; - - impl Serialize for Layout { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let size = self.blocks().count() * 2 - + self - .blocks() - .map(|block| self.block_insts(block).count()) - .sum::(); - let mut seq = serializer.serialize_seq(Some(size))?; - for block in self.blocks() { - seq.serialize_element(&block)?; - seq.serialize_element(&u32::try_from(self.block_insts(block).count()).unwrap())?; - for inst in self.block_insts(block) { - seq.serialize_element(&inst)?; - } - } - seq.end() - } - } - - impl<'de> Deserialize<'de> for Layout { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_seq(LayoutVisitor { - marker: PhantomData, - }) - } - } - - struct LayoutVisitor { - marker: PhantomData Layout>, - } - - impl<'de> Visitor<'de> for LayoutVisitor { - type Value = Layout; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a `cranelift_codegen::ir::Layout`") - } - - fn visit_seq(self, mut access: M) -> Result - where - M: SeqAccess<'de>, - { - let mut layout = Layout::new(); - - while let Some(block) = access.next_element::()? { - layout.append_block(block); - - let count = access - .next_element::()? - .ok_or_else(|| Error::missing_field("count"))?; - for _ in 0..count { - let inst = access - .next_element::()? - .ok_or_else(|| Error::missing_field("inst"))?; - layout.append_inst(inst, block); - } - } - - Ok(layout) - } - } -} - -#[cfg(test)] -mod tests { - use super::Layout; - use crate::cursor::{Cursor, CursorPosition}; - use crate::entity::EntityRef; - use crate::ir::{Block, Inst, ProgramOrder, SourceLoc}; - use alloc::vec::Vec; - use core::cmp::Ordering; - - struct LayoutCursor<'f> { - /// Borrowed function layout. Public so it can be re-borrowed from this cursor. - pub layout: &'f mut Layout, - pos: CursorPosition, - } - - impl<'f> Cursor for LayoutCursor<'f> { - fn position(&self) -> CursorPosition { - self.pos - } - - fn set_position(&mut self, pos: CursorPosition) { - self.pos = pos; - } - - fn srcloc(&self) -> SourceLoc { - unimplemented!() - } - - fn set_srcloc(&mut self, _srcloc: SourceLoc) { - unimplemented!() - } - - fn layout(&self) -> &Layout { - self.layout - } - - fn layout_mut(&mut self) -> &mut Layout { - self.layout - } - } - - impl<'f> LayoutCursor<'f> { - /// Create a new `LayoutCursor` for `layout`. - /// The cursor holds a mutable reference to `layout` for its entire lifetime. - pub fn new(layout: &'f mut Layout) -> Self { - Self { - layout, - pos: CursorPosition::Nowhere, - } - } - } - - fn verify(layout: &mut Layout, blocks: &[(Block, &[Inst])]) { - // Check that blocks are inserted and instructions belong the right places. - // Check forward linkage with iterators. - // Check that layout sequence numbers are strictly monotonic. 
- { - let mut seq = 0; - let mut block_iter = layout.blocks(); - for &(block, insts) in blocks { - assert!(layout.is_block_inserted(block)); - assert_eq!(block_iter.next(), Some(block)); - assert!(layout.blocks[block].seq > seq); - seq = layout.blocks[block].seq; - - let mut inst_iter = layout.block_insts(block); - for &inst in insts { - assert_eq!(layout.inst_block(inst), Some(block)); - assert_eq!(inst_iter.next(), Some(inst)); - assert!(layout.insts[inst].seq > seq); - seq = layout.insts[inst].seq; - } - assert_eq!(inst_iter.next(), None); - } - assert_eq!(block_iter.next(), None); - } - - // Check backwards linkage with a cursor. - let mut cur = LayoutCursor::new(layout); - for &(block, insts) in blocks.into_iter().rev() { - assert_eq!(cur.prev_block(), Some(block)); - for &inst in insts.into_iter().rev() { - assert_eq!(cur.prev_inst(), Some(inst)); - } - assert_eq!(cur.prev_inst(), None); - } - assert_eq!(cur.prev_block(), None); - } - - #[test] - fn append_block() { - let mut layout = Layout::new(); - let e0 = Block::new(0); - let e1 = Block::new(1); - let e2 = Block::new(2); - - { - let imm = &layout; - assert!(!imm.is_block_inserted(e0)); - assert!(!imm.is_block_inserted(e1)); - } - verify(&mut layout, &[]); - - layout.append_block(e1); - assert!(!layout.is_block_inserted(e0)); - assert!(layout.is_block_inserted(e1)); - assert!(!layout.is_block_inserted(e2)); - let v: Vec = layout.blocks().collect(); - assert_eq!(v, [e1]); - - layout.append_block(e2); - assert!(!layout.is_block_inserted(e0)); - assert!(layout.is_block_inserted(e1)); - assert!(layout.is_block_inserted(e2)); - let v: Vec = layout.blocks().collect(); - assert_eq!(v, [e1, e2]); - - layout.append_block(e0); - assert!(layout.is_block_inserted(e0)); - assert!(layout.is_block_inserted(e1)); - assert!(layout.is_block_inserted(e2)); - let v: Vec = layout.blocks().collect(); - assert_eq!(v, [e1, e2, e0]); - - { - let imm = &layout; - let mut v = Vec::new(); - for e in imm { - v.push(e); - } - assert_eq!(v, [e1, e2, e0]); - } - - // Test cursor positioning. - let mut cur = LayoutCursor::new(&mut layout); - assert_eq!(cur.position(), CursorPosition::Nowhere); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.position(), CursorPosition::Nowhere); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.position(), CursorPosition::Nowhere); - - assert_eq!(cur.next_block(), Some(e1)); - assert_eq!(cur.position(), CursorPosition::Before(e1)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.position(), CursorPosition::After(e1)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.position(), CursorPosition::After(e1)); - assert_eq!(cur.next_block(), Some(e2)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.position(), CursorPosition::Before(e2)); - assert_eq!(cur.next_block(), Some(e0)); - assert_eq!(cur.next_block(), None); - assert_eq!(cur.position(), CursorPosition::Nowhere); - - // Backwards through the blocks. 
- assert_eq!(cur.prev_block(), Some(e0)); - assert_eq!(cur.position(), CursorPosition::After(e0)); - assert_eq!(cur.prev_block(), Some(e2)); - assert_eq!(cur.prev_block(), Some(e1)); - assert_eq!(cur.prev_block(), None); - assert_eq!(cur.position(), CursorPosition::Nowhere); - } - - #[test] - fn insert_block() { - let mut layout = Layout::new(); - let e0 = Block::new(0); - let e1 = Block::new(1); - let e2 = Block::new(2); - - { - let imm = &layout; - assert!(!imm.is_block_inserted(e0)); - assert!(!imm.is_block_inserted(e1)); - - let v: Vec = layout.blocks().collect(); - assert_eq!(v, []); - } - - layout.append_block(e1); - assert!(!layout.is_block_inserted(e0)); - assert!(layout.is_block_inserted(e1)); - assert!(!layout.is_block_inserted(e2)); - verify(&mut layout, &[(e1, &[])]); - - layout.insert_block(e2, e1); - assert!(!layout.is_block_inserted(e0)); - assert!(layout.is_block_inserted(e1)); - assert!(layout.is_block_inserted(e2)); - verify(&mut layout, &[(e2, &[]), (e1, &[])]); - - layout.insert_block(e0, e1); - assert!(layout.is_block_inserted(e0)); - assert!(layout.is_block_inserted(e1)); - assert!(layout.is_block_inserted(e2)); - verify(&mut layout, &[(e2, &[]), (e0, &[]), (e1, &[])]); - } - - #[test] - fn insert_block_after() { - let mut layout = Layout::new(); - let e0 = Block::new(0); - let e1 = Block::new(1); - let e2 = Block::new(2); - - layout.append_block(e1); - layout.insert_block_after(e2, e1); - verify(&mut layout, &[(e1, &[]), (e2, &[])]); - - layout.insert_block_after(e0, e1); - verify(&mut layout, &[(e1, &[]), (e0, &[]), (e2, &[])]); - } - - #[test] - fn append_inst() { - let mut layout = Layout::new(); - let e1 = Block::new(1); - - layout.append_block(e1); - let v: Vec = layout.block_insts(e1).collect(); - assert_eq!(v, []); - - let i0 = Inst::new(0); - let i1 = Inst::new(1); - let i2 = Inst::new(2); - - assert_eq!(layout.inst_block(i0), None); - assert_eq!(layout.inst_block(i1), None); - assert_eq!(layout.inst_block(i2), None); - - layout.append_inst(i1, e1); - assert_eq!(layout.inst_block(i0), None); - assert_eq!(layout.inst_block(i1), Some(e1)); - assert_eq!(layout.inst_block(i2), None); - let v: Vec = layout.block_insts(e1).collect(); - assert_eq!(v, [i1]); - - layout.append_inst(i2, e1); - assert_eq!(layout.inst_block(i0), None); - assert_eq!(layout.inst_block(i1), Some(e1)); - assert_eq!(layout.inst_block(i2), Some(e1)); - let v: Vec = layout.block_insts(e1).collect(); - assert_eq!(v, [i1, i2]); - - // Test double-ended instruction iterator. - let v: Vec = layout.block_insts(e1).rev().collect(); - assert_eq!(v, [i2, i1]); - - layout.append_inst(i0, e1); - verify(&mut layout, &[(e1, &[i1, i2, i0])]); - - // Test cursor positioning. 
- let mut cur = LayoutCursor::new(&mut layout).at_top(e1); - assert_eq!(cur.position(), CursorPosition::Before(e1)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.position(), CursorPosition::Before(e1)); - assert_eq!(cur.next_inst(), Some(i1)); - assert_eq!(cur.position(), CursorPosition::At(i1)); - assert_eq!(cur.next_inst(), Some(i2)); - assert_eq!(cur.next_inst(), Some(i0)); - assert_eq!(cur.prev_inst(), Some(i2)); - assert_eq!(cur.position(), CursorPosition::At(i2)); - assert_eq!(cur.next_inst(), Some(i0)); - assert_eq!(cur.position(), CursorPosition::At(i0)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.position(), CursorPosition::After(e1)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.position(), CursorPosition::After(e1)); - assert_eq!(cur.prev_inst(), Some(i0)); - assert_eq!(cur.prev_inst(), Some(i2)); - assert_eq!(cur.prev_inst(), Some(i1)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.position(), CursorPosition::Before(e1)); - - // Test remove_inst. - cur.goto_inst(i2); - assert_eq!(cur.remove_inst(), i2); - verify(cur.layout, &[(e1, &[i1, i0])]); - assert_eq!(cur.layout.inst_block(i2), None); - assert_eq!(cur.remove_inst(), i0); - verify(cur.layout, &[(e1, &[i1])]); - assert_eq!(cur.layout.inst_block(i0), None); - assert_eq!(cur.position(), CursorPosition::After(e1)); - cur.layout.remove_inst(i1); - verify(cur.layout, &[(e1, &[])]); - assert_eq!(cur.layout.inst_block(i1), None); - } - - #[test] - fn insert_inst() { - let mut layout = Layout::new(); - let e1 = Block::new(1); - - layout.append_block(e1); - let v: Vec = layout.block_insts(e1).collect(); - assert_eq!(v, []); - - let i0 = Inst::new(0); - let i1 = Inst::new(1); - let i2 = Inst::new(2); - - assert_eq!(layout.inst_block(i0), None); - assert_eq!(layout.inst_block(i1), None); - assert_eq!(layout.inst_block(i2), None); - - layout.append_inst(i1, e1); - assert_eq!(layout.inst_block(i0), None); - assert_eq!(layout.inst_block(i1), Some(e1)); - assert_eq!(layout.inst_block(i2), None); - let v: Vec = layout.block_insts(e1).collect(); - assert_eq!(v, [i1]); - - layout.insert_inst(i2, i1); - assert_eq!(layout.inst_block(i0), None); - assert_eq!(layout.inst_block(i1), Some(e1)); - assert_eq!(layout.inst_block(i2), Some(e1)); - let v: Vec = layout.block_insts(e1).collect(); - assert_eq!(v, [i2, i1]); - - layout.insert_inst(i0, i1); - verify(&mut layout, &[(e1, &[i2, i0, i1])]); - } - - #[test] - fn multiple_blocks() { - let mut layout = Layout::new(); - - let e0 = Block::new(0); - let e1 = Block::new(1); - - assert_eq!(layout.entry_block(), None); - layout.append_block(e0); - assert_eq!(layout.entry_block(), Some(e0)); - layout.append_block(e1); - assert_eq!(layout.entry_block(), Some(e0)); - - let i0 = Inst::new(0); - let i1 = Inst::new(1); - let i2 = Inst::new(2); - let i3 = Inst::new(3); - - layout.append_inst(i0, e0); - layout.append_inst(i1, e0); - layout.append_inst(i2, e1); - layout.append_inst(i3, e1); - - let v0: Vec = layout.block_insts(e0).collect(); - let v1: Vec = layout.block_insts(e1).collect(); - assert_eq!(v0, [i0, i1]); - assert_eq!(v1, [i2, i3]); - } - - #[test] - fn split_block() { - let mut layout = Layout::new(); - - let e0 = Block::new(0); - let e1 = Block::new(1); - let e2 = Block::new(2); - - let i0 = Inst::new(0); - let i1 = Inst::new(1); - let i2 = Inst::new(2); - let i3 = Inst::new(3); - - layout.append_block(e0); - layout.append_inst(i0, e0); - assert_eq!(layout.inst_block(i0), Some(e0)); - layout.split_block(e1, i0); - assert_eq!(layout.inst_block(i0), Some(e1)); - - 
{ - let mut cur = LayoutCursor::new(&mut layout); - assert_eq!(cur.next_block(), Some(e0)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_block(), Some(e1)); - assert_eq!(cur.next_inst(), Some(i0)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_block(), None); - - // Check backwards links. - assert_eq!(cur.prev_block(), Some(e1)); - assert_eq!(cur.prev_inst(), Some(i0)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_block(), Some(e0)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_block(), None); - } - - layout.append_inst(i1, e0); - layout.append_inst(i2, e0); - layout.append_inst(i3, e0); - layout.split_block(e2, i2); - - assert_eq!(layout.inst_block(i0), Some(e1)); - assert_eq!(layout.inst_block(i1), Some(e0)); - assert_eq!(layout.inst_block(i2), Some(e2)); - assert_eq!(layout.inst_block(i3), Some(e2)); - - { - let mut cur = LayoutCursor::new(&mut layout); - assert_eq!(cur.next_block(), Some(e0)); - assert_eq!(cur.next_inst(), Some(i1)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_block(), Some(e2)); - assert_eq!(cur.next_inst(), Some(i2)); - assert_eq!(cur.next_inst(), Some(i3)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_block(), Some(e1)); - assert_eq!(cur.next_inst(), Some(i0)); - assert_eq!(cur.next_inst(), None); - assert_eq!(cur.next_block(), None); - - assert_eq!(cur.prev_block(), Some(e1)); - assert_eq!(cur.prev_inst(), Some(i0)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_block(), Some(e2)); - assert_eq!(cur.prev_inst(), Some(i3)); - assert_eq!(cur.prev_inst(), Some(i2)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_block(), Some(e0)); - assert_eq!(cur.prev_inst(), Some(i1)); - assert_eq!(cur.prev_inst(), None); - assert_eq!(cur.prev_block(), None); - } - - // Check `ProgramOrder`. - assert_eq!(layout.cmp(e2, e2), Ordering::Equal); - assert_eq!(layout.cmp(e2, i2), Ordering::Less); - assert_eq!(layout.cmp(i3, i2), Ordering::Greater); - - assert_eq!(layout.is_block_gap(i1, e2), true); - assert_eq!(layout.is_block_gap(i3, e1), true); - assert_eq!(layout.is_block_gap(i1, e1), false); - assert_eq!(layout.is_block_gap(i2, e1), false); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/libcall.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/libcall.rs deleted file mode 100644 index 5dbbd7232..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/libcall.rs +++ /dev/null @@ -1,215 +0,0 @@ -//! Naming well-known routines in the runtime library. - -use crate::ir::{types, ExternalName, FuncRef, Function, Opcode, Type}; -use core::fmt; -use core::str::FromStr; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// The name of a runtime library routine. -/// -/// Runtime library calls are generated for Cranelift IR instructions that don't have an equivalent -/// ISA instruction or an easy macro expansion. A `LibCall` is used as a well-known name to refer to -/// the runtime library routine. This way, Cranelift doesn't have to know about the naming -/// convention in the embedding VM's runtime library. -/// -/// This list is likely to grow over time. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum LibCall { - /// probe for stack overflow. These are emitted for functions which need - /// when the `enable_probestack` setting is true. 
- Probestack, - /// udiv.i64 - UdivI64, - /// sdiv.i64 - SdivI64, - /// urem.i64 - UremI64, - /// srem.i64 - SremI64, - /// ishl.i64 - IshlI64, - /// ushr.i64 - UshrI64, - /// sshr.i64 - SshrI64, - /// ceil.f32 - CeilF32, - /// ceil.f64 - CeilF64, - /// floor.f32 - FloorF32, - /// floor.f64 - FloorF64, - /// trunc.f32 - TruncF32, - /// frunc.f64 - TruncF64, - /// nearest.f32 - NearestF32, - /// nearest.f64 - NearestF64, - /// libc.memcpy - Memcpy, - /// libc.memset - Memset, - /// libc.memmove - Memmove, - /// libc.memcmp - Memcmp, - - /// Elf __tls_get_addr - ElfTlsGetAddr, - // When adding a new variant make sure to add it to `all_libcalls` too. -} - -impl fmt::Display for LibCall { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl FromStr for LibCall { - type Err = (); - - fn from_str(s: &str) -> Result { - match s { - "Probestack" => Ok(Self::Probestack), - "UdivI64" => Ok(Self::UdivI64), - "SdivI64" => Ok(Self::SdivI64), - "UremI64" => Ok(Self::UremI64), - "SremI64" => Ok(Self::SremI64), - "IshlI64" => Ok(Self::IshlI64), - "UshrI64" => Ok(Self::UshrI64), - "SshrI64" => Ok(Self::SshrI64), - "CeilF32" => Ok(Self::CeilF32), - "CeilF64" => Ok(Self::CeilF64), - "FloorF32" => Ok(Self::FloorF32), - "FloorF64" => Ok(Self::FloorF64), - "TruncF32" => Ok(Self::TruncF32), - "TruncF64" => Ok(Self::TruncF64), - "NearestF32" => Ok(Self::NearestF32), - "NearestF64" => Ok(Self::NearestF64), - "Memcpy" => Ok(Self::Memcpy), - "Memset" => Ok(Self::Memset), - "Memmove" => Ok(Self::Memmove), - "Memcmp" => Ok(Self::Memcmp), - - "ElfTlsGetAddr" => Ok(Self::ElfTlsGetAddr), - _ => Err(()), - } - } -} - -impl LibCall { - /// Get the well-known library call name to use as a replacement for an instruction with the - /// given opcode and controlling type variable. - /// - /// Returns `None` if no well-known library routine name exists for that instruction. - pub fn for_inst(opcode: Opcode, ctrl_type: Type) -> Option { - Some(match ctrl_type { - types::I64 => match opcode { - Opcode::Udiv => Self::UdivI64, - Opcode::Sdiv => Self::SdivI64, - Opcode::Urem => Self::UremI64, - Opcode::Srem => Self::SremI64, - Opcode::Ishl => Self::IshlI64, - Opcode::Ushr => Self::UshrI64, - Opcode::Sshr => Self::SshrI64, - _ => return None, - }, - types::F32 => match opcode { - Opcode::Ceil => Self::CeilF32, - Opcode::Floor => Self::FloorF32, - Opcode::Trunc => Self::TruncF32, - Opcode::Nearest => Self::NearestF32, - _ => return None, - }, - types::F64 => match opcode { - Opcode::Ceil => Self::CeilF64, - Opcode::Floor => Self::FloorF64, - Opcode::Trunc => Self::TruncF64, - Opcode::Nearest => Self::NearestF64, - _ => return None, - }, - _ => return None, - }) - } - - /// Get a list of all known `LibCall`'s. - pub fn all_libcalls() -> &'static [LibCall] { - use LibCall::*; - &[ - Probestack, - UdivI64, - SdivI64, - UremI64, - SremI64, - IshlI64, - UshrI64, - SshrI64, - CeilF32, - CeilF64, - FloorF32, - FloorF64, - TruncF32, - TruncF64, - NearestF32, - NearestF64, - Memcpy, - Memset, - Memmove, - Memcmp, - ElfTlsGetAddr, - ] - } -} - -/// Get a function reference for the probestack function in `func`. -/// -/// If there is an existing reference, use it, otherwise make a new one. -pub fn get_probestack_funcref(func: &mut Function) -> Option { - find_funcref(LibCall::Probestack, func) -} - -/// Get the existing function reference for `libcall` in `func` if it exists. 
-fn find_funcref(libcall: LibCall, func: &Function) -> Option { - // We're assuming that all libcall function decls are at the end. - // If we get this wrong, worst case we'll have duplicate libcall decls which is harmless. - for (fref, func_data) in func.dfg.ext_funcs.iter().rev() { - match func_data.name { - ExternalName::LibCall(lc) => { - if lc == libcall { - return Some(fref); - } - } - _ => break, - } - } - None -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::string::ToString; - - #[test] - fn display() { - assert_eq!(LibCall::CeilF32.to_string(), "CeilF32"); - assert_eq!(LibCall::NearestF64.to_string(), "NearestF64"); - } - - #[test] - fn parsing() { - assert_eq!("FloorF32".parse(), Ok(LibCall::FloorF32)); - } - - #[test] - fn all_libcalls_to_from_string() { - for &libcall in LibCall::all_libcalls() { - assert_eq!(libcall.to_string().parse(), Ok(libcall)); - } - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/memflags.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/memflags.rs deleted file mode 100644 index 4ff76b623..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/memflags.rs +++ /dev/null @@ -1,169 +0,0 @@ -//! Memory operation flags. - -use core::fmt; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -enum FlagBit { - Notrap, - Aligned, - Readonly, - LittleEndian, - BigEndian, -} - -const NAMES: [&str; 5] = ["notrap", "aligned", "readonly", "little", "big"]; - -/// Endianness of a memory access. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] -pub enum Endianness { - /// Little-endian - Little, - /// Big-endian - Big, -} - -/// Flags for memory operations like load/store. -/// -/// Each of these flags introduce a limited form of undefined behavior. The flags each enable -/// certain optimizations that need to make additional assumptions. Generally, the semantics of a -/// program does not change when a flag is removed, but adding a flag will. -/// -/// In addition, the flags determine the endianness of the memory access. By default, -/// any memory access uses the native endianness determined by the target ISA. This can -/// be overridden for individual accesses by explicitly specifying little- or big-endian -/// semantics via the flags. -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct MemFlags { - bits: u8, -} - -impl MemFlags { - /// Create a new empty set of flags. - pub fn new() -> Self { - Self { bits: 0 } - } - - /// Create a set of flags representing an access from a "trusted" address, meaning it's - /// known to be aligned and non-trapping. - pub fn trusted() -> Self { - let mut result = Self::new(); - result.set_notrap(); - result.set_aligned(); - result - } - - /// Read a flag bit. - fn read(self, bit: FlagBit) -> bool { - self.bits & (1 << bit as usize) != 0 - } - - /// Set a flag bit. - fn set(&mut self, bit: FlagBit) { - self.bits |= 1 << bit as usize - } - - /// Set a flag bit by name. - /// - /// Returns true if the flag was found and set, false for an unknown flag name. - /// Will also return false when trying to set inconsistent endianness flags. 
- pub fn set_by_name(&mut self, name: &str) -> bool { - match NAMES.iter().position(|&s| s == name) { - Some(bit) => { - let bits = self.bits | 1 << bit; - if (bits & (1 << FlagBit::LittleEndian as usize)) != 0 - && (bits & (1 << FlagBit::BigEndian as usize)) != 0 - { - false - } else { - self.bits = bits; - true - } - } - None => false, - } - } - - /// Return endianness of the memory access. This will return the endianness - /// explicitly specified by the flags if any, and will default to the native - /// endianness otherwise. The native endianness has to be provided by the - /// caller since it is not explicitly encoded in CLIF IR -- this allows a - /// front end to create IR without having to know the target endianness. - pub fn endianness(self, native_endianness: Endianness) -> Endianness { - if self.read(FlagBit::LittleEndian) { - Endianness::Little - } else if self.read(FlagBit::BigEndian) { - Endianness::Big - } else { - native_endianness - } - } - - /// Set endianness of the memory access. - pub fn set_endianness(&mut self, endianness: Endianness) { - match endianness { - Endianness::Little => self.set(FlagBit::LittleEndian), - Endianness::Big => self.set(FlagBit::BigEndian), - }; - assert!(!(self.read(FlagBit::LittleEndian) && self.read(FlagBit::BigEndian))); - } - - /// Test if the `notrap` flag is set. - /// - /// Normally, trapping is part of the semantics of a load/store operation. If the platform - /// would cause a trap when accessing the effective address, the Cranelift memory operation is - /// also required to trap. - /// - /// The `notrap` flag tells Cranelift that the memory is *accessible*, which means that - /// accesses will not trap. This makes it possible to delete an unused load or a dead store - /// instruction. - pub fn notrap(self) -> bool { - self.read(FlagBit::Notrap) - } - - /// Set the `notrap` flag. - pub fn set_notrap(&mut self) { - self.set(FlagBit::Notrap) - } - - /// Test if the `aligned` flag is set. - /// - /// By default, Cranelift memory instructions work with any unaligned effective address. If the - /// `aligned` flag is set, the instruction is permitted to trap or return a wrong result if the - /// effective address is misaligned. - pub fn aligned(self) -> bool { - self.read(FlagBit::Aligned) - } - - /// Set the `aligned` flag. - pub fn set_aligned(&mut self) { - self.set(FlagBit::Aligned) - } - - /// Test if the `readonly` flag is set. - /// - /// Loads with this flag have no memory dependencies. - /// This results in undefined behavior if the dereferenced memory is mutated at any time - /// between when the function is called and when it is exited. - pub fn readonly(self) -> bool { - self.read(FlagBit::Readonly) - } - - /// Set the `readonly` flag. - pub fn set_readonly(&mut self) { - self.set(FlagBit::Readonly) - } -} - -impl fmt::Display for MemFlags { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (i, n) in NAMES.iter().enumerate() { - if self.bits & (1 << i) != 0 { - write!(f, " {}", n)?; - } - } - Ok(()) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/mod.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/mod.rs deleted file mode 100644 index 713d2fd37..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/mod.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! Representation of Cranelift IR functions. 
- -mod atomic_rmw_op; -mod builder; -pub mod condcodes; -pub mod constant; -pub mod dfg; -pub mod entities; -mod extfunc; -mod extname; -pub mod function; -mod globalvalue; -mod heap; -pub mod immediates; -pub mod instructions; -pub mod jumptable; -pub mod layout; -pub(crate) mod libcall; -mod memflags; -mod progpoint; -mod sourceloc; -pub mod stackslot; -mod table; -mod trapcode; -pub mod types; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -pub use crate::ir::atomic_rmw_op::AtomicRmwOp; -pub use crate::ir::builder::{ - InsertBuilder, InstBuilder, InstBuilderBase, InstInserterBase, ReplaceBuilder, -}; -pub use crate::ir::constant::{ConstantData, ConstantPool}; -pub use crate::ir::dfg::{DataFlowGraph, ValueDef}; -pub use crate::ir::entities::{ - Block, Constant, FuncRef, GlobalValue, Heap, Immediate, Inst, JumpTable, SigRef, StackSlot, - Table, Value, -}; -pub use crate::ir::extfunc::{ - AbiParam, ArgumentExtension, ArgumentPurpose, ExtFuncData, Signature, -}; -pub use crate::ir::extname::ExternalName; -pub use crate::ir::function::{DisplayFunctionAnnotations, Function}; -pub use crate::ir::globalvalue::GlobalValueData; -pub use crate::ir::heap::{HeapData, HeapStyle}; -pub use crate::ir::instructions::{ - InstructionData, Opcode, ValueList, ValueListPool, VariableArgs, -}; -pub use crate::ir::jumptable::JumpTableData; -pub use crate::ir::layout::Layout; -pub use crate::ir::libcall::{get_probestack_funcref, LibCall}; -pub use crate::ir::memflags::{Endianness, MemFlags}; -pub use crate::ir::progpoint::{ExpandedProgramPoint, ProgramOrder, ProgramPoint}; -pub use crate::ir::sourceloc::SourceLoc; -pub use crate::ir::stackslot::{StackSlotData, StackSlotKind, StackSlots}; -pub use crate::ir::table::TableData; -pub use crate::ir::trapcode::TrapCode; -pub use crate::ir::types::Type; -pub use crate::value_label::LabelValueLoc; - -use crate::entity::{entity_impl, PrimaryMap, SecondaryMap}; - -/// Map of jump tables. -pub type JumpTables = PrimaryMap; - -/// Source locations for instructions. -pub type SourceLocs = SecondaryMap; - -/// Marked with a label value. -#[derive(Copy, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct ValueLabel(u32); -entity_impl!(ValueLabel, "val"); - -/// A label of a Value. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct ValueLabelStart { - /// Source location when it is in effect - pub from: SourceLoc, - - /// The label index. - pub label: ValueLabel, -} - -/// Value label assignements: label starts or value aliases. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum ValueLabelAssignments { - /// Original value labels assigned at transform. - Starts(alloc::vec::Vec), - - /// A value alias to original value. - Alias { - /// Source location when it is in effect - from: SourceLoc, - - /// The label index. - value: Value, - }, -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/progpoint.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/progpoint.rs deleted file mode 100644 index 0152949e7..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/progpoint.rs +++ /dev/null @@ -1,164 +0,0 @@ -//! Program points. 
- -use crate::entity::EntityRef; -use crate::ir::{Block, Inst, ValueDef}; -use core::cmp; -use core::fmt; -use core::u32; - -/// A `ProgramPoint` represents a position in a function where the live range of an SSA value can -/// begin or end. It can be either: -/// -/// 1. An instruction or -/// 2. A block header. -/// -/// This corresponds more or less to the lines in the textual form of Cranelift IR. -#[derive(PartialEq, Eq, Clone, Copy)] -pub struct ProgramPoint(u32); - -impl From for ProgramPoint { - fn from(inst: Inst) -> Self { - let idx = inst.index(); - debug_assert!(idx < (u32::MAX / 2) as usize); - Self((idx * 2) as u32) - } -} - -impl From for ProgramPoint { - fn from(block: Block) -> Self { - let idx = block.index(); - debug_assert!(idx < (u32::MAX / 2) as usize); - Self((idx * 2 + 1) as u32) - } -} - -impl From for ProgramPoint { - fn from(def: ValueDef) -> Self { - match def { - ValueDef::Result(inst, _) => inst.into(), - ValueDef::Param(block, _) => block.into(), - } - } -} - -/// An expanded program point directly exposes the variants, but takes twice the space to -/// represent. -#[derive(PartialEq, Eq, Clone, Copy)] -pub enum ExpandedProgramPoint { - /// An instruction in the function. - Inst(Inst), - /// A block header. - Block(Block), -} - -impl ExpandedProgramPoint { - /// Get the instruction we know is inside. - pub fn unwrap_inst(self) -> Inst { - match self { - Self::Inst(x) => x, - Self::Block(x) => panic!("expected inst: {}", x), - } - } -} - -impl From for ExpandedProgramPoint { - fn from(inst: Inst) -> Self { - Self::Inst(inst) - } -} - -impl From for ExpandedProgramPoint { - fn from(block: Block) -> Self { - Self::Block(block) - } -} - -impl From for ExpandedProgramPoint { - fn from(def: ValueDef) -> Self { - match def { - ValueDef::Result(inst, _) => inst.into(), - ValueDef::Param(block, _) => block.into(), - } - } -} - -impl From for ExpandedProgramPoint { - fn from(pp: ProgramPoint) -> Self { - if pp.0 & 1 == 0 { - Self::Inst(Inst::from_u32(pp.0 / 2)) - } else { - Self::Block(Block::from_u32(pp.0 / 2)) - } - } -} - -impl fmt::Display for ExpandedProgramPoint { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Self::Inst(x) => write!(f, "{}", x), - Self::Block(x) => write!(f, "{}", x), - } - } -} - -impl fmt::Display for ProgramPoint { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let epp: ExpandedProgramPoint = (*self).into(); - epp.fmt(f) - } -} - -impl fmt::Debug for ExpandedProgramPoint { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ExpandedProgramPoint({})", self) - } -} - -impl fmt::Debug for ProgramPoint { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ProgramPoint({})", self) - } -} - -/// Context for ordering program points. -/// -/// `ProgramPoint` objects don't carry enough information to be ordered independently, they need a -/// context providing the program order. -pub trait ProgramOrder { - /// Compare the program points `a` and `b` relative to this program order. - /// - /// Return `Less` if `a` appears in the program before `b`. - /// - /// This is declared as a generic such that it can be called with `Inst` and `Block` arguments - /// directly. Depending on the implementation, there is a good chance performance will be - /// improved for those cases where the type of either argument is known statically. 
- fn cmp(&self, a: A, b: B) -> cmp::Ordering - where - A: Into, - B: Into; - - /// Is the range from `inst` to `block` just the gap between consecutive blocks? - /// - /// This returns true if `inst` is the terminator in the block immediately before `block`. - fn is_block_gap(&self, inst: Inst, block: Block) -> bool; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::entity::EntityRef; - use crate::ir::{Block, Inst}; - use alloc::string::ToString; - - #[test] - fn convert() { - let i5 = Inst::new(5); - let b3 = Block::new(3); - - let pp1: ProgramPoint = i5.into(); - let pp2: ProgramPoint = b3.into(); - - assert_eq!(pp1.to_string(), "inst5"); - assert_eq!(pp2.to_string(), "block3"); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/sourceloc.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/sourceloc.rs deleted file mode 100644 index ccab62f89..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/sourceloc.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! Source locations. -//! -//! Cranelift tracks the original source location of each instruction, and preserves the source -//! location when instructions are transformed. - -use core::fmt; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// A source location. -/// -/// This is an opaque 32-bit number attached to each Cranelift IR instruction. Cranelift does not -/// interpret source locations in any way, they are simply preserved from the input to the output. -/// -/// The default source location uses the all-ones bit pattern `!0`. It is used for instructions -/// that can't be given a real source location. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct SourceLoc(u32); - -impl SourceLoc { - /// Create a new source location with the given bits. - pub fn new(bits: u32) -> Self { - Self(bits) - } - - /// Is this the default source location? - pub fn is_default(self) -> bool { - self == Default::default() - } - - /// Read the bits of this source location. - pub fn bits(self) -> u32 { - self.0 - } -} - -impl Default for SourceLoc { - fn default() -> Self { - Self(!0) - } -} - -impl fmt::Display for SourceLoc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.is_default() { - write!(f, "@-") - } else { - write!(f, "@{:04x}", self.0) - } - } -} - -#[cfg(test)] -mod tests { - use crate::ir::SourceLoc; - use alloc::string::ToString; - - #[test] - fn display() { - assert_eq!(SourceLoc::default().to_string(), "@-"); - assert_eq!(SourceLoc::new(0).to_string(), "@0000"); - assert_eq!(SourceLoc::new(16).to_string(), "@0010"); - assert_eq!(SourceLoc::new(0xabcdef).to_string(), "@abcdef"); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/stackslot.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/stackslot.rs deleted file mode 100644 index 4c30eb48b..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/stackslot.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! Stack slots. -//! -//! The `StackSlotData` struct keeps track of a single stack slot in a function. -//! - -use crate::entity::PrimaryMap; -use crate::ir::StackSlot; -use core::fmt; -use core::str::FromStr; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// The size of an object on the stack, or the size of a stack frame. 
-/// -/// We don't use `usize` to represent object sizes on the target platform because Cranelift supports -/// cross-compilation, and `usize` is a type that depends on the host platform, not the target -/// platform. -pub type StackSize = u32; - -/// The kind of a stack slot. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum StackSlotKind { - /// An explicit stack slot. This is a chunk of stack memory for use by the `stack_load` - /// and `stack_store` instructions. - ExplicitSlot, -} - -impl FromStr for StackSlotKind { - type Err = (); - - fn from_str(s: &str) -> Result { - use self::StackSlotKind::*; - match s { - "explicit_slot" => Ok(ExplicitSlot), - _ => Err(()), - } - } -} - -impl fmt::Display for StackSlotKind { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use self::StackSlotKind::*; - f.write_str(match *self { - ExplicitSlot => "explicit_slot", - }) - } -} - -/// Contents of a stack slot. -#[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct StackSlotData { - /// The kind of stack slot. - pub kind: StackSlotKind, - - /// Size of stack slot in bytes. - pub size: StackSize, -} - -impl StackSlotData { - /// Create a stack slot with the specified byte size. - pub fn new(kind: StackSlotKind, size: StackSize) -> Self { - Self { kind, size } - } - - /// Get the alignment in bytes of this stack slot given the stack pointer alignment. - pub fn alignment(&self, max_align: StackSize) -> StackSize { - debug_assert!(max_align.is_power_of_two()); - // We want to find the largest power of two that divides both `self.size` and `max_align`. - // That is the same as isolating the rightmost bit in `x`. - let x = self.size | max_align; - // C.f. Hacker's delight. - x & x.wrapping_neg() - } -} - -impl fmt::Display for StackSlotData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{} {}", self.kind, self.size) - } -} - -/// All allocated stack slots. -pub type StackSlots = PrimaryMap; - -#[cfg(test)] -mod tests { - use super::*; - use crate::ir::Function; - use alloc::string::ToString; - - #[test] - fn stack_slot() { - let mut func = Function::new(); - - let ss0 = func.create_stack_slot(StackSlotData::new(StackSlotKind::ExplicitSlot, 4)); - let ss1 = func.create_stack_slot(StackSlotData::new(StackSlotKind::ExplicitSlot, 8)); - assert_eq!(ss0.to_string(), "ss0"); - assert_eq!(ss1.to_string(), "ss1"); - - assert_eq!(func.stack_slots[ss0].size, 4); - assert_eq!(func.stack_slots[ss1].size, 8); - - assert_eq!(func.stack_slots[ss0].to_string(), "explicit_slot 4"); - assert_eq!(func.stack_slots[ss1].to_string(), "explicit_slot 8"); - } - - #[test] - fn alignment() { - let slot = StackSlotData::new(StackSlotKind::ExplicitSlot, 8); - - assert_eq!(slot.alignment(4), 4); - assert_eq!(slot.alignment(8), 8); - assert_eq!(slot.alignment(16), 8); - - let slot2 = StackSlotData::new(StackSlotKind::ExplicitSlot, 24); - - assert_eq!(slot2.alignment(4), 4); - assert_eq!(slot2.alignment(8), 8); - assert_eq!(slot2.alignment(16), 8); - assert_eq!(slot2.alignment(32), 8); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/table.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/table.rs deleted file mode 100644 index 713d1f5df..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/table.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! Tables. 
- -use crate::ir::immediates::Uimm64; -use crate::ir::{GlobalValue, Type}; -use core::fmt; - -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// Information about a table declaration. -#[derive(Clone)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct TableData { - /// Global value giving the address of the start of the table. - pub base_gv: GlobalValue, - - /// Guaranteed minimum table size in elements. Table accesses before `min_size` don't need - /// bounds checking. - pub min_size: Uimm64, - - /// Global value giving the current bound of the table, in elements. - pub bound_gv: GlobalValue, - - /// The size of a table element, in bytes. - pub element_size: Uimm64, - - /// The index type for the table. - pub index_type: Type, -} - -impl fmt::Display for TableData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("dynamic")?; - write!( - f, - " {}, min {}, bound {}, element_size {}, index_type {}", - self.base_gv, self.min_size, self.bound_gv, self.element_size, self.index_type - ) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/trapcode.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/trapcode.rs deleted file mode 100644 index 3114114f6..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/trapcode.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! Trap codes describing the reason for a trap. - -use core::fmt::{self, Display, Formatter}; -use core::str::FromStr; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; - -/// A trap code describing the reason for a trap. -/// -/// All trap instructions have an explicit trap code. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub enum TrapCode { - /// The current stack space was exhausted. - StackOverflow, - - /// A `heap_addr` instruction detected an out-of-bounds error. - /// - /// Note that not all out-of-bounds heap accesses are reported this way; - /// some are detected by a segmentation fault on the heap unmapped or - /// offset-guard pages. - HeapOutOfBounds, - - /// A wasm atomic operation was presented with a not-naturally-aligned linear-memory address. - HeapMisaligned, - - /// A `table_addr` instruction detected an out-of-bounds error. - TableOutOfBounds, - - /// Indirect call to a null table entry. - IndirectCallToNull, - - /// Signature mismatch on indirect call. - BadSignature, - - /// An integer arithmetic operation caused an overflow. - IntegerOverflow, - - /// An integer division by zero. - IntegerDivisionByZero, - - /// Failed float-to-int conversion. - BadConversionToInteger, - - /// Code that was supposed to have been unreachable was reached. - UnreachableCodeReached, - - /// Execution has potentially run too long and may be interrupted. - /// This trap is resumable. - Interrupt, - - /// A user-defined trap code. 
- User(u16), -} - -impl Display for TrapCode { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - use self::TrapCode::*; - let identifier = match *self { - StackOverflow => "stk_ovf", - HeapOutOfBounds => "heap_oob", - HeapMisaligned => "heap_misaligned", - TableOutOfBounds => "table_oob", - IndirectCallToNull => "icall_null", - BadSignature => "bad_sig", - IntegerOverflow => "int_ovf", - IntegerDivisionByZero => "int_divz", - BadConversionToInteger => "bad_toint", - UnreachableCodeReached => "unreachable", - Interrupt => "interrupt", - User(x) => return write!(f, "user{}", x), - }; - f.write_str(identifier) - } -} - -impl FromStr for TrapCode { - type Err = (); - - fn from_str(s: &str) -> Result { - use self::TrapCode::*; - match s { - "stk_ovf" => Ok(StackOverflow), - "heap_oob" => Ok(HeapOutOfBounds), - "heap_misaligned" => Ok(HeapMisaligned), - "table_oob" => Ok(TableOutOfBounds), - "icall_null" => Ok(IndirectCallToNull), - "bad_sig" => Ok(BadSignature), - "int_ovf" => Ok(IntegerOverflow), - "int_divz" => Ok(IntegerDivisionByZero), - "bad_toint" => Ok(BadConversionToInteger), - "unreachable" => Ok(UnreachableCodeReached), - "interrupt" => Ok(Interrupt), - _ if s.starts_with("user") => s[4..].parse().map(User).map_err(|_| ()), - _ => Err(()), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::string::ToString; - - // Everything but user-defined codes. - const CODES: [TrapCode; 11] = [ - TrapCode::StackOverflow, - TrapCode::HeapOutOfBounds, - TrapCode::HeapMisaligned, - TrapCode::TableOutOfBounds, - TrapCode::IndirectCallToNull, - TrapCode::BadSignature, - TrapCode::IntegerOverflow, - TrapCode::IntegerDivisionByZero, - TrapCode::BadConversionToInteger, - TrapCode::UnreachableCodeReached, - TrapCode::Interrupt, - ]; - - #[test] - fn display() { - for r in &CODES { - let tc = *r; - assert_eq!(tc.to_string().parse(), Ok(tc)); - } - assert_eq!("bogus".parse::(), Err(())); - - assert_eq!(TrapCode::User(17).to_string(), "user17"); - assert_eq!("user22".parse(), Ok(TrapCode::User(22))); - assert_eq!("user".parse::(), Err(())); - assert_eq!("user-1".parse::(), Err(())); - assert_eq!("users".parse::(), Err(())); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/types.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/types.rs deleted file mode 100644 index 59e6a871b..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/ir/types.rs +++ /dev/null @@ -1,625 +0,0 @@ -//! Common types for the Cranelift code generator. - -use core::default::Default; -use core::fmt::{self, Debug, Display, Formatter}; -use cranelift_codegen_shared::constants; -#[cfg(feature = "enable-serde")] -use serde::{Deserialize, Serialize}; -use target_lexicon::{PointerWidth, Triple}; - -/// The type of an SSA value. -/// -/// The `INVALID` type isn't a real type, and is used as a placeholder in the IR where a type -/// field is present put no type is needed, such as the controlling type variable for a -/// non-polymorphic instruction. -/// -/// Basic integer types: `I8`, `I16`, `I32`, `I64`, and `I128`. These types are sign-agnostic. -/// -/// Basic floating point types: `F32` and `F64`. IEEE single and double precision. -/// -/// Boolean types: `B1`, `B8`, `B16`, `B32`, `B64`, and `B128`. These all encode 'true' or 'false'. The -/// larger types use redundant bits. -/// -/// SIMD vector types have power-of-two lanes, up to 256. Lanes can be any int/float/bool type. 
-/// -#[derive(Copy, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))] -pub struct Type(u8); - -/// Not a valid type. Can't be loaded or stored. Can't be part of a SIMD vector. -pub const INVALID: Type = Type(0); - -// Include code generated by `cranelift-codegen/meta/gen_types.rs`. This file contains constant -// definitions for all the scalar types as well as common vector types for 64, 128, 256, and -// 512-bit SIMD vectors. -include!(concat!(env!("OUT_DIR"), "/types.rs")); - -impl Type { - /// Get the lane type of this SIMD vector type. - /// - /// A lane type is the same as a SIMD vector type with one lane, so it returns itself. - pub fn lane_type(self) -> Self { - if self.0 < constants::VECTOR_BASE { - self - } else { - Self(constants::LANE_BASE | (self.0 & 0x0f)) - } - } - - /// The type transformation that returns the lane type of a type variable; it is just a - /// renaming of lane_type() to be used in context where we think in terms of type variable - /// transformations. - pub fn lane_of(self) -> Self { - self.lane_type() - } - - /// Get log_2 of the number of bits in a lane. - pub fn log2_lane_bits(self) -> u8 { - match self.lane_type() { - B1 => 0, - B8 | I8 => 3, - B16 | I16 => 4, - B32 | I32 | F32 | R32 => 5, - B64 | I64 | F64 | R64 => 6, - B128 | I128 => 7, - _ => 0, - } - } - - /// Get the number of bits in a lane. - pub fn lane_bits(self) -> u8 { - match self.lane_type() { - B1 => 1, - B8 | I8 => 8, - B16 | I16 => 16, - B32 | I32 | F32 | R32 => 32, - B64 | I64 | F64 | R64 => 64, - B128 | I128 => 128, - _ => 0, - } - } - - /// Get the (minimum, maximum) values represented by each lane in the type. - /// Note that these are returned as unsigned 'bit patterns'. - pub fn bounds(self, signed: bool) -> (u128, u128) { - if signed { - match self.lane_type() { - I8 => (i8::MIN as u128, i8::MAX as u128), - I16 => (i16::MIN as u128, i16::MAX as u128), - I32 => (i32::MIN as u128, i32::MAX as u128), - I64 => (i64::MIN as u128, i64::MAX as u128), - I128 => (i128::MIN as u128, i128::MAX as u128), - _ => unimplemented!(), - } - } else { - match self.lane_type() { - I8 => (u8::MIN as u128, u8::MAX as u128), - I16 => (u16::MIN as u128, u16::MAX as u128), - I32 => (u32::MIN as u128, u32::MAX as u128), - I64 => (u64::MIN as u128, u64::MAX as u128), - I128 => (u128::MIN, u128::MAX), - _ => unimplemented!(), - } - } - } - - /// Get an integer type with the requested number of bits. - /// - /// For the same thing but in *bytes*, use [`Self::int_with_byte_size`]. - pub fn int(bits: u16) -> Option { - match bits { - 8 => Some(I8), - 16 => Some(I16), - 32 => Some(I32), - 64 => Some(I64), - 128 => Some(I128), - _ => None, - } - } - - /// Get an integer type with the requested number of bytes. - /// - /// For the same thing but in *bits*, use [`Self::int`]. - pub fn int_with_byte_size(bytes: u16) -> Option { - Self::int(bytes.checked_mul(8)?) - } - - /// Get a type with the same number of lanes as `self`, but using `lane` as the lane type. - fn replace_lanes(self, lane: Self) -> Self { - debug_assert!(lane.is_lane() && !self.is_special()); - Self((lane.0 & 0x0f) | (self.0 & 0xf0)) - } - - /// Get a type with the same number of lanes as this type, but with the lanes replaced by - /// booleans of the same size. - /// - /// Lane types are treated as vectors with one lane, so they are converted to the multi-bit - /// boolean types. - pub fn as_bool_pedantic(self) -> Self { - // Replace the low 4 bits with the boolean version, preserve the high 4 bits. 
- self.replace_lanes(match self.lane_type() { - B8 | I8 => B8, - B16 | I16 => B16, - B32 | I32 | F32 => B32, - B64 | I64 | F64 => B64, - R32 | R64 => panic!("Reference types should not convert to bool"), - B128 | I128 => B128, - _ => B1, - }) - } - - /// Get a type with the same number of lanes as this type, but with the lanes replaced by - /// booleans of the same size. - /// - /// Scalar types are all converted to `b1` which is usually what you want. - pub fn as_bool(self) -> Self { - if !self.is_vector() { - B1 - } else { - self.as_bool_pedantic() - } - } - - /// Get a type with the same number of lanes as this type, but with the lanes replaced by - /// integers of the same size. - /// - /// Scalar types follow this same rule, but `b1` is converted into `i8` - pub fn as_int(self) -> Self { - self.replace_lanes(match self.lane_type() { - I8 | B1 | B8 => I8, - I16 | B16 => I16, - I32 | B32 => I32, - I64 | B64 => I64, - I128 | B128 => I128, - _ => unimplemented!(), - }) - } - - /// Get a type with the same number of lanes as this type, but with lanes that are half the - /// number of bits. - pub fn half_width(self) -> Option { - Some(self.replace_lanes(match self.lane_type() { - I16 => I8, - I32 => I16, - I64 => I32, - I128 => I64, - F64 => F32, - B16 => B8, - B32 => B16, - B64 => B32, - B128 => B64, - _ => return None, - })) - } - - /// Get a type with the same number of lanes as this type, but with lanes that are twice the - /// number of bits. - pub fn double_width(self) -> Option { - Some(self.replace_lanes(match self.lane_type() { - I8 => I16, - I16 => I32, - I32 => I64, - I64 => I128, - F32 => F64, - B8 => B16, - B16 => B32, - B32 => B64, - B64 => B128, - _ => return None, - })) - } - - /// Is this the INVALID type? - pub fn is_invalid(self) -> bool { - self == INVALID - } - - /// Is this a special type? - pub fn is_special(self) -> bool { - self.0 < constants::LANE_BASE - } - - /// Is this a lane type? - /// - /// This is a scalar type that can also appear as the lane type of a SIMD vector. - pub fn is_lane(self) -> bool { - constants::LANE_BASE <= self.0 && self.0 < constants::VECTOR_BASE - } - - /// Is this a SIMD vector type? - /// - /// A vector type has 2 or more lanes. - pub fn is_vector(self) -> bool { - self.0 >= constants::VECTOR_BASE - } - - /// Is this a scalar boolean type? - pub fn is_bool(self) -> bool { - match self { - B1 | B8 | B16 | B32 | B64 | B128 => true, - _ => false, - } - } - - /// Is this a vector boolean type? - pub fn is_bool_vector(self) -> bool { - self.is_vector() && self.lane_type().is_bool() - } - - /// Is this a scalar integer type? - pub fn is_int(self) -> bool { - match self { - I8 | I16 | I32 | I64 | I128 => true, - _ => false, - } - } - - /// Is this a scalar floating point type? - pub fn is_float(self) -> bool { - match self { - F32 | F64 => true, - _ => false, - } - } - - /// Is this a CPU flags type? - pub fn is_flags(self) -> bool { - match self { - IFLAGS | FFLAGS => true, - _ => false, - } - } - - /// Is this a ref type? - pub fn is_ref(self) -> bool { - match self { - R32 | R64 => true, - _ => false, - } - } - - /// Get log_2 of the number of lanes in this SIMD vector type. - /// - /// All SIMD types have a lane count that is a power of two and no larger than 256, so this - /// will be a number in the range 0-8. - /// - /// A scalar type is the same as a SIMD vector type with one lane, so it returns 0. 
- pub fn log2_lane_count(self) -> u8 { - self.0.saturating_sub(constants::LANE_BASE) >> 4 - } - - /// Get the number of lanes in this SIMD vector type. - /// - /// A scalar type is the same as a SIMD vector type with one lane, so it returns 1. - pub fn lane_count(self) -> u16 { - 1 << self.log2_lane_count() - } - - /// Get the total number of bits used to represent this type. - pub fn bits(self) -> u16 { - u16::from(self.lane_bits()) * self.lane_count() - } - - /// Get the number of bytes used to store this type in memory. - pub fn bytes(self) -> u32 { - (u32::from(self.bits()) + 7) / 8 - } - - /// Get a SIMD vector type with `n` times more lanes than this one. - /// - /// If this is a scalar type, this produces a SIMD type with this as a lane type and `n` lanes. - /// - /// If this is already a SIMD vector type, this produces a SIMD vector type with `n * - /// self.lane_count()` lanes. - pub fn by(self, n: u16) -> Option { - if self.lane_bits() == 0 || !n.is_power_of_two() { - return None; - } - let log2_lanes: u32 = n.trailing_zeros(); - let new_type = u32::from(self.0) + (log2_lanes << 4); - if new_type < 0x100 { - Some(Self(new_type as u8)) - } else { - None - } - } - - /// Get a SIMD vector with half the number of lanes. - /// - /// There is no `double_vector()` method. Use `t.by(2)` instead. - pub fn half_vector(self) -> Option { - if self.is_vector() { - Some(Self(self.0 - 0x10)) - } else { - None - } - } - - /// Split the lane width in half and double the number of lanes to maintain the same bit-width. - /// - /// If this is a scalar type of `n` bits, it produces a SIMD vector type of `(n/2)x2`. - pub fn split_lanes(self) -> Option { - match self.half_width() { - Some(half_width) => half_width.by(2), - None => None, - } - } - - /// Merge lanes to half the number of lanes and double the lane width to maintain the same - /// bit-width. - /// - /// If this is a scalar type, it will return `None`. - pub fn merge_lanes(self) -> Option { - match self.double_width() { - Some(double_width) => double_width.half_vector(), - None => None, - } - } - - /// Index of this type, for use with hash tables etc. - pub fn index(self) -> usize { - usize::from(self.0) - } - - /// True iff: - /// - /// 1. `self.lane_count() == other.lane_count()` and - /// 2. `self.lane_bits() >= other.lane_bits()` - pub fn wider_or_equal(self, other: Self) -> bool { - self.lane_count() == other.lane_count() && self.lane_bits() >= other.lane_bits() - } - - /// Return the pointer type for the given target triple. - pub fn triple_pointer_type(triple: &Triple) -> Self { - match triple.pointer_width() { - Ok(PointerWidth::U16) => I16, - Ok(PointerWidth::U32) => I32, - Ok(PointerWidth::U64) => I64, - Err(()) => panic!("unable to determine architecture pointer width"), - } - } - - /// Coerces boolean types (scalar and vectors) into their integer counterparts. - /// B1 is converted into I8. 
- pub fn coerce_bools_to_ints(self) -> Self { - let is_scalar_bool = self.is_bool(); - let is_vector_bool = self.is_vector() && self.lane_type().is_bool(); - - if is_scalar_bool || is_vector_bool { - self.as_int() - } else { - self - } - } -} - -impl Display for Type { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - if self.is_bool() { - write!(f, "b{}", self.lane_bits()) - } else if self.is_int() { - write!(f, "i{}", self.lane_bits()) - } else if self.is_float() { - write!(f, "f{}", self.lane_bits()) - } else if self.is_vector() { - write!(f, "{}x{}", self.lane_type(), self.lane_count()) - } else if self.is_ref() { - write!(f, "r{}", self.lane_bits()) - } else { - f.write_str(match *self { - IFLAGS => "iflags", - FFLAGS => "fflags", - INVALID => panic!("INVALID encountered"), - _ => panic!("Unknown Type(0x{:x})", self.0), - }) - } - } -} - -impl Debug for Type { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - if self.is_bool() { - write!(f, "types::B{}", self.lane_bits()) - } else if self.is_int() { - write!(f, "types::I{}", self.lane_bits()) - } else if self.is_float() { - write!(f, "types::F{}", self.lane_bits()) - } else if self.is_vector() { - write!(f, "{:?}X{}", self.lane_type(), self.lane_count()) - } else if self.is_ref() { - write!(f, "types::R{}", self.lane_bits()) - } else { - match *self { - INVALID => write!(f, "types::INVALID"), - IFLAGS => write!(f, "types::IFLAGS"), - FFLAGS => write!(f, "types::FFLAGS"), - _ => write!(f, "Type(0x{:x})", self.0), - } - } - } -} - -impl Default for Type { - fn default() -> Self { - INVALID - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloc::string::ToString; - - #[test] - fn basic_scalars() { - assert_eq!(INVALID, INVALID.lane_type()); - assert_eq!(0, INVALID.bits()); - assert_eq!(IFLAGS, IFLAGS.lane_type()); - assert_eq!(0, IFLAGS.bits()); - assert_eq!(FFLAGS, FFLAGS.lane_type()); - assert_eq!(0, FFLAGS.bits()); - assert_eq!(B1, B1.lane_type()); - assert_eq!(B8, B8.lane_type()); - assert_eq!(B16, B16.lane_type()); - assert_eq!(B32, B32.lane_type()); - assert_eq!(B64, B64.lane_type()); - assert_eq!(B128, B128.lane_type()); - assert_eq!(I8, I8.lane_type()); - assert_eq!(I16, I16.lane_type()); - assert_eq!(I32, I32.lane_type()); - assert_eq!(I64, I64.lane_type()); - assert_eq!(I128, I128.lane_type()); - assert_eq!(F32, F32.lane_type()); - assert_eq!(F64, F64.lane_type()); - assert_eq!(B1, B1.by(8).unwrap().lane_type()); - assert_eq!(I32, I32X4.lane_type()); - assert_eq!(F64, F64X2.lane_type()); - assert_eq!(R32, R32.lane_type()); - assert_eq!(R64, R64.lane_type()); - - assert_eq!(INVALID.lane_bits(), 0); - assert_eq!(IFLAGS.lane_bits(), 0); - assert_eq!(FFLAGS.lane_bits(), 0); - assert_eq!(B1.lane_bits(), 1); - assert_eq!(B8.lane_bits(), 8); - assert_eq!(B16.lane_bits(), 16); - assert_eq!(B32.lane_bits(), 32); - assert_eq!(B64.lane_bits(), 64); - assert_eq!(B128.lane_bits(), 128); - assert_eq!(I8.lane_bits(), 8); - assert_eq!(I16.lane_bits(), 16); - assert_eq!(I32.lane_bits(), 32); - assert_eq!(I64.lane_bits(), 64); - assert_eq!(I128.lane_bits(), 128); - assert_eq!(F32.lane_bits(), 32); - assert_eq!(F64.lane_bits(), 64); - assert_eq!(R32.lane_bits(), 32); - assert_eq!(R64.lane_bits(), 64); - } - - #[test] - fn typevar_functions() { - assert_eq!(INVALID.half_width(), None); - assert_eq!(INVALID.half_width(), None); - assert_eq!(FFLAGS.half_width(), None); - assert_eq!(B1.half_width(), None); - assert_eq!(B8.half_width(), None); - assert_eq!(B16.half_width(), Some(B8)); - assert_eq!(B32.half_width(), Some(B16)); - 
assert_eq!(B64.half_width(), Some(B32)); - assert_eq!(B128.half_width(), Some(B64)); - assert_eq!(I8.half_width(), None); - assert_eq!(I16.half_width(), Some(I8)); - assert_eq!(I32.half_width(), Some(I16)); - assert_eq!(I32X4.half_width(), Some(I16X4)); - assert_eq!(I64.half_width(), Some(I32)); - assert_eq!(I128.half_width(), Some(I64)); - assert_eq!(F32.half_width(), None); - assert_eq!(F64.half_width(), Some(F32)); - - assert_eq!(INVALID.double_width(), None); - assert_eq!(IFLAGS.double_width(), None); - assert_eq!(FFLAGS.double_width(), None); - assert_eq!(B1.double_width(), None); - assert_eq!(B8.double_width(), Some(B16)); - assert_eq!(B16.double_width(), Some(B32)); - assert_eq!(B32.double_width(), Some(B64)); - assert_eq!(B64.double_width(), Some(B128)); - assert_eq!(B128.double_width(), None); - assert_eq!(I8.double_width(), Some(I16)); - assert_eq!(I16.double_width(), Some(I32)); - assert_eq!(I32.double_width(), Some(I64)); - assert_eq!(I32X4.double_width(), Some(I64X4)); - assert_eq!(I64.double_width(), Some(I128)); - assert_eq!(I128.double_width(), None); - assert_eq!(F32.double_width(), Some(F64)); - assert_eq!(F64.double_width(), None); - } - - #[test] - fn vectors() { - let big = F64.by(256).unwrap(); - assert_eq!(big.lane_bits(), 64); - assert_eq!(big.lane_count(), 256); - assert_eq!(big.bits(), 64 * 256); - - assert_eq!(big.half_vector().unwrap().to_string(), "f64x128"); - assert_eq!(B1.by(2).unwrap().half_vector().unwrap().to_string(), "b1"); - assert_eq!(I32.half_vector(), None); - assert_eq!(INVALID.half_vector(), None); - - // Check that the generated constants match the computed vector types. - assert_eq!(I32.by(4), Some(I32X4)); - assert_eq!(F64.by(8), Some(F64X8)); - } - - #[test] - fn format_scalars() { - assert_eq!(IFLAGS.to_string(), "iflags"); - assert_eq!(FFLAGS.to_string(), "fflags"); - assert_eq!(B1.to_string(), "b1"); - assert_eq!(B8.to_string(), "b8"); - assert_eq!(B16.to_string(), "b16"); - assert_eq!(B32.to_string(), "b32"); - assert_eq!(B64.to_string(), "b64"); - assert_eq!(B128.to_string(), "b128"); - assert_eq!(I8.to_string(), "i8"); - assert_eq!(I16.to_string(), "i16"); - assert_eq!(I32.to_string(), "i32"); - assert_eq!(I64.to_string(), "i64"); - assert_eq!(I128.to_string(), "i128"); - assert_eq!(F32.to_string(), "f32"); - assert_eq!(F64.to_string(), "f64"); - assert_eq!(R32.to_string(), "r32"); - assert_eq!(R64.to_string(), "r64"); - } - - #[test] - fn format_vectors() { - assert_eq!(B1.by(8).unwrap().to_string(), "b1x8"); - assert_eq!(B8.by(1).unwrap().to_string(), "b8"); - assert_eq!(B16.by(256).unwrap().to_string(), "b16x256"); - assert_eq!(B32.by(4).unwrap().by(2).unwrap().to_string(), "b32x8"); - assert_eq!(B64.by(8).unwrap().to_string(), "b64x8"); - assert_eq!(I8.by(64).unwrap().to_string(), "i8x64"); - assert_eq!(F64.by(2).unwrap().to_string(), "f64x2"); - assert_eq!(I8.by(3), None); - assert_eq!(I8.by(512), None); - assert_eq!(INVALID.by(4), None); - } - - #[test] - fn as_bool() { - assert_eq!(I32X4.as_bool(), B32X4); - assert_eq!(I32.as_bool(), B1); - assert_eq!(I32X4.as_bool_pedantic(), B32X4); - assert_eq!(I32.as_bool_pedantic(), B32); - } - - #[test] - fn as_int() { - assert_eq!(B32X4.as_int(), I32X4); - assert_eq!(B8X8.as_int(), I8X8); - assert_eq!(B1.as_int(), I8); - assert_eq!(B8.as_int(), I8); - assert_eq!(B128.as_int(), I128); - } - - #[test] - fn int_from_size() { - assert_eq!(Type::int(0), None); - assert_eq!(Type::int(8), Some(I8)); - assert_eq!(Type::int(33), None); - assert_eq!(Type::int(64), Some(I64)); - - 
assert_eq!(Type::int_with_byte_size(0), None); - assert_eq!(Type::int_with_byte_size(2), Some(I16)); - assert_eq!(Type::int_with_byte_size(6), None); - assert_eq!(Type::int_with_byte_size(16), Some(I128)); - - // Ensure `int_with_byte_size` handles overflow properly - let evil = 0xE001_u16; - assert_eq!(evil.wrapping_mul(8), 8, "check the constant is correct"); - assert_eq!(Type::int_with_byte_size(evil), None); - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/abi.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/abi.rs deleted file mode 100644 index 3fbfc8356..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/abi.rs +++ /dev/null @@ -1,1346 +0,0 @@ -//! Implementation of a standard AArch64 ABI. - -use crate::ir; -use crate::ir::types; -use crate::ir::types::*; -use crate::ir::MemFlags; -use crate::ir::Opcode; -use crate::ir::{ExternalName, LibCall}; -use crate::isa; -use crate::isa::aarch64::{inst::EmitState, inst::*}; -use crate::isa::unwind::UnwindInst; -use crate::machinst::*; -use crate::settings; -use crate::{CodegenError, CodegenResult}; -use alloc::boxed::Box; -use alloc::vec::Vec; -use regalloc::{RealReg, Reg, RegClass, Set, Writable}; -use smallvec::{smallvec, SmallVec}; - -// We use a generic implementation that factors out AArch64 and x64 ABI commonalities, because -// these ABIs are very similar. - -/// Support for the AArch64 ABI from the callee side (within a function body). -pub(crate) type AArch64ABICallee = ABICalleeImpl; - -/// Support for the AArch64 ABI from the caller side (at a callsite). -pub(crate) type AArch64ABICaller = ABICallerImpl; - -// Spidermonkey specific ABI convention. - -/// This is SpiderMonkey's `WasmTableCallSigReg`. -static BALDRDASH_SIG_REG: u8 = 10; - -/// This is SpiderMonkey's `WasmTlsReg`. -static BALDRDASH_TLS_REG: u8 = 23; - -/// Offset in stack-arg area to callee-TLS slot in Baldrdash-2020 calling convention. -static BALDRDASH_CALLEE_TLS_OFFSET: i64 = 0; -/// Offset in stack-arg area to caller-TLS slot in Baldrdash-2020 calling convention. -static BALDRDASH_CALLER_TLS_OFFSET: i64 = 8; - -// These two lists represent the registers the JIT may *not* use at any point in generated code. -// -// So these are callee-preserved from the JIT's point of view, and every register not in this list -// has to be caller-preserved by definition. -// -// Keep these lists in sync with the NonAllocatableMask set in Spidermonkey's -// Architecture-arm64.cpp. - -// Indexed by physical register number. -#[rustfmt::skip] -static BALDRDASH_JIT_CALLEE_SAVED_GPR: &[bool] = &[ - /* 0 = */ false, false, false, false, false, false, false, false, - /* 8 = */ false, false, false, false, false, false, false, false, - /* 16 = */ true /* x16 / ip1 */, true /* x17 / ip2 */, true /* x18 / TLS */, false, - /* 20 = */ false, false, false, false, - /* 24 = */ false, false, false, false, - // There should be 28, the pseudo stack pointer in this list, however the wasm stubs trash it - // gladly right now. 
- /* 28 = */ false, false, true /* x30 = FP */, false /* x31 = SP */ -]; - -#[rustfmt::skip] -static BALDRDASH_JIT_CALLEE_SAVED_FPU: &[bool] = &[ - /* 0 = */ false, false, false, false, false, false, false, false, - /* 8 = */ false, false, false, false, false, false, false, false, - /* 16 = */ false, false, false, false, false, false, false, false, - /* 24 = */ false, false, false, false, false, false, false, true /* v31 / d31 */ -]; - -/// This is the limit for the size of argument and return-value areas on the -/// stack. We place a reasonable limit here to avoid integer overflow issues -/// with 32-bit arithmetic: for now, 128 MB. -static STACK_ARG_RET_SIZE_LIMIT: u64 = 128 * 1024 * 1024; - -/// Try to fill a Baldrdash register, returning it if it was found. -fn try_fill_baldrdash_reg(call_conv: isa::CallConv, param: &ir::AbiParam) -> Option { - if call_conv.extends_baldrdash() { - match ¶m.purpose { - &ir::ArgumentPurpose::VMContext => { - // This is SpiderMonkey's `WasmTlsReg`. - Some(ABIArg::reg( - xreg(BALDRDASH_TLS_REG).to_real_reg(), - ir::types::I64, - param.extension, - param.purpose, - )) - } - &ir::ArgumentPurpose::SignatureId => { - // This is SpiderMonkey's `WasmTableCallSigReg`. - Some(ABIArg::reg( - xreg(BALDRDASH_SIG_REG).to_real_reg(), - ir::types::I64, - param.extension, - param.purpose, - )) - } - &ir::ArgumentPurpose::CalleeTLS => { - // This is SpiderMonkey's callee TLS slot in the extended frame of Wasm's ABI-2020. - assert!(call_conv == isa::CallConv::Baldrdash2020); - Some(ABIArg::stack( - BALDRDASH_CALLEE_TLS_OFFSET, - ir::types::I64, - ir::ArgumentExtension::None, - param.purpose, - )) - } - &ir::ArgumentPurpose::CallerTLS => { - // This is SpiderMonkey's caller TLS slot in the extended frame of Wasm's ABI-2020. - assert!(call_conv == isa::CallConv::Baldrdash2020); - Some(ABIArg::stack( - BALDRDASH_CALLER_TLS_OFFSET, - ir::types::I64, - ir::ArgumentExtension::None, - param.purpose, - )) - } - _ => None, - } - } else { - None - } -} - -impl Into for StackAMode { - fn into(self) -> AMode { - match self { - StackAMode::FPOffset(off, ty) => AMode::FPOffset(off, ty), - StackAMode::NominalSPOffset(off, ty) => AMode::NominalSPOffset(off, ty), - StackAMode::SPOffset(off, ty) => AMode::SPOffset(off, ty), - } - } -} - -// Returns the size of stack space needed to store the -// `int_reg` and `vec_reg`. -fn saved_reg_stack_size( - call_conv: isa::CallConv, - int_reg: &[Writable], - vec_reg: &[Writable], -) -> (usize, usize) { - // Round up to multiple of 2, to keep 16-byte stack alignment. - let int_save_bytes = (int_reg.len() + (int_reg.len() & 1)) * 8; - // The Baldrdash ABIs require saving and restoring the whole 16-byte - // SIMD & FP registers, so the necessary stack space is always a - // multiple of the mandatory 16-byte stack alignment. However, the - // Procedure Call Standard for the Arm 64-bit Architecture (AAPCS64, - // including several related ABIs such as the one used by Windows) - // mandates saving only the bottom 8 bytes of the vector registers, - // so in that case we round up the number of registers to ensure proper - // stack alignment (similarly to the situation with `int_reg`). - let vec_reg_size = if call_conv.extends_baldrdash() { 16 } else { 8 }; - let vec_save_padding = if call_conv.extends_baldrdash() { - 0 - } else { - vec_reg.len() & 1 - }; - let vec_save_bytes = (vec_reg.len() + vec_save_padding) * vec_reg_size; - - (int_save_bytes, vec_save_bytes) -} - -/// AArch64-specific ABI behavior. 
This struct just serves as an implementation -/// point for the trait; it is never actually instantiated. -pub(crate) struct AArch64MachineDeps; - -impl ABIMachineSpec for AArch64MachineDeps { - type I = Inst; - - fn word_bits() -> u32 { - 64 - } - - /// Return required stack alignment in bytes. - fn stack_align(_call_conv: isa::CallConv) -> u32 { - 16 - } - - fn compute_arg_locs( - call_conv: isa::CallConv, - _flags: &settings::Flags, - params: &[ir::AbiParam], - args_or_rets: ArgsOrRets, - add_ret_area_ptr: bool, - ) -> CodegenResult<(Vec, i64, Option)> { - let is_apple_cc = call_conv.extends_apple_aarch64(); - let is_baldrdash = call_conv.extends_baldrdash(); - let has_baldrdash_tls = call_conv == isa::CallConv::Baldrdash2020; - - // See AArch64 ABI (https://github.com/ARM-software/abi-aa/blob/2021Q1/aapcs64/aapcs64.rst#64parameter-passing), sections 6.4. - // - // MacOS aarch64 is slightly different, see also - // https://developer.apple.com/documentation/xcode/writing_arm64_code_for_apple_platforms. - // We are diverging from the MacOS aarch64 implementation in the - // following ways: - // - sign- and zero- extensions of data types less than 32 bits are not - // implemented yet. - // - we align the arguments stack space to a 16-bytes boundary, while - // the MacOS allows aligning only on 8 bytes. In practice it means we're - // slightly overallocating when calling, which is fine, and doesn't - // break our other invariants that the stack is always allocated in - // 16-bytes chunks. - - let mut next_xreg = 0; - let mut next_vreg = 0; - let mut next_stack: u64 = 0; - let mut ret = vec![]; - - if args_or_rets == ArgsOrRets::Args && has_baldrdash_tls { - // Baldrdash ABI-2020 always has two stack-arg slots reserved, for the callee and - // caller TLS-register values, respectively. - next_stack = 16; - } - - let (max_per_class_reg_vals, mut remaining_reg_vals) = match args_or_rets { - ArgsOrRets::Args => (8, 16), // x0-x7 and v0-v7 - - // Note on return values: on the regular ABI, we may return values - // in 8 registers for V128 and I64 registers independently of the - // number of register values returned in the other class. That is, - // we can return values in up to 8 integer and - // 8 vector registers at once. - // - // In Baldrdash and Wasmtime, we can only use one register for - // return value for all the register classes. That is, we can't - // return values in both one integer and one vector register; only - // one return value may be in a register. - ArgsOrRets::Rets => { - if is_baldrdash || call_conv.extends_wasmtime() { - (1, 1) // x0 or v0, but not both - } else { - (8, 16) // x0-x7 and v0-v7 - } - } - }; - - for i in 0..params.len() { - // Process returns backward, according to the SpiderMonkey ABI (which we - // adopt internally if `is_baldrdash` is set). - let param = match (args_or_rets, is_baldrdash) { - (ArgsOrRets::Args, _) => ¶ms[i], - (ArgsOrRets::Rets, false) => ¶ms[i], - (ArgsOrRets::Rets, true) => ¶ms[params.len() - 1 - i], - }; - - // Validate "purpose". 
- match ¶m.purpose { - &ir::ArgumentPurpose::VMContext - | &ir::ArgumentPurpose::Normal - | &ir::ArgumentPurpose::StackLimit - | &ir::ArgumentPurpose::SignatureId - | &ir::ArgumentPurpose::CallerTLS - | &ir::ArgumentPurpose::CalleeTLS - | &ir::ArgumentPurpose::StructReturn - | &ir::ArgumentPurpose::StructArgument(_) => {} - _ => panic!( - "Unsupported argument purpose {:?} in signature: {:?}", - param.purpose, params - ), - } - - assert!( - legal_type_for_machine(param.value_type), - "Invalid type for AArch64: {:?}", - param.value_type - ); - - let (rcs, reg_types) = Inst::rc_for_type(param.value_type)?; - - if let Some(param) = try_fill_baldrdash_reg(call_conv, param) { - assert!(rcs[0] == RegClass::I64); - ret.push(param); - continue; - } - - if let ir::ArgumentPurpose::StructArgument(size) = param.purpose { - let offset = next_stack as i64; - let size = size as u64; - assert!(size % 8 == 0, "StructArgument size is not properly aligned"); - next_stack += size; - ret.push(ABIArg::StructArg { - offset, - size, - purpose: param.purpose, - }); - continue; - } - - // Handle multi register params - // - // See AArch64 ABI (https://github.com/ARM-software/abi-aa/blob/2021Q1/aapcs64/aapcs64.rst#642parameter-passing-rules), (Section 6.4.2 Stage C). - // - // For arguments with alignment of 16 we round up the the register number - // to the next even value. So we can never allocate for example an i128 - // to X1 and X2, we have to skip one register and do X2, X3 - // (Stage C.8) - // Note: The Apple ABI deviates a bit here. They don't respect Stage C.8 - // and will happily allocate a i128 to X1 and X2 - // - // For integer types with alignment of 16 we also have the additional - // restriction of passing the lower half in Xn and the upper half in Xn+1 - // (Stage C.9) - // - // For examples of how LLVM handles this: https://godbolt.org/z/bhd3vvEfh - // - // On the Apple ABI it is unspecified if we can spill half the value into the stack - // i.e load the lower half into x7 and the upper half into the stack - // LLVM does not seem to do this, so we are going to replicate that behaviour - let is_multi_reg = rcs.len() >= 2; - if is_multi_reg { - assert!( - rcs.len() == 2, - "Unable to handle multi reg params with more than 2 regs" - ); - assert!( - rcs == &[RegClass::I64, RegClass::I64], - "Unable to handle non i64 regs" - ); - - let reg_class_space = max_per_class_reg_vals - next_xreg; - let reg_space = remaining_reg_vals; - - if reg_space >= 2 && reg_class_space >= 2 { - // The aarch64 ABI does not allow us to start a split argument - // at an odd numbered register. 
So we need to skip one register - // - // TODO: The Fast ABI should probably not skip the register - if !is_apple_cc && next_xreg % 2 != 0 { - next_xreg += 1; - } - - let lower_reg = xreg(next_xreg); - let upper_reg = xreg(next_xreg + 1); - - ret.push(ABIArg::Slots { - slots: vec![ - ABIArgSlot::Reg { - reg: lower_reg.to_real_reg(), - ty: param.value_type, - extension: param.extension, - }, - ABIArgSlot::Reg { - reg: upper_reg.to_real_reg(), - ty: param.value_type, - extension: param.extension, - }, - ], - purpose: param.purpose, - }); - - next_xreg += 2; - remaining_reg_vals -= 2; - continue; - } - } else { - // Single Register parameters - let rc = rcs[0]; - let next_reg = match rc { - RegClass::I64 => &mut next_xreg, - RegClass::V128 => &mut next_vreg, - _ => panic!("Invalid register class: {:?}", rc), - }; - - if *next_reg < max_per_class_reg_vals && remaining_reg_vals > 0 { - let reg = match rc { - RegClass::I64 => xreg(*next_reg), - RegClass::V128 => vreg(*next_reg), - _ => unreachable!(), - }; - ret.push(ABIArg::reg( - reg.to_real_reg(), - param.value_type, - param.extension, - param.purpose, - )); - *next_reg += 1; - remaining_reg_vals -= 1; - continue; - } - } - - // Spill to the stack - - // Compute the stack slot's size. - let size = (ty_bits(param.value_type) / 8) as u64; - - let size = if is_apple_cc - || (call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets) - { - // MacOS aarch64 and Wasmtime allow stack slots with - // sizes less than 8 bytes. They still need to be - // properly aligned on their natural data alignment, - // though. - size - } else { - // Every arg takes a minimum slot of 8 bytes. (16-byte stack - // alignment happens separately after all args.) - std::cmp::max(size, 8) - }; - - // Align the stack slot. - debug_assert!(size.is_power_of_two()); - next_stack = align_to(next_stack, size); - - let slots = reg_types - .iter() - .copied() - // Build the stack locations from each slot - .scan(next_stack, |next_stack, ty| { - let slot_offset = *next_stack as i64; - *next_stack += (ty_bits(ty) / 8) as u64; - - Some((ty, slot_offset)) - }) - .map(|(ty, offset)| ABIArgSlot::Stack { - offset, - ty, - extension: param.extension, - }) - .collect(); - - ret.push(ABIArg::Slots { - slots, - purpose: param.purpose, - }); - - next_stack += size; - } - - if args_or_rets == ArgsOrRets::Rets && is_baldrdash { - ret.reverse(); - } - - let extra_arg = if add_ret_area_ptr { - debug_assert!(args_or_rets == ArgsOrRets::Args); - if next_xreg < max_per_class_reg_vals && remaining_reg_vals > 0 { - ret.push(ABIArg::reg( - xreg(next_xreg).to_real_reg(), - I64, - ir::ArgumentExtension::None, - ir::ArgumentPurpose::Normal, - )); - } else { - ret.push(ABIArg::stack( - next_stack as i64, - I64, - ir::ArgumentExtension::None, - ir::ArgumentPurpose::Normal, - )); - next_stack += 8; - } - Some(ret.len() - 1) - } else { - None - }; - - next_stack = align_to(next_stack, 16); - - // To avoid overflow issues, limit the arg/return size to something - // reasonable -- here, 128 MB. 
- if next_stack > STACK_ARG_RET_SIZE_LIMIT { - return Err(CodegenError::ImplLimitExceeded); - } - - Ok((ret, next_stack as i64, extra_arg)) - } - - fn fp_to_arg_offset(call_conv: isa::CallConv, flags: &settings::Flags) -> i64 { - if call_conv.extends_baldrdash() { - let num_words = flags.baldrdash_prologue_words() as i64; - debug_assert!(num_words > 0, "baldrdash must set baldrdash_prologue_words"); - debug_assert_eq!(num_words % 2, 0, "stack must be 16-aligned"); - num_words * 8 - } else { - 16 // frame pointer + return address. - } - } - - fn gen_load_stack(mem: StackAMode, into_reg: Writable, ty: Type) -> Inst { - Inst::gen_load(into_reg, mem.into(), ty, MemFlags::trusted()) - } - - fn gen_store_stack(mem: StackAMode, from_reg: Reg, ty: Type) -> Inst { - Inst::gen_store(mem.into(), from_reg, ty, MemFlags::trusted()) - } - - fn gen_move(to_reg: Writable, from_reg: Reg, ty: Type) -> Inst { - Inst::gen_move(to_reg, from_reg, ty) - } - - fn gen_extend( - to_reg: Writable, - from_reg: Reg, - signed: bool, - from_bits: u8, - to_bits: u8, - ) -> Inst { - assert!(from_bits < to_bits); - Inst::Extend { - rd: to_reg, - rn: from_reg, - signed, - from_bits, - to_bits, - } - } - - fn gen_ret() -> Inst { - Inst::Ret - } - - fn gen_add_imm(into_reg: Writable, from_reg: Reg, imm: u32) -> SmallInstVec { - let imm = imm as u64; - let mut insts = SmallVec::new(); - if let Some(imm12) = Imm12::maybe_from_u64(imm) { - insts.push(Inst::AluRRImm12 { - alu_op: ALUOp::Add, - size: OperandSize::Size64, - rd: into_reg, - rn: from_reg, - imm12, - }); - } else { - let scratch2 = writable_tmp2_reg(); - assert_ne!(scratch2.to_reg(), from_reg); - insts.extend(Inst::load_constant(scratch2, imm.into())); - insts.push(Inst::AluRRRExtend { - alu_op: ALUOp::Add, - size: OperandSize::Size64, - rd: into_reg, - rn: from_reg, - rm: scratch2.to_reg(), - extendop: ExtendOp::UXTX, - }); - } - insts - } - - fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec { - let mut insts = SmallVec::new(); - insts.push(Inst::AluRRRExtend { - alu_op: ALUOp::SubS, - size: OperandSize::Size64, - rd: writable_zero_reg(), - rn: stack_reg(), - rm: limit_reg, - extendop: ExtendOp::UXTX, - }); - insts.push(Inst::TrapIf { - trap_code: ir::TrapCode::StackOverflow, - // Here `Lo` == "less than" when interpreting the two - // operands as unsigned integers. 
- kind: CondBrKind::Cond(Cond::Lo), - }); - insts - } - - fn gen_epilogue_placeholder() -> Inst { - Inst::EpiloguePlaceholder - } - - fn gen_get_stack_addr(mem: StackAMode, into_reg: Writable, _ty: Type) -> Inst { - let mem = mem.into(); - Inst::LoadAddr { rd: into_reg, mem } - } - - fn get_stacklimit_reg() -> Reg { - spilltmp_reg() - } - - fn gen_load_base_offset(into_reg: Writable, base: Reg, offset: i32, ty: Type) -> Inst { - let mem = AMode::RegOffset(base, offset as i64, ty); - Inst::gen_load(into_reg, mem, ty, MemFlags::trusted()) - } - - fn gen_store_base_offset(base: Reg, offset: i32, from_reg: Reg, ty: Type) -> Inst { - let mem = AMode::RegOffset(base, offset as i64, ty); - Inst::gen_store(mem, from_reg, ty, MemFlags::trusted()) - } - - fn gen_sp_reg_adjust(amount: i32) -> SmallInstVec { - if amount == 0 { - return SmallVec::new(); - } - - let (amount, is_sub) = if amount > 0 { - (amount as u64, false) - } else { - (-amount as u64, true) - }; - - let alu_op = if is_sub { ALUOp::Sub } else { ALUOp::Add }; - - let mut ret = SmallVec::new(); - if let Some(imm12) = Imm12::maybe_from_u64(amount) { - let adj_inst = Inst::AluRRImm12 { - alu_op, - size: OperandSize::Size64, - rd: writable_stack_reg(), - rn: stack_reg(), - imm12, - }; - ret.push(adj_inst); - } else { - let tmp = writable_spilltmp_reg(); - let const_inst = Inst::load_constant(tmp, amount); - let adj_inst = Inst::AluRRRExtend { - alu_op, - size: OperandSize::Size64, - rd: writable_stack_reg(), - rn: stack_reg(), - rm: tmp.to_reg(), - extendop: ExtendOp::UXTX, - }; - ret.extend(const_inst); - ret.push(adj_inst); - } - ret - } - - fn gen_nominal_sp_adj(offset: i32) -> Inst { - Inst::VirtualSPOffsetAdj { - offset: offset as i64, - } - } - - fn gen_debug_frame_info( - flags: &settings::Flags, - _isa_flags: &Vec, - ) -> SmallInstVec { - let mut insts = SmallVec::new(); - if flags.unwind_info() { - insts.push(Inst::Unwind { - inst: UnwindInst::Aarch64SetPointerAuth { - return_addresses: false, - }, - }); - } - insts - } - - fn gen_prologue_frame_setup(flags: &settings::Flags) -> SmallInstVec { - let mut insts = SmallVec::new(); - - // stp fp (x29), lr (x30), [sp, #-16]! - insts.push(Inst::StoreP64 { - rt: fp_reg(), - rt2: link_reg(), - mem: PairAMode::PreIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(-16, types::I64).unwrap(), - ), - flags: MemFlags::trusted(), - }); - - if flags.unwind_info() { - insts.push(Inst::Unwind { - inst: UnwindInst::PushFrameRegs { - offset_upward_to_caller_sp: 16, // FP, LR - }, - }); - } - - // mov fp (x29), sp. This uses the ADDI rd, rs, 0 form of `MOV` because - // the usual encoding (`ORR`) does not work with SP. - insts.push(Inst::AluRRImm12 { - alu_op: ALUOp::Add, - size: OperandSize::Size64, - rd: writable_fp_reg(), - rn: stack_reg(), - imm12: Imm12 { - bits: 0, - shift12: false, - }, - }); - insts - } - - fn gen_epilogue_frame_restore(_: &settings::Flags) -> SmallInstVec { - let mut insts = SmallVec::new(); - - // N.B.: sp is already adjusted to the appropriate place by the - // clobber-restore code (which also frees the fixed frame). Hence, there - // is no need for the usual `mov sp, fp` here. 
- - // `ldp fp, lr, [sp], #16` - insts.push(Inst::LoadP64 { - rt: writable_fp_reg(), - rt2: writable_link_reg(), - mem: PairAMode::PostIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(16, types::I64).unwrap(), - ), - flags: MemFlags::trusted(), - }); - insts - } - - fn gen_probestack(_: u32) -> SmallInstVec { - // TODO: implement if we ever require stack probes on an AArch64 host - // (unlikely unless Lucet is ported) - smallvec![] - } - - // Returns stack bytes used as well as instructions. Does not adjust - // nominal SP offset; abi_impl generic code will do that. - fn gen_clobber_save( - call_conv: isa::CallConv, - setup_frame: bool, - flags: &settings::Flags, - clobbered_callee_saves: &Vec>, - fixed_frame_storage_size: u32, - _outgoing_args_size: u32, - ) -> (u64, SmallVec<[Inst; 16]>) { - let mut clobbered_int = vec![]; - let mut clobbered_vec = vec![]; - - for ® in clobbered_callee_saves.iter() { - match reg.to_reg().get_class() { - RegClass::I64 => clobbered_int.push(reg), - RegClass::V128 => clobbered_vec.push(reg), - class => panic!("Unexpected RegClass: {:?}", class), - } - } - - let (int_save_bytes, vec_save_bytes) = - saved_reg_stack_size(call_conv, &clobbered_int, &clobbered_vec); - let total_save_bytes = int_save_bytes + vec_save_bytes; - let clobber_size = total_save_bytes as i32; - let mut insts = SmallVec::new(); - - if flags.unwind_info() && setup_frame { - // The *unwind* frame (but not the actual frame) starts at the - // clobbers, just below the saved FP/LR pair. - insts.push(Inst::Unwind { - inst: UnwindInst::DefineNewFrame { - offset_downward_to_clobbers: clobber_size as u32, - offset_upward_to_caller_sp: 16, // FP, LR - }, - }); - } - - // We use pre-indexed addressing modes here, rather than the possibly - // more efficient "subtract sp once then used fixed offsets" scheme, - // because (i) we cannot necessarily guarantee that the offset of a - // clobber-save slot will be within a SImm7Scaled (+504-byte) offset - // range of the whole frame including other slots, it is more complex to - // conditionally generate a two-stage SP adjustment (clobbers then fixed - // frame) otherwise, and generally we just want to maintain simplicity - // here for maintainability. Because clobbers are at the top of the - // frame, just below FP, all that is necessary is to use the pre-indexed - // "push" `[sp, #-16]!` addressing mode. - // - // `frame_offset` tracks offset above start-of-clobbers for unwind-info - // purposes. - let mut clobber_offset = clobber_size as u32; - let clobber_offset_change = 16; - let iter = clobbered_int.chunks_exact(2); - - if let [rd] = iter.remainder() { - let rd = rd.to_reg().to_reg(); - - debug_assert_eq!(rd.get_class(), RegClass::I64); - // str rd, [sp, #-16]! - insts.push(Inst::Store64 { - rd, - mem: AMode::PreIndexed( - writable_stack_reg(), - SImm9::maybe_from_i64(-clobber_offset_change).unwrap(), - ), - flags: MemFlags::trusted(), - }); - - if flags.unwind_info() { - clobber_offset -= clobber_offset_change as u32; - insts.push(Inst::Unwind { - inst: UnwindInst::SaveReg { - clobber_offset, - reg: rd.to_real_reg(), - }, - }); - } - } - - let mut iter = iter.rev(); - - while let Some([rt, rt2]) = iter.next() { - // .to_reg().to_reg(): Writable --> RealReg --> Reg - let rt = rt.to_reg().to_reg(); - let rt2 = rt2.to_reg().to_reg(); - - debug_assert!(rt.get_class() == RegClass::I64); - debug_assert!(rt2.get_class() == RegClass::I64); - - // stp rt, rt2, [sp, #-16]! 
- insts.push(Inst::StoreP64 { - rt, - rt2, - mem: PairAMode::PreIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(-clobber_offset_change, types::I64).unwrap(), - ), - flags: MemFlags::trusted(), - }); - - if flags.unwind_info() { - clobber_offset -= clobber_offset_change as u32; - insts.push(Inst::Unwind { - inst: UnwindInst::SaveReg { - clobber_offset, - reg: rt.to_real_reg(), - }, - }); - insts.push(Inst::Unwind { - inst: UnwindInst::SaveReg { - clobber_offset: clobber_offset + (clobber_offset_change / 2) as u32, - reg: rt2.to_real_reg(), - }, - }); - } - } - - let store_vec_reg = |rd| { - if call_conv.extends_baldrdash() { - Inst::FpuStore128 { - rd, - mem: AMode::PreIndexed( - writable_stack_reg(), - SImm9::maybe_from_i64(-clobber_offset_change).unwrap(), - ), - flags: MemFlags::trusted(), - } - } else { - Inst::FpuStore64 { - rd, - mem: AMode::PreIndexed( - writable_stack_reg(), - SImm9::maybe_from_i64(-clobber_offset_change).unwrap(), - ), - flags: MemFlags::trusted(), - } - } - }; - let iter = clobbered_vec.chunks_exact(2); - - if let [rd] = iter.remainder() { - let rd = rd.to_reg().to_reg(); - - debug_assert_eq!(rd.get_class(), RegClass::V128); - insts.push(store_vec_reg(rd)); - - if flags.unwind_info() { - clobber_offset -= clobber_offset_change as u32; - insts.push(Inst::Unwind { - inst: UnwindInst::SaveReg { - clobber_offset, - reg: rd.to_real_reg(), - }, - }); - } - } - - let store_vec_reg_pair = |rt, rt2| { - if call_conv.extends_baldrdash() { - let clobber_offset_change = 32; - - ( - Inst::FpuStoreP128 { - rt, - rt2, - mem: PairAMode::PreIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(-clobber_offset_change, I8X16).unwrap(), - ), - flags: MemFlags::trusted(), - }, - clobber_offset_change as u32, - ) - } else { - let clobber_offset_change = 16; - - ( - Inst::FpuStoreP64 { - rt, - rt2, - mem: PairAMode::PreIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(-clobber_offset_change, F64).unwrap(), - ), - flags: MemFlags::trusted(), - }, - clobber_offset_change as u32, - ) - } - }; - let mut iter = iter.rev(); - - while let Some([rt, rt2]) = iter.next() { - let rt = rt.to_reg().to_reg(); - let rt2 = rt2.to_reg().to_reg(); - - debug_assert_eq!(rt.get_class(), RegClass::V128); - debug_assert_eq!(rt2.get_class(), RegClass::V128); - - let (inst, clobber_offset_change) = store_vec_reg_pair(rt, rt2); - - insts.push(inst); - - if flags.unwind_info() { - clobber_offset -= clobber_offset_change; - insts.push(Inst::Unwind { - inst: UnwindInst::SaveReg { - clobber_offset, - reg: rt.to_real_reg(), - }, - }); - insts.push(Inst::Unwind { - inst: UnwindInst::SaveReg { - clobber_offset: clobber_offset + clobber_offset_change / 2, - reg: rt2.to_real_reg(), - }, - }); - } - } - - // Allocate the fixed frame below the clobbers if necessary. - if fixed_frame_storage_size > 0 { - insts.extend(Self::gen_sp_reg_adjust(-(fixed_frame_storage_size as i32))); - } - - (total_save_bytes as u64, insts) - } - - fn gen_clobber_restore( - call_conv: isa::CallConv, - flags: &settings::Flags, - clobbers: &Set>, - fixed_frame_storage_size: u32, - _outgoing_args_size: u32, - ) -> SmallVec<[Inst; 16]> { - let mut insts = SmallVec::new(); - let (clobbered_int, clobbered_vec) = get_regs_restored_in_epilogue(call_conv, clobbers); - - // Free the fixed frame if necessary. 
- if fixed_frame_storage_size > 0 { - insts.extend(Self::gen_sp_reg_adjust(fixed_frame_storage_size as i32)); - } - - let load_vec_reg = |rd| { - if call_conv.extends_baldrdash() { - Inst::FpuLoad128 { - rd, - mem: AMode::PostIndexed( - writable_stack_reg(), - SImm9::maybe_from_i64(16).unwrap(), - ), - flags: MemFlags::trusted(), - } - } else { - Inst::FpuLoad64 { - rd, - mem: AMode::PostIndexed( - writable_stack_reg(), - SImm9::maybe_from_i64(16).unwrap(), - ), - flags: MemFlags::trusted(), - } - } - }; - let load_vec_reg_pair = |rt, rt2| { - if call_conv.extends_baldrdash() { - Inst::FpuLoadP128 { - rt, - rt2, - mem: PairAMode::PostIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(32, I8X16).unwrap(), - ), - flags: MemFlags::trusted(), - } - } else { - Inst::FpuLoadP64 { - rt, - rt2, - mem: PairAMode::PostIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(16, F64).unwrap(), - ), - flags: MemFlags::trusted(), - } - } - }; - - let mut iter = clobbered_vec.chunks_exact(2); - - while let Some([rt, rt2]) = iter.next() { - let rt = rt.map(|r| r.to_reg()); - let rt2 = rt2.map(|r| r.to_reg()); - - debug_assert_eq!(rt.to_reg().get_class(), RegClass::V128); - debug_assert_eq!(rt2.to_reg().get_class(), RegClass::V128); - insts.push(load_vec_reg_pair(rt, rt2)); - } - - debug_assert!(iter.remainder().len() <= 1); - - if let [rd] = iter.remainder() { - let rd = rd.map(|r| r.to_reg()); - - debug_assert_eq!(rd.to_reg().get_class(), RegClass::V128); - insts.push(load_vec_reg(rd)); - } - - let mut iter = clobbered_int.chunks_exact(2); - - while let Some([rt, rt2]) = iter.next() { - let rt = rt.map(|r| r.to_reg()); - let rt2 = rt2.map(|r| r.to_reg()); - - debug_assert_eq!(rt.to_reg().get_class(), RegClass::I64); - debug_assert_eq!(rt2.to_reg().get_class(), RegClass::I64); - // ldp rt, rt2, [sp], #16 - insts.push(Inst::LoadP64 { - rt, - rt2, - mem: PairAMode::PostIndexed( - writable_stack_reg(), - SImm7Scaled::maybe_from_i64(16, I64).unwrap(), - ), - flags: MemFlags::trusted(), - }); - } - - debug_assert!(iter.remainder().len() <= 1); - - if let [rd] = iter.remainder() { - let rd = rd.map(|r| r.to_reg()); - - debug_assert_eq!(rd.to_reg().get_class(), RegClass::I64); - // ldr rd, [sp], #16 - insts.push(Inst::ULoad64 { - rd, - mem: AMode::PostIndexed(writable_stack_reg(), SImm9::maybe_from_i64(16).unwrap()), - flags: MemFlags::trusted(), - }); - } - - // If this is Baldrdash-2020, restore the callee (i.e., our) TLS - // register. We may have allocated it for something else and clobbered - // it, but the ABI expects us to leave the TLS register unchanged. 
- if call_conv == isa::CallConv::Baldrdash2020 { - let off = BALDRDASH_CALLEE_TLS_OFFSET + Self::fp_to_arg_offset(call_conv, flags); - insts.push(Inst::gen_load( - writable_xreg(BALDRDASH_TLS_REG), - AMode::UnsignedOffset(fp_reg(), UImm12Scaled::maybe_from_i64(off, I64).unwrap()), - I64, - MemFlags::trusted(), - )); - } - - insts - } - - fn gen_call( - dest: &CallDest, - uses: Vec, - defs: Vec>, - opcode: ir::Opcode, - tmp: Writable, - callee_conv: isa::CallConv, - caller_conv: isa::CallConv, - ) -> SmallVec<[(InstIsSafepoint, Inst); 2]> { - let mut insts = SmallVec::new(); - match &dest { - &CallDest::ExtName(ref name, RelocDistance::Near) => insts.push(( - InstIsSafepoint::Yes, - Inst::Call { - info: Box::new(CallInfo { - dest: name.clone(), - uses, - defs, - opcode, - caller_callconv: caller_conv, - callee_callconv: callee_conv, - }), - }, - )), - &CallDest::ExtName(ref name, RelocDistance::Far) => { - insts.push(( - InstIsSafepoint::No, - Inst::LoadExtName { - rd: tmp, - name: Box::new(name.clone()), - offset: 0, - }, - )); - insts.push(( - InstIsSafepoint::Yes, - Inst::CallInd { - info: Box::new(CallIndInfo { - rn: tmp.to_reg(), - uses, - defs, - opcode, - caller_callconv: caller_conv, - callee_callconv: callee_conv, - }), - }, - )); - } - &CallDest::Reg(reg) => insts.push(( - InstIsSafepoint::Yes, - Inst::CallInd { - info: Box::new(CallIndInfo { - rn: *reg, - uses, - defs, - opcode, - caller_callconv: caller_conv, - callee_callconv: callee_conv, - }), - }, - )), - } - - insts - } - - fn gen_memcpy( - call_conv: isa::CallConv, - dst: Reg, - src: Reg, - size: usize, - ) -> SmallVec<[Self::I; 8]> { - // Baldrdash should not use struct args. - assert!(!call_conv.extends_baldrdash()); - let mut insts = SmallVec::new(); - let arg0 = writable_xreg(0); - let arg1 = writable_xreg(1); - let arg2 = writable_xreg(2); - insts.push(Inst::gen_move(arg0, dst, I64)); - insts.push(Inst::gen_move(arg1, src, I64)); - insts.extend(Inst::load_constant(arg2, size as u64).into_iter()); - insts.push(Inst::Call { - info: Box::new(CallInfo { - dest: ExternalName::LibCall(LibCall::Memcpy), - uses: vec![arg0.to_reg(), arg1.to_reg(), arg2.to_reg()], - defs: Self::get_regs_clobbered_by_call(call_conv), - opcode: Opcode::Call, - caller_callconv: call_conv, - callee_callconv: call_conv, - }), - }); - insts - } - - fn get_number_of_spillslots_for_value(rc: RegClass) -> u32 { - // We allocate in terms of 8-byte slots. - match rc { - RegClass::I64 => 1, - RegClass::V128 => 2, - _ => panic!("Unexpected register class!"), - } - } - - /// Get the current virtual-SP offset from an instruction-emission state. - fn get_virtual_sp_offset_from_state(s: &EmitState) -> i64 { - s.virtual_sp_offset - } - - /// Get the nominal-SP-to-FP offset from an instruction-emission state. 
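The `gen_call` body above dispatches on the call destination: a near external name becomes a single direct call (relocatable within the branch range), a far name is split into a load-address instruction followed by an indirect call through a temporary, and a register destination is always indirect. A hedged sketch of that dispatch, with simplified stand-in types (`Dest`, `CallSeq`) rather than the real `CallDest`/`Inst` types:

```rust
// Simplified stand-ins for the real CallDest / Inst types.
enum Dest {
    Near(String), // close enough for a direct branch-and-link
    Far(String),  // arbitrary distance: load the address, then call indirectly
    Reg(u8),      // destination already in a register
}

#[derive(Debug)]
enum CallSeq {
    Direct { name: String },
    LoadAddr { tmp: u8, name: String },
    Indirect { reg: u8 },
}

/// Lower one call destination into a short instruction sequence.
fn lower_call(dest: Dest, tmp: u8) -> Vec<CallSeq> {
    match dest {
        Dest::Near(name) => vec![CallSeq::Direct { name }],
        Dest::Far(name) => vec![
            CallSeq::LoadAddr { tmp, name },
            CallSeq::Indirect { reg: tmp },
        ],
        Dest::Reg(reg) => vec![CallSeq::Indirect { reg }],
    }
}

fn main() {
    for step in lower_call(Dest::Far("memcpy".to_string()), 16) {
        println!("{step:?}");
    }
}
```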
-    fn get_nominal_sp_to_fp(s: &EmitState) -> i64 {
-        s.nominal_sp_to_fp
-    }
-
-    fn get_regs_clobbered_by_call(call_conv_of_callee: isa::CallConv) -> Vec<Writable<Reg>> {
-        let mut caller_saved = Vec::new();
-        for i in 0..29 {
-            let x = writable_xreg(i);
-            if is_reg_clobbered_by_call(call_conv_of_callee, x.to_reg().to_real_reg()) {
-                caller_saved.push(x);
-            }
-        }
-        for i in 0..32 {
-            let v = writable_vreg(i);
-            if is_reg_clobbered_by_call(call_conv_of_callee, v.to_reg().to_real_reg()) {
-                caller_saved.push(v);
-            }
-        }
-        caller_saved
-    }
-
-    fn get_ext_mode(
-        call_conv: isa::CallConv,
-        specified: ir::ArgumentExtension,
-    ) -> ir::ArgumentExtension {
-        if call_conv.extends_baldrdash() {
-            // Baldrdash (SpiderMonkey) always extends args and return values to the full register.
-            specified
-        } else {
-            // No other supported ABI on AArch64 does so.
-            ir::ArgumentExtension::None
-        }
-    }
-
-    fn get_clobbered_callee_saves(
-        call_conv: isa::CallConv,
-        regs: &Set<Writable<RealReg>>,
-    ) -> Vec<Writable<RealReg>> {
-        let mut regs: Vec<Writable<RealReg>> = regs
-            .iter()
-            .cloned()
-            .filter(|r| is_reg_saved_in_prologue(call_conv, r.to_reg()))
-            .collect();
-
-        // Sort registers for deterministic code output. We can do an unstable
-        // sort because the registers will be unique (there are no dups).
-        regs.sort_unstable_by_key(|r| r.to_reg().get_index());
-        regs
-    }
-
-    fn is_frame_setup_needed(
-        is_leaf: bool,
-        stack_args_size: u32,
-        num_clobbered_callee_saves: usize,
-        fixed_frame_storage_size: u32,
-    ) -> bool {
-        !is_leaf
-            // The function arguments that are passed on the stack are addressed
-            // relative to the Frame Pointer.
-            || stack_args_size > 0
-            || num_clobbered_callee_saves > 0
-            || fixed_frame_storage_size > 0
-    }
-}
-
-/// Is this type supposed to be seen on this machine? E.g. references of the
-/// wrong width are invalid.
-fn legal_type_for_machine(ty: Type) -> bool {
-    match ty {
-        R32 => false,
-        _ => true,
-    }
-}
-
-/// Is the given register saved in the prologue if clobbered, i.e., is it a
-/// callee-save?
-fn is_reg_saved_in_prologue(call_conv: isa::CallConv, r: RealReg) -> bool {
-    if call_conv.extends_baldrdash() {
-        match r.get_class() {
-            RegClass::I64 => {
-                let enc = r.get_hw_encoding();
-                return BALDRDASH_JIT_CALLEE_SAVED_GPR[enc];
-            }
-            RegClass::V128 => {
-                let enc = r.get_hw_encoding();
-                return BALDRDASH_JIT_CALLEE_SAVED_FPU[enc];
-            }
-            _ => unimplemented!("baldrdash callee saved on non-i64 reg classes"),
-        };
-    }
-
-    match r.get_class() {
-        RegClass::I64 => {
-            // x19 - x28 inclusive are callee-saves.
-            r.get_hw_encoding() >= 19 && r.get_hw_encoding() <= 28
-        }
-        RegClass::V128 => {
-            // v8 - v15 inclusive are callee-saves.
-            r.get_hw_encoding() >= 8 && r.get_hw_encoding() <= 15
-        }
-        _ => panic!("Unexpected RegClass"),
-    }
-}
-
-/// Return the set of all integer and vector registers that must be saved in the
-/// prologue and restored in the epilogue, given the set of all registers
-/// written by the function's body.
-fn get_regs_restored_in_epilogue(
-    call_conv: isa::CallConv,
-    regs: &Set<Writable<RealReg>>,
-) -> (Vec<Writable<RealReg>>, Vec<Writable<RealReg>>) {
-    let mut int_saves = vec![];
-    let mut vec_saves = vec![];
-    for &reg in regs.iter() {
-        if is_reg_saved_in_prologue(call_conv, reg.to_reg()) {
-            match reg.to_reg().get_class() {
-                RegClass::I64 => int_saves.push(reg),
-                RegClass::V128 => vec_saves.push(reg),
-                _ => panic!("Unexpected RegClass"),
-            }
-        }
-    }
-    // Sort registers for deterministic code output. We can do an unstable sort because the
-    // registers will be unique (there are no dups).
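The register-class predicates above encode the standard AArch64 partition: x19..x28 and v8..v15 are callee-saved, x0..x17 are caller-saved, and at callsites every vector register is conservatively treated as clobbered because only the low 64 bits of v8..v15 are preserved. A minimal stand-alone check that summarizes those ranges (illustrative only, Baldrdash special cases omitted):

```rust
#[derive(Clone, Copy, PartialEq)]
enum RegClass { I64, V128 }

/// Is a register preserved across calls under the standard AArch64 ABI?
/// (Same ranges as the code above.)
fn is_callee_saved(class: RegClass, hw_enc: u8) -> bool {
    match class {
        RegClass::I64 => (19..=28).contains(&hw_enc),
        // Only the low 64 bits of v8-v15 are preserved, which is why the
        // callsite-clobber logic above still marks all vector regs as clobbered.
        RegClass::V128 => (8..=15).contains(&hw_enc),
    }
}

fn main() {
    assert!(is_callee_saved(RegClass::I64, 19));
    assert!(!is_callee_saved(RegClass::I64, 17));
    assert!(is_callee_saved(RegClass::V128, 8));
    assert!(!is_callee_saved(RegClass::V128, 16));
}
```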
- int_saves.sort_unstable_by_key(|r| r.to_reg().get_index()); - vec_saves.sort_unstable_by_key(|r| r.to_reg().get_index()); - (int_saves, vec_saves) -} - -fn is_reg_clobbered_by_call(call_conv_of_callee: isa::CallConv, r: RealReg) -> bool { - if call_conv_of_callee.extends_baldrdash() { - match r.get_class() { - RegClass::I64 => { - let enc = r.get_hw_encoding(); - if !BALDRDASH_JIT_CALLEE_SAVED_GPR[enc] { - return true; - } - // Otherwise, fall through to preserve native's ABI caller-saved. - } - RegClass::V128 => { - let enc = r.get_hw_encoding(); - if !BALDRDASH_JIT_CALLEE_SAVED_FPU[enc] { - return true; - } - // Otherwise, fall through to preserve native's ABI caller-saved. - } - _ => unimplemented!("baldrdash callee saved on non-i64 reg classes"), - }; - } - - match r.get_class() { - RegClass::I64 => { - // x0 - x17 inclusive are caller-saves. - r.get_hw_encoding() <= 17 - } - RegClass::V128 => { - // v0 - v7 inclusive and v16 - v31 inclusive are caller-saves. The - // upper 64 bits of v8 - v15 inclusive are also caller-saves. - // However, because we cannot currently represent partial registers - // to regalloc.rs, we indicate here that every vector register is - // caller-save. Because this function is used at *callsites*, - // approximating in this direction (save more than necessary) is - // conservative and thus safe. - // - // Note that we set the 'not included in clobber set' flag in the - // regalloc.rs API when a call instruction's callee has the same ABI - // as the caller (the current function body); this is safe (anything - // clobbered by callee can be clobbered by caller as well) and - // avoids unnecessary saves of v8-v15 in the prologue even though we - // include them as defs here. - true - } - _ => panic!("Unexpected RegClass"), - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst.isle b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst.isle deleted file mode 100644 index 91fe89825..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst.isle +++ /dev/null @@ -1,1999 +0,0 @@ -;; Instruction formats. -(type MInst - (enum - ;; A no-op of zero size. - (Nop0) - - ;; A no-op that is one instruction large. - (Nop4) - - ;; An ALU operation with two register sources and a register destination. - (AluRRR - (alu_op ALUOp) - (size OperandSize) - (rd WritableReg) - (rn Reg) - (rm Reg)) - - ;; An ALU operation with three register sources and a register destination. - (AluRRRR - (alu_op ALUOp3) - (rd WritableReg) - (rn Reg) - (rm Reg) - (ra Reg)) - - ;; An ALU operation with a register source and an immediate-12 source, and a register - ;; destination. - (AluRRImm12 - (alu_op ALUOp) - (size OperandSize) - (rd WritableReg) - (rn Reg) - (imm12 Imm12)) - - ;; An ALU operation with a register source and an immediate-logic source, and a register destination. - (AluRRImmLogic - (alu_op ALUOp) - (size OperandSize) - (rd WritableReg) - (rn Reg) - (imml ImmLogic)) - - ;; An ALU operation with a register source and an immediate-shiftamt source, and a register destination. - (AluRRImmShift - (alu_op ALUOp) - (size OperandSize) - (rd WritableReg) - (rn Reg) - (immshift ImmShift)) - - ;; An ALU operation with two register sources, one of which can be shifted, and a register - ;; destination. 
- (AluRRRShift - (alu_op ALUOp) - (size OperandSize) - (rd WritableReg) - (rn Reg) - (rm Reg) - (shiftop ShiftOpAndAmt)) - - ;; An ALU operation with two register sources, one of which can be {zero,sign}-extended and - ;; shifted, and a register destination. - (AluRRRExtend - (alu_op ALUOp) - (size OperandSize) - (rd WritableReg) - (rn Reg) - (rm Reg) - (extendop ExtendOp)) - - ;; A bit op instruction with a single register source. - (BitRR - (op BitOp) - (size OperandSize) - (rd WritableReg) - (rn Reg)) - - ;; An unsigned (zero-extending) 8-bit load. - (ULoad8 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; A signed (sign-extending) 8-bit load. - (SLoad8 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; An unsigned (zero-extending) 16-bit load. - (ULoad16 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; A signed (sign-extending) 16-bit load. - (SLoad16 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; An unsigned (zero-extending) 32-bit load. - (ULoad32 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; A signed (sign-extending) 32-bit load. - (SLoad32 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; A 64-bit load. - (ULoad64 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; An 8-bit store. - (Store8 - (rd Reg) - (mem AMode) - (flags MemFlags)) - - ;; A 16-bit store. - (Store16 - (rd Reg) - (mem AMode) - (flags MemFlags)) - - ;; A 32-bit store. - (Store32 - (rd Reg) - (mem AMode) - (flags MemFlags)) - - ;; A 64-bit store. - (Store64 - (rd Reg) - (mem AMode) - (flags MemFlags)) - - ;; A store of a pair of registers. - (StoreP64 - (rt Reg) - (rt2 Reg) - (mem PairAMode) - (flags MemFlags)) - - ;; A load of a pair of registers. - (LoadP64 - (rt WritableReg) - (rt2 WritableReg) - (mem PairAMode) - (flags MemFlags)) - - ;; A MOV instruction. These are encoded as ORR's (AluRRR form) but we - ;; keep them separate at the `Inst` level for better pretty-printing - ;; and faster `is_move()` logic. - (Mov64 - (rd WritableReg) - (rm Reg)) - - ;; A 32-bit MOV. Zeroes the top 32 bits of the destination. This is - ;; effectively an alias for an unsigned 32-to-64-bit extension. - (Mov32 - (rd WritableReg) - (rm Reg)) - - ;; A MOVZ with a 16-bit immediate. - (MovZ - (rd WritableReg) - (imm MoveWideConst) - (size OperandSize)) - - ;; A MOVN with a 16-bit immediate. - (MovN - (rd WritableReg) - (imm MoveWideConst) - (size OperandSize)) - - ;; A MOVK with a 16-bit immediate. - (MovK - (rd WritableReg) - (imm MoveWideConst) - (size OperandSize)) - - ;; A sign- or zero-extend operation. - (Extend - (rd WritableReg) - (rn Reg) - (signed bool) - (from_bits u8) - (to_bits u8)) - - ;; A conditional-select operation. - (CSel - (rd WritableReg) - (cond Cond) - (rn Reg) - (rm Reg)) - - ;; A conditional-set operation. - (CSet - (rd WritableReg) - (cond Cond)) - - ;; A conditional-set-mask operation. - (CSetm - (rd WritableReg) - (cond Cond)) - - ;; A conditional comparison with an immediate. - (CCmpImm - (size OperandSize) - (rn Reg) - (imm UImm5) - (nzcv NZCV) - (cond Cond)) - - ;; A synthetic insn, which is a load-linked store-conditional loop, that has the overall - ;; effect of atomically modifying a memory location in a particular way. Because we have - ;; no way to explain to the regalloc about earlyclobber registers, this instruction has - ;; completely fixed operand registers, and we rely on the RA's coalescing to remove copies - ;; in the surrounding code to the extent it can. 
The sequence is both preceded and - ;; followed by a fence which is at least as comprehensive as that of the `Fence` - ;; instruction below. This instruction is sequentially consistent. The operand - ;; conventions are: - ;; - ;; x25 (rd) address - ;; x26 (rd) second operand for `op` - ;; x27 (wr) old value - ;; x24 (wr) scratch reg; value afterwards has no meaning - ;; x28 (wr) scratch reg; value afterwards has no meaning - (AtomicRMWLoop - (ty Type) ;; I8, I16, I32 or I64 - (op AtomicRmwOp)) - - ;; An atomic read-modify-write operation. These instructions require the - ;; Large System Extension (LSE) ISA support (FEAT_LSE). The instructions have - ;; acquire-release semantics. - (AtomicRMW - (op AtomicRMWOp) - (rs Reg) - (rt WritableReg) - (rn Reg) - (ty Type)) - - ;; An atomic compare-and-swap operation. This instruction is sequentially consistent. - (AtomicCAS - (rs WritableReg) - (rt Reg) - (rn Reg) - (ty Type)) - - ;; Similar to AtomicRMWLoop, a compare-and-swap operation implemented using a load-linked - ;; store-conditional loop. - ;; This instruction is sequentially consistent. - ;; Note that the operand conventions, although very similar to AtomicRMWLoop, are different: - ;; - ;; x25 (rd) address - ;; x26 (rd) expected value - ;; x28 (rd) replacement value - ;; x27 (wr) old value - ;; x24 (wr) scratch reg; value afterwards has no meaning - (AtomicCASLoop - (ty Type) ;; I8, I16, I32 or I64 - ) - - ;; Read `access_ty` bits from address `rt`, either 8, 16, 32 or 64-bits, and put - ;; it in `rn`, optionally zero-extending to fill a word or double word result. - ;; This instruction is sequentially consistent. - (LoadAcquire - (access_ty Type) ;; I8, I16, I32 or I64 - (rt WritableReg) - (rn Reg)) - - ;; Write the lowest `ty` bits of `rt` to address `rn`. - ;; This instruction is sequentially consistent. - (StoreRelease - (access_ty Type) ;; I8, I16, I32 or I64 - (rt Reg) - (rn Reg)) - - ;; A memory fence. This must provide ordering to ensure that, at a minimum, neither loads - ;; nor stores may move forwards or backwards across the fence. Currently emitted as "dmb - ;; ish". This instruction is sequentially consistent. - (Fence) - - ;; FPU move. Note that this is distinct from a vector-register - ;; move; moving just 64 bits seems to be significantly faster. - (FpuMove64 - (rd WritableReg) - (rn Reg)) - - ;; Vector register move. - (FpuMove128 - (rd WritableReg) - (rn Reg)) - - ;; Move to scalar from a vector element. - (FpuMoveFromVec - (rd WritableReg) - (rn Reg) - (idx u8) - (size VectorSize)) - - ;; Zero-extend a SIMD & FP scalar to the full width of a vector register. - ;; 16-bit scalars require half-precision floating-point support (FEAT_FP16). - (FpuExtend - (rd WritableReg) - (rn Reg) - (size ScalarSize)) - - ;; 1-op FPU instruction. - (FpuRR - (fpu_op FPUOp1) - (rd WritableReg) - (rn Reg)) - - ;; 2-op FPU instruction. - (FpuRRR - (fpu_op FPUOp2) - (rd WritableReg) - (rn Reg) - (rm Reg)) - - (FpuRRI - (fpu_op FPUOpRI) - (rd WritableReg) - (rn Reg)) - - ;; 3-op FPU instruction. - (FpuRRRR - (fpu_op FPUOp3) - (rd WritableReg) - (rn Reg) - (rm Reg) - (ra Reg)) - - ;; FPU comparison, single-precision (32 bit). - (FpuCmp32 - (rn Reg) - (rm Reg)) - - ;; FPU comparison, double-precision (64 bit). - (FpuCmp64 - (rn Reg) - (rm Reg)) - - ;; Floating-point load, single-precision (32 bit). - (FpuLoad32 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; Floating-point store, single-precision (32 bit). 
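For intuition only: the overall effect that `AtomicRMWLoop` and `AtomicCASLoop` must provide is a sequentially consistent read-modify-write, retried until the conditional store succeeds. The sketch below expresses that contract with Rust's standard atomics as an analogy; it is not the deleted lowering and uses a compare-exchange loop in place of load-linked/store-conditional:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Sequentially consistent atomic RMW expressed as a CAS retry loop,
/// analogous to the LL/SC loop the instruction above expands to.
fn atomic_rmw(cell: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
    let mut old = cell.load(Ordering::SeqCst);
    loop {
        let new = op(old);
        match cell.compare_exchange(old, new, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(prev) => return prev,     // store succeeded; return the old value
            Err(actual) => old = actual, // raced with another writer; retry
        }
    }
}

fn main() {
    let cell = AtomicU64::new(5);
    let prev = atomic_rmw(&cell, |v| v.wrapping_add(3));
    assert_eq!(prev, 5);
    assert_eq!(cell.load(Ordering::SeqCst), 8);
}
```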
- (FpuStore32 - (rd Reg) - (mem AMode) - (flags MemFlags)) - - ;; Floating-point load, double-precision (64 bit). - (FpuLoad64 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; Floating-point store, double-precision (64 bit). - (FpuStore64 - (rd Reg) - (mem AMode) - (flags MemFlags)) - - ;; Floating-point/vector load, 128 bit. - (FpuLoad128 - (rd WritableReg) - (mem AMode) - (flags MemFlags)) - - ;; Floating-point/vector store, 128 bit. - (FpuStore128 - (rd Reg) - (mem AMode) - (flags MemFlags)) - - ;; A load of a pair of floating-point registers, double precision (64-bit). - (FpuLoadP64 - (rt WritableReg) - (rt2 WritableReg) - (mem PairAMode) - (flags MemFlags)) - - ;; A store of a pair of floating-point registers, double precision (64-bit). - (FpuStoreP64 - (rt Reg) - (rt2 Reg) - (mem PairAMode) - (flags MemFlags)) - - ;; A load of a pair of floating-point registers, 128-bit. - (FpuLoadP128 - (rt WritableReg) - (rt2 WritableReg) - (mem PairAMode) - (flags MemFlags)) - - ;; A store of a pair of floating-point registers, 128-bit. - (FpuStoreP128 - (rt Reg) - (rt2 Reg) - (mem PairAMode) - (flags MemFlags)) - - (LoadFpuConst64 - (rd WritableReg) - (const_data u64)) - - (LoadFpuConst128 - (rd WritableReg) - (const_data u128)) - - ;; Conversion: FP -> integer. - (FpuToInt - (op FpuToIntOp) - (rd WritableReg) - (rn Reg)) - - ;; Conversion: integer -> FP. - (IntToFpu - (op IntToFpuOp) - (rd WritableReg) - (rn Reg)) - - ;; FP conditional select, 32 bit. - (FpuCSel32 - (rd WritableReg) - (rn Reg) - (rm Reg) - (cond Cond)) - - ;; FP conditional select, 64 bit. - (FpuCSel64 - (rd WritableReg) - (rn Reg) - (rm Reg) - (cond Cond)) - - ;; Round to integer. - (FpuRound - (op FpuRoundMode) - (rd WritableReg) - (rn Reg)) - - ;; Move from a GPR to a vector register. The scalar value is parked in the lowest lane - ;; of the destination, and all other lanes are zeroed out. Currently only 32- and 64-bit - ;; transactions are supported. - (MovToFpu - (rd WritableReg) - (rn Reg) - (size ScalarSize)) - - ;; Loads a floating-point immediate. - (FpuMoveFPImm - (rd WritableReg) - (imm ASIMDFPModImm) - (size ScalarSize)) - - ;; Move to a vector element from a GPR. - (MovToVec - (rd WritableReg) - (rn Reg) - (idx u8) - (size VectorSize)) - - ;; Unsigned move from a vector element to a GPR. - (MovFromVec - (rd WritableReg) - (rn Reg) - (idx u8) - (size VectorSize)) - - ;; Signed move from a vector element to a GPR. - (MovFromVecSigned - (rd WritableReg) - (rn Reg) - (idx u8) - (size VectorSize) - (scalar_size OperandSize)) - - ;; Duplicate general-purpose register to vector. - (VecDup - (rd WritableReg) - (rn Reg) - (size VectorSize)) - - ;; Duplicate scalar to vector. - (VecDupFromFpu - (rd WritableReg) - (rn Reg) - (size VectorSize)) - - ;; Duplicate FP immediate to vector. - (VecDupFPImm - (rd WritableReg) - (imm ASIMDFPModImm) - (size VectorSize)) - - ;; Duplicate immediate to vector. - (VecDupImm - (rd WritableReg) - (imm ASIMDMovModImm) - (invert bool) - (size VectorSize)) - - ;; Vector extend. - (VecExtend - (t VecExtendOp) - (rd WritableReg) - (rn Reg) - (high_half bool)) - - ;; Move vector element to another vector element. - (VecMovElement - (rd WritableReg) - (rn Reg) - (dest_idx u8) - (src_idx u8) - (size VectorSize)) - - ;; Vector widening operation. - (VecRRLong - (op VecRRLongOp) - (rd WritableReg) - (rn Reg) - (high_half bool)) - - ;; Vector narrowing operation. 
- (VecRRNarrow - (op VecRRNarrowOp) - (rd WritableReg) - (rn Reg) - (high_half bool)) - - ;; 1-operand vector instruction that operates on a pair of elements. - (VecRRPair - (op VecPairOp) - (rd WritableReg) - (rn Reg)) - - ;; 2-operand vector instruction that produces a result with twice the - ;; lane width and half the number of lanes. - (VecRRRLong - (alu_op VecRRRLongOp) - (rd WritableReg) - (rn Reg) - (rm Reg) - (high_half bool)) - - ;; 1-operand vector instruction that extends elements of the input - ;; register and operates on a pair of elements. The output lane width - ;; is double that of the input. - (VecRRPairLong - (op VecRRPairLongOp) - (rd WritableReg) - (rn Reg)) - - ;; A vector ALU op. - (VecRRR - (alu_op VecALUOp) - (rd WritableReg) - (rn Reg) - (rm Reg) - (size VectorSize)) - - ;; Vector two register miscellaneous instruction. - (VecMisc - (op VecMisc2) - (rd WritableReg) - (rn Reg) - (size VectorSize)) - - ;; Vector instruction across lanes. - (VecLanes - (op VecLanesOp) - (rd WritableReg) - (rn Reg) - (size VectorSize)) - - ;; Vector shift by immediate Shift Left (immediate), Unsigned Shift Right (immediate) - ;; Signed Shift Right (immediate). These are somewhat unusual in that, for right shifts, - ;; the allowed range of `imm` values is 1 to lane-size-in-bits, inclusive. A zero - ;; right-shift cannot be encoded. Left shifts are "normal", though, having valid `imm` - ;; values from 0 to lane-size-in-bits - 1 inclusive. - (VecShiftImm - (op VecShiftImmOp) - (rd WritableReg) - (rn Reg) - (size VectorSize) - (imm u8)) - - ;; Vector extract - create a new vector, being the concatenation of the lowest `imm4` bytes - ;; of `rm` followed by the uppermost `16 - imm4` bytes of `rn`. - (VecExtract - (rd WritableReg) - (rn Reg) - (rm Reg) - (imm4 u8)) - - ;; Table vector lookup - single register table. The table consists of 8-bit elements and is - ;; stored in `rn`, while `rm` contains 8-bit element indices. `is_extension` specifies whether - ;; to emit a TBX or a TBL instruction, i.e. whether to leave the elements in the destination - ;; vector that correspond to out-of-range indices (greater than 15) unmodified or to set them - ;; to 0. - (VecTbl - (rd WritableReg) - (rn Reg) - (rm Reg) - (is_extension bool)) - - ;; Table vector lookup - two register table. The table consists of 8-bit elements and is - ;; stored in `rn` and `rn2`, while `rm` contains 8-bit element indices. `is_extension` - ;; specifies whether to emit a TBX or a TBL instruction, i.e. whether to leave the elements in - ;; the destination vector that correspond to out-of-range indices (greater than 31) unmodified - ;; or to set them to 0. The table registers `rn` and `rn2` must have consecutive numbers - ;; modulo 32, that is v31 and v0 (in that order) are consecutive registers. - (VecTbl2 - (rd WritableReg) - (rn Reg) - (rn2 Reg) - (rm Reg) - (is_extension bool)) - - ;; Load an element and replicate to all lanes of a vector. - (VecLoadReplicate - (rd WritableReg) - (rn Reg) - (size VectorSize)) - - ;; Vector conditional select, 128 bit. A synthetic instruction, which generates a 4-insn - ;; control-flow diamond. - (VecCSel - (rd WritableReg) - (rn Reg) - (rm Reg) - (cond Cond)) - - ;; Move to the NZCV flags (actually a `MSR NZCV, Xn` insn). - (MovToNZCV - (rn Reg)) - - ;; Move from the NZCV flags (actually a `MRS Xn, NZCV` insn). - (MovFromNZCV - (rd WritableReg)) - - ;; A machine call instruction. 
N.B.: this allows only a +/- 128MB offset (it uses a relocation - ;; of type `Reloc::Arm64Call`); if the destination distance is not `RelocDistance::Near`, the - ;; code should use a `LoadExtName` / `CallInd` sequence instead, allowing an arbitrary 64-bit - ;; target. - (Call - (info BoxCallInfo)) - - ;; A machine indirect-call instruction. - (CallInd - (info BoxCallIndInfo)) - - ;; ---- branches (exactly one must appear at end of BB) ---- - - ;; A machine return instruction. - (Ret) - - ;; A placeholder instruction, generating no code, meaning that a function epilogue must be - ;; inserted there. - (EpiloguePlaceholder) - - ;; An unconditional branch. - (Jump - (dest BranchTarget)) - - ;; A conditional branch. Contains two targets; at emission time, both are emitted, but - ;; the MachBuffer knows to truncate the trailing branch if fallthrough. We optimize the - ;; choice of taken/not_taken (inverting the branch polarity as needed) based on the - ;; fallthrough at the time of lowering. - (CondBr - (taken BranchTarget) - (not_taken BranchTarget) - (kind CondBrKind)) - - ;; A conditional trap: execute a `udf` if the condition is true. This is - ;; one VCode instruction because it uses embedded control flow; it is - ;; logically a single-in, single-out region, but needs to appear as one - ;; unit to the register allocator. - ;; - ;; The `CondBrKind` gives the conditional-branch condition that will - ;; *execute* the embedded `Inst`. (In the emitted code, we use the inverse - ;; of this condition in a branch that skips the trap instruction.) - (TrapIf - (kind CondBrKind) - (trap_code TrapCode)) - - ;; An indirect branch through a register, augmented with set of all - ;; possible successors. - (IndirectBr - (rn Reg) - (targets VecMachLabel)) - - ;; A "break" instruction, used for e.g. traps and debug breakpoints. - (Brk) - - ;; An instruction guaranteed to always be undefined and to trigger an illegal instruction at - ;; runtime. - (Udf - (trap_code TrapCode)) - - ;; Compute the address (using a PC-relative offset) of a memory location, using the `ADR` - ;; instruction. Note that we take a simple offset, not a `MemLabel`, here, because `Adr` is - ;; only used for now in fixed lowering sequences with hardcoded offsets. In the future we may - ;; need full `MemLabel` support. - (Adr - (rd WritableReg) - ;; Offset in range -2^20 .. 2^20. - (off i32)) - - ;; Raw 32-bit word, used for inline constants and jump-table entries. - (Word4 - (data u32)) - - ;; Raw 64-bit word, used for inline constants. - (Word8 - (data u64)) - - ;; Jump-table sequence, as one compound instruction (see note in lower_inst.rs for rationale). - (JTSequence - (info BoxJTSequenceInfo) - (ridx Reg) - (rtmp1 WritableReg) - (rtmp2 WritableReg)) - - ;; Load an inline symbol reference. - (LoadExtName - (rd WritableReg) - (name BoxExternalName) - (offset i64)) - - ;; Load address referenced by `mem` into `rd`. - (LoadAddr - (rd WritableReg) - (mem AMode)) - - ;; Marker, no-op in generated code: SP "virtual offset" is adjusted. This - ;; controls how AMode::NominalSPOffset args are lowered. - (VirtualSPOffsetAdj - (offset i64)) - - ;; Meta-insn, no-op in generated code: emit constant/branch veneer island - ;; at this point (with a guard jump around it) if less than the needed - ;; space is available before the next branch deadline. See the `MachBuffer` - ;; implementation in `machinst/buffer.rs` for the overall algorithm. 
In - ;; brief, we retain a set of "pending/unresolved label references" from - ;; branches as we scan forward through instructions to emit machine code; - ;; if we notice we're about to go out of range on an unresolved reference, - ;; we stop, emit a bunch of "veneers" (branches in a form that has a longer - ;; range, e.g. a 26-bit-offset unconditional jump), and point the original - ;; label references to those. This is an "island" because it comes in the - ;; middle of the code. - ;; - ;; This meta-instruction is a necessary part of the logic that determines - ;; where to place islands. Ordinarily, we want to place them between basic - ;; blocks, so we compute the worst-case size of each block, and emit the - ;; island before starting a block if we would exceed a deadline before the - ;; end of the block. However, some sequences (such as an inline jumptable) - ;; are variable-length and not accounted for by this logic; so these - ;; lowered sequences include an `EmitIsland` to trigger island generation - ;; where necessary. - (EmitIsland - ;; The needed space before the next deadline. - (needed_space CodeOffset)) - - ;; A call to the `ElfTlsGetAddr` libcall. Returns address of TLS symbol in x0. - (ElfTlsGetAddr - (symbol ExternalName)) - - ;; A definition of a value label. - (ValueLabelMarker - (reg Reg) - (label ValueLabel)) - - ;; An unwind pseudo-instruction. - (Unwind - (inst UnwindInst)) -)) - -;; An ALU operation. This can be paired with several instruction formats -;; below (see `Inst`) in any combination. -(type ALUOp - (enum - (Add) - (Sub) - (Orr) - (OrrNot) - (And) - (AndS) - (AndNot) - ;; XOR (AArch64 calls this "EOR") - (Eor) - ;; XNOR (AArch64 calls this "EOR-NOT") - (EorNot) - ;; Add, setting flags - (AddS) - ;; Sub, setting flags - (SubS) - ;; Signed multiply, high-word result - (SMulH) - ;; Unsigned multiply, high-word result - (UMulH) - (SDiv) - (UDiv) - (RotR) - (Lsr) - (Asr) - (Lsl) - ;; Add with carry - (Adc) - ;; Add with carry, settings flags - (AdcS) - ;; Subtract with carry - (Sbc) - ;; Subtract with carry, settings flags - (SbcS) -)) - -;; An ALU operation with three arguments. -(type ALUOp3 - (enum - ;; Multiply-add - (MAdd32) - ;; Multiply-add - (MAdd64) - ;; Multiply-sub - (MSub32) - ;; Multiply-sub - (MSub64) -)) - -(type UImm5 (primitive UImm5)) -(type Imm12 (primitive Imm12)) -(type ImmLogic (primitive ImmLogic)) -(type ImmShift (primitive ImmShift)) -(type ShiftOpAndAmt (primitive ShiftOpAndAmt)) -(type MoveWideConst (primitive MoveWideConst)) -(type NZCV (primitive NZCV)) -(type ASIMDFPModImm (primitive ASIMDFPModImm)) -(type ASIMDMovModImm (primitive ASIMDMovModImm)) - -(type BoxCallInfo (primitive BoxCallInfo)) -(type BoxCallIndInfo (primitive BoxCallIndInfo)) -(type CondBrKind (primitive CondBrKind)) -(type BranchTarget (primitive BranchTarget)) -(type BoxJTSequenceInfo (primitive BoxJTSequenceInfo)) -(type CodeOffset (primitive CodeOffset)) - -(type ExtendOp extern - (enum - (UXTB) - (UXTH) - (UXTW) - (UXTX) - (SXTB) - (SXTH) - (SXTW) - (SXTX) -)) - -;; An operation on the bits of a register. This can be paired with several instruction formats -;; below (see `Inst`) in any combination. 
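The `EmitIsland` comment above describes deadline-driven island placement: while emitting, the buffer tracks the earliest offset by which some pending short-range branch must be resolved, and emits an island of long-range veneers (skipped by a guard jump) before crossing that deadline. The real logic lives in `machinst/buffer.rs`; the types and numbers below are invented purely to show the shape of the bookkeeping:

```rust
/// Hypothetical, simplified island-placement bookkeeping.
struct Emitter {
    cur_offset: u32,
    /// Earliest code offset at which some pending short-range branch
    /// would go out of range if still unresolved.
    next_deadline: Option<u32>,
}

impl Emitter {
    /// Record a forward branch with limited reach; it must be resolved or
    /// veneered before `cur_offset + max_range`.
    fn note_branch(&mut self, max_range: u32) {
        let deadline = self.cur_offset + max_range;
        self.next_deadline = Some(self.next_deadline.map_or(deadline, |d| d.min(deadline)));
    }

    /// Before emitting `needed_space` more bytes (e.g. an inline jump table),
    /// decide whether an island must be emitted here.
    fn must_emit_island(&self, needed_space: u32) -> bool {
        match self.next_deadline {
            Some(deadline) => self.cur_offset + needed_space >= deadline,
            None => false,
        }
    }
}

fn main() {
    let mut e = Emitter { cur_offset: 0, next_deadline: None };
    e.note_branch(1 << 20);           // a conditional branch with ~1 MB reach
    e.cur_offset = (1 << 20) - 64;    // nearly at the deadline
    assert!(e.must_emit_island(128)); // a 128-byte sequence would overrun it
}
```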
-(type BitOp - (enum - ;; Bit reverse - (RBit) - (Clz) - (Cls) -)) - -(type AMode extern (enum)) -(type PairAMode extern (enum)) -(type FPUOpRI extern (enum)) - -(type OperandSize extern - (enum Size32 - Size64)) - -;; Helper for calculating the `OperandSize` corresponding to a type -(decl operand_size (Type) OperandSize) -(rule (operand_size (fits_in_32 _ty)) (OperandSize.Size32)) -(rule (operand_size (fits_in_64 _ty)) (OperandSize.Size64)) - -(type ScalarSize extern - (enum Size8 - Size16 - Size32 - Size64 - Size128)) - -(type Cond extern - (enum - (Eq) - (Ne) - (Hs) - (Lo) - (Mi) - (Pl) - (Vs) - (Vc) - (Hi) - (Ls) - (Ge) - (Lt) - (Gt) - (Le) - (Al) - (Nv) -)) - -(type VectorSize extern - (enum - (Size8x8) - (Size8x16) - (Size16x4) - (Size16x8) - (Size32x2) - (Size32x4) - (Size64x2) -)) - -;; Helper for calculating the `VectorSize` corresponding to a type -(decl vector_size (Type) VectorSize) -(rule (vector_size (multi_lane 8 16)) (VectorSize.Size8x16)) -(rule (vector_size (multi_lane 16 8)) (VectorSize.Size16x8)) -(rule (vector_size (multi_lane 32 4)) (VectorSize.Size32x4)) -(rule (vector_size (multi_lane 64 2)) (VectorSize.Size64x2)) - -;; A floating-point unit (FPU) operation with one arg. -(type FPUOp1 - (enum - (Abs32) - (Abs64) - (Neg32) - (Neg64) - (Sqrt32) - (Sqrt64) - (Cvt32To64) - (Cvt64To32) -)) - -;; A floating-point unit (FPU) operation with two args. -(type FPUOp2 - (enum - (Add32) - (Add64) - (Sub32) - (Sub64) - (Mul32) - (Mul64) - (Div32) - (Div64) - (Max32) - (Max64) - (Min32) - (Min64) - ;; Signed saturating add - (Sqadd64) - ;; Unsigned saturating add - (Uqadd64) - ;; Signed saturating subtract - (Sqsub64) - ;; Unsigned saturating subtract - (Uqsub64) -)) - -;; A floating-point unit (FPU) operation with three args. -(type FPUOp3 - (enum - (MAdd32) - (MAdd64) -)) - -;; A conversion from an FP to an integer value. -(type FpuToIntOp - (enum - (F32ToU32) - (F32ToI32) - (F32ToU64) - (F32ToI64) - (F64ToU32) - (F64ToI32) - (F64ToU64) - (F64ToI64) -)) - -;; A conversion from an integer to an FP value. -(type IntToFpuOp - (enum - (U32ToF32) - (I32ToF32) - (U32ToF64) - (I32ToF64) - (U64ToF32) - (I64ToF32) - (U64ToF64) - (I64ToF64) -)) - -;; Modes for FP rounding ops: round down (floor) or up (ceil), or toward zero (trunc), or to -;; nearest, and for 32- or 64-bit FP values. -(type FpuRoundMode - (enum - (Minus32) - (Minus64) - (Plus32) - (Plus64) - (Zero32) - (Zero64) - (Nearest32) - (Nearest64) -)) - -;; Type of vector element extensions. -(type VecExtendOp - (enum - ;; Signed extension of 8-bit elements - (Sxtl8) - ;; Signed extension of 16-bit elements - (Sxtl16) - ;; Signed extension of 32-bit elements - (Sxtl32) - ;; Unsigned extension of 8-bit elements - (Uxtl8) - ;; Unsigned extension of 16-bit elements - (Uxtl16) - ;; Unsigned extension of 32-bit elements - (Uxtl32) -)) - -;; A vector ALU operation. 
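The `operand_size` and `vector_size` helpers above map CLIF value types onto encoding sizes: anything 32 bits or narrower uses the 32-bit scalar form, and 128-bit vectors are keyed by lane width and lane count. A plain-Rust rendering of the same mapping, with invented type descriptors standing in for CLIF types:

```rust
#[derive(Debug, PartialEq)]
enum OperandSize { Size32, Size64 }

#[derive(Debug, PartialEq)]
enum VectorSize { Size8x16, Size16x8, Size32x4, Size64x2 }

/// Scalar types at or below 32 bits use the 32-bit register form.
fn operand_size(bits: u32) -> OperandSize {
    if bits <= 32 { OperandSize::Size32 } else { OperandSize::Size64 }
}

/// 128-bit vectors, described by lane width and lane count.
fn vector_size(lane_bits: u32, lanes: u32) -> Option<VectorSize> {
    match (lane_bits, lanes) {
        (8, 16) => Some(VectorSize::Size8x16),
        (16, 8) => Some(VectorSize::Size16x8),
        (32, 4) => Some(VectorSize::Size32x4),
        (64, 2) => Some(VectorSize::Size64x2),
        _ => None,
    }
}

fn main() {
    assert_eq!(operand_size(16), OperandSize::Size32);
    assert_eq!(vector_size(32, 4), Some(VectorSize::Size32x4));
}
```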
-(type VecALUOp - (enum - ;; Signed saturating add - (Sqadd) - ;; Unsigned saturating add - (Uqadd) - ;; Signed saturating subtract - (Sqsub) - ;; Unsigned saturating subtract - (Uqsub) - ;; Compare bitwise equal - (Cmeq) - ;; Compare signed greater than or equal - (Cmge) - ;; Compare signed greater than - (Cmgt) - ;; Compare unsigned higher - (Cmhs) - ;; Compare unsigned higher or same - (Cmhi) - ;; Floating-point compare equal - (Fcmeq) - ;; Floating-point compare greater than - (Fcmgt) - ;; Floating-point compare greater than or equal - (Fcmge) - ;; Bitwise and - (And) - ;; Bitwise bit clear - (Bic) - ;; Bitwise inclusive or - (Orr) - ;; Bitwise exclusive or - (Eor) - ;; Bitwise select - (Bsl) - ;; Unsigned maximum pairwise - (Umaxp) - ;; Add - (Add) - ;; Subtract - (Sub) - ;; Multiply - (Mul) - ;; Signed shift left - (Sshl) - ;; Unsigned shift left - (Ushl) - ;; Unsigned minimum - (Umin) - ;; Signed minimum - (Smin) - ;; Unsigned maximum - (Umax) - ;; Signed maximum - (Smax) - ;; Unsigned rounding halving add - (Urhadd) - ;; Floating-point add - (Fadd) - ;; Floating-point subtract - (Fsub) - ;; Floating-point divide - (Fdiv) - ;; Floating-point maximum - (Fmax) - ;; Floating-point minimum - (Fmin) - ;; Floating-point multiply - (Fmul) - ;; Add pairwise - (Addp) - ;; Zip vectors (primary) [meaning, high halves] - (Zip1) - ;; Signed saturating rounding doubling multiply returning high half - (Sqrdmulh) -)) - -;; A Vector miscellaneous operation with two registers. -(type VecMisc2 - (enum - ;; Bitwise NOT - (Not) - ;; Negate - (Neg) - ;; Absolute value - (Abs) - ;; Floating-point absolute value - (Fabs) - ;; Floating-point negate - (Fneg) - ;; Floating-point square root - (Fsqrt) - ;; Reverse elements in 64-bit doublewords - (Rev64) - ;; Floating-point convert to signed integer, rounding toward zero - (Fcvtzs) - ;; Floating-point convert to unsigned integer, rounding toward zero - (Fcvtzu) - ;; Signed integer convert to floating-point - (Scvtf) - ;; Unsigned integer convert to floating-point - (Ucvtf) - ;; Floating point round to integral, rounding towards nearest - (Frintn) - ;; Floating point round to integral, rounding towards zero - (Frintz) - ;; Floating point round to integral, rounding towards minus infinity - (Frintm) - ;; Floating point round to integral, rounding towards plus infinity - (Frintp) - ;; Population count per byte - (Cnt) - ;; Compare bitwise equal to 0 - (Cmeq0) -)) - -;; A vector widening operation with one argument. -(type VecRRLongOp - (enum - ;; Floating-point convert to higher precision long, 16-bit elements - (Fcvtl16) - ;; Floating-point convert to higher precision long, 32-bit elements - (Fcvtl32) - ;; Shift left long (by element size), 8-bit elements - (Shll8) - ;; Shift left long (by element size), 16-bit elements - (Shll16) - ;; Shift left long (by element size), 32-bit elements - (Shll32) -)) - -;; A vector narrowing operation with one argument. 
-(type VecRRNarrowOp - (enum - ;; Extract narrow, 16-bit elements - (Xtn16) - ;; Extract narrow, 32-bit elements - (Xtn32) - ;; Extract narrow, 64-bit elements - (Xtn64) - ;; Signed saturating extract narrow, 16-bit elements - (Sqxtn16) - ;; Signed saturating extract narrow, 32-bit elements - (Sqxtn32) - ;; Signed saturating extract narrow, 64-bit elements - (Sqxtn64) - ;; Signed saturating extract unsigned narrow, 16-bit elements - (Sqxtun16) - ;; Signed saturating extract unsigned narrow, 32-bit elements - (Sqxtun32) - ;; Signed saturating extract unsigned narrow, 64-bit elements - (Sqxtun64) - ;; Unsigned saturating extract narrow, 16-bit elements - (Uqxtn16) - ;; Unsigned saturating extract narrow, 32-bit elements - (Uqxtn32) - ;; Unsigned saturating extract narrow, 64-bit elements - (Uqxtn64) - ;; Floating-point convert to lower precision narrow, 32-bit elements - (Fcvtn32) - ;; Floating-point convert to lower precision narrow, 64-bit elements - (Fcvtn64) -)) - -(type VecRRRLongOp - (enum - ;; Signed multiply long. - (Smull8) - (Smull16) - (Smull32) - ;; Unsigned multiply long. - (Umull8) - (Umull16) - (Umull32) - ;; Unsigned multiply add long - (Umlal8) - (Umlal16) - (Umlal32) -)) - -;; A vector operation on a pair of elements with one register. -(type VecPairOp - (enum - ;; Add pair of elements - (Addp) -)) - -;; 1-operand vector instruction that extends elements of the input register -;; and operates on a pair of elements. -(type VecRRPairLongOp - (enum - ;; Sign extend and add pair of elements - (Saddlp8) - (Saddlp16) - ;; Unsigned extend and add pair of elements - (Uaddlp8) - (Uaddlp16) -)) - -;; An operation across the lanes of vectors. -(type VecLanesOp - (enum - ;; Integer addition across a vector - (Addv) - ;; Unsigned minimum across a vector - (Uminv) -)) - -;; A shift-by-immediate operation on each lane of a vector. 
-(type VecShiftImmOp - (enum - ;; Unsigned shift left - (Shl) - ;; Unsigned shift right - (Ushr) - ;; Signed shift right - (Sshr) -)) - -;; Atomic read-modify-write operations with acquire-release semantics -(type AtomicRMWOp - (enum - (Add) - (Clr) - (Eor) - (Set) - (Smax) - (Smin) - (Umax) - (Umin) -)) - -;; Extractor helpers for various immmediate constants ;;;;;;;;;;;;;;;;;;;;;;;;;; - -(decl move_wide_const_from_u64 (MoveWideConst) u64) -(extern extractor move_wide_const_from_u64 move_wide_const_from_u64) - -(decl move_wide_const_from_negated_u64 (MoveWideConst) u64) -(extern extractor move_wide_const_from_negated_u64 move_wide_const_from_negated_u64) - -(decl imm_logic_from_u64 (Type ImmLogic) u64) -(extern extractor imm_logic_from_u64 imm_logic_from_u64 (in out)) - -(decl imm_logic_from_imm64 (Type ImmLogic) Imm64) -(extern extractor imm_logic_from_imm64 imm_logic_from_imm64 (in out)) - -(decl imm_shift_from_imm64 (Type ImmShift) Imm64) -(extern extractor imm_shift_from_imm64 imm_shift_from_imm64 (in out)) - -(decl imm_shift_from_u8 (u8) ImmShift) -(extern constructor imm_shift_from_u8 imm_shift_from_u8) - -(decl imm12_from_u64 (Imm12) u64) -(extern extractor imm12_from_u64 imm12_from_u64) - -(decl u8_into_uimm5 (u8) UImm5) -(extern constructor u8_into_uimm5 u8_into_uimm5) - -(decl u8_into_imm12 (u8) Imm12) -(extern constructor u8_into_imm12 u8_into_imm12) - -(decl u64_into_imm_logic (Type u64) ImmLogic) -(extern constructor u64_into_imm_logic u64_into_imm_logic) - -(decl imm12_from_negated_u64 (Imm12) u64) -(extern extractor imm12_from_negated_u64 imm12_from_negated_u64) - -(decl lshl_from_imm64 (Type ShiftOpAndAmt) Imm64) -(extern extractor lshl_from_imm64 lshl_from_imm64 (in out)) - -(decl integral_ty (Type) Type) -(extern extractor integral_ty integral_ty) - -;; Helper to go directly from a `Value`, when it's an `iconst`, to an `Imm12`. -(decl imm12_from_value (Imm12) Value) -(extractor - (imm12_from_value n) - (def_inst (iconst (u64_from_imm64 (imm12_from_u64 n))))) - -;; Same as `imm12_from_value`, but tries negating the constant value. -(decl imm12_from_negated_value (Imm12) Value) -(extractor - (imm12_from_negated_value n) - (def_inst (iconst (u64_from_imm64 (imm12_from_negated_u64 n))))) - -;; Helper type to represent a value and an extend operation fused together. -(type ExtendedValue extern (enum)) -(decl extended_value_from_value (ExtendedValue) Value) -(extern extractor extended_value_from_value extended_value_from_value) - -;; Constructors used to poke at the fields of an `ExtendedValue`. -(decl put_extended_in_reg (ExtendedValue) Reg) -(extern constructor put_extended_in_reg put_extended_in_reg) -(decl get_extended_op (ExtendedValue) ExtendOp) -(extern constructor get_extended_op get_extended_op) - -(decl nzcv (bool bool bool bool) NZCV) -(extern constructor nzcv nzcv) - -(decl cond_br_zero (Reg) CondBrKind) -(extern constructor cond_br_zero cond_br_zero) - -(decl cond_br_cond (Cond) CondBrKind) -(extern constructor cond_br_cond cond_br_cond) - -;; Instruction creation helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; Helper for creating the zero register. -(decl zero_reg () Reg) -(extern constructor zero_reg zero_reg) - -(decl writable_zero_reg () WritableReg) -(extern constructor writable_zero_reg writable_zero_reg) - -;; Helper for emitting `MInst.MovZ` instructions. 
-(decl movz (MoveWideConst OperandSize) Reg) -(rule (movz imm size) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.MovZ dst imm size)))) - dst)) - -;; Helper for emitting `MInst.MovN` instructions. -(decl movn (MoveWideConst OperandSize) Reg) -(rule (movn imm size) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.MovN dst imm size)))) - dst)) - -;; Helper for emitting `MInst.AluRRImmLogic` instructions. -(decl alu_rr_imm_logic (ALUOp Type Reg ImmLogic) Reg) -(rule (alu_rr_imm_logic op ty src imm) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRImmLogic op (operand_size ty) dst src imm)))) - dst)) - -;; Helper for emitting `MInst.AluRRImmShift` instructions. -(decl alu_rr_imm_shift (ALUOp Type Reg ImmShift) Reg) -(rule (alu_rr_imm_shift op ty src imm) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRImmShift op (operand_size ty) dst src imm)))) - dst)) - -;; Helper for emitting `MInst.AluRRR` instructions. -(decl alu_rrr (ALUOp Type Reg Reg) Reg) -(rule (alu_rrr op ty src1 src2) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRR op (operand_size ty) dst src1 src2)))) - dst)) - -;; Helper for emitting `MInst.VecRRR` instructions. -(decl vec_rrr (VecALUOp Reg Reg VectorSize) Reg) -(rule (vec_rrr op src1 src2 size) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.VecRRR op dst src1 src2 size)))) - dst)) - -;; Helper for emitting `MInst.VecLanes` instructions. -(decl vec_lanes (VecLanesOp Reg VectorSize) Reg) -(rule (vec_lanes op src size) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.VecLanes op dst src size)))) - dst)) - -;; Helper for emitting `MInst.VecDup` instructions. -(decl vec_dup (Reg VectorSize) Reg) -(rule (vec_dup src size) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.VecDup dst src size)))) - dst)) - -;; Helper for emitting `MInst.AluRRImm12` instructions. -(decl alu_rr_imm12 (ALUOp Type Reg Imm12) Reg) -(rule (alu_rr_imm12 op ty src imm) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRImm12 op (operand_size ty) dst src imm)))) - dst)) - -;; Helper for emitting `MInst.AluRRRShift` instructions. -(decl alu_rrr_shift (ALUOp Type Reg Reg ShiftOpAndAmt) Reg) -(rule (alu_rrr_shift op ty src1 src2 shift) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRRShift op (operand_size ty) dst src1 src2 shift)))) - dst)) - -;; Helper for emitting `MInst.AluRRRExtend` instructions. -(decl alu_rrr_extend (ALUOp Type Reg Reg ExtendOp) Reg) -(rule (alu_rrr_extend op ty src1 src2 extend) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRRExtend op (operand_size ty) dst src1 src2 extend)))) - dst)) - -;; Same as `alu_rrr_extend`, but takes an `ExtendedValue` packed "pair" instead -;; of a `Reg` and an `ExtendOp`. -(decl alu_rr_extend_reg (ALUOp Type Reg ExtendedValue) Reg) -(rule (alu_rr_extend_reg op ty src1 extended_reg) - (let ((src2 Reg (put_extended_in_reg extended_reg)) - (extend ExtendOp (get_extended_op extended_reg))) - (alu_rrr_extend op ty src1 src2 extend))) - -;; Helper for emitting `MInst.AluRRRR` instructions. -(decl alu_rrrr (ALUOp3 Reg Reg Reg) Reg) -(rule (alu_rrrr op src1 src2 src3) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.AluRRRR op dst src1 src2 src3)))) - dst)) - -;; Helper for emitting `MInst.BitRR` instructions. 
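The ISLE constructor helpers above all follow one pattern: allocate a fresh temporary register, emit exactly one `MInst` that writes it, and return the temporary as the rule's result. A hedged Rust paraphrase of that shape, with a toy context standing in for the real lowering context:

```rust
// Toy stand-ins for the lowering context and instruction type.
#[derive(Debug)]
enum MInst {
    AluRRR { op: &'static str, dst: u32, src1: u32, src2: u32 },
}

struct Ctx {
    next_vreg: u32,
    emitted: Vec<MInst>,
}

impl Ctx {
    fn temp_writable_reg(&mut self) -> u32 {
        let r = self.next_vreg;
        self.next_vreg += 1;
        r
    }

    fn emit(&mut self, inst: MInst) {
        self.emitted.push(inst);
    }

    /// Equivalent of the `alu_rrr` helper: new temp, one emit, return the temp.
    fn alu_rrr(&mut self, op: &'static str, src1: u32, src2: u32) -> u32 {
        let dst = self.temp_writable_reg();
        self.emit(MInst::AluRRR { op, dst, src1, src2 });
        dst
    }
}

fn main() {
    let mut ctx = Ctx { next_vreg: 100, emitted: Vec::new() };
    let sum = ctx.alu_rrr("add", 1, 2);
    println!("result in v{sum}: {:?}", ctx.emitted);
}
```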
-(decl bit_rr (BitOp Type Reg) Reg) -(rule (bit_rr op ty src) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.BitRR op (operand_size ty) dst src)))) - dst)) - -;; Helper for emitting `adds` instructions. -(decl add_with_flags_paired (Type Reg Reg) ProducesFlags) -(rule (add_with_flags_paired ty src1 src2) - (let ((dst WritableReg (temp_writable_reg $I64))) - (ProducesFlags.ProducesFlagsReturnsResultWithConsumer - (MInst.AluRRR (ALUOp.AddS) (operand_size ty) dst src1 src2) - dst))) - -;; Helper for emitting `adc` instructions. -(decl adc_paired (Type Reg Reg) ConsumesFlags) -(rule (adc_paired ty src1 src2) - (let ((dst WritableReg (temp_writable_reg $I64))) - (ConsumesFlags.ConsumesFlagsReturnsResultWithProducer - (MInst.AluRRR (ALUOp.Adc) (operand_size ty) dst src1 src2) - dst))) - -;; Helper for emitting `subs` instructions. -(decl sub_with_flags_paired (Type Reg Reg) ProducesFlags) -(rule (sub_with_flags_paired ty src1 src2) - (let ((dst WritableReg (temp_writable_reg $I64))) - (ProducesFlags.ProducesFlagsReturnsResultWithConsumer - (MInst.AluRRR (ALUOp.SubS) (operand_size ty) dst src1 src2) - dst))) - -(decl cmp64_imm (Reg Imm12) ProducesFlags) -(rule (cmp64_imm src1 src2) - (ProducesFlags.ProducesFlagsSideEffect - (MInst.AluRRImm12 (ALUOp.SubS) (OperandSize.Size64) (writable_zero_reg) - src1 src2))) - -;; Helper for emitting `sbc` instructions. -(decl sbc_paired (Type Reg Reg) ConsumesFlags) -(rule (sbc_paired ty src1 src2) - (let ((dst WritableReg (temp_writable_reg $I64))) - (ConsumesFlags.ConsumesFlagsReturnsResultWithProducer - (MInst.AluRRR (ALUOp.Sbc) (operand_size ty) dst src1 src2) - dst))) - -;; Helper for emitting `MInst.VecMisc` instructions. -(decl vec_misc (VecMisc2 Reg VectorSize) Reg) -(rule (vec_misc op src size) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.VecMisc op dst src size)))) - dst)) - -;; Helper for emitting `MInst.VecRRRLong` instructions. -(decl vec_rrr_long (VecRRRLongOp Reg Reg bool) Reg) -(rule (vec_rrr_long op src1 src2 high_half) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.VecRRRLong op dst src1 src2 high_half)))) - dst)) - -;; Helper for emitting `MInst.VecRRRLong` instructions, but for variants -;; where the operation both reads and modifies the destination register. -;; -;; Currently this is only used for `VecRRRLongOp.Umlal*` -(decl vec_rrrr_long (VecRRRLongOp Reg Reg Reg bool) Reg) -(rule (vec_rrrr_long op src1 src2 src3 high_half) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_1 Unit (emit (MInst.FpuMove128 dst src1))) - (_2 Unit (emit (MInst.VecRRRLong op dst src2 src3 high_half)))) - dst)) - -;; Helper for emitting `MInst.VecRRNarrow` instructions. -(decl vec_rr_narrow (VecRRNarrowOp Reg bool) Reg) -(rule (vec_rr_narrow op src high_half) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.VecRRNarrow op dst src high_half)))) - dst)) - -;; Helper for emitting `MInst.VecRRLong` instructions. -(decl vec_rr_long (VecRRLongOp Reg bool) Reg) -(rule (vec_rr_long op src high_half) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.VecRRLong op dst src high_half)))) - dst)) - -;; Helper for emitting `MInst.MovToFpu` instructions. -(decl mov_to_fpu (Reg ScalarSize) Reg) -(rule (mov_to_fpu x size) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_ Unit (emit (MInst.MovToFpu dst x size)))) - dst)) - -;; Helper for emitting `MInst.MovToVec` instructions. 
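The `ProducesFlags`/`ConsumesFlags` pairs above (for example `add_with_flags_paired` plus `adc_paired`) exist so that a wide add is emitted as an adds/adc pair with nothing clobbering NZCV in between. The carry chain those two instructions implement is just this, written in ordinary Rust:

```rust
/// 128-bit addition from two 64-bit halves: the low half produces a carry
/// (what `adds` records in the flags) and the high half consumes it (`adc`).
fn add128(lo1: u64, hi1: u64, lo2: u64, hi2: u64) -> (u64, u64) {
    let (lo, carry) = lo1.overflowing_add(lo2);
    let hi = hi1.wrapping_add(hi2).wrapping_add(carry as u64);
    (lo, hi)
}

fn main() {
    // (2^64 - 1) + 1: the low half wraps to 0 and the carry lands in the high half.
    assert_eq!(add128(u64::MAX, 0, 1, 0), (0, 1));
}
```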
-(decl mov_to_vec (Reg Reg u8 VectorSize) Reg) -(rule (mov_to_vec src1 src2 lane size) - (let ((dst WritableReg (temp_writable_reg $I8X16)) - (_1 Unit (emit (MInst.FpuMove128 dst src1))) - (_2 Unit (emit (MInst.MovToVec dst src2 lane size)))) - dst)) - -;; Helper for emitting `MInst.MovFromVec` instructions. -(decl mov_from_vec (Reg u8 VectorSize) Reg) -(rule (mov_from_vec rn idx size) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.MovFromVec dst rn idx size)))) - dst)) - -;; Helper for emitting `MInst.MovFromVecSigned` instructions. -(decl mov_from_vec_signed (Reg u8 VectorSize OperandSize) Reg) -(rule (mov_from_vec_signed rn idx size scalar_size) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.MovFromVecSigned dst rn idx size scalar_size)))) - dst)) - -;; Helper for emitting `MInst.Extend` instructions. -(decl extend (Reg bool u8 u8) Reg) -(rule (extend rn signed from_bits to_bits) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.Extend dst rn signed from_bits to_bits)))) - dst)) - -;; Helper for emitting `MInst.LoadAcquire` instructions. -(decl load_acquire (Type Reg) Reg) -(rule (load_acquire ty addr) - (let ((dst WritableReg (temp_writable_reg $I64)) - (_ Unit (emit (MInst.LoadAcquire ty dst addr)))) - dst)) - -;; Helper for generating a `tst` instruction. -;; -;; Produces a `ProducesFlags` rather than a register or emitted instruction -;; which must be paired with `with_flags*` helpers. -(decl tst_imm (Type Reg ImmLogic) ProducesFlags) -(rule (tst_imm ty reg imm) - (ProducesFlags.ProducesFlagsSideEffect - (MInst.AluRRImmLogic (ALUOp.AndS) - (operand_size ty) - (writable_zero_reg) - reg - imm))) - -;; Helper for generating a `CSel` instruction. -;; -;; Note that this doesn't actually emit anything, instead it produces a -;; `ConsumesFlags` instruction which must be consumed with `with_flags*` -;; helpers. -(decl csel (Cond Reg Reg) ConsumesFlags) -(rule (csel cond if_true if_false) - (let ((dst WritableReg (temp_writable_reg $I64))) - (ConsumesFlags.ConsumesFlagsReturnsReg - (MInst.CSel dst cond if_true if_false) - dst))) - -;; Helpers for generating `add` instructions. - -(decl add (Type Reg Reg) Reg) -(rule (add ty x y) (alu_rrr (ALUOp.Add) ty x y)) - -(decl add_imm (Type Reg Imm12) Reg) -(rule (add_imm ty x y) (alu_rr_imm12 (ALUOp.Add) ty x y)) - -(decl add_extend (Type Reg ExtendedValue) Reg) -(rule (add_extend ty x y) (alu_rr_extend_reg (ALUOp.Add) ty x y)) - -(decl add_shift (Type Reg Reg ShiftOpAndAmt) Reg) -(rule (add_shift ty x y z) (alu_rrr_shift (ALUOp.Add) ty x y z)) - -(decl add_vec (Reg Reg VectorSize) Reg) -(rule (add_vec x y size) (vec_rrr (VecALUOp.Add) x y size)) - -;; Helpers for generating `sub` instructions. - -(decl sub (Type Reg Reg) Reg) -(rule (sub ty x y) (alu_rrr (ALUOp.Sub) ty x y)) - -(decl sub_imm (Type Reg Imm12) Reg) -(rule (sub_imm ty x y) (alu_rr_imm12 (ALUOp.Sub) ty x y)) - -(decl sub_extend (Type Reg ExtendedValue) Reg) -(rule (sub_extend ty x y) (alu_rr_extend_reg (ALUOp.Sub) ty x y)) - -(decl sub_shift (Type Reg Reg ShiftOpAndAmt) Reg) -(rule (sub_shift ty x y z) (alu_rrr_shift (ALUOp.Sub) ty x y z)) - -(decl sub_vec (Reg Reg VectorSize) Reg) -(rule (sub_vec x y size) (vec_rrr (VecALUOp.Sub) x y size)) - -;; Helpers for generating `madd` instructions. 
- -(decl madd (Type Reg Reg Reg) Reg) -(rule (madd (fits_in_32 _ty) x y z) (madd32 x y z)) -(rule (madd $I64 x y z) (madd64 x y z)) - -(decl madd32 (Reg Reg Reg) Reg) -(rule (madd32 x y z) (alu_rrrr (ALUOp3.MAdd32) x y z)) - -(decl madd64 (Reg Reg Reg) Reg) -(rule (madd64 x y z) (alu_rrrr (ALUOp3.MAdd64) x y z)) - -;; Helpers for generating `msub` instructions. - -(decl msub64 (Reg Reg Reg) Reg) -(rule (msub64 x y z) (alu_rrrr (ALUOp3.MSub64) x y z)) - -;; Helper for generating `uqadd` instructions. -(decl uqadd (Reg Reg VectorSize) Reg) -(rule (uqadd x y size) (vec_rrr (VecALUOp.Uqadd) x y size)) - -;; Helper for generating `sqadd` instructions. -(decl sqadd (Reg Reg VectorSize) Reg) -(rule (sqadd x y size) (vec_rrr (VecALUOp.Sqadd) x y size)) - -;; Helper for generating `uqsub` instructions. -(decl uqsub (Reg Reg VectorSize) Reg) -(rule (uqsub x y size) (vec_rrr (VecALUOp.Uqsub) x y size)) - -;; Helper for generating `sqsub` instructions. -(decl sqsub (Reg Reg VectorSize) Reg) -(rule (sqsub x y size) (vec_rrr (VecALUOp.Sqsub) x y size)) - -;; Helper for generating `umulh` instructions. -(decl umulh (Type Reg Reg) Reg) -(rule (umulh ty x y) (alu_rrr (ALUOp.UMulH) ty x y)) - -;; Helper for generating `smulh` instructions. -(decl smulh (Type Reg Reg) Reg) -(rule (smulh ty x y) (alu_rrr (ALUOp.SMulH) ty x y)) - -;; Helper for generating `mul` instructions. -(decl mul (Reg Reg VectorSize) Reg) -(rule (mul x y size) (vec_rrr (VecALUOp.Mul) x y size)) - -;; Helper for generating `neg` instructions. -(decl neg (Reg VectorSize) Reg) -(rule (neg x size) (vec_misc (VecMisc2.Neg) x size)) - -;; Helper for generating `rev64` instructions. -(decl rev64 (Reg VectorSize) Reg) -(rule (rev64 x size) (vec_misc (VecMisc2.Rev64) x size)) - -;; Helper for generating `xtn64` instructions. -(decl xtn64 (Reg bool) Reg) -(rule (xtn64 x high_half) (vec_rr_narrow (VecRRNarrowOp.Xtn64) x high_half)) - -;; Helper for generating `addp` instructions. -(decl addp (Reg Reg VectorSize) Reg) -(rule (addp x y size) (vec_rrr (VecALUOp.Addp) x y size)) - -;; Helper for generating `addv` instructions. -(decl addv (Reg VectorSize) Reg) -(rule (addv x size) (vec_lanes (VecLanesOp.Addv) x size)) - -;; Helper for generating `shll32` instructions. -(decl shll32 (Reg bool) Reg) -(rule (shll32 x high_half) (vec_rr_long (VecRRLongOp.Shll32) x high_half)) - -;; Helper for generating `umlal32` instructions. -(decl umlal32 (Reg Reg Reg bool) Reg) -(rule (umlal32 x y z high_half) (vec_rrrr_long (VecRRRLongOp.Umlal32) x y z high_half)) - -;; Helper for generating `smull8` instructions. -(decl smull8 (Reg Reg bool) Reg) -(rule (smull8 x y high_half) (vec_rrr_long (VecRRRLongOp.Smull8) x y high_half)) - -;; Helper for generating `umull8` instructions. -(decl umull8 (Reg Reg bool) Reg) -(rule (umull8 x y high_half) (vec_rrr_long (VecRRRLongOp.Umull8) x y high_half)) - -;; Helper for generating `smull16` instructions. -(decl smull16 (Reg Reg bool) Reg) -(rule (smull16 x y high_half) (vec_rrr_long (VecRRRLongOp.Smull16) x y high_half)) - -;; Helper for generating `umull16` instructions. -(decl umull16 (Reg Reg bool) Reg) -(rule (umull16 x y high_half) (vec_rrr_long (VecRRRLongOp.Umull16) x y high_half)) - -;; Helper for generating `smull32` instructions. -(decl smull32 (Reg Reg bool) Reg) -(rule (smull32 x y high_half) (vec_rrr_long (VecRRRLongOp.Smull32) x y high_half)) - -;; Helper for generating `umull32` instructions. 
-(decl umull32 (Reg Reg bool) Reg) -(rule (umull32 x y high_half) (vec_rrr_long (VecRRRLongOp.Umull32) x y high_half)) - -;; Helper for generating `asr` instructions. -(decl asr (Type Reg Reg) Reg) -(rule (asr ty x y) (alu_rrr (ALUOp.Asr) ty x y)) - -(decl asr_imm (Type Reg ImmShift) Reg) -(rule (asr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Asr) ty x imm)) - -;; Helper for generating `lsr` instructions. -(decl lsr (Type Reg Reg) Reg) -(rule (lsr ty x y) (alu_rrr (ALUOp.Lsr) ty x y)) - -(decl lsr_imm (Type Reg ImmShift) Reg) -(rule (lsr_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsr) ty x imm)) - -;; Helper for generating `lsl` instructions. -(decl lsl (Type Reg Reg) Reg) -(rule (lsl ty x y) (alu_rrr (ALUOp.Lsl) ty x y)) - -(decl lsl_imm (Type Reg ImmShift) Reg) -(rule (lsl_imm ty x imm) (alu_rr_imm_shift (ALUOp.Lsl) ty x imm)) - -;; Helper for generating `udiv` instructions. -(decl a64_udiv (Type Reg Reg) Reg) -(rule (a64_udiv ty x y) (alu_rrr (ALUOp.UDiv) ty x y)) - -;; Helper for generating `sdiv` instructions. -(decl a64_sdiv (Type Reg Reg) Reg) -(rule (a64_sdiv ty x y) (alu_rrr (ALUOp.SDiv) ty x y)) - -;; Helper for generating `not` instructions. -(decl not (Reg VectorSize) Reg) -(rule (not x size) (vec_misc (VecMisc2.Not) x size)) - -;; Helpers for generating `orr_not` instructions. - -(decl orr_not (Type Reg Reg) Reg) -(rule (orr_not ty x y) (alu_rrr (ALUOp.OrrNot) ty x y)) - -(decl orr_not_shift (Type Reg Reg ShiftOpAndAmt) Reg) -(rule (orr_not_shift ty x y shift) (alu_rrr_shift (ALUOp.OrrNot) ty x y shift)) - -;; Helpers for generating `orr` instructions. - -(decl orr (Type Reg Reg) Reg) -(rule (orr ty x y) (alu_rrr (ALUOp.Orr) ty x y)) - -(decl orr_imm (Type Reg ImmLogic) Reg) -(rule (orr_imm ty x y) (alu_rr_imm_logic (ALUOp.Orr) ty x y)) - -(decl orr_vec (Reg Reg VectorSize) Reg) -(rule (orr_vec x y size) (vec_rrr (VecALUOp.Orr) x y size)) - -;; Helpers for generating `and` instructions. - -(decl and_imm (Type Reg ImmLogic) Reg) -(rule (and_imm ty x y) (alu_rr_imm_logic (ALUOp.And) ty x y)) - -(decl and_vec (Reg Reg VectorSize) Reg) -(rule (and_vec x y size) (vec_rrr (VecALUOp.And) x y size)) - -;; Helpers for generating `eor` instructions. -(decl eor_vec (Reg Reg VectorSize) Reg) -(rule (eor_vec x y size) (vec_rrr (VecALUOp.Eor) x y size)) - -;; Helpers for generating `bic` instructions. -(decl bic_vec (Reg Reg VectorSize) Reg) -(rule (bic_vec x y size) (vec_rrr (VecALUOp.Bic) x y size)) - -;; Helpers for generating `sshl` instructions. -(decl sshl (Reg Reg VectorSize) Reg) -(rule (sshl x y size) (vec_rrr (VecALUOp.Sshl) x y size)) - -;; Helpers for generating `ushl` instructions. -(decl ushl (Reg Reg VectorSize) Reg) -(rule (ushl x y size) (vec_rrr (VecALUOp.Ushl) x y size)) - -;; Helpers for generating `rotr` instructions. - -(decl a64_rotr (Type Reg Reg) Reg) -(rule (a64_rotr ty x y) (alu_rrr (ALUOp.RotR) ty x y)) - -(decl a64_rotr_imm (Type Reg ImmShift) Reg) -(rule (a64_rotr_imm ty x y) (alu_rr_imm_shift (ALUOp.RotR) ty x y)) - -;; Helpers for generating `rbit` instructions. - -(decl rbit (Type Reg) Reg) -(rule (rbit ty x) (bit_rr (BitOp.RBit) ty x)) - -;; Helpers for generating `clz` instructions. - -(decl a64_clz (Type Reg) Reg) -(rule (a64_clz ty x) (bit_rr (BitOp.Clz) ty x)) - -;; Helpers for generating `cls` instructions. - -(decl a64_cls (Type Reg) Reg) -(rule (a64_cls ty x) (bit_rr (BitOp.Cls) ty x)) - -;; Helpers for generating `eon` instructions. 
- -(decl eon (Type Reg Reg) Reg) -(rule (eon ty x y) (alu_rrr (ALUOp.EorNot) ty x y)) - -;; Helpers for generating `cnt` instructions. - -(decl vec_cnt (Reg VectorSize) Reg) -(rule (vec_cnt x size) (vec_misc (VecMisc2.Cnt) x size)) - -;; Immediate value helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(decl imm (Type u64) Reg) - -;; 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVZ -(rule (imm (integral_ty _ty) (move_wide_const_from_u64 n)) - (movz n (OperandSize.Size64))) - -;; 16-bit immediate (shifted by 0, 16, 32 or 48 bits) in MOVN -(rule (imm (integral_ty _ty) (move_wide_const_from_negated_u64 n)) - (movn n (OperandSize.Size64))) - -;; Weird logical-instruction immediate in ORI using zero register -(rule (imm (integral_ty _ty) (imm_logic_from_u64 <$I64 n)) - (orr_imm $I64 (zero_reg) n)) - -(decl load_constant64_full (u64) Reg) -(extern constructor load_constant64_full load_constant64_full) - -;; Fallback for integral 64-bit constants that uses lots of `movk` -(rule (imm (integral_ty _ty) n) - (load_constant64_full n)) - -;; Sign extension helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -;; Place a `Value` into a register, sign extending it to 32-bits -(decl put_in_reg_sext32 (Value) Reg) -(rule (put_in_reg_sext32 val @ (value_type (fits_in_32 ty))) - (extend val $true (ty_bits ty) 32)) - -;; 32/64-bit passthrough. -(rule (put_in_reg_sext32 val @ (value_type $I32)) val) -(rule (put_in_reg_sext32 val @ (value_type $I64)) val) - -;; Place a `Value` into a register, zero extending it to 32-bits -(decl put_in_reg_zext32 (Value) Reg) -(rule (put_in_reg_zext32 val @ (value_type (fits_in_32 ty))) - (extend val $false (ty_bits ty) 32)) - -;; 32/64-bit passthrough. -(rule (put_in_reg_zext32 val @ (value_type $I32)) val) -(rule (put_in_reg_zext32 val @ (value_type $I64)) val) - -;; Place a `Value` into a register, sign extending it to 64-bits -(decl put_in_reg_sext64 (Value) Reg) -(rule (put_in_reg_sext64 val @ (value_type (fits_in_32 ty))) - (extend val $true (ty_bits ty) 64)) - -;; 64-bit passthrough. -(rule (put_in_reg_sext64 val @ (value_type $I64)) val) - -;; Place a `Value` into a register, zero extending it to 64-bits -(decl put_in_reg_zext64 (Value) Reg) -(rule (put_in_reg_zext64 val @ (value_type (fits_in_32 ty))) - (extend val $false (ty_bits ty) 64)) - -;; 64-bit passthrough. -(rule (put_in_reg_zext64 val @ (value_type $I64)) val) - -;; Misc instruction helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -(decl trap_if_zero_divisor (Reg) Reg) -(rule (trap_if_zero_divisor reg) - (let ((_ Unit (emit (MInst.TrapIf (cond_br_zero reg) (trap_code_division_by_zero))))) - reg)) - -(decl size_from_ty (Type) OperandSize) -(rule (size_from_ty (fits_in_32 _ty)) (OperandSize.Size32)) -(rule (size_from_ty $I64) (OperandSize.Size64)) - -;; Check for signed overflow. The only case is min_value / -1. -;; The following checks must be done in 32-bit or 64-bit, depending -;; on the input type. -(decl trap_if_div_overflow (Type Reg Reg) Reg) -(rule (trap_if_div_overflow ty x y) - (let ( - ;; Check RHS is -1. - (_1 Unit (emit (MInst.AluRRImm12 (ALUOp.AddS) (operand_size ty) (writable_zero_reg) y (u8_into_imm12 1)))) - - ;; Check LHS is min_value, by subtracting 1 and branching if - ;; there is overflow. 
- (_2 Unit (emit (MInst.CCmpImm (size_from_ty ty) - x - (u8_into_uimm5 1) - (nzcv $false $false $false $false) - (Cond.Eq)))) - (_3 Unit (emit (MInst.TrapIf (cond_br_cond (Cond.Vs)) - (trap_code_integer_overflow)))) - ) - x)) - -;; An atomic load that can be sunk into another operation. -(type SinkableAtomicLoad extern (enum)) - -;; Extract a `SinkableAtomicLoad` that works with `Reg` from a value -;; operand. -(decl sinkable_atomic_load (SinkableAtomicLoad) Value) -(extern extractor sinkable_atomic_load sinkable_atomic_load) - -;; Sink a `SinkableLoad` into a `Reg`. -;; -;; This is a side-effectful operation that notifies the context that the -;; instruction that produced the `SinkableAtomicLoad` has been sunk into another -;; instruction, and no longer needs to be lowered. -(decl sink_atomic_load (SinkableAtomicLoad) Reg) -(extern constructor sink_atomic_load sink_atomic_load) - -;; Helper for generating either an `AluRRR`, `AluRRRShift`, or `AluRRImmLogic` -;; instruction depending on the input. Note that this requires that the `ALUOp` -;; specified is commutative. -(decl alu_rs_imm_logic_commutative (ALUOp Type Value Value) Reg) - -;; Base case of operating on registers. -(rule (alu_rs_imm_logic_commutative op ty x y) - (alu_rrr op ty (put_in_reg x) (put_in_reg y))) - -;; Special cases for when one operand is a constant. -(rule (alu_rs_imm_logic_commutative op ty x (def_inst (iconst (imm_logic_from_imm64 u8 { - self as u8 - } -} - -/// A shift operator amount. -#[derive(Clone, Copy, Debug)] -pub struct ShiftOpShiftImm(u8); - -impl ShiftOpShiftImm { - /// Maximum shift for shifted-register operands. - pub const MAX_SHIFT: u64 = 63; - - /// Create a new shiftop shift amount, if possible. - pub fn maybe_from_shift(shift: u64) -> Option { - if shift <= Self::MAX_SHIFT { - Some(ShiftOpShiftImm(shift as u8)) - } else { - None - } - } - - /// Return the shift amount. - pub fn value(self) -> u8 { - self.0 - } - - /// Mask down to a given number of bits. - pub fn mask(self, bits: u8) -> ShiftOpShiftImm { - ShiftOpShiftImm(self.0 & (bits - 1)) - } -} - -/// A shift operator with an amount, guaranteed to be within range. -#[derive(Copy, Clone, Debug)] -pub struct ShiftOpAndAmt { - op: ShiftOp, - shift: ShiftOpShiftImm, -} - -impl ShiftOpAndAmt { - pub fn new(op: ShiftOp, shift: ShiftOpShiftImm) -> ShiftOpAndAmt { - ShiftOpAndAmt { op, shift } - } - - /// Get the shift op. - pub fn op(&self) -> ShiftOp { - self.op - } - - /// Get the shift amount. - pub fn amt(&self) -> ShiftOpShiftImm { - self.shift - } -} - -/// An extend operator for a register. -#[derive(Clone, Copy, Debug)] -#[repr(u8)] -pub enum ExtendOp { - UXTB = 0b000, - UXTH = 0b001, - UXTW = 0b010, - UXTX = 0b011, - SXTB = 0b100, - SXTH = 0b101, - SXTW = 0b110, - SXTX = 0b111, -} - -impl ExtendOp { - /// Encoding of this op. - pub fn bits(self) -> u8 { - self as u8 - } -} - -//============================================================================= -// Instruction sub-components (memory addresses): definitions - -/// A reference to some memory address. -#[derive(Clone, Debug)] -pub enum MemLabel { - /// An address in the code, a constant pool or jumptable, with relative - /// offset from this instruction. This form must be used at emission time; - /// see `memlabel_finalize()` for how other forms are lowered to this one. - PCRel(i32), -} - -/// An addressing mode specified for a load/store operation. 
-#[derive(Clone, Debug)] -pub enum AMode { - // - // Real ARM64 addressing modes: - // - /// "post-indexed" mode as per AArch64 docs: postincrement reg after address computation. - PostIndexed(Writable, SImm9), - /// "pre-indexed" mode as per AArch64 docs: preincrement reg before address computation. - PreIndexed(Writable, SImm9), - - // N.B.: RegReg, RegScaled, and RegScaledExtended all correspond to - // what the ISA calls the "register offset" addressing mode. We split out - // several options here for more ergonomic codegen. - /// Register plus register offset. - RegReg(Reg, Reg), - - /// Register plus register offset, scaled by type's size. - RegScaled(Reg, Reg, Type), - - /// Register plus register offset, scaled by type's size, with index sign- or zero-extended - /// first. - RegScaledExtended(Reg, Reg, Type, ExtendOp), - - /// Register plus register offset, with index sign- or zero-extended first. - RegExtended(Reg, Reg, ExtendOp), - - /// Unscaled signed 9-bit immediate offset from reg. - Unscaled(Reg, SImm9), - - /// Scaled (by size of a type) unsigned 12-bit immediate offset from reg. - UnsignedOffset(Reg, UImm12Scaled), - - // - // virtual addressing modes that are lowered at emission time: - // - /// Reference to a "label": e.g., a symbol. - Label(MemLabel), - - /// Arbitrary offset from a register. Converted to generation of large - /// offsets with multiple instructions as necessary during code emission. - RegOffset(Reg, i64, Type), - - /// Offset from the stack pointer. - SPOffset(i64, Type), - - /// Offset from the frame pointer. - FPOffset(i64, Type), - - /// Offset from the "nominal stack pointer", which is where the real SP is - /// just after stack and spill slots are allocated in the function prologue. - /// At emission time, this is converted to `SPOffset` with a fixup added to - /// the offset constant. The fixup is a running value that is tracked as - /// emission iterates through instructions in linear order, and can be - /// adjusted up and down with [Inst::VirtualSPOffsetAdj]. - /// - /// The standard ABI is in charge of handling this (by emitting the - /// adjustment meta-instructions). It maintains the invariant that "nominal - /// SP" is where the actual SP is after the function prologue and before - /// clobber pushes. See the diagram in the documentation for - /// [crate::isa::aarch64::abi](the ABI module) for more details. - NominalSPOffset(i64, Type), -} - -impl AMode { - /// Memory reference using an address in a register. - pub fn reg(reg: Reg) -> AMode { - // Use UnsignedOffset rather than Unscaled to use ldr rather than ldur. - // This also does not use PostIndexed / PreIndexed as they update the register. - AMode::UnsignedOffset(reg, UImm12Scaled::zero(I64)) - } - - /// Memory reference using the sum of two registers as an address. - pub fn reg_plus_reg(reg1: Reg, reg2: Reg) -> AMode { - AMode::RegReg(reg1, reg2) - } - - /// Memory reference using `reg1 + sizeof(ty) * reg2` as an address. - pub fn reg_plus_reg_scaled(reg1: Reg, reg2: Reg, ty: Type) -> AMode { - AMode::RegScaled(reg1, reg2, ty) - } - - /// Memory reference using `reg1 + sizeof(ty) * reg2` as an address, with `reg2` sign- or - /// zero-extended as per `op`. - pub fn reg_plus_reg_scaled_extended(reg1: Reg, reg2: Reg, ty: Type, op: ExtendOp) -> AMode { - AMode::RegScaledExtended(reg1, reg2, ty, op) - } - - /// Memory reference to a label: a global function or value, or data in the constant pool. 
- pub fn label(label: MemLabel) -> AMode { - AMode::Label(label) - } - - /// Does the address resolve to just a register value, with no offset or - /// other computation? - pub fn is_reg(&self) -> Option { - match self { - &AMode::UnsignedOffset(r, uimm12) if uimm12.value() == 0 => Some(r), - &AMode::Unscaled(r, imm9) if imm9.value() == 0 => Some(r), - &AMode::RegOffset(r, off, _) if off == 0 => Some(r), - &AMode::FPOffset(off, _) if off == 0 => Some(fp_reg()), - &AMode::SPOffset(off, _) if off == 0 => Some(stack_reg()), - _ => None, - } - } -} - -/// A memory argument to a load/store-pair. -#[derive(Clone, Debug)] -pub enum PairAMode { - SignedOffset(Reg, SImm7Scaled), - PreIndexed(Writable, SImm7Scaled), - PostIndexed(Writable, SImm7Scaled), -} - -//============================================================================= -// Instruction sub-components (conditions, branches and branch targets): -// definitions - -/// Condition for conditional branches. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[repr(u8)] -pub enum Cond { - Eq = 0, - Ne = 1, - Hs = 2, - Lo = 3, - Mi = 4, - Pl = 5, - Vs = 6, - Vc = 7, - Hi = 8, - Ls = 9, - Ge = 10, - Lt = 11, - Gt = 12, - Le = 13, - Al = 14, - Nv = 15, -} - -impl Cond { - /// Return the inverted condition. - pub fn invert(self) -> Cond { - match self { - Cond::Eq => Cond::Ne, - Cond::Ne => Cond::Eq, - - Cond::Hs => Cond::Lo, - Cond::Lo => Cond::Hs, - - Cond::Mi => Cond::Pl, - Cond::Pl => Cond::Mi, - - Cond::Vs => Cond::Vc, - Cond::Vc => Cond::Vs, - - Cond::Hi => Cond::Ls, - Cond::Ls => Cond::Hi, - - Cond::Ge => Cond::Lt, - Cond::Lt => Cond::Ge, - - Cond::Gt => Cond::Le, - Cond::Le => Cond::Gt, - - Cond::Al => Cond::Nv, - Cond::Nv => Cond::Al, - } - } - - /// Return the machine encoding of this condition. - pub fn bits(self) -> u32 { - self as u32 - } -} - -/// The kind of conditional branch: the common-case-optimized "reg-is-zero" / -/// "reg-is-nonzero" variants, or the generic one that tests the machine -/// condition codes. -#[derive(Clone, Copy, Debug)] -pub enum CondBrKind { - /// Condition: given register is zero. - Zero(Reg), - /// Condition: given register is nonzero. - NotZero(Reg), - /// Condition: the given condition-code test is true. - Cond(Cond), -} - -impl CondBrKind { - /// Return the inverted branch condition. - pub fn invert(self) -> CondBrKind { - match self { - CondBrKind::Zero(reg) => CondBrKind::NotZero(reg), - CondBrKind::NotZero(reg) => CondBrKind::Zero(reg), - CondBrKind::Cond(c) => CondBrKind::Cond(c.invert()), - } - } -} - -/// A branch target. Either unresolved (basic-block index) or resolved (offset -/// from end of current instruction). -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum BranchTarget { - /// An unresolved reference to a Label, as passed into - /// `lower_branch_group()`. - Label(MachLabel), - /// A fixed PC offset. - ResolvedOffset(i32), -} - -impl BranchTarget { - /// Return the target's label, if it is a label-based target. - pub fn as_label(self) -> Option { - match self { - BranchTarget::Label(l) => Some(l), - _ => None, - } - } - - /// Return the target's offset, if specified, or zero if label-based. - pub fn as_offset19_or_zero(self) -> u32 { - let off = match self { - BranchTarget::ResolvedOffset(off) => off >> 2, - _ => 0, - }; - assert!(off <= 0x3ffff); - assert!(off >= -0x40000); - (off as u32) & 0x7ffff - } - - /// Return the target's offset, if specified, or zero if label-based. 
- pub fn as_offset26_or_zero(self) -> u32 { - let off = match self { - BranchTarget::ResolvedOffset(off) => off >> 2, - _ => 0, - }; - assert!(off <= 0x1ffffff); - assert!(off >= -0x2000000); - (off as u32) & 0x3ffffff - } -} - -impl PrettyPrint for ShiftOpAndAmt { - fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String { - format!("{:?} {}", self.op(), self.amt().value()) - } -} - -impl PrettyPrint for ExtendOp { - fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String { - format!("{:?}", self) - } -} - -impl PrettyPrint for MemLabel { - fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String { - match self { - &MemLabel::PCRel(off) => format!("pc+{}", off), - } - } -} - -fn shift_for_type(ty: Type) -> usize { - match ty.bytes() { - 1 => 0, - 2 => 1, - 4 => 2, - 8 => 3, - 16 => 4, - _ => panic!("unknown type: {}", ty), - } -} - -impl PrettyPrint for AMode { - fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String { - match self { - &AMode::Unscaled(reg, simm9) => { - if simm9.value != 0 { - format!("[{}, {}]", reg.show_rru(mb_rru), simm9.show_rru(mb_rru)) - } else { - format!("[{}]", reg.show_rru(mb_rru)) - } - } - &AMode::UnsignedOffset(reg, uimm12) => { - if uimm12.value != 0 { - format!("[{}, {}]", reg.show_rru(mb_rru), uimm12.show_rru(mb_rru)) - } else { - format!("[{}]", reg.show_rru(mb_rru)) - } - } - &AMode::RegReg(r1, r2) => { - format!("[{}, {}]", r1.show_rru(mb_rru), r2.show_rru(mb_rru),) - } - &AMode::RegScaled(r1, r2, ty) => { - let shift = shift_for_type(ty); - format!( - "[{}, {}, LSL #{}]", - r1.show_rru(mb_rru), - r2.show_rru(mb_rru), - shift, - ) - } - &AMode::RegScaledExtended(r1, r2, ty, op) => { - let shift = shift_for_type(ty); - let size = match op { - ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32, - _ => OperandSize::Size64, - }; - let op = op.show_rru(mb_rru); - format!( - "[{}, {}, {} #{}]", - r1.show_rru(mb_rru), - show_ireg_sized(r2, mb_rru, size), - op, - shift - ) - } - &AMode::RegExtended(r1, r2, op) => { - let size = match op { - ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32, - _ => OperandSize::Size64, - }; - let op = op.show_rru(mb_rru); - format!( - "[{}, {}, {}]", - r1.show_rru(mb_rru), - show_ireg_sized(r2, mb_rru, size), - op, - ) - } - &AMode::Label(ref label) => label.show_rru(mb_rru), - &AMode::PreIndexed(r, simm9) => format!( - "[{}, {}]!", - r.to_reg().show_rru(mb_rru), - simm9.show_rru(mb_rru) - ), - &AMode::PostIndexed(r, simm9) => format!( - "[{}], {}", - r.to_reg().show_rru(mb_rru), - simm9.show_rru(mb_rru) - ), - // Eliminated by `mem_finalize()`. - &AMode::SPOffset(..) - | &AMode::FPOffset(..) - | &AMode::NominalSPOffset(..) - | &AMode::RegOffset(..) 
=> { - panic!("Unexpected pseudo mem-arg mode (stack-offset or generic reg-offset)!") - } - } - } -} - -impl PrettyPrint for PairAMode { - fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String { - match self { - &PairAMode::SignedOffset(reg, simm7) => { - if simm7.value != 0 { - format!("[{}, {}]", reg.show_rru(mb_rru), simm7.show_rru(mb_rru)) - } else { - format!("[{}]", reg.show_rru(mb_rru)) - } - } - &PairAMode::PreIndexed(reg, simm7) => format!( - "[{}, {}]!", - reg.to_reg().show_rru(mb_rru), - simm7.show_rru(mb_rru) - ), - &PairAMode::PostIndexed(reg, simm7) => format!( - "[{}], {}", - reg.to_reg().show_rru(mb_rru), - simm7.show_rru(mb_rru) - ), - } - } -} - -impl PrettyPrint for Cond { - fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String { - let mut s = format!("{:?}", self); - s.make_ascii_lowercase(); - s - } -} - -impl PrettyPrint for BranchTarget { - fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String { - match self { - &BranchTarget::Label(label) => format!("label{:?}", label.get()), - &BranchTarget::ResolvedOffset(off) => format!("{}", off), - } - } -} - -/// Type used to communicate the operand size of a machine instruction, as AArch64 has 32- and -/// 64-bit variants of many instructions (and integer registers). -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum OperandSize { - Size32, - Size64, -} - -impl OperandSize { - /// 32-bit case? - pub fn is32(self) -> bool { - self == OperandSize::Size32 - } - /// 64-bit case? - pub fn is64(self) -> bool { - self == OperandSize::Size64 - } - /// Convert from an `is32` boolean flag to an `OperandSize`. - pub fn from_is32(is32: bool) -> OperandSize { - if is32 { - OperandSize::Size32 - } else { - OperandSize::Size64 - } - } - /// Convert from a needed width to the smallest size that fits. - pub fn from_bits>(bits: I) -> OperandSize { - let bits: usize = bits.into(); - assert!(bits <= 64); - if bits <= 32 { - OperandSize::Size32 - } else { - OperandSize::Size64 - } - } - - /// Convert from an integer type into the smallest size that fits. - pub fn from_ty(ty: Type) -> OperandSize { - debug_assert!(!ty.is_vector()); - - Self::from_bits(ty_bits(ty)) - } - - /// Convert to I32, I64, or I128. - pub fn to_ty(self) -> Type { - match self { - OperandSize::Size32 => I32, - OperandSize::Size64 => I64, - } - } - - pub fn sf_bit(&self) -> u32 { - match self { - OperandSize::Size32 => 0, - OperandSize::Size64 => 1, - } - } -} - -/// Type used to communicate the size of a scalar SIMD & FP operand. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum ScalarSize { - Size8, - Size16, - Size32, - Size64, - Size128, -} - -impl ScalarSize { - /// Convert from a needed width to the smallest size that fits. - pub fn from_bits>(bits: I) -> ScalarSize { - match bits.into().next_power_of_two() { - 8 => ScalarSize::Size8, - 16 => ScalarSize::Size16, - 32 => ScalarSize::Size32, - 64 => ScalarSize::Size64, - 128 => ScalarSize::Size128, - w => panic!("Unexpected type width: {}", w), - } - } - - /// Convert to an integer operand size. - pub fn operand_size(&self) -> OperandSize { - match self { - ScalarSize::Size32 => OperandSize::Size32, - ScalarSize::Size64 => OperandSize::Size64, - _ => panic!("Unexpected operand_size request for: {:?}", self), - } - } - - /// Convert from an integer operand size. 
- pub fn from_operand_size(size: OperandSize) -> ScalarSize { - match size { - OperandSize::Size32 => ScalarSize::Size32, - OperandSize::Size64 => ScalarSize::Size64, - } - } - - /// Convert from a type into the smallest size that fits. - pub fn from_ty(ty: Type) -> ScalarSize { - debug_assert!(!ty.is_vector()); - - Self::from_bits(ty_bits(ty)) - } - - /// Return the encoding bits that are used by some scalar FP instructions - /// for a particular operand size. - pub fn ftype(&self) -> u32 { - match self { - ScalarSize::Size16 => 0b11, - ScalarSize::Size32 => 0b00, - ScalarSize::Size64 => 0b01, - _ => panic!("Unexpected scalar FP operand size: {:?}", self), - } - } -} - -/// Type used to communicate the size of a vector operand. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum VectorSize { - Size8x8, - Size8x16, - Size16x4, - Size16x8, - Size32x2, - Size32x4, - Size64x2, -} - -impl VectorSize { - /// Get the vector operand size with the given scalar size as lane size. - pub fn from_lane_size(size: ScalarSize, is_128bit: bool) -> VectorSize { - match (size, is_128bit) { - (ScalarSize::Size8, false) => VectorSize::Size8x8, - (ScalarSize::Size8, true) => VectorSize::Size8x16, - (ScalarSize::Size16, false) => VectorSize::Size16x4, - (ScalarSize::Size16, true) => VectorSize::Size16x8, - (ScalarSize::Size32, false) => VectorSize::Size32x2, - (ScalarSize::Size32, true) => VectorSize::Size32x4, - (ScalarSize::Size64, true) => VectorSize::Size64x2, - _ => panic!("Unexpected scalar FP operand size: {:?}", size), - } - } - - /// Convert from a type into a vector operand size. - pub fn from_ty(ty: Type) -> VectorSize { - debug_assert!(ty.is_vector()); - - match ty { - B8X16 => VectorSize::Size8x16, - B16X8 => VectorSize::Size16x8, - B32X4 => VectorSize::Size32x4, - B64X2 => VectorSize::Size64x2, - F32X2 => VectorSize::Size32x2, - F32X4 => VectorSize::Size32x4, - F64X2 => VectorSize::Size64x2, - I8X8 => VectorSize::Size8x8, - I8X16 => VectorSize::Size8x16, - I16X4 => VectorSize::Size16x4, - I16X8 => VectorSize::Size16x8, - I32X2 => VectorSize::Size32x2, - I32X4 => VectorSize::Size32x4, - I64X2 => VectorSize::Size64x2, - _ => unimplemented!("Unsupported type: {}", ty), - } - } - - /// Get the integer operand size that corresponds to a lane of a vector with a certain size. - pub fn operand_size(&self) -> OperandSize { - match self { - VectorSize::Size64x2 => OperandSize::Size64, - _ => OperandSize::Size32, - } - } - - /// Get the scalar operand size that corresponds to a lane of a vector with a certain size. - pub fn lane_size(&self) -> ScalarSize { - match self { - VectorSize::Size8x8 => ScalarSize::Size8, - VectorSize::Size8x16 => ScalarSize::Size8, - VectorSize::Size16x4 => ScalarSize::Size16, - VectorSize::Size16x8 => ScalarSize::Size16, - VectorSize::Size32x2 => ScalarSize::Size32, - VectorSize::Size32x4 => ScalarSize::Size32, - VectorSize::Size64x2 => ScalarSize::Size64, - } - } - - pub fn is_128bits(&self) -> bool { - match self { - VectorSize::Size8x8 => false, - VectorSize::Size8x16 => true, - VectorSize::Size16x4 => false, - VectorSize::Size16x8 => true, - VectorSize::Size32x2 => false, - VectorSize::Size32x4 => true, - VectorSize::Size64x2 => true, - } - } - - /// Produces a `VectorSize` with lanes twice as wide. Note that if the resulting - /// size would exceed 128 bits, then the number of lanes is also halved, so as to - /// ensure that the result size is at most 128 bits. 
- pub fn widen(&self) -> VectorSize { - match self { - VectorSize::Size8x8 => VectorSize::Size16x8, - VectorSize::Size8x16 => VectorSize::Size16x8, - VectorSize::Size16x4 => VectorSize::Size32x4, - VectorSize::Size16x8 => VectorSize::Size32x4, - VectorSize::Size32x2 => VectorSize::Size64x2, - VectorSize::Size32x4 => VectorSize::Size64x2, - VectorSize::Size64x2 => unreachable!(), - } - } - - /// Produces a `VectorSize` that has the same lane width, but half as many lanes. - pub fn halve(&self) -> VectorSize { - match self { - VectorSize::Size8x16 => VectorSize::Size8x8, - VectorSize::Size16x8 => VectorSize::Size16x4, - VectorSize::Size32x4 => VectorSize::Size32x2, - _ => *self, - } - } - - /// Return the encoding bits that are used by some SIMD instructions - /// for a particular operand size. - pub fn enc_size(&self) -> (u32, u32) { - let q = self.is_128bits() as u32; - let size = match self.lane_size() { - ScalarSize::Size8 => 0b00, - ScalarSize::Size16 => 0b01, - ScalarSize::Size32 => 0b10, - ScalarSize::Size64 => 0b11, - _ => unreachable!(), - }; - - (q, size) - } -} diff --git a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/emit.rs b/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/emit.rs deleted file mode 100644 index d8670f5a1..000000000 --- a/collector/compile-benchmarks/cranelift-codegen-0.82.1/src/isa/aarch64/inst/emit.rs +++ /dev/null @@ -1,2828 +0,0 @@ -//! AArch64 ISA: binary code emission. - -use crate::binemit::{CodeOffset, Reloc, StackMap}; -use crate::ir::constant::ConstantData; -use crate::ir::types::*; -use crate::ir::{LibCall, MemFlags, TrapCode}; -use crate::isa::aarch64::inst::*; -use crate::machinst::ty_bits; - -use regalloc::{Reg, RegClass, Writable}; - -use core::convert::TryFrom; - -/// Memory label/reference finalization: convert a MemLabel to a PC-relative -/// offset, possibly emitting relocation(s) as necessary. -pub fn memlabel_finalize(_insn_off: CodeOffset, label: &MemLabel) -> i32 { - match label { - &MemLabel::PCRel(rel) => rel, - } -} - -/// Memory addressing mode finalization: convert "special" modes (e.g., -/// generic arbitrary stack offset) into real addressing modes, possibly by -/// emitting some helper instructions that come immediately before the use -/// of this amode. -pub fn mem_finalize( - insn_off: CodeOffset, - mem: &AMode, - state: &EmitState, -) -> (SmallVec<[Inst; 4]>, AMode) { - match mem { - &AMode::RegOffset(_, off, ty) - | &AMode::SPOffset(off, ty) - | &AMode::FPOffset(off, ty) - | &AMode::NominalSPOffset(off, ty) => { - let basereg = match mem { - &AMode::RegOffset(reg, _, _) => reg, - &AMode::SPOffset(..) | &AMode::NominalSPOffset(..) => stack_reg(), - &AMode::FPOffset(..) => fp_reg(), - _ => unreachable!(), - }; - let adj = match mem { - &AMode::NominalSPOffset(..) 
=> { - log::trace!( - "mem_finalize: nominal SP offset {} + adj {} -> {}", - off, - state.virtual_sp_offset, - off + state.virtual_sp_offset - ); - state.virtual_sp_offset - } - _ => 0, - }; - let off = off + adj; - - if let Some(simm9) = SImm9::maybe_from_i64(off) { - let mem = AMode::Unscaled(basereg, simm9); - (smallvec![], mem) - } else if let Some(uimm12s) = UImm12Scaled::maybe_from_i64(off, ty) { - let mem = AMode::UnsignedOffset(basereg, uimm12s); - (smallvec![], mem) - } else { - let tmp = writable_spilltmp_reg(); - let mut const_insts = Inst::load_constant(tmp, off as u64); - // N.B.: we must use AluRRRExtend because AluRRR uses the "shifted register" form - // (AluRRRShift) instead, which interprets register 31 as the zero reg, not SP. SP - // is a valid base (for SPOffset) which we must handle here. - // Also, SP needs to be the first arg, not second. - let add_inst = Inst::AluRRRExtend { - alu_op: ALUOp::Add, - size: OperandSize::Size64, - rd: tmp, - rn: basereg, - rm: tmp.to_reg(), - extendop: ExtendOp::UXTX, - }; - const_insts.push(add_inst); - (const_insts, AMode::reg(tmp.to_reg())) - } - } - - &AMode::Label(ref label) => { - let off = memlabel_finalize(insn_off, label); - (smallvec![], AMode::Label(MemLabel::PCRel(off))) - } - - _ => (smallvec![], mem.clone()), - } -} - -/// Helper: get a ConstantData from a u64. -pub fn u64_constant(bits: u64) -> ConstantData { - let data = bits.to_le_bytes(); - ConstantData::from(&data[..]) -} - -//============================================================================= -// Instructions and subcomponents: emission - -fn machreg_to_gpr(m: Reg) -> u32 { - assert_eq!(m.get_class(), RegClass::I64); - u32::try_from(m.to_real_reg().get_hw_encoding()).unwrap() -} - -fn machreg_to_vec(m: Reg) -> u32 { - assert_eq!(m.get_class(), RegClass::V128); - u32::try_from(m.to_real_reg().get_hw_encoding()).unwrap() -} - -fn machreg_to_gpr_or_vec(m: Reg) -> u32 { - u32::try_from(m.to_real_reg().get_hw_encoding()).unwrap() -} - -pub(crate) fn enc_arith_rrr( - bits_31_21: u32, - bits_15_10: u32, - rd: Writable, - rn: Reg, - rm: Reg, -) -> u32 { - (bits_31_21 << 21) - | (bits_15_10 << 10) - | machreg_to_gpr(rd.to_reg()) - | (machreg_to_gpr(rn) << 5) - | (machreg_to_gpr(rm) << 16) -} - -fn enc_arith_rr_imm12( - bits_31_24: u32, - immshift: u32, - imm12: u32, - rn: Reg, - rd: Writable, -) -> u32 { - (bits_31_24 << 24) - | (immshift << 22) - | (imm12 << 10) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rd.to_reg()) -} - -fn enc_arith_rr_imml(bits_31_23: u32, imm_bits: u32, rn: Reg, rd: Writable) -> u32 { - (bits_31_23 << 23) | (imm_bits << 10) | (machreg_to_gpr(rn) << 5) | machreg_to_gpr(rd.to_reg()) -} - -fn enc_arith_rrrr(top11: u32, rm: Reg, bit15: u32, ra: Reg, rn: Reg, rd: Writable) -> u32 { - (top11 << 21) - | (machreg_to_gpr(rm) << 16) - | (bit15 << 15) - | (machreg_to_gpr(ra) << 10) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rd.to_reg()) -} - -fn enc_jump26(op_31_26: u32, off_26_0: u32) -> u32 { - assert!(off_26_0 < (1 << 26)); - (op_31_26 << 26) | off_26_0 -} - -fn enc_cmpbr(op_31_24: u32, off_18_0: u32, reg: Reg) -> u32 { - assert!(off_18_0 < (1 << 19)); - (op_31_24 << 24) | (off_18_0 << 5) | machreg_to_gpr(reg) -} - -fn enc_cbr(op_31_24: u32, off_18_0: u32, op_4: u32, cond: u32) -> u32 { - assert!(off_18_0 < (1 << 19)); - assert!(cond < (1 << 4)); - (op_31_24 << 24) | (off_18_0 << 5) | (op_4 << 4) | cond -} - -fn enc_conditional_br(taken: BranchTarget, kind: CondBrKind) -> u32 { - match kind { - CondBrKind::Zero(reg) => 
enc_cmpbr(0b1_011010_0, taken.as_offset19_or_zero(), reg), - CondBrKind::NotZero(reg) => enc_cmpbr(0b1_011010_1, taken.as_offset19_or_zero(), reg), - CondBrKind::Cond(c) => enc_cbr(0b01010100, taken.as_offset19_or_zero(), 0b0, c.bits()), - } -} - -const MOVE_WIDE_FIXED: u32 = 0x12800000; - -#[repr(u32)] -enum MoveWideOpcode { - MOVN = 0b00, - MOVZ = 0b10, - MOVK = 0b11, -} - -fn enc_move_wide( - op: MoveWideOpcode, - rd: Writable, - imm: MoveWideConst, - size: OperandSize, -) -> u32 { - assert!(imm.shift <= 0b11); - MOVE_WIDE_FIXED - | size.sf_bit() << 31 - | (op as u32) << 29 - | u32::from(imm.shift) << 21 - | u32::from(imm.bits) << 5 - | machreg_to_gpr(rd.to_reg()) -} - -fn enc_ldst_pair(op_31_22: u32, simm7: SImm7Scaled, rn: Reg, rt: Reg, rt2: Reg) -> u32 { - (op_31_22 << 22) - | (simm7.bits() << 15) - | (machreg_to_gpr(rt2) << 10) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rt) -} - -fn enc_ldst_simm9(op_31_22: u32, simm9: SImm9, op_11_10: u32, rn: Reg, rd: Reg) -> u32 { - (op_31_22 << 22) - | (simm9.bits() << 12) - | (op_11_10 << 10) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr_or_vec(rd) -} - -fn enc_ldst_uimm12(op_31_22: u32, uimm12: UImm12Scaled, rn: Reg, rd: Reg) -> u32 { - (op_31_22 << 22) - | (0b1 << 24) - | (uimm12.bits() << 10) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr_or_vec(rd) -} - -fn enc_ldst_reg( - op_31_22: u32, - rn: Reg, - rm: Reg, - s_bit: bool, - extendop: Option, - rd: Reg, -) -> u32 { - let s_bit = if s_bit { 1 } else { 0 }; - let extend_bits = match extendop { - Some(ExtendOp::UXTW) => 0b010, - Some(ExtendOp::SXTW) => 0b110, - Some(ExtendOp::SXTX) => 0b111, - None => 0b011, // LSL - _ => panic!("bad extend mode for ld/st AMode"), - }; - (op_31_22 << 22) - | (1 << 21) - | (machreg_to_gpr(rm) << 16) - | (extend_bits << 13) - | (s_bit << 12) - | (0b10 << 10) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr_or_vec(rd) -} - -pub(crate) fn enc_ldst_imm19(op_31_24: u32, imm19: u32, rd: Reg) -> u32 { - (op_31_24 << 24) | (imm19 << 5) | machreg_to_gpr_or_vec(rd) -} - -fn enc_ldst_vec(q: u32, size: u32, rn: Reg, rt: Writable) -> u32 { - debug_assert_eq!(q & 0b1, q); - debug_assert_eq!(size & 0b11, size); - 0b0_0_0011010_10_00000_110_0_00_00000_00000 - | q << 30 - | size << 10 - | machreg_to_gpr(rn) << 5 - | machreg_to_vec(rt.to_reg()) -} - -fn enc_ldst_vec_pair( - opc: u32, - amode: u32, - is_load: bool, - simm7: SImm7Scaled, - rn: Reg, - rt: Reg, - rt2: Reg, -) -> u32 { - debug_assert_eq!(opc & 0b11, opc); - debug_assert_eq!(amode & 0b11, amode); - - 0b00_10110_00_0_0000000_00000_00000_00000 - | opc << 30 - | amode << 23 - | (is_load as u32) << 22 - | simm7.bits() << 15 - | machreg_to_vec(rt2) << 10 - | machreg_to_gpr(rn) << 5 - | machreg_to_vec(rt) -} - -fn enc_vec_rrr(top11: u32, rm: Reg, bit15_10: u32, rn: Reg, rd: Writable) -> u32 { - (top11 << 21) - | (machreg_to_vec(rm) << 16) - | (bit15_10 << 10) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()) -} - -fn enc_vec_rrr_long( - q: u32, - u: u32, - size: u32, - bit14: u32, - rm: Reg, - rn: Reg, - rd: Writable, -) -> u32 { - debug_assert_eq!(q & 0b1, q); - debug_assert_eq!(u & 0b1, u); - debug_assert_eq!(size & 0b11, size); - debug_assert_eq!(bit14 & 0b1, bit14); - - 0b0_0_0_01110_00_1_00000_100000_00000_00000 - | q << 30 - | u << 29 - | size << 22 - | bit14 << 14 - | (machreg_to_vec(rm) << 16) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()) -} - -fn enc_bit_rr(size: u32, opcode2: u32, opcode1: u32, rn: Reg, rd: Writable) -> u32 { - (0b01011010110 << 21) - | size << 31 - | 
opcode2 << 16 - | opcode1 << 10 - | machreg_to_gpr(rn) << 5 - | machreg_to_gpr(rd.to_reg()) -} - -pub(crate) fn enc_br(rn: Reg) -> u32 { - 0b1101011_0000_11111_000000_00000_00000 | (machreg_to_gpr(rn) << 5) -} - -pub(crate) fn enc_adr(off: i32, rd: Writable) -> u32 { - let off = u32::try_from(off).unwrap(); - let immlo = off & 3; - let immhi = (off >> 2) & ((1 << 19) - 1); - (0b00010000 << 24) | (immlo << 29) | (immhi << 5) | machreg_to_gpr(rd.to_reg()) -} - -fn enc_csel(rd: Writable, rn: Reg, rm: Reg, cond: Cond) -> u32 { - 0b100_11010100_00000_0000_00_00000_00000 - | (machreg_to_gpr(rm) << 16) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rd.to_reg()) - | (cond.bits() << 12) -} - -fn enc_fcsel(rd: Writable, rn: Reg, rm: Reg, cond: Cond, size: ScalarSize) -> u32 { - 0b000_11110_00_1_00000_0000_11_00000_00000 - | (size.ftype() << 22) - | (machreg_to_vec(rm) << 16) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()) - | (cond.bits() << 12) -} - -fn enc_cset(rd: Writable, cond: Cond) -> u32 { - 0b100_11010100_11111_0000_01_11111_00000 - | machreg_to_gpr(rd.to_reg()) - | (cond.invert().bits() << 12) -} - -fn enc_csetm(rd: Writable, cond: Cond) -> u32 { - 0b110_11010100_11111_0000_00_11111_00000 - | machreg_to_gpr(rd.to_reg()) - | (cond.invert().bits() << 12) -} - -fn enc_ccmp_imm(size: OperandSize, rn: Reg, imm: UImm5, nzcv: NZCV, cond: Cond) -> u32 { - 0b0_1_1_11010010_00000_0000_10_00000_0_0000 - | size.sf_bit() << 31 - | imm.bits() << 16 - | cond.bits() << 12 - | machreg_to_gpr(rn) << 5 - | nzcv.bits() -} - -fn enc_bfm(opc: u8, size: OperandSize, rd: Writable, rn: Reg, immr: u8, imms: u8) -> u32 { - match size { - OperandSize::Size64 => { - debug_assert!(immr <= 63); - debug_assert!(imms <= 63); - } - OperandSize::Size32 => { - debug_assert!(immr <= 31); - debug_assert!(imms <= 31); - } - } - debug_assert_eq!(opc & 0b11, opc); - let n_bit = size.sf_bit(); - 0b0_00_100110_0_000000_000000_00000_00000 - | size.sf_bit() << 31 - | u32::from(opc) << 29 - | n_bit << 22 - | u32::from(immr) << 16 - | u32::from(imms) << 10 - | machreg_to_gpr(rn) << 5 - | machreg_to_gpr(rd.to_reg()) -} - -fn enc_vecmov(is_16b: bool, rd: Writable, rn: Reg) -> u32 { - 0b00001110_101_00000_00011_1_00000_00000 - | ((is_16b as u32) << 30) - | machreg_to_vec(rd.to_reg()) - | (machreg_to_vec(rn) << 16) - | (machreg_to_vec(rn) << 5) -} - -fn enc_fpurr(top22: u32, rd: Writable, rn: Reg) -> u32 { - (top22 << 10) | (machreg_to_vec(rn) << 5) | machreg_to_vec(rd.to_reg()) -} - -fn enc_fpurrr(top22: u32, rd: Writable, rn: Reg, rm: Reg) -> u32 { - (top22 << 10) - | (machreg_to_vec(rm) << 16) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()) -} - -fn enc_fpurrrr(top17: u32, rd: Writable, rn: Reg, rm: Reg, ra: Reg) -> u32 { - (top17 << 15) - | (machreg_to_vec(rm) << 16) - | (machreg_to_vec(ra) << 10) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()) -} - -fn enc_fcmp(size: ScalarSize, rn: Reg, rm: Reg) -> u32 { - 0b000_11110_00_1_00000_00_1000_00000_00000 - | (size.ftype() << 22) - | (machreg_to_vec(rm) << 16) - | (machreg_to_vec(rn) << 5) -} - -fn enc_fputoint(top16: u32, rd: Writable, rn: Reg) -> u32 { - (top16 << 16) | (machreg_to_vec(rn) << 5) | machreg_to_gpr(rd.to_reg()) -} - -fn enc_inttofpu(top16: u32, rd: Writable, rn: Reg) -> u32 { - (top16 << 16) | (machreg_to_gpr(rn) << 5) | machreg_to_vec(rd.to_reg()) -} - -fn enc_fround(top22: u32, rd: Writable, rn: Reg) -> u32 { - (top22 << 10) | (machreg_to_vec(rn) << 5) | machreg_to_vec(rd.to_reg()) -} - -fn enc_vec_rr_misc(qu: u32, size: u32, 
bits_12_16: u32, rd: Writable, rn: Reg) -> u32 { - debug_assert_eq!(qu & 0b11, qu); - debug_assert_eq!(size & 0b11, size); - debug_assert_eq!(bits_12_16 & 0b11111, bits_12_16); - let bits = 0b0_00_01110_00_10000_00000_10_00000_00000; - bits | qu << 29 - | size << 22 - | bits_12_16 << 12 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()) -} - -fn enc_vec_rr_pair(bits_12_16: u32, rd: Writable, rn: Reg) -> u32 { - debug_assert_eq!(bits_12_16 & 0b11111, bits_12_16); - - 0b010_11110_11_11000_11011_10_00000_00000 - | bits_12_16 << 12 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()) -} - -fn enc_vec_rr_pair_long(u: u32, enc_size: u32, rd: Writable, rn: Reg) -> u32 { - debug_assert_eq!(u & 0b1, u); - debug_assert_eq!(enc_size & 0b1, enc_size); - - 0b0_1_0_01110_00_10000_00_0_10_10_00000_00000 - | u << 29 - | enc_size << 22 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()) -} - -fn enc_vec_lanes(q: u32, u: u32, size: u32, opcode: u32, rd: Writable, rn: Reg) -> u32 { - debug_assert_eq!(q & 0b1, q); - debug_assert_eq!(u & 0b1, u); - debug_assert_eq!(size & 0b11, size); - debug_assert_eq!(opcode & 0b11111, opcode); - 0b0_0_0_01110_00_11000_0_0000_10_00000_00000 - | q << 30 - | u << 29 - | size << 22 - | opcode << 12 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()) -} - -fn enc_tbl(is_extension: bool, len: u32, rd: Writable, rn: Reg, rm: Reg) -> u32 { - debug_assert_eq!(len & 0b11, len); - 0b0_1_001110_000_00000_0_00_0_00_00000_00000 - | (machreg_to_vec(rm) << 16) - | len << 13 - | (is_extension as u32) << 12 - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()) -} - -fn enc_dmb_ish() -> u32 { - 0xD5033BBF -} - -fn enc_ldal(ty: Type, op: AtomicRMWOp, rs: Reg, rt: Writable, rn: Reg) -> u32 { - assert!(machreg_to_gpr(rt.to_reg()) != 31); - let sz = match ty { - I64 => 0b11, - I32 => 0b10, - I16 => 0b01, - I8 => 0b00, - _ => unreachable!(), - }; - let op = match op { - AtomicRMWOp::Add => 0b000, - AtomicRMWOp::Clr => 0b001, - AtomicRMWOp::Eor => 0b010, - AtomicRMWOp::Set => 0b011, - AtomicRMWOp::Smax => 0b100, - AtomicRMWOp::Smin => 0b101, - AtomicRMWOp::Umax => 0b110, - AtomicRMWOp::Umin => 0b111, - }; - 0b00_111_000_111_00000_0_000_00_00000_00000 - | (sz << 30) - | (machreg_to_gpr(rs) << 16) - | (op << 12) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rt.to_reg()) -} - -fn enc_ldar(ty: Type, rt: Writable, rn: Reg) -> u32 { - let sz = match ty { - I64 => 0b11, - I32 => 0b10, - I16 => 0b01, - I8 => 0b00, - _ => unreachable!(), - }; - 0b00_001000_1_1_0_11111_1_11111_00000_00000 - | (sz << 30) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rt.to_reg()) -} - -fn enc_stlr(ty: Type, rt: Reg, rn: Reg) -> u32 { - let sz = match ty { - I64 => 0b11, - I32 => 0b10, - I16 => 0b01, - I8 => 0b00, - _ => unreachable!(), - }; - 0b00_001000_100_11111_1_11111_00000_00000 - | (sz << 30) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rt) -} - -fn enc_ldaxr(ty: Type, rt: Writable, rn: Reg) -> u32 { - let sz = match ty { - I64 => 0b11, - I32 => 0b10, - I16 => 0b01, - I8 => 0b00, - _ => unreachable!(), - }; - 0b00_001000_0_1_0_11111_1_11111_00000_00000 - | (sz << 30) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rt.to_reg()) -} - -fn enc_stlxr(ty: Type, rs: Writable, rt: Reg, rn: Reg) -> u32 { - let sz = match ty { - I64 => 0b11, - I32 => 0b10, - I16 => 0b01, - I8 => 0b00, - _ => unreachable!(), - }; - 0b00_001000_000_00000_1_11111_00000_00000 - | (sz << 30) - | (machreg_to_gpr(rs.to_reg()) << 16) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rt) -} - -fn 
enc_cas(size: u32, rs: Writable, rt: Reg, rn: Reg) -> u32 { - debug_assert_eq!(size & 0b11, size); - - 0b00_0010001_1_1_00000_1_11111_00000_00000 - | size << 30 - | machreg_to_gpr(rs.to_reg()) << 16 - | machreg_to_gpr(rn) << 5 - | machreg_to_gpr(rt) -} - -fn enc_asimd_mod_imm(rd: Writable, q_op: u32, cmode: u32, imm: u8) -> u32 { - let abc = (imm >> 5) as u32; - let defgh = (imm & 0b11111) as u32; - - debug_assert_eq!(cmode & 0b1111, cmode); - debug_assert_eq!(q_op & 0b11, q_op); - - 0b0_0_0_0111100000_000_0000_01_00000_00000 - | (q_op << 29) - | (abc << 16) - | (cmode << 12) - | (defgh << 5) - | machreg_to_vec(rd.to_reg()) -} - -/// State carried between emissions of a sequence of instructions. -#[derive(Default, Clone, Debug)] -pub struct EmitState { - /// Addend to convert nominal-SP offsets to real-SP offsets at the current - /// program point. - pub(crate) virtual_sp_offset: i64, - /// Offset of FP from nominal-SP. - pub(crate) nominal_sp_to_fp: i64, - /// Safepoint stack map for upcoming instruction, as provided to `pre_safepoint()`. - stack_map: Option, - /// Current source-code location corresponding to instruction to be emitted. - cur_srcloc: SourceLoc, -} - -impl MachInstEmitState for EmitState { - fn new(abi: &dyn ABICallee) -> Self { - EmitState { - virtual_sp_offset: 0, - nominal_sp_to_fp: abi.frame_size() as i64, - stack_map: None, - cur_srcloc: SourceLoc::default(), - } - } - - fn pre_safepoint(&mut self, stack_map: StackMap) { - self.stack_map = Some(stack_map); - } - - fn pre_sourceloc(&mut self, srcloc: SourceLoc) { - self.cur_srcloc = srcloc; - } -} - -impl EmitState { - fn take_stack_map(&mut self) -> Option { - self.stack_map.take() - } - - fn clear_post_insn(&mut self) { - self.stack_map = None; - } - - fn cur_srcloc(&self) -> SourceLoc { - self.cur_srcloc - } -} - -/// Constant state used during function compilation. -pub struct EmitInfo(settings::Flags); - -impl EmitInfo { - pub(crate) fn new(flags: settings::Flags) -> Self { - Self(flags) - } -} - -impl MachInstEmit for Inst { - type State = EmitState; - type Info = EmitInfo; - - fn emit(&self, sink: &mut MachBuffer, emit_info: &Self::Info, state: &mut EmitState) { - // N.B.: we *must* not exceed the "worst-case size" used to compute - // where to insert islands, except when islands are explicitly triggered - // (with an `EmitIsland`). We check this in debug builds. This is `mut` - // to allow disabling the check for `JTSequence`, which is always - // emitted following an `EmitIsland`. 
- let mut start_off = sink.cur_offset(); - - match self { - &Inst::AluRRR { - alu_op, - size, - rd, - rn, - rm, - } => { - debug_assert!(match alu_op { - ALUOp::SDiv | ALUOp::UDiv | ALUOp::SMulH | ALUOp::UMulH => - size == OperandSize::Size64, - _ => true, - }); - let top11 = match alu_op { - ALUOp::Add => 0b00001011_000, - ALUOp::Adc => 0b00011010_000, - ALUOp::AdcS => 0b00111010_000, - ALUOp::Sub => 0b01001011_000, - ALUOp::Sbc => 0b01011010_000, - ALUOp::SbcS => 0b01111010_000, - ALUOp::Orr => 0b00101010_000, - ALUOp::And => 0b00001010_000, - ALUOp::AndS => 0b01101010_000, - ALUOp::Eor => 0b01001010_000, - ALUOp::OrrNot => 0b00101010_001, - ALUOp::AndNot => 0b00001010_001, - ALUOp::EorNot => 0b01001010_001, - ALUOp::AddS => 0b00101011_000, - ALUOp::SubS => 0b01101011_000, - ALUOp::SDiv => 0b10011010_110, - ALUOp::UDiv => 0b10011010_110, - ALUOp::RotR | ALUOp::Lsr | ALUOp::Asr | ALUOp::Lsl => 0b00011010_110, - ALUOp::SMulH => 0b10011011_010, - ALUOp::UMulH => 0b10011011_110, - }; - let top11 = top11 | size.sf_bit() << 10; - let bit15_10 = match alu_op { - ALUOp::SDiv => 0b000011, - ALUOp::UDiv => 0b000010, - ALUOp::RotR => 0b001011, - ALUOp::Lsr => 0b001001, - ALUOp::Asr => 0b001010, - ALUOp::Lsl => 0b001000, - ALUOp::SMulH | ALUOp::UMulH => 0b011111, - _ => 0b000000, - }; - debug_assert_ne!(writable_stack_reg(), rd); - // The stack pointer is the zero register in this context, so this might be an - // indication that something is wrong. - debug_assert_ne!(stack_reg(), rn); - debug_assert_ne!(stack_reg(), rm); - sink.put4(enc_arith_rrr(top11, bit15_10, rd, rn, rm)); - } - &Inst::AluRRRR { - alu_op, - rd, - rm, - rn, - ra, - } => { - let (top11, bit15) = match alu_op { - ALUOp3::MAdd32 => (0b0_00_11011_000, 0), - ALUOp3::MSub32 => (0b0_00_11011_000, 1), - ALUOp3::MAdd64 => (0b1_00_11011_000, 0), - ALUOp3::MSub64 => (0b1_00_11011_000, 1), - }; - sink.put4(enc_arith_rrrr(top11, rm, bit15, ra, rn, rd)); - } - &Inst::AluRRImm12 { - alu_op, - size, - rd, - rn, - ref imm12, - } => { - let top8 = match alu_op { - ALUOp::Add => 0b000_10001, - ALUOp::Sub => 0b010_10001, - ALUOp::AddS => 0b001_10001, - ALUOp::SubS => 0b011_10001, - _ => unimplemented!("{:?}", alu_op), - }; - let top8 = top8 | size.sf_bit() << 7; - sink.put4(enc_arith_rr_imm12( - top8, - imm12.shift_bits(), - imm12.imm_bits(), - rn, - rd, - )); - } - &Inst::AluRRImmLogic { - alu_op, - size, - rd, - rn, - ref imml, - } => { - let (top9, inv) = match alu_op { - ALUOp::Orr => (0b001_100100, false), - ALUOp::And => (0b000_100100, false), - ALUOp::AndS => (0b011_100100, false), - ALUOp::Eor => (0b010_100100, false), - ALUOp::OrrNot => (0b001_100100, true), - ALUOp::AndNot => (0b000_100100, true), - ALUOp::EorNot => (0b010_100100, true), - _ => unimplemented!("{:?}", alu_op), - }; - let top9 = top9 | size.sf_bit() << 8; - let imml = if inv { imml.invert() } else { imml.clone() }; - sink.put4(enc_arith_rr_imml(top9, imml.enc_bits(), rn, rd)); - } - - &Inst::AluRRImmShift { - alu_op, - size, - rd, - rn, - ref immshift, - } => { - let amt = immshift.value(); - let (top10, immr, imms) = match alu_op { - ALUOp::RotR => (0b0001001110, machreg_to_gpr(rn), u32::from(amt)), - ALUOp::Lsr => (0b0101001100, u32::from(amt), 0b011111), - ALUOp::Asr => (0b0001001100, u32::from(amt), 0b011111), - ALUOp::Lsl => { - let bits = if size.is64() { 64 } else { 32 }; - ( - 0b0101001100, - u32::from((bits - amt) % bits), - u32::from(bits - 1 - amt), - ) - } - _ => unimplemented!("{:?}", alu_op), - }; - let top10 = top10 | size.sf_bit() << 9 | size.sf_bit(); - let 
imms = match alu_op { - ALUOp::Lsr | ALUOp::Asr => imms | size.sf_bit() << 5, - _ => imms, - }; - sink.put4( - (top10 << 22) - | (immr << 16) - | (imms << 10) - | (machreg_to_gpr(rn) << 5) - | machreg_to_gpr(rd.to_reg()), - ); - } - - &Inst::AluRRRShift { - alu_op, - size, - rd, - rn, - rm, - ref shiftop, - } => { - let top11: u32 = match alu_op { - ALUOp::Add => 0b000_01011000, - ALUOp::AddS => 0b001_01011000, - ALUOp::Sub => 0b010_01011000, - ALUOp::SubS => 0b011_01011000, - ALUOp::Orr => 0b001_01010000, - ALUOp::And => 0b000_01010000, - ALUOp::AndS => 0b011_01010000, - ALUOp::Eor => 0b010_01010000, - ALUOp::OrrNot => 0b001_01010001, - ALUOp::EorNot => 0b010_01010001, - ALUOp::AndNot => 0b000_01010001, - _ => unimplemented!("{:?}", alu_op), - }; - let top11 = top11 | size.sf_bit() << 10; - let top11 = top11 | (u32::from(shiftop.op().bits()) << 1); - let bits_15_10 = u32::from(shiftop.amt().value()); - sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm)); - } - - &Inst::AluRRRExtend { - alu_op, - size, - rd, - rn, - rm, - extendop, - } => { - let top11: u32 = match alu_op { - ALUOp::Add => 0b00001011001, - ALUOp::Sub => 0b01001011001, - ALUOp::AddS => 0b00101011001, - ALUOp::SubS => 0b01101011001, - _ => unimplemented!("{:?}", alu_op), - }; - let top11 = top11 | size.sf_bit() << 10; - let bits_15_10 = u32::from(extendop.bits()) << 3; - sink.put4(enc_arith_rrr(top11, bits_15_10, rd, rn, rm)); - } - - &Inst::BitRR { - op, size, rd, rn, .. - } => { - let (op1, op2) = match op { - BitOp::RBit => (0b00000, 0b000000), - BitOp::Clz => (0b00000, 0b000100), - BitOp::Cls => (0b00000, 0b000101), - }; - sink.put4(enc_bit_rr(size.sf_bit(), op1, op2, rn, rd)) - } - - &Inst::ULoad8 { rd, ref mem, flags } - | &Inst::SLoad8 { rd, ref mem, flags } - | &Inst::ULoad16 { rd, ref mem, flags } - | &Inst::SLoad16 { rd, ref mem, flags } - | &Inst::ULoad32 { rd, ref mem, flags } - | &Inst::SLoad32 { rd, ref mem, flags } - | &Inst::ULoad64 { - rd, ref mem, flags, .. - } - | &Inst::FpuLoad32 { rd, ref mem, flags } - | &Inst::FpuLoad64 { rd, ref mem, flags } - | &Inst::FpuLoad128 { rd, ref mem, flags } => { - let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state); - - for inst in mem_insts.into_iter() { - inst.emit(sink, emit_info, state); - } - - // ldst encoding helpers take Reg, not Writable. - let rd = rd.to_reg(); - - // This is the base opcode (top 10 bits) for the "unscaled - // immediate" form (Unscaled). Other addressing modes will OR in - // other values for bits 24/25 (bits 1/2 of this constant). - let (op, bits) = match self { - &Inst::ULoad8 { .. } => (0b0011100001, 8), - &Inst::SLoad8 { .. } => (0b0011100010, 8), - &Inst::ULoad16 { .. } => (0b0111100001, 16), - &Inst::SLoad16 { .. } => (0b0111100010, 16), - &Inst::ULoad32 { .. } => (0b1011100001, 32), - &Inst::SLoad32 { .. } => (0b1011100010, 32), - &Inst::ULoad64 { .. } => (0b1111100001, 64), - &Inst::FpuLoad32 { .. } => (0b1011110001, 32), - &Inst::FpuLoad64 { .. } => (0b1111110001, 64), - &Inst::FpuLoad128 { .. } => (0b0011110011, 128), - _ => unreachable!(), - }; - - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() && !flags.notrap() { - // Register the offset at which the actual load instruction starts. 
- sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - - match &mem { - &AMode::Unscaled(reg, simm9) => { - sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd)); - } - &AMode::UnsignedOffset(reg, uimm12scaled) => { - if uimm12scaled.value() != 0 { - assert_eq!(bits, ty_bits(uimm12scaled.scale_ty())); - } - sink.put4(enc_ldst_uimm12(op, uimm12scaled, reg, rd)); - } - &AMode::RegReg(r1, r2) => { - sink.put4(enc_ldst_reg( - op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd, - )); - } - &AMode::RegScaled(r1, r2, ty) | &AMode::RegScaledExtended(r1, r2, ty, _) => { - assert_eq!(bits, ty_bits(ty)); - let extendop = match &mem { - &AMode::RegScaled(..) => None, - &AMode::RegScaledExtended(_, _, _, op) => Some(op), - _ => unreachable!(), - }; - sink.put4(enc_ldst_reg( - op, r1, r2, /* scaled = */ true, extendop, rd, - )); - } - &AMode::RegExtended(r1, r2, extendop) => { - sink.put4(enc_ldst_reg( - op, - r1, - r2, - /* scaled = */ false, - Some(extendop), - rd, - )); - } - &AMode::Label(ref label) => { - let offset = match label { - // cast i32 to u32 (two's-complement) - &MemLabel::PCRel(off) => off as u32, - } / 4; - assert!(offset < (1 << 19)); - match self { - &Inst::ULoad32 { .. } => { - sink.put4(enc_ldst_imm19(0b00011000, offset, rd)); - } - &Inst::SLoad32 { .. } => { - sink.put4(enc_ldst_imm19(0b10011000, offset, rd)); - } - &Inst::FpuLoad32 { .. } => { - sink.put4(enc_ldst_imm19(0b00011100, offset, rd)); - } - &Inst::ULoad64 { .. } => { - sink.put4(enc_ldst_imm19(0b01011000, offset, rd)); - } - &Inst::FpuLoad64 { .. } => { - sink.put4(enc_ldst_imm19(0b01011100, offset, rd)); - } - &Inst::FpuLoad128 { .. } => { - sink.put4(enc_ldst_imm19(0b10011100, offset, rd)); - } - _ => panic!("Unspported size for LDR from constant pool!"), - } - } - &AMode::PreIndexed(reg, simm9) => { - sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg.to_reg(), rd)); - } - &AMode::PostIndexed(reg, simm9) => { - sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg.to_reg(), rd)); - } - // Eliminated by `mem_finalize()` above. - &AMode::SPOffset(..) | &AMode::FPOffset(..) | &AMode::NominalSPOffset(..) => { - panic!("Should not see stack-offset here!") - } - &AMode::RegOffset(..) => panic!("SHould not see generic reg-offset here!"), - } - } - - &Inst::Store8 { rd, ref mem, flags } - | &Inst::Store16 { rd, ref mem, flags } - | &Inst::Store32 { rd, ref mem, flags } - | &Inst::Store64 { rd, ref mem, flags } - | &Inst::FpuStore32 { rd, ref mem, flags } - | &Inst::FpuStore64 { rd, ref mem, flags } - | &Inst::FpuStore128 { rd, ref mem, flags } => { - let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state); - - for inst in mem_insts.into_iter() { - inst.emit(sink, emit_info, state); - } - - let (op, bits) = match self { - &Inst::Store8 { .. } => (0b0011100000, 8), - &Inst::Store16 { .. } => (0b0111100000, 16), - &Inst::Store32 { .. } => (0b1011100000, 32), - &Inst::Store64 { .. } => (0b1111100000, 64), - &Inst::FpuStore32 { .. } => (0b1011110000, 32), - &Inst::FpuStore64 { .. } => (0b1111110000, 64), - &Inst::FpuStore128 { .. } => (0b0011110010, 128), - _ => unreachable!(), - }; - - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() && !flags.notrap() { - // Register the offset at which the actual store instruction starts. 
- sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - - match &mem { - &AMode::Unscaled(reg, simm9) => { - sink.put4(enc_ldst_simm9(op, simm9, 0b00, reg, rd)); - } - &AMode::UnsignedOffset(reg, uimm12scaled) => { - if uimm12scaled.value() != 0 { - assert_eq!(bits, ty_bits(uimm12scaled.scale_ty())); - } - sink.put4(enc_ldst_uimm12(op, uimm12scaled, reg, rd)); - } - &AMode::RegReg(r1, r2) => { - sink.put4(enc_ldst_reg( - op, r1, r2, /* scaled = */ false, /* extendop = */ None, rd, - )); - } - &AMode::RegScaled(r1, r2, _ty) | &AMode::RegScaledExtended(r1, r2, _ty, _) => { - let extendop = match &mem { - &AMode::RegScaled(..) => None, - &AMode::RegScaledExtended(_, _, _, op) => Some(op), - _ => unreachable!(), - }; - sink.put4(enc_ldst_reg( - op, r1, r2, /* scaled = */ true, extendop, rd, - )); - } - &AMode::RegExtended(r1, r2, extendop) => { - sink.put4(enc_ldst_reg( - op, - r1, - r2, - /* scaled = */ false, - Some(extendop), - rd, - )); - } - &AMode::Label(..) => { - panic!("Store to a MemLabel not implemented!"); - } - &AMode::PreIndexed(reg, simm9) => { - sink.put4(enc_ldst_simm9(op, simm9, 0b11, reg.to_reg(), rd)); - } - &AMode::PostIndexed(reg, simm9) => { - sink.put4(enc_ldst_simm9(op, simm9, 0b01, reg.to_reg(), rd)); - } - // Eliminated by `mem_finalize()` above. - &AMode::SPOffset(..) | &AMode::FPOffset(..) | &AMode::NominalSPOffset(..) => { - panic!("Should not see stack-offset here!") - } - &AMode::RegOffset(..) => panic!("SHould not see generic reg-offset here!"), - } - } - - &Inst::StoreP64 { - rt, - rt2, - ref mem, - flags, - } => { - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() && !flags.notrap() { - // Register the offset at which the actual store instruction starts. - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - match mem { - &PairAMode::SignedOffset(reg, simm7) => { - assert_eq!(simm7.scale_ty, I64); - sink.put4(enc_ldst_pair(0b1010100100, simm7, reg, rt, rt2)); - } - &PairAMode::PreIndexed(reg, simm7) => { - assert_eq!(simm7.scale_ty, I64); - sink.put4(enc_ldst_pair(0b1010100110, simm7, reg.to_reg(), rt, rt2)); - } - &PairAMode::PostIndexed(reg, simm7) => { - assert_eq!(simm7.scale_ty, I64); - sink.put4(enc_ldst_pair(0b1010100010, simm7, reg.to_reg(), rt, rt2)); - } - } - } - &Inst::LoadP64 { - rt, - rt2, - ref mem, - flags, - } => { - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() && !flags.notrap() { - // Register the offset at which the actual load instruction starts. - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - - let rt = rt.to_reg(); - let rt2 = rt2.to_reg(); - match mem { - &PairAMode::SignedOffset(reg, simm7) => { - assert_eq!(simm7.scale_ty, I64); - sink.put4(enc_ldst_pair(0b1010100101, simm7, reg, rt, rt2)); - } - &PairAMode::PreIndexed(reg, simm7) => { - assert_eq!(simm7.scale_ty, I64); - sink.put4(enc_ldst_pair(0b1010100111, simm7, reg.to_reg(), rt, rt2)); - } - &PairAMode::PostIndexed(reg, simm7) => { - assert_eq!(simm7.scale_ty, I64); - sink.put4(enc_ldst_pair(0b1010100011, simm7, reg.to_reg(), rt, rt2)); - } - } - } - &Inst::FpuLoadP64 { - rt, - rt2, - ref mem, - flags, - } - | &Inst::FpuLoadP128 { - rt, - rt2, - ref mem, - flags, - } => { - let srcloc = state.cur_srcloc(); - - if srcloc != SourceLoc::default() && !flags.notrap() { - // Register the offset at which the actual load instruction starts. - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - - let opc = match self { - &Inst::FpuLoadP64 { .. } => 0b01, - &Inst::FpuLoadP128 { .. 
} => 0b10, - _ => unreachable!(), - }; - let rt = rt.to_reg(); - let rt2 = rt2.to_reg(); - - match mem { - &PairAMode::SignedOffset(reg, simm7) => { - assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16); - sink.put4(enc_ldst_vec_pair(opc, 0b10, true, simm7, reg, rt, rt2)); - } - &PairAMode::PreIndexed(reg, simm7) => { - assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16); - sink.put4(enc_ldst_vec_pair( - opc, - 0b11, - true, - simm7, - reg.to_reg(), - rt, - rt2, - )); - } - &PairAMode::PostIndexed(reg, simm7) => { - assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16); - sink.put4(enc_ldst_vec_pair( - opc, - 0b01, - true, - simm7, - reg.to_reg(), - rt, - rt2, - )); - } - } - } - &Inst::FpuStoreP64 { - rt, - rt2, - ref mem, - flags, - } - | &Inst::FpuStoreP128 { - rt, - rt2, - ref mem, - flags, - } => { - let srcloc = state.cur_srcloc(); - - if srcloc != SourceLoc::default() && !flags.notrap() { - // Register the offset at which the actual store instruction starts. - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - - let opc = match self { - &Inst::FpuStoreP64 { .. } => 0b01, - &Inst::FpuStoreP128 { .. } => 0b10, - _ => unreachable!(), - }; - - match mem { - &PairAMode::SignedOffset(reg, simm7) => { - assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16); - sink.put4(enc_ldst_vec_pair(opc, 0b10, false, simm7, reg, rt, rt2)); - } - &PairAMode::PreIndexed(reg, simm7) => { - assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16); - sink.put4(enc_ldst_vec_pair( - opc, - 0b11, - false, - simm7, - reg.to_reg(), - rt, - rt2, - )); - } - &PairAMode::PostIndexed(reg, simm7) => { - assert!(simm7.scale_ty == F64 || simm7.scale_ty == I8X16); - sink.put4(enc_ldst_vec_pair( - opc, - 0b01, - false, - simm7, - reg.to_reg(), - rt, - rt2, - )); - } - } - } - &Inst::Mov64 { rd, rm } => { - assert!(rd.to_reg().get_class() == rm.get_class()); - assert!(rm.get_class() == RegClass::I64); - - // MOV to SP is interpreted as MOV to XZR instead. And our codegen - // should never MOV to XZR. - assert!(rd.to_reg() != stack_reg()); - - if rm == stack_reg() { - // We can't use ORR here, so use an `add rd, sp, #0` instead. - let imm12 = Imm12::maybe_from_u64(0).unwrap(); - sink.put4(enc_arith_rr_imm12( - 0b100_10001, - imm12.shift_bits(), - imm12.imm_bits(), - rm, - rd, - )); - } else { - // Encoded as ORR rd, rm, zero. - sink.put4(enc_arith_rrr(0b10101010_000, 0b000_000, rd, zero_reg(), rm)); - } - } - &Inst::Mov32 { rd, rm } => { - // MOV to SP is interpreted as MOV to XZR instead. And our codegen - // should never MOV to XZR. - assert!(machreg_to_gpr(rd.to_reg()) != 31); - // Encoded as ORR rd, rm, zero. 
- sink.put4(enc_arith_rrr(0b00101010_000, 0b000_000, rd, zero_reg(), rm)); - } - &Inst::MovZ { rd, imm, size } => { - sink.put4(enc_move_wide(MoveWideOpcode::MOVZ, rd, imm, size)) - } - &Inst::MovN { rd, imm, size } => { - sink.put4(enc_move_wide(MoveWideOpcode::MOVN, rd, imm, size)) - } - &Inst::MovK { rd, imm, size } => { - sink.put4(enc_move_wide(MoveWideOpcode::MOVK, rd, imm, size)) - } - &Inst::CSel { rd, rn, rm, cond } => { - sink.put4(enc_csel(rd, rn, rm, cond)); - } - &Inst::CSet { rd, cond } => { - sink.put4(enc_cset(rd, cond)); - } - &Inst::CSetm { rd, cond } => { - sink.put4(enc_csetm(rd, cond)); - } - &Inst::CCmpImm { - size, - rn, - imm, - nzcv, - cond, - } => { - sink.put4(enc_ccmp_imm(size, rn, imm, nzcv, cond)); - } - &Inst::AtomicRMW { ty, op, rs, rt, rn } => { - sink.put4(enc_ldal(ty, op, rs, rt, rn)); - } - &Inst::AtomicRMWLoop { ty, op } => { - /* Emit this: - again: - ldaxr{,b,h} x/w27, [x25] - op x28, x27, x26 // op is add,sub,and,orr,eor - stlxr{,b,h} w24, x/w28, [x25] - cbnz x24, again - - Operand conventions: - IN: x25 (addr), x26 (2nd arg for op) - OUT: x27 (old value), x24 (trashed), x28 (trashed) - - It is unfortunate that, per the ARM documentation, x28 cannot be used for - both the store-data and success-flag operands of stlxr. This causes the - instruction's behaviour to be "CONSTRAINED UNPREDICTABLE", so we use x24 - instead for the success-flag. - */ - // TODO: We should not hardcode registers here, a better idea would be to - // pass some scratch registers in the AtomicRMWLoop pseudo-instruction, and use those - let xzr = zero_reg(); - let x24 = xreg(24); - let x25 = xreg(25); - let x26 = xreg(26); - let x27 = xreg(27); - let x28 = xreg(28); - let x24wr = writable_xreg(24); - let x27wr = writable_xreg(27); - let x28wr = writable_xreg(28); - let again_label = sink.get_label(); - - // again: - sink.bind_label(again_label); - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() { - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - sink.put4(enc_ldaxr(ty, x27wr, x25)); // ldaxr x27, [x25] - let size = OperandSize::from_ty(ty); - - match op { - AtomicRmwOp::Xchg => {} // do nothing - AtomicRmwOp::Nand => { - // and x28, x27, x26 - // mvn x28, x28 - - Inst::AluRRR { - alu_op: ALUOp::And, - size, - rd: x28wr, - rn: x27, - rm: x26, - } - .emit(sink, emit_info, state); - - Inst::AluRRR { - alu_op: ALUOp::OrrNot, - size, - rd: x28wr, - rn: xzr, - rm: x28, - } - .emit(sink, emit_info, state); - } - AtomicRmwOp::Umin - | AtomicRmwOp::Umax - | AtomicRmwOp::Smin - | AtomicRmwOp::Smax => { - // cmp x27, x26 - // csel.op x28, x27, x26 - - let cond = match op { - AtomicRmwOp::Umin => Cond::Lo, - AtomicRmwOp::Umax => Cond::Hi, - AtomicRmwOp::Smin => Cond::Lt, - AtomicRmwOp::Smax => Cond::Gt, - _ => unreachable!(), - }; - - Inst::AluRRR { - alu_op: ALUOp::SubS, - size, - rd: writable_zero_reg(), - rn: x27, - rm: x26, - } - .emit(sink, emit_info, state); - - Inst::CSel { - cond, - rd: x28wr, - rn: x27, - rm: x26, - } - .emit(sink, emit_info, state); - } - _ => { - // add/sub/and/orr/eor x28, x27, x26 - let alu_op = match op { - AtomicRmwOp::Add => ALUOp::Add, - AtomicRmwOp::Sub => ALUOp::Sub, - AtomicRmwOp::And => ALUOp::And, - AtomicRmwOp::Or => ALUOp::Orr, - AtomicRmwOp::Xor => ALUOp::Eor, - AtomicRmwOp::Nand - | AtomicRmwOp::Umin - | AtomicRmwOp::Umax - | AtomicRmwOp::Smin - | AtomicRmwOp::Smax - | AtomicRmwOp::Xchg => unreachable!(), - }; - - Inst::AluRRR { - alu_op, - size, - rd: x28wr, - rn: x27, - rm: x26, - } - .emit(sink, emit_info, state); - } 
- } - - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() { - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - if op == AtomicRmwOp::Xchg { - sink.put4(enc_stlxr(ty, x24wr, x26, x25)); // stlxr w24, x26, [x25] - } else { - sink.put4(enc_stlxr(ty, x24wr, x28, x25)); // stlxr w24, x28, [x25] - } - - // cbnz w24, again - // Note, we're actually testing x24, and relying on the default zero-high-half - // rule in the assignment that `stlxr` does. - let br_offset = sink.cur_offset(); - sink.put4(enc_conditional_br( - BranchTarget::Label(again_label), - CondBrKind::NotZero(x24), - )); - sink.use_label_at_offset(br_offset, again_label, LabelUse::Branch19); - } - &Inst::AtomicCAS { rs, rt, rn, ty } => { - let size = match ty { - I8 => 0b00, - I16 => 0b01, - I32 => 0b10, - I64 => 0b11, - _ => panic!("Unsupported type: {}", ty), - }; - - sink.put4(enc_cas(size, rs, rt, rn)); - } - &Inst::AtomicCASLoop { ty } => { - /* Emit this: - again: - ldaxr{,b,h} x/w27, [x25] - cmp x27, x/w26 uxt{b,h} - b.ne out - stlxr{,b,h} w24, x/w28, [x25] - cbnz x24, again - out: - - Operand conventions: - IN: x25 (addr), x26 (expected value), x28 (replacement value) - OUT: x27 (old value), x24 (trashed) - */ - let x24 = xreg(24); - let x25 = xreg(25); - let x26 = xreg(26); - let x27 = xreg(27); - let x28 = xreg(28); - let xzrwr = writable_zero_reg(); - let x24wr = writable_xreg(24); - let x27wr = writable_xreg(27); - let again_label = sink.get_label(); - let out_label = sink.get_label(); - - // again: - sink.bind_label(again_label); - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() { - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - // ldaxr x27, [x25] - sink.put4(enc_ldaxr(ty, x27wr, x25)); - - // The top 32-bits are zero-extended by the ldaxr so we don't - // have to use UXTW, just the x-form of the register. - let (bit21, extend_op) = match ty { - I8 => (0b1, 0b000000), - I16 => (0b1, 0b001000), - _ => (0b0, 0b000000), - }; - let bits_31_21 = 0b111_01011_000 | bit21; - // cmp x27, x26 (== subs xzr, x27, x26) - sink.put4(enc_arith_rrr(bits_31_21, extend_op, xzrwr, x27, x26)); - - // b.ne out - let br_out_offset = sink.cur_offset(); - sink.put4(enc_conditional_br( - BranchTarget::Label(out_label), - CondBrKind::Cond(Cond::Ne), - )); - sink.use_label_at_offset(br_out_offset, out_label, LabelUse::Branch19); - - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() { - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - sink.put4(enc_stlxr(ty, x24wr, x28, x25)); // stlxr w24, x28, [x25] - - // cbnz w24, again. - // Note, we're actually testing x24, and relying on the default zero-high-half - // rule in the assignment that `stlxr` does. 
- let br_again_offset = sink.cur_offset(); - sink.put4(enc_conditional_br( - BranchTarget::Label(again_label), - CondBrKind::NotZero(x24), - )); - sink.use_label_at_offset(br_again_offset, again_label, LabelUse::Branch19); - - // out: - sink.bind_label(out_label); - } - &Inst::LoadAcquire { access_ty, rt, rn } => { - sink.put4(enc_ldar(access_ty, rt, rn)); - } - &Inst::StoreRelease { access_ty, rt, rn } => { - sink.put4(enc_stlr(access_ty, rt, rn)); - } - &Inst::Fence {} => { - sink.put4(enc_dmb_ish()); // dmb ish - } - &Inst::FpuMove64 { rd, rn } => { - sink.put4(enc_fpurr(0b000_11110_01_1_000000_10000, rd, rn)); - } - &Inst::FpuMove128 { rd, rn } => { - sink.put4(enc_vecmov(/* 16b = */ true, rd, rn)); - } - &Inst::FpuMoveFromVec { rd, rn, idx, size } => { - let (imm5, shift, mask) = match size.lane_size() { - ScalarSize::Size32 => (0b00100, 3, 0b011), - ScalarSize::Size64 => (0b01000, 4, 0b001), - _ => unimplemented!(), - }; - debug_assert_eq!(idx & mask, idx); - let imm5 = imm5 | ((idx as u32) << shift); - sink.put4( - 0b010_11110000_00000_000001_00000_00000 - | (imm5 << 16) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()), - ); - } - &Inst::FpuExtend { rd, rn, size } => { - sink.put4(enc_fpurr( - 0b000_11110_00_1_000000_10000 | (size.ftype() << 13), - rd, - rn, - )); - } - &Inst::FpuRR { fpu_op, rd, rn } => { - let top22 = match fpu_op { - FPUOp1::Abs32 => 0b000_11110_00_1_000001_10000, - FPUOp1::Abs64 => 0b000_11110_01_1_000001_10000, - FPUOp1::Neg32 => 0b000_11110_00_1_000010_10000, - FPUOp1::Neg64 => 0b000_11110_01_1_000010_10000, - FPUOp1::Sqrt32 => 0b000_11110_00_1_000011_10000, - FPUOp1::Sqrt64 => 0b000_11110_01_1_000011_10000, - FPUOp1::Cvt32To64 => 0b000_11110_00_1_000101_10000, - FPUOp1::Cvt64To32 => 0b000_11110_01_1_000100_10000, - }; - sink.put4(enc_fpurr(top22, rd, rn)); - } - &Inst::FpuRRR { fpu_op, rd, rn, rm } => { - let top22 = match fpu_op { - FPUOp2::Add32 => 0b000_11110_00_1_00000_001010, - FPUOp2::Add64 => 0b000_11110_01_1_00000_001010, - FPUOp2::Sub32 => 0b000_11110_00_1_00000_001110, - FPUOp2::Sub64 => 0b000_11110_01_1_00000_001110, - FPUOp2::Mul32 => 0b000_11110_00_1_00000_000010, - FPUOp2::Mul64 => 0b000_11110_01_1_00000_000010, - FPUOp2::Div32 => 0b000_11110_00_1_00000_000110, - FPUOp2::Div64 => 0b000_11110_01_1_00000_000110, - FPUOp2::Max32 => 0b000_11110_00_1_00000_010010, - FPUOp2::Max64 => 0b000_11110_01_1_00000_010010, - FPUOp2::Min32 => 0b000_11110_00_1_00000_010110, - FPUOp2::Min64 => 0b000_11110_01_1_00000_010110, - FPUOp2::Sqadd64 => 0b010_11110_11_1_00000_000011, - FPUOp2::Uqadd64 => 0b011_11110_11_1_00000_000011, - FPUOp2::Sqsub64 => 0b010_11110_11_1_00000_001011, - FPUOp2::Uqsub64 => 0b011_11110_11_1_00000_001011, - }; - sink.put4(enc_fpurrr(top22, rd, rn, rm)); - } - &Inst::FpuRRI { fpu_op, rd, rn } => match fpu_op { - FPUOpRI::UShr32(imm) => { - debug_assert_eq!(32, imm.lane_size_in_bits); - sink.put4( - 0b0_0_1_011110_0000000_00_0_0_0_1_00000_00000 - | imm.enc() << 16 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()), - ) - } - FPUOpRI::UShr64(imm) => { - debug_assert_eq!(64, imm.lane_size_in_bits); - sink.put4( - 0b01_1_111110_0000000_00_0_0_0_1_00000_00000 - | imm.enc() << 16 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()), - ) - } - FPUOpRI::Sli64(imm) => { - debug_assert_eq!(64, imm.lane_size_in_bits); - sink.put4( - 0b01_1_111110_0000000_010101_00000_00000 - | imm.enc() << 16 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()), - ) - } - FPUOpRI::Sli32(imm) => { - debug_assert_eq!(32, 
imm.lane_size_in_bits); - sink.put4( - 0b0_0_1_011110_0000000_010101_00000_00000 - | imm.enc() << 16 - | machreg_to_vec(rn) << 5 - | machreg_to_vec(rd.to_reg()), - ) - } - }, - &Inst::FpuRRRR { - fpu_op, - rd, - rn, - rm, - ra, - } => { - let top17 = match fpu_op { - FPUOp3::MAdd32 => 0b000_11111_00_0_00000_0, - FPUOp3::MAdd64 => 0b000_11111_01_0_00000_0, - }; - sink.put4(enc_fpurrrr(top17, rd, rn, rm, ra)); - } - &Inst::VecMisc { op, rd, rn, size } => { - let (q, enc_size) = size.enc_size(); - let (u, bits_12_16, size) = match op { - VecMisc2::Not => (0b1, 0b00101, 0b00), - VecMisc2::Neg => (0b1, 0b01011, enc_size), - VecMisc2::Abs => (0b0, 0b01011, enc_size), - VecMisc2::Fabs => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b0, 0b01111, enc_size) - } - VecMisc2::Fneg => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b1, 0b01111, enc_size) - } - VecMisc2::Fsqrt => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b1, 0b11111, enc_size) - } - VecMisc2::Rev64 => { - debug_assert_ne!(VectorSize::Size64x2, size); - (0b0, 0b00000, enc_size) - } - VecMisc2::Fcvtzs => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b0, 0b11011, enc_size) - } - VecMisc2::Fcvtzu => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b1, 0b11011, enc_size) - } - VecMisc2::Scvtf => { - debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2); - (0b0, 0b11101, enc_size & 0b1) - } - VecMisc2::Ucvtf => { - debug_assert!(size == VectorSize::Size32x4 || size == VectorSize::Size64x2); - (0b1, 0b11101, enc_size & 0b1) - } - VecMisc2::Frintn => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b0, 0b11000, enc_size & 0b01) - } - VecMisc2::Frintz => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b0, 0b11001, enc_size) - } - VecMisc2::Frintm => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b0, 0b11001, enc_size & 0b01) - } - VecMisc2::Frintp => { - debug_assert!( - size == VectorSize::Size32x2 - || size == VectorSize::Size32x4 - || size == VectorSize::Size64x2 - ); - (0b0, 0b11000, enc_size) - } - VecMisc2::Cnt => { - debug_assert!(size == VectorSize::Size8x8 || size == VectorSize::Size8x16); - (0b0, 0b00101, enc_size) - } - VecMisc2::Cmeq0 => (0b0, 0b01001, enc_size), - }; - sink.put4(enc_vec_rr_misc((q << 1) | u, size, bits_12_16, rd, rn)); - } - &Inst::VecLanes { op, rd, rn, size } => { - let (q, size) = match size { - VectorSize::Size8x8 => (0b0, 0b00), - VectorSize::Size8x16 => (0b1, 0b00), - VectorSize::Size16x4 => (0b0, 0b01), - VectorSize::Size16x8 => (0b1, 0b01), - VectorSize::Size32x4 => (0b1, 0b10), - _ => unreachable!(), - }; - let (u, opcode) = match op { - VecLanesOp::Uminv => (0b1, 0b11010), - VecLanesOp::Addv => (0b0, 0b11011), - }; - sink.put4(enc_vec_lanes(q, u, size, opcode, rd, rn)); - } - &Inst::VecShiftImm { - op, - rd, - rn, - size, - imm, - } => { - let (is_shr, template) = match op { - VecShiftImmOp::Ushr => (true, 0b_011_011110_0000_000_000001_00000_00000_u32), - 
VecShiftImmOp::Sshr => (true, 0b_010_011110_0000_000_000001_00000_00000_u32), - VecShiftImmOp::Shl => (false, 0b_010_011110_0000_000_010101_00000_00000_u32), - }; - let imm = imm as u32; - // Deal with the somewhat strange encoding scheme for, and limits on, - // the shift amount. - let immh_immb = match (size, is_shr) { - (VectorSize::Size64x2, true) if imm >= 1 && imm <= 64 => { - 0b_1000_000_u32 | (64 - imm) - } - (VectorSize::Size32x4, true) if imm >= 1 && imm <= 32 => { - 0b_0100_000_u32 | (32 - imm) - } - (VectorSize::Size16x8, true) if imm >= 1 && imm <= 16 => { - 0b_0010_000_u32 | (16 - imm) - } - (VectorSize::Size8x16, true) if imm >= 1 && imm <= 8 => { - 0b_0001_000_u32 | (8 - imm) - } - (VectorSize::Size64x2, false) if imm <= 63 => 0b_1000_000_u32 | imm, - (VectorSize::Size32x4, false) if imm <= 31 => 0b_0100_000_u32 | imm, - (VectorSize::Size16x8, false) if imm <= 15 => 0b_0010_000_u32 | imm, - (VectorSize::Size8x16, false) if imm <= 7 => 0b_0001_000_u32 | imm, - _ => panic!( - "aarch64: Inst::VecShiftImm: emit: invalid op/size/imm {:?}, {:?}, {:?}", - op, size, imm - ), - }; - let rn_enc = machreg_to_vec(rn); - let rd_enc = machreg_to_vec(rd.to_reg()); - sink.put4(template | (immh_immb << 16) | (rn_enc << 5) | rd_enc); - } - &Inst::VecExtract { rd, rn, rm, imm4 } => { - if imm4 < 16 { - let template = 0b_01_101110_000_00000_0_0000_0_00000_00000_u32; - let rm_enc = machreg_to_vec(rm); - let rn_enc = machreg_to_vec(rn); - let rd_enc = machreg_to_vec(rd.to_reg()); - sink.put4( - template | (rm_enc << 16) | ((imm4 as u32) << 11) | (rn_enc << 5) | rd_enc, - ); - } else { - panic!( - "aarch64: Inst::VecExtract: emit: invalid extract index {}", - imm4 - ); - } - } - &Inst::VecTbl { - rd, - rn, - rm, - is_extension, - } => { - sink.put4(enc_tbl(is_extension, 0b00, rd, rn, rm)); - } - &Inst::VecTbl2 { - rd, - rn, - rn2, - rm, - is_extension, - } => { - assert_eq!(machreg_to_vec(rn2), (machreg_to_vec(rn) + 1) % 32); - sink.put4(enc_tbl(is_extension, 0b01, rd, rn, rm)); - } - &Inst::FpuCmp32 { rn, rm } => { - sink.put4(enc_fcmp(ScalarSize::Size32, rn, rm)); - } - &Inst::FpuCmp64 { rn, rm } => { - sink.put4(enc_fcmp(ScalarSize::Size64, rn, rm)); - } - &Inst::FpuToInt { op, rd, rn } => { - let top16 = match op { - // FCVTZS (32/32-bit) - FpuToIntOp::F32ToI32 => 0b000_11110_00_1_11_000, - // FCVTZU (32/32-bit) - FpuToIntOp::F32ToU32 => 0b000_11110_00_1_11_001, - // FCVTZS (32/64-bit) - FpuToIntOp::F32ToI64 => 0b100_11110_00_1_11_000, - // FCVTZU (32/64-bit) - FpuToIntOp::F32ToU64 => 0b100_11110_00_1_11_001, - // FCVTZS (64/32-bit) - FpuToIntOp::F64ToI32 => 0b000_11110_01_1_11_000, - // FCVTZU (64/32-bit) - FpuToIntOp::F64ToU32 => 0b000_11110_01_1_11_001, - // FCVTZS (64/64-bit) - FpuToIntOp::F64ToI64 => 0b100_11110_01_1_11_000, - // FCVTZU (64/64-bit) - FpuToIntOp::F64ToU64 => 0b100_11110_01_1_11_001, - }; - sink.put4(enc_fputoint(top16, rd, rn)); - } - &Inst::IntToFpu { op, rd, rn } => { - let top16 = match op { - // SCVTF (32/32-bit) - IntToFpuOp::I32ToF32 => 0b000_11110_00_1_00_010, - // UCVTF (32/32-bit) - IntToFpuOp::U32ToF32 => 0b000_11110_00_1_00_011, - // SCVTF (64/32-bit) - IntToFpuOp::I64ToF32 => 0b100_11110_00_1_00_010, - // UCVTF (64/32-bit) - IntToFpuOp::U64ToF32 => 0b100_11110_00_1_00_011, - // SCVTF (32/64-bit) - IntToFpuOp::I32ToF64 => 0b000_11110_01_1_00_010, - // UCVTF (32/64-bit) - IntToFpuOp::U32ToF64 => 0b000_11110_01_1_00_011, - // SCVTF (64/64-bit) - IntToFpuOp::I64ToF64 => 0b100_11110_01_1_00_010, - // UCVTF (64/64-bit) - IntToFpuOp::U64ToF64 => 
0b100_11110_01_1_00_011, - }; - sink.put4(enc_inttofpu(top16, rd, rn)); - } - &Inst::LoadFpuConst64 { rd, const_data } => { - let inst = Inst::FpuLoad64 { - rd, - mem: AMode::Label(MemLabel::PCRel(8)), - flags: MemFlags::trusted(), - }; - inst.emit(sink, emit_info, state); - let inst = Inst::Jump { - dest: BranchTarget::ResolvedOffset(12), - }; - inst.emit(sink, emit_info, state); - sink.put8(const_data); - } - &Inst::LoadFpuConst128 { rd, const_data } => { - let inst = Inst::FpuLoad128 { - rd, - mem: AMode::Label(MemLabel::PCRel(8)), - flags: MemFlags::trusted(), - }; - inst.emit(sink, emit_info, state); - let inst = Inst::Jump { - dest: BranchTarget::ResolvedOffset(20), - }; - inst.emit(sink, emit_info, state); - - for i in const_data.to_le_bytes().iter() { - sink.put1(*i); - } - } - &Inst::FpuCSel32 { rd, rn, rm, cond } => { - sink.put4(enc_fcsel(rd, rn, rm, cond, ScalarSize::Size32)); - } - &Inst::FpuCSel64 { rd, rn, rm, cond } => { - sink.put4(enc_fcsel(rd, rn, rm, cond, ScalarSize::Size64)); - } - &Inst::FpuRound { op, rd, rn } => { - let top22 = match op { - FpuRoundMode::Minus32 => 0b000_11110_00_1_001_010_10000, - FpuRoundMode::Minus64 => 0b000_11110_01_1_001_010_10000, - FpuRoundMode::Plus32 => 0b000_11110_00_1_001_001_10000, - FpuRoundMode::Plus64 => 0b000_11110_01_1_001_001_10000, - FpuRoundMode::Zero32 => 0b000_11110_00_1_001_011_10000, - FpuRoundMode::Zero64 => 0b000_11110_01_1_001_011_10000, - FpuRoundMode::Nearest32 => 0b000_11110_00_1_001_000_10000, - FpuRoundMode::Nearest64 => 0b000_11110_01_1_001_000_10000, - }; - sink.put4(enc_fround(top22, rd, rn)); - } - &Inst::MovToFpu { rd, rn, size } => { - let template = match size { - ScalarSize::Size32 => 0b000_11110_00_1_00_111_000000_00000_00000, - ScalarSize::Size64 => 0b100_11110_01_1_00_111_000000_00000_00000, - _ => unreachable!(), - }; - sink.put4(template | (machreg_to_gpr(rn) << 5) | machreg_to_vec(rd.to_reg())); - } - &Inst::FpuMoveFPImm { rd, imm, size } => { - let size_code = match size { - ScalarSize::Size32 => 0b00, - ScalarSize::Size64 => 0b01, - _ => unimplemented!(), - }; - sink.put4( - 0b000_11110_00_1_00_000_000100_00000_00000 - | size_code << 22 - | ((imm.enc_bits() as u32) << 13) - | machreg_to_vec(rd.to_reg()), - ); - } - &Inst::MovToVec { rd, rn, idx, size } => { - let (imm5, shift) = match size.lane_size() { - ScalarSize::Size8 => (0b00001, 1), - ScalarSize::Size16 => (0b00010, 2), - ScalarSize::Size32 => (0b00100, 3), - ScalarSize::Size64 => (0b01000, 4), - _ => unreachable!(), - }; - debug_assert_eq!(idx & (0b11111 >> shift), idx); - let imm5 = imm5 | ((idx as u32) << shift); - sink.put4( - 0b010_01110000_00000_0_0011_1_00000_00000 - | (imm5 << 16) - | (machreg_to_gpr(rn) << 5) - | machreg_to_vec(rd.to_reg()), - ); - } - &Inst::MovFromVec { rd, rn, idx, size } => { - let (q, imm5, shift, mask) = match size { - VectorSize::Size8x16 => (0b0, 0b00001, 1, 0b1111), - VectorSize::Size16x8 => (0b0, 0b00010, 2, 0b0111), - VectorSize::Size32x4 => (0b0, 0b00100, 3, 0b0011), - VectorSize::Size64x2 => (0b1, 0b01000, 4, 0b0001), - _ => unreachable!(), - }; - debug_assert_eq!(idx & mask, idx); - let imm5 = imm5 | ((idx as u32) << shift); - sink.put4( - 0b000_01110000_00000_0_0111_1_00000_00000 - | (q << 30) - | (imm5 << 16) - | (machreg_to_vec(rn) << 5) - | machreg_to_gpr(rd.to_reg()), - ); - } - &Inst::MovFromVecSigned { - rd, - rn, - idx, - size, - scalar_size, - } => { - let (imm5, shift, half) = match size { - VectorSize::Size8x8 => (0b00001, 1, true), - VectorSize::Size8x16 => (0b00001, 1, false), - 
VectorSize::Size16x4 => (0b00010, 2, true), - VectorSize::Size16x8 => (0b00010, 2, false), - VectorSize::Size32x2 => { - debug_assert_ne!(scalar_size, OperandSize::Size32); - (0b00100, 3, true) - } - VectorSize::Size32x4 => { - debug_assert_ne!(scalar_size, OperandSize::Size32); - (0b00100, 3, false) - } - _ => panic!("Unexpected vector operand size"), - }; - debug_assert_eq!(idx & (0b11111 >> (half as u32 + shift)), idx); - let imm5 = imm5 | ((idx as u32) << shift); - sink.put4( - 0b000_01110000_00000_0_0101_1_00000_00000 - | (scalar_size.is64() as u32) << 30 - | (imm5 << 16) - | (machreg_to_vec(rn) << 5) - | machreg_to_gpr(rd.to_reg()), - ); - } - &Inst::VecDup { rd, rn, size } => { - let imm5 = match size { - VectorSize::Size8x16 => 0b00001, - VectorSize::Size16x8 => 0b00010, - VectorSize::Size32x4 => 0b00100, - VectorSize::Size64x2 => 0b01000, - _ => unimplemented!(), - }; - sink.put4( - 0b010_01110000_00000_000011_00000_00000 - | (imm5 << 16) - | (machreg_to_gpr(rn) << 5) - | machreg_to_vec(rd.to_reg()), - ); - } - &Inst::VecDupFromFpu { rd, rn, size } => { - let imm5 = match size { - VectorSize::Size32x4 => 0b00100, - VectorSize::Size64x2 => 0b01000, - _ => unimplemented!(), - }; - sink.put4( - 0b010_01110000_00000_000001_00000_00000 - | (imm5 << 16) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()), - ); - } - &Inst::VecDupFPImm { rd, imm, size } => { - let imm = imm.enc_bits(); - let op = match size.lane_size() { - ScalarSize::Size32 => 0, - ScalarSize::Size64 => 1, - _ => unimplemented!(), - }; - let q_op = op | ((size.is_128bits() as u32) << 1); - - sink.put4(enc_asimd_mod_imm(rd, q_op, 0b1111, imm)); - } - &Inst::VecDupImm { - rd, - imm, - invert, - size, - } => { - let (imm, shift, shift_ones) = imm.value(); - let (op, cmode) = match size.lane_size() { - ScalarSize::Size8 => { - assert!(!invert); - assert_eq!(shift, 0); - - (0, 0b1110) - } - ScalarSize::Size16 => { - let s = shift & 8; - - assert!(!shift_ones); - assert_eq!(s, shift); - - (invert as u32, 0b1000 | (s >> 2)) - } - ScalarSize::Size32 => { - if shift_ones { - assert!(shift == 8 || shift == 16); - - (invert as u32, 0b1100 | (shift >> 4)) - } else { - let s = shift & 24; - - assert_eq!(s, shift); - - (invert as u32, 0b0000 | (s >> 2)) - } - } - ScalarSize::Size64 => { - assert!(!invert); - assert_eq!(shift, 0); - - (1, 0b1110) - } - _ => unreachable!(), - }; - let q_op = op | ((size.is_128bits() as u32) << 1); - - sink.put4(enc_asimd_mod_imm(rd, q_op, cmode, imm)); - } - &Inst::VecExtend { - t, - rd, - rn, - high_half, - } => { - let (u, immh) = match t { - VecExtendOp::Sxtl8 => (0b0, 0b001), - VecExtendOp::Sxtl16 => (0b0, 0b010), - VecExtendOp::Sxtl32 => (0b0, 0b100), - VecExtendOp::Uxtl8 => (0b1, 0b001), - VecExtendOp::Uxtl16 => (0b1, 0b010), - VecExtendOp::Uxtl32 => (0b1, 0b100), - }; - sink.put4( - 0b000_011110_0000_000_101001_00000_00000 - | ((high_half as u32) << 30) - | (u << 29) - | (immh << 19) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()), - ); - } - &Inst::VecRRLong { - op, - rd, - rn, - high_half, - } => { - let (u, size, bits_12_16) = match op { - VecRRLongOp::Fcvtl16 => (0b0, 0b00, 0b10111), - VecRRLongOp::Fcvtl32 => (0b0, 0b01, 0b10111), - VecRRLongOp::Shll8 => (0b1, 0b00, 0b10011), - VecRRLongOp::Shll16 => (0b1, 0b01, 0b10011), - VecRRLongOp::Shll32 => (0b1, 0b10, 0b10011), - }; - - sink.put4(enc_vec_rr_misc( - ((high_half as u32) << 1) | u, - size, - bits_12_16, - rd, - rn, - )); - } - &Inst::VecRRNarrow { - op, - rd, - rn, - high_half, - } => { - let (u, size, bits_12_16) 
= match op { - VecRRNarrowOp::Xtn16 => (0b0, 0b00, 0b10010), - VecRRNarrowOp::Xtn32 => (0b0, 0b01, 0b10010), - VecRRNarrowOp::Xtn64 => (0b0, 0b10, 0b10010), - VecRRNarrowOp::Sqxtn16 => (0b0, 0b00, 0b10100), - VecRRNarrowOp::Sqxtn32 => (0b0, 0b01, 0b10100), - VecRRNarrowOp::Sqxtn64 => (0b0, 0b10, 0b10100), - VecRRNarrowOp::Sqxtun16 => (0b1, 0b00, 0b10010), - VecRRNarrowOp::Sqxtun32 => (0b1, 0b01, 0b10010), - VecRRNarrowOp::Sqxtun64 => (0b1, 0b10, 0b10010), - VecRRNarrowOp::Uqxtn16 => (0b1, 0b00, 0b10100), - VecRRNarrowOp::Uqxtn32 => (0b1, 0b01, 0b10100), - VecRRNarrowOp::Uqxtn64 => (0b1, 0b10, 0b10100), - VecRRNarrowOp::Fcvtn32 => (0b0, 0b00, 0b10110), - VecRRNarrowOp::Fcvtn64 => (0b0, 0b01, 0b10110), - }; - - sink.put4(enc_vec_rr_misc( - ((high_half as u32) << 1) | u, - size, - bits_12_16, - rd, - rn, - )); - } - &Inst::VecMovElement { - rd, - rn, - dest_idx, - src_idx, - size, - } => { - let (imm5, shift) = match size.lane_size() { - ScalarSize::Size8 => (0b00001, 1), - ScalarSize::Size16 => (0b00010, 2), - ScalarSize::Size32 => (0b00100, 3), - ScalarSize::Size64 => (0b01000, 4), - _ => unreachable!(), - }; - let mask = 0b11111 >> shift; - debug_assert_eq!(dest_idx & mask, dest_idx); - debug_assert_eq!(src_idx & mask, src_idx); - let imm4 = (src_idx as u32) << (shift - 1); - let imm5 = imm5 | ((dest_idx as u32) << shift); - sink.put4( - 0b011_01110000_00000_0_0000_1_00000_00000 - | (imm5 << 16) - | (imm4 << 11) - | (machreg_to_vec(rn) << 5) - | machreg_to_vec(rd.to_reg()), - ); - } - &Inst::VecRRPair { op, rd, rn } => { - let bits_12_16 = match op { - VecPairOp::Addp => 0b11011, - }; - - sink.put4(enc_vec_rr_pair(bits_12_16, rd, rn)); - } - &Inst::VecRRRLong { - rd, - rn, - rm, - alu_op, - high_half, - } => { - let (u, size, bit14) = match alu_op { - VecRRRLongOp::Smull8 => (0b0, 0b00, 0b1), - VecRRRLongOp::Smull16 => (0b0, 0b01, 0b1), - VecRRRLongOp::Smull32 => (0b0, 0b10, 0b1), - VecRRRLongOp::Umull8 => (0b1, 0b00, 0b1), - VecRRRLongOp::Umull16 => (0b1, 0b01, 0b1), - VecRRRLongOp::Umull32 => (0b1, 0b10, 0b1), - VecRRRLongOp::Umlal8 => (0b1, 0b00, 0b0), - VecRRRLongOp::Umlal16 => (0b1, 0b01, 0b0), - VecRRRLongOp::Umlal32 => (0b1, 0b10, 0b0), - }; - sink.put4(enc_vec_rrr_long( - high_half as u32, - u, - size, - bit14, - rm, - rn, - rd, - )); - } - &Inst::VecRRPairLong { op, rd, rn } => { - let (u, size) = match op { - VecRRPairLongOp::Saddlp8 => (0b0, 0b0), - VecRRPairLongOp::Uaddlp8 => (0b1, 0b0), - VecRRPairLongOp::Saddlp16 => (0b0, 0b1), - VecRRPairLongOp::Uaddlp16 => (0b1, 0b1), - }; - - sink.put4(enc_vec_rr_pair_long(u, size, rd, rn)); - } - &Inst::VecRRR { - rd, - rn, - rm, - alu_op, - size, - } => { - let (q, enc_size) = size.enc_size(); - let is_float = match alu_op { - VecALUOp::Fcmeq - | VecALUOp::Fcmgt - | VecALUOp::Fcmge - | VecALUOp::Fadd - | VecALUOp::Fsub - | VecALUOp::Fdiv - | VecALUOp::Fmax - | VecALUOp::Fmin - | VecALUOp::Fmul => true, - _ => false, - }; - let enc_float_size = match (is_float, size) { - (true, VectorSize::Size32x2) => 0b0, - (true, VectorSize::Size32x4) => 0b0, - (true, VectorSize::Size64x2) => 0b1, - (true, _) => unimplemented!(), - _ => 0, - }; - - let (top11, bit15_10) = match alu_op { - VecALUOp::Sqadd => (0b000_01110_00_1 | enc_size << 1, 0b000011), - VecALUOp::Sqsub => (0b000_01110_00_1 | enc_size << 1, 0b001011), - VecALUOp::Uqadd => (0b001_01110_00_1 | enc_size << 1, 0b000011), - VecALUOp::Uqsub => (0b001_01110_00_1 | enc_size << 1, 0b001011), - VecALUOp::Cmeq => (0b001_01110_00_1 | enc_size << 1, 0b100011), - VecALUOp::Cmge => (0b000_01110_00_1 
| enc_size << 1, 0b001111), - VecALUOp::Cmgt => (0b000_01110_00_1 | enc_size << 1, 0b001101), - VecALUOp::Cmhi => (0b001_01110_00_1 | enc_size << 1, 0b001101), - VecALUOp::Cmhs => (0b001_01110_00_1 | enc_size << 1, 0b001111), - VecALUOp::Fcmeq => (0b000_01110_00_1, 0b111001), - VecALUOp::Fcmgt => (0b001_01110_10_1, 0b111001), - VecALUOp::Fcmge => (0b001_01110_00_1, 0b111001), - // The following logical instructions operate on bytes, so are not encoded differently - // for the different vector types. - VecALUOp::And => (0b000_01110_00_1, 0b000111), - VecALUOp::Bic => (0b000_01110_01_1, 0b000111), - VecALUOp::Orr => (0b000_01110_10_1, 0b000111), - VecALUOp::Eor => (0b001_01110_00_1, 0b000111), - VecALUOp::Bsl => (0b001_01110_01_1, 0b000111), - VecALUOp::Umaxp => { - debug_assert_ne!(size, VectorSize::Size64x2); - - (0b001_01110_00_1 | enc_size << 1, 0b101001) - } - VecALUOp::Add => (0b000_01110_00_1 | enc_size << 1, 0b100001), - VecALUOp::Sub => (0b001_01110_00_1 | enc_size << 1, 0b100001), - VecALUOp::Mul => { - debug_assert_ne!(size, VectorSize::Size64x2); - (0b000_01110_00_1 | enc_size << 1, 0b100111) - } - VecALUOp::Sshl => (0b000_01110_00_1 | enc_size << 1, 0b010001), - VecALUOp::Ushl => (0b001_01110_00_1 | enc_size << 1, 0b010001), - VecALUOp::Umin => { - debug_assert_ne!(size, VectorSize::Size64x2); - - (0b001_01110_00_1 | enc_size << 1, 0b011011) - } - VecALUOp::Smin => { - debug_assert_ne!(size, VectorSize::Size64x2); - - (0b000_01110_00_1 | enc_size << 1, 0b011011) - } - VecALUOp::Umax => { - debug_assert_ne!(size, VectorSize::Size64x2); - - (0b001_01110_00_1 | enc_size << 1, 0b011001) - } - VecALUOp::Smax => { - debug_assert_ne!(size, VectorSize::Size64x2); - - (0b000_01110_00_1 | enc_size << 1, 0b011001) - } - VecALUOp::Urhadd => { - debug_assert_ne!(size, VectorSize::Size64x2); - - (0b001_01110_00_1 | enc_size << 1, 0b000101) - } - VecALUOp::Fadd => (0b000_01110_00_1, 0b110101), - VecALUOp::Fsub => (0b000_01110_10_1, 0b110101), - VecALUOp::Fdiv => (0b001_01110_00_1, 0b111111), - VecALUOp::Fmax => (0b000_01110_00_1, 0b111101), - VecALUOp::Fmin => (0b000_01110_10_1, 0b111101), - VecALUOp::Fmul => (0b001_01110_00_1, 0b110111), - VecALUOp::Addp => (0b000_01110_00_1 | enc_size << 1, 0b101111), - VecALUOp::Zip1 => (0b01001110_00_0 | enc_size << 1, 0b001110), - VecALUOp::Sqrdmulh => { - debug_assert!( - size.lane_size() == ScalarSize::Size16 - || size.lane_size() == ScalarSize::Size32 - ); - - (0b001_01110_00_1 | enc_size << 1, 0b101101) - } - }; - let top11 = if is_float { - top11 | enc_float_size << 1 - } else { - top11 - }; - sink.put4(enc_vec_rrr(top11 | q << 9, rm, bit15_10, rn, rd)); - } - &Inst::VecLoadReplicate { rd, rn, size } => { - let (q, size) = size.enc_size(); - - let srcloc = state.cur_srcloc(); - if srcloc != SourceLoc::default() { - // Register the offset at which the actual load instruction starts. - sink.add_trap(srcloc, TrapCode::HeapOutOfBounds); - } - - sink.put4(enc_ldst_vec(q, size, rn, rd)); - } - &Inst::VecCSel { rd, rn, rm, cond } => { - /* Emit this: - b.cond else - mov rd, rm - b out - else: - mov rd, rn - out: - - Note, we could do better in the cases where rd == rn or rd == rm. 
- */ - let else_label = sink.get_label(); - let out_label = sink.get_label(); - - // b.cond else - let br_else_offset = sink.cur_offset(); - sink.put4(enc_conditional_br( - BranchTarget::Label(else_label), - CondBrKind::Cond(cond), - )); - sink.use_label_at_offset(br_else_offset, else_label, LabelUse::Branch19); - - // mov rd, rm - sink.put4(enc_vecmov(/* 16b = */ true, rd, rm)); - - // b out - let b_out_offset = sink.cur_offset(); - sink.use_label_at_offset(b_out_offset, out_label, LabelUse::Branch26); - sink.add_uncond_branch(b_out_offset, b_out_offset + 4, out_label); - sink.put4(enc_jump26(0b000101, 0 /* will be fixed up later */)); - - // else: - sink.bind_label(else_label); - - // mov rd, rn - sink.put4(enc_vecmov(/* 16b = */ true, rd, rn)); - - // out: - sink.bind_label(out_label); - } - &Inst::MovToNZCV { rn } => { - sink.put4(0xd51b4200 | machreg_to_gpr(rn)); - } - &Inst::MovFromNZCV { rd } => { - sink.put4(0xd53b4200 | machreg_to_gpr(rd.to_reg())); - } - &Inst::Extend { - rd, - rn, - signed: false, - from_bits: 1, - to_bits, - } => { - assert!(to_bits <= 64); - // Reduce zero-extend-from-1-bit to: - // - and rd, rn, #1 - // Note: This is special cased as UBFX may take more cycles - // than AND on smaller cores. - let imml = ImmLogic::maybe_from_u64(1, I32).unwrap(); - Inst::AluRRImmLogic { - alu_op: ALUOp::And, - size: OperandSize::Size32, - rd, - rn, - imml, - } - .emit(sink, emit_info, state); - } - &Inst::Extend { - rd, - rn, - signed: false, - from_bits: 32, - to_bits: 64, - } => { - let mov = Inst::Mov32 { rd, rm: rn }; - mov.emit(sink, emit_info, state); - } - &Inst::Extend { - rd, - rn, - signed, - from_bits, - to_bits, - } => { - let (opc, size) = if signed { - (0b00, OperandSize::from_bits(to_bits)) - } else { - (0b10, OperandSize::Size32) - }; - sink.put4(enc_bfm(opc, size, rd, rn, 0, from_bits - 1)); - } - &Inst::Jump { ref dest } => { - let off = sink.cur_offset(); - // Indicate that the jump uses a label, if so, so that a fixup can occur later. - if let Some(l) = dest.as_label() { - sink.use_label_at_offset(off, l, LabelUse::Branch26); - sink.add_uncond_branch(off, off + 4, l); - } - // Emit the jump itself. - sink.put4(enc_jump26(0b000101, dest.as_offset26_or_zero())); - } - &Inst::Ret => { - sink.put4(0xd65f03c0); - } - &Inst::EpiloguePlaceholder => { - // Noop; this is just a placeholder for epilogues. - } - &Inst::Call { ref info } => { - if let Some(s) = state.take_stack_map() { - sink.add_stack_map(StackMapExtent::UpcomingBytes(4), s); - } - let loc = state.cur_srcloc(); - sink.add_reloc(loc, Reloc::Arm64Call, &info.dest, 0); - sink.put4(enc_jump26(0b100101, 0)); - if info.opcode.is_call() { - sink.add_call_site(loc, info.opcode); - } - } - &Inst::CallInd { ref info } => { - if let Some(s) = state.take_stack_map() { - sink.add_stack_map(StackMapExtent::UpcomingBytes(4), s); - } - sink.put4(0b1101011_0001_11111_000000_00000_00000 | (machreg_to_gpr(info.rn) << 5)); - let loc = state.cur_srcloc(); - if info.opcode.is_call() { - sink.add_call_site(loc, info.opcode); - } - } - &Inst::CondBr { - taken, - not_taken, - kind, - } => { - // Conditional part first. - let cond_off = sink.cur_offset(); - if let Some(l) = taken.as_label() { - sink.use_label_at_offset(cond_off, l, LabelUse::Branch19); - let inverted = enc_conditional_br(taken, kind.invert()).to_le_bytes(); - sink.add_cond_branch(cond_off, cond_off + 4, l, &inverted[..]); - } - sink.put4(enc_conditional_br(taken, kind)); - - // Unconditional part next. 
- let uncond_off = sink.cur_offset(); - if let Some(l) = not_taken.as_label() { - sink.use_label_at_offset(uncond_off, l, LabelUse::Branch26); - sink.add_uncond_branch(uncond_off, uncond_off + 4, l); - } - sink.put4(enc_jump26(0b000101, not_taken.as_offset26_or_zero())); - } - &Inst::TrapIf { kind, trap_code } => { - // condbr KIND, LABEL - let off = sink.cur_offset(); - let label = sink.get_label(); - sink.put4(enc_conditional_br( - BranchTarget::Label(label), - kind.invert(), - )); - sink.use_label_at_offset(off, label, LabelUse::Branch19); - // udf - let trap = Inst::Udf { trap_code }; - trap.emit(sink, emit_info, state); - // LABEL: - sink.bind_label(label); - } - &Inst::IndirectBr { rn, .. } => { - sink.put4(enc_br(rn)); - } - &Inst::Nop0 => {} - &Inst::Nop4 => { - sink.put4(0xd503201f); - } - &Inst::Brk => { - sink.put4(0xd4200000); - } - &Inst::Udf { trap_code } => { - let srcloc = state.cur_srcloc(); - sink.add_trap(srcloc, trap_code); - if let Some(s) = state.take_stack_map() { - sink.add_stack_map(StackMapExtent::UpcomingBytes(4), s); - } - sink.put4(0xd4a00000); - } - &Inst::Adr { rd, off } => { - assert!(off > -(1 << 20)); - assert!(off < (1 << 20)); - sink.put4(enc_adr(off, rd)); - } - &Inst::Word4 { data } => { - sink.put4(data); - } - &Inst::Word8 { data } => { - sink.put8(data); - } - &Inst::JTSequence { - ridx, - rtmp1, - rtmp2, - ref info, - .. - } => { - // This sequence is *one* instruction in the vcode, and is expanded only here at - // emission time, because we cannot allow the regalloc to insert spills/reloads in - // the middle; we depend on hardcoded PC-rel addressing below. - - // Branch to default when condition code from prior comparison indicates. - let br = enc_conditional_br(info.default_target, CondBrKind::Cond(Cond::Hs)); - // No need to inform the sink's branch folding logic about this branch, because it - // will not be merged with any other branch, flipped, or elided (it is not preceded - // or succeeded by any other branch). Just emit it with the label use. - let default_br_offset = sink.cur_offset(); - if let BranchTarget::Label(l) = info.default_target { - sink.use_label_at_offset(default_br_offset, l, LabelUse::Branch19); - } - sink.put4(br); - - // Save index in a tmp (the live range of ridx only goes to start of this - // sequence; rtmp1 or rtmp2 may overwrite it). - let inst = Inst::gen_move(rtmp2, ridx, I64); - inst.emit(sink, emit_info, state); - // Load address of jump table - let inst = Inst::Adr { rd: rtmp1, off: 16 }; - inst.emit(sink, emit_info, state); - // Load value out of jump table - let inst = Inst::SLoad32 { - rd: rtmp2, - mem: AMode::reg_plus_reg_scaled_extended( - rtmp1.to_reg(), - rtmp2.to_reg(), - I32, - ExtendOp::UXTW, - ), - flags: MemFlags::trusted(), - }; - inst.emit(sink, emit_info, state); - // Add base of jump table to jump-table-sourced block offset - let inst = Inst::AluRRR { - alu_op: ALUOp::Add, - size: OperandSize::Size64, - rd: rtmp1, - rn: rtmp1.to_reg(), - rm: rtmp2.to_reg(), - }; - inst.emit(sink, emit_info, state); - // Branch to computed address. (`targets` here is only used for successor queries - // and is not needed for emission.) - let inst = Inst::IndirectBr { - rn: rtmp1.to_reg(), - targets: vec![], - }; - inst.emit(sink, emit_info, state); - // Emit jump table (table of 32-bit offsets). - let jt_off = sink.cur_offset(); - for &target in info.targets.iter() { - let word_off = sink.cur_offset(); - // off_into_table is an addend here embedded in the label to be later patched - // at the end of codegen. 
The offset is initially relative to this jump table - // entry; with the extra addend, it'll be relative to the jump table's start, - // after patching. - let off_into_table = word_off - jt_off; - sink.use_label_at_offset( - word_off, - target.as_label().unwrap(), - LabelUse::PCRel32, - ); - sink.put4(off_into_table); - } - - // Lowering produces an EmitIsland before using a JTSequence, so we can safely - // disable the worst-case-size check in this case. - start_off = sink.cur_offset(); - } - &Inst::LoadExtName { - rd, - ref name, - offset, - } => { - let inst = Inst::ULoad64 { - rd, - mem: AMode::Label(MemLabel::PCRel(8)), - flags: MemFlags::trusted(), - }; - inst.emit(sink, emit_info, state); - let inst = Inst::Jump { - dest: BranchTarget::ResolvedOffset(12), - }; - inst.emit(sink, emit_info, state); - let srcloc = state.cur_srcloc(); - sink.add_reloc(srcloc, Reloc::Abs8, name, offset); - if emit_info.0.emit_all_ones_funcaddrs() { - sink.put8(u64::max_value()); - } else { - sink.put8(0); - } - } - &Inst::LoadAddr { rd, ref mem } => { - let (mem_insts, mem) = mem_finalize(sink.cur_offset(), mem, state); - for inst in mem_insts.into_iter() { - inst.emit(sink, emit_info, state); - } - - let (reg, index_reg, offset) = match mem { - AMode::RegExtended(r, idx, extendop) => (r, Some((idx, extendop)), 0), - AMode::Unscaled(r, simm9) => (r, None, simm9.value()), - AMode::UnsignedOffset(r, uimm12scaled) => { - (r, None, uimm12scaled.value() as i32) - } - _ => panic!("Unsupported case for LoadAddr: {:?}", mem), - }; - let abs_offset = if offset < 0 { - -offset as u64 - } else { - offset as u64 - }; - let alu_op = if offset < 0 { ALUOp::Sub } else { ALUOp::Add }; - - if let Some((idx, extendop)) = index_reg { - let add = Inst::AluRRRExtend { - alu_op: ALUOp::Add, - size: OperandSize::Size64, - rd, - rn: reg, - rm: idx, - extendop, - }; - - add.emit(sink, emit_info, state); - } else if offset == 0 { - if reg != rd.to_reg() { - let mov = Inst::Mov64 { rd, rm: reg }; - - mov.emit(sink, emit_info, state); - } - } else if let Some(imm12) = Imm12::maybe_from_u64(abs_offset) { - let add = Inst::AluRRImm12 { - alu_op, - size: OperandSize::Size64, - rd, - rn: reg, - imm12, - }; - add.emit(sink, emit_info, state); - } else { - // Use `tmp2` here: `reg` may be `spilltmp` if the `AMode` on this instruction - // was initially an `SPOffset`. Assert that `tmp2` is truly free to use. Note - // that no other instructions will be inserted here (we're emitting directly), - // and a live range of `tmp2` should not span this instruction, so this use - // should otherwise be correct. 
- debug_assert!(rd.to_reg() != tmp2_reg()); - debug_assert!(reg != tmp2_reg()); - let tmp = writable_tmp2_reg(); - for insn in Inst::load_constant(tmp, abs_offset).into_iter() { - insn.emit(sink, emit_info, state); - } - let add = Inst::AluRRR { - alu_op, - size: OperandSize::Size64, - rd, - rn: reg, - rm: tmp.to_reg(), - }; - add.emit(sink, emit_info, state); - } - } - &Inst::VirtualSPOffsetAdj { offset } => { - log::trace!( - "virtual sp offset adjusted by {} -> {}", - offset, - state.virtual_sp_offset + offset, - ); - state.virtual_sp_offset += offset; - } - &Inst::EmitIsland { needed_space } => { - if sink.island_needed(needed_space + 4) { - let jump_around_label = sink.get_label(); - let jmp = Inst::Jump { - dest: BranchTarget::Label(jump_around_label), - }; - jmp.emit(sink, emit_info, state); - sink.emit_island(needed_space + 4); - sink.bind_label(jump_around_label); - } - } - - &Inst::ElfTlsGetAddr { ref symbol } => { - // This is the instruction sequence that GCC emits for ELF GD TLS Relocations in aarch64 - // See: https://gcc.godbolt.org/z/KhMh5Gvra - - // adrp x0,