From 12a59ca5537d59ae5cd832ab0ec8563290fa13f2 Mon Sep 17 00:00:00 2001
From: Eric Schweitz
Date: Wed, 29 Jan 2025 07:59:59 -0800
Subject: [PATCH] The QIR codegen is one of the oldest pieces of the project
 and really shows its age at this point.

It was written before much of the rest of the core was implemented or even
designed. These changes replace the existing QIR codegen for the C++
compiler. Unfortunately, there are a number of semantics violations from
Python that need to be fixed before it can be switched over. Therefore, this
PR leaves Python intact and using the old codegen passes.

The purpose of these changes is twofold.

1. Instead of converting all of the IR to the LLVM-IR dialect at the same
   time as converting to QIR, these two steps are bifurcated. This allows us
   to convert the quake dialect to other higher-level operations first,
   before the LLVM-IR dialect is introduced. This will be beneficial in
   short order...

2. Instead of piecing together different flavors of QIR in completely ad hoc
   spaghetti-plate passes, the flavor of QIR is specified as a mixin modifier
   class for a single set of steps that converts to any flavor of QIR. This
   does mean that one will no longer be able to convert to the LLVM-IR
   dialect with QIR calls and then change one's mind from chocolate QIR to
   strawberry QIR much later.

Notes:
- Remove the option to disable the QIR profile preparation pass. This pass
  is not optional: the IR will be translated to an invalid state if the
  required function declarations are never created.
- Make it clear that AggressiveEarlyInlining is a pipeline. Refactor the
  registration functions so that we're not block-copying things between the
  core and Python (which was already dropping things on the floor).
- Add a new pass, convert-to-QIR-API. This pass replaces the cascade of
  passes that converted to full QIR and then converted some more to the base
  profile or adaptive profile.
- Refactor QIRFunctionNames.h.
- Add a raft of declarations to the intrinsics table. This dramatically
  reduces the amount of code in codegen and makes maintenance much easier.
- Add the analysis and prep pass.
- Improve pipeline locality and performance.
- Use the new code in the default codegen path for C++.
- Workarounds for issue #2541 and issue #2539. Keep the old codegen for
  Python; it has too many bugs.
- Update tests. Fix bugs in mock servers. Have the Python kernel builder add
  the cudaq-kernel attribute. (See issue #2541.)
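For illustration, a rough sketch of how a driver can use the new pipeline
(hypothetical setup code; `addConvertToQIRAPIPipeline` is the entry point
added by this patch, and "qir-base" is one of its supported flavors):

    // Sketch only: assumes `context` and `module` were created elsewhere.
    mlir::PassManager pm(&context);
    // Pick the QIR flavor up front: "qir", "qir-base", or "qir-adaptive".
    cudaq::opt::addConvertToQIRAPIPipeline(pm, /*api=*/"qir-base");
    // Conversion to the LLVM-IR dialect is now a separate, later step.
    if (mlir::failed(pm.run(module)))
      module.emitError("QIR API conversion failed");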
Signed-off-by: Eric Schweitz --- include/cudaq/Optimizer/Builder/Factory.h | 2 + include/cudaq/Optimizer/Builder/Intrinsics.h | 5 + include/cudaq/Optimizer/CodeGen/CodeGenOps.td | 18 + include/cudaq/Optimizer/CodeGen/Passes.h | 23 +- include/cudaq/Optimizer/CodeGen/Passes.td | 62 +- include/cudaq/Optimizer/CodeGen/Peephole.h | 1 + include/cudaq/Optimizer/CodeGen/Pipelines.h | 32 +- .../Optimizer/CodeGen/QIRAttributeNames.h | 2 + .../Optimizer/CodeGen/QIRFunctionNames.h | 39 +- .../Optimizer/CodeGen/QIROpaqueStructTypes.h | 94 + .../CodeGen/{QuakeToCC.h => QuakeToExecMgr.h} | 0 include/cudaq/Optimizer/InitAllPasses.h | 20 +- include/cudaq/Optimizer/Transforms/Passes.h | 2 +- lib/Optimizer/Builder/Factory.cpp | 16 + lib/Optimizer/Builder/Intrinsics.cpp | 165 ++ lib/Optimizer/CodeGen/CMakeLists.txt | 5 +- .../{ConvertToCC.cpp => ConvertToExecMgr.cpp} | 6 +- lib/Optimizer/CodeGen/ConvertToQIR.cpp | 1 + lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp | 1888 +++++++++++++++++ lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp | 15 +- lib/Optimizer/CodeGen/PassDetails.h | 2 + lib/Optimizer/CodeGen/Pipelines.cpp | 85 +- .../{QuakeToCC.cpp => QuakeToExecMgr.cpp} | 2 +- lib/Optimizer/CodeGen/QuakeToLLVM.cpp | 51 +- lib/Optimizer/CodeGen/VerifyNVQIRCalls.cpp | 9 +- lib/Optimizer/CodeGen/VerifyQIRProfile.cpp | 1 + .../CodeGen/WireSetsToProfileQIR.cpp | 13 +- lib/Optimizer/Dialect/CC/CCOps.cpp | 10 + .../Transforms/AggressiveEarlyInlining.cpp | 2 +- .../Transforms/GlobalizeArrayValues.cpp | 52 +- lib/Optimizer/Transforms/QuakeSynthesizer.cpp | 1 + python/cudaq/kernel/ast_bridge.py | 1 + python/cudaq/kernel/kernel_builder.py | 1 + .../cudaq/platform/py_alt_launch_kernel.cpp | 6 +- python/runtime/mlir/py_register_dialects.cpp | 14 +- python/tests/interop/test_interop.py | 4 +- python/tests/mlir/adjoint.py | 59 +- python/tests/mlir/ast_break.py | 2 +- python/tests/mlir/ast_compute_action.py | 2 +- python/tests/mlir/ast_conditionals.py | 28 +- python/tests/mlir/ast_continue.py | 2 +- python/tests/mlir/ast_control_kernel.py | 4 +- python/tests/mlir/ast_decrementing_range.py | 2 +- python/tests/mlir/ast_elif.py | 2 +- python/tests/mlir/ast_for_stdvec.py | 2 +- python/tests/mlir/ast_iterate_loop_init.py | 2 +- python/tests/mlir/ast_lambda_tuple_stmts.py | 2 +- python/tests/mlir/ast_list_comprehension.py | 2 +- python/tests/mlir/ast_list_init.py | 2 +- python/tests/mlir/ast_list_int.py | 6 +- python/tests/mlir/ast_qreg_slice.py | 2 +- python/tests/mlir/ast_veq_tuple_target.py | 2 +- python/tests/mlir/ast_while_loop.py | 4 +- python/tests/mlir/bool_var_scope.py | 2 +- python/tests/mlir/bug_1775.py | 2 +- python/tests/mlir/bug_1777.py | 2 +- python/tests/mlir/bug_1871.py | 6 +- python/tests/mlir/bug_1875.py | 2 +- python/tests/mlir/builderBug_332.py | 12 +- python/tests/mlir/call.py | 44 +- python/tests/mlir/conditional.py | 6 +- python/tests/mlir/control.py | 85 +- python/tests/mlir/control_toffoli.py | 2 +- python/tests/mlir/ctrl_gates.py | 12 +- python/tests/mlir/custom_op_builder.py | 2 +- python/tests/mlir/custom_operation.py | 4 +- .../mlir/fix_1130_bug_bad_arg_checking.py | 15 +- python/tests/mlir/float.py | 2 +- python/tests/mlir/ghz.py | 4 +- python/tests/mlir/list.py | 2 +- python/tests/mlir/measure.py | 4 +- python/tests/mlir/mixed_args.py | 2 +- python/tests/mlir/multi_qubit.py | 4 +- python/tests/mlir/multiple_floats.py | 2 +- python/tests/mlir/no_input.py | 2 +- python/tests/mlir/one_qubit.py | 4 +- python/tests/mlir/qalloc.py | 12 +- python/tests/mlir/qft.py | 2 +- python/tests/mlir/qreg_apply.py | 2 +- 
python/tests/mlir/qreg_iterable.py | 2 +- python/tests/mlir/quantum_type.py | 6 +- python/tests/mlir/rotation_gates.py | 4 +- python/tests/mlir/swap.py | 2 +- python/tests/mlir/tuple_assign.py | 2 +- python/utils/OpaqueArguments.h | 1 + runtime/common/RuntimeMLIRCommonImpl.h | 3 +- runtime/cudaq/builder/kernel_builder.cpp | 2 + runtime/nvqir/NVQIR.cpp | 64 +- targettests/execution/qir_string_labels.cpp | 37 +- test/AST-Quake/base_profile-1.cpp | 269 ++- test/AST-Quake/negated_control.cpp | 45 +- test/AST-Quake/pure_quantum_struct.cpp | 85 +- test/AST-Quake/qalloc_initialization.cpp | 404 +++- test/AST-Quake/to_qir.cpp | 68 +- test/NVQPP/struct_arg.cpp | 2 +- test/Translate/alloca_no_operand.qke | 82 +- test/Translate/base_profile-1.qke | 28 +- test/Translate/base_profile-2.qke | 11 +- test/Translate/base_profile-3.qke | 11 +- test/Translate/basic.qke | 100 +- test/Translate/const_array.qke | 14 +- test/Translate/custom_operation.qke | 41 +- test/Translate/emit-mlir.qke | 25 +- test/Translate/exp_pauli-1.qke | 35 +- test/Translate/exp_pauli-3.qke | 73 +- test/Translate/ghz.qke | 64 +- test/Translate/measure.qke | 24 +- test/Translate/qalloc_initfloat.qke | 3 +- test/Translate/qalloc_initialization.qke | 31 +- test/Translate/return_values.qke | 224 +- test/Translate/veq_or_qubit_control_args.qke | 43 +- tools/cudaq-qpud/RestServerMain.cpp | 1 + unittests/integration/builder_tester.cpp | 2 +- utils/mock_qpu/anyon/__init__.py | 2 +- utils/mock_qpu/braket/__init__.py | 2 +- utils/mock_qpu/ionq/__init__.py | 2 +- utils/mock_qpu/oqc/__init__.py | 2 +- utils/mock_qpu/quantinuum/__init__.py | 2 +- 118 files changed, 3705 insertions(+), 1126 deletions(-) create mode 100644 include/cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h rename include/cudaq/Optimizer/CodeGen/{QuakeToCC.h => QuakeToExecMgr.h} (100%) rename lib/Optimizer/CodeGen/{ConvertToCC.cpp => ConvertToExecMgr.cpp} (95%) create mode 100644 lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp rename lib/Optimizer/CodeGen/{QuakeToCC.cpp => QuakeToExecMgr.cpp} (99%) diff --git a/include/cudaq/Optimizer/Builder/Factory.h b/include/cudaq/Optimizer/Builder/Factory.h index d28e1c7411d..900736c4fec 100644 --- a/include/cudaq/Optimizer/Builder/Factory.h +++ b/include/cudaq/Optimizer/Builder/Factory.h @@ -186,6 +186,8 @@ std::optional maybeValueOfFloatConstant(mlir::Value v); /// \em{not} control dependent (other than on function entry). 
mlir::Value createLLVMTemporary(mlir::Location loc, mlir::OpBuilder &builder, mlir::Type type, std::size_t size = 1); +mlir::Value createTemporary(mlir::Location loc, mlir::OpBuilder &builder, + mlir::Type type, std::size_t size = 1); //===----------------------------------------------------------------------===// diff --git a/include/cudaq/Optimizer/Builder/Intrinsics.h b/include/cudaq/Optimizer/Builder/Intrinsics.h index e1d23a8e8df..d48a96dd89b 100644 --- a/include/cudaq/Optimizer/Builder/Intrinsics.h +++ b/include/cudaq/Optimizer/Builder/Intrinsics.h @@ -148,6 +148,11 @@ class IRBuilder : public mlir::OpBuilder { mlir::LogicalResult loadIntrinsic(mlir::ModuleOp module, llvm::StringRef name); + llvm::StringRef getIntrinsicText(llvm::StringRef name); + mlir::LogicalResult loadIntrinsicWithAliases(mlir::ModuleOp module, + llvm::StringRef name, + llvm::StringRef prefix); + std::string hashStringByContent(llvm::StringRef sref); /// Generates code that yields the size of any type that can be reified in diff --git a/include/cudaq/Optimizer/CodeGen/CodeGenOps.td b/include/cudaq/Optimizer/CodeGen/CodeGenOps.td index 25952dcc3d7..a977cc19221 100644 --- a/include/cudaq/Optimizer/CodeGen/CodeGenOps.td +++ b/include/cudaq/Optimizer/CodeGen/CodeGenOps.td @@ -47,4 +47,22 @@ def cgq_RAIIOp : CGQOp<"qmem_raii", [MemoryEffects<[MemAlloc, MemWrite]>]> { }]; } +def cgq_MaterializeConstantArrayOp : + CGQOp<"materialize_constant_array", [Pure]> { + let summary = "Macro to materialize a cc.const_array into memory."; + let description = [{ + This operation is the equivalent of creating some space in memory (such as + on the stack) and storing a cc.const_array value to that memory address. + This operation is needed for local rewrites, but is likely to be eliminated + if the constant array can be completely folded away. + }]; + + let arguments = (ins cc_ArrayType:$constArray); + let results = (outs cc_PointerType); + + let assemblyFormat = [{ + $constArray `:` functional-type(operands, results) attr-dict + }]; +} + #endif diff --git a/include/cudaq/Optimizer/CodeGen/Passes.h b/include/cudaq/Optimizer/CodeGen/Passes.h index c84bf17fcbe..8d0ec92bda5 100644 --- a/include/cudaq/Optimizer/CodeGen/Passes.h +++ b/include/cudaq/Optimizer/CodeGen/Passes.h @@ -32,17 +32,17 @@ namespace cudaq::opt { /// @param pm Pass Manager to add QIR passes to /// @param convertTo Expected to be `qir-base` or `qir-adaptive` (comes from the /// cudaq-translate command line `--convert-to` parameter) -/// @param performPrep Whether or not to perform the initial prep pass (normally -/// true, but false for the WireSet QIR path) -void addQIRProfilePipeline(mlir::OpPassManager &pm, llvm::StringRef convertTo, - bool performPrep = true); +/// \deprecated Replaced by the convert to QIR API pipeline. +void addQIRProfilePipeline(mlir::OpPassManager &pm, llvm::StringRef convertTo); + +void addQIRProfileVerify(mlir::OpPassManager &pm, llvm::StringRef convertTo); void addLowerToCCPipeline(mlir::OpPassManager &pm); void addWiresetToProfileQIRPipeline(mlir::OpPassManager &pm, llvm::StringRef profile); -/// @brief Verify that all `CallOp` targets are QIR- or NVQIR-defined functions -/// or in the provided allowed list. +/// Verify that all `CallOp` targets are QIR- or NVQIR-defined functions or in +/// the provided allowed list. 
std::unique_ptr createVerifyNVQIRCallOpsPass(const std::vector &allowedFuncs); @@ -61,7 +61,18 @@ void registerCodeGenDialect(mlir::DialectRegistry ®istry); mlir::LLVM::LLVMStructType lambdaAsPairOfPointers(mlir::MLIRContext *context); +/// The pipeline for lowering Quake code to the QIR API. There will be three +/// distinct flavors of QIR that can be generated with this pipeline. These +/// are `"qir"`, `"qir-base"`, and `"qir-adaptive"`. This pipeline should be run +/// before conversion to the LLVM-IR dialect. +void registerToQIRAPIPipeline(); +void addConvertToQIRAPIPipeline(mlir::OpPassManager &pm, mlir::StringRef api, + bool opaquePtr = false); + +/// The pipeline for lowering Quake code to the execution manager API. This +/// pipeline should be run before conversion to the LLVM-IR dialect. void registerToExecutionManagerCCPipeline(); + void registerWireSetToProfileQIRPipeline(); void populateCCTypeConversions(mlir::LLVMTypeConverter *converter); diff --git a/include/cudaq/Optimizer/CodeGen/Passes.td b/include/cudaq/Optimizer/CodeGen/Passes.td index d9a7c8380b6..0ff26168240 100644 --- a/include/cudaq/Optimizer/CodeGen/Passes.td +++ b/include/cudaq/Optimizer/CodeGen/Passes.td @@ -65,7 +65,10 @@ def ConvertToQIR : Pass<"quake-to-qir", "mlir::ModuleOp"> { def LowerToCG : Pass<"lower-to-cg", "mlir::ModuleOp"> { let summary = "Lower Quake to CG dialect."; let description = [{ - For testing purposes only. + Lower the Quake IR to the codegen dialect. The codegen dialect is used to + fuse small DAGs of IR into larger macro operations just prior to the final + codegen. This allows conversions to take place on the macro operations and + avoid some of the limitations of an MLIR conversion pass. }]; let dependentDialects = [ "cudaq::codegen::CodeGenDialect" ]; } @@ -212,4 +215,61 @@ def WireSetToProfileQIRPrep : let dependentDialects = ["cudaq::cc::CCDialect", "mlir::func::FuncDialect"]; } +def QuakeToQIRAPI : Pass<"quake-to-qir-api"> { + let summary = "Convert the Quake dialect to the QIR API."; + let description = [{ + This pass converts Quake operations to the QIR API as expressed in terms + of function calls to QIR functions. + + Which QIR functions are to be used is parameterized on the `api` option. + + This pass can lower to either use the obsolete opaque structure types (per + the QIR spec) or to use LLVM's currently supported opaque pointers. In the + latter case, type information is fully understood from the function names + themselves. 
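+
+    As an illustrative example (using the option names declared below), this
+    pass could be scheduled in a textual pipeline as
+    `quake-to-qir-api{api=qir-base opaque-pointer=true}`.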
+ }]; + + let dependentDialects = ["cudaq::cc::CCDialect", "mlir::arith::ArithDialect", + "mlir::cf::ControlFlowDialect", "mlir::func::FuncDialect", + "mlir::LLVM::LLVMDialect"]; + + let options = [ + Option<"api", "api", "std::string", /*default=*/"\"full\"", + "Select the QIR API to use.">, + Option<"opaquePtr", "opaque-pointer", "bool", /*default=*/"false", + "Use opaque pointers."> + ]; +} + +def QuakeToQIRAPIFinal : Pass<"quake-to-qir-api-final", "mlir::ModuleOp"> { + let summary = "Convert the Quake dialect to the QIR API finalization."; + let description = [{ + }]; + + let dependentDialects = ["cudaq::cc::CCDialect", "mlir::arith::ArithDialect", + "mlir::cf::ControlFlowDialect", "mlir::func::FuncDialect", + "mlir::LLVM::LLVMDialect"]; + + let options = [ + Option<"api", "api", "std::string", /*default=*/"\"full\"", + "Select the QIR API to use."> + ]; +} + +def QuakeToQIRAPIPrep : Pass<"quake-to-qir-api-prep", "mlir::ModuleOp"> { + let summary = "Convert the Quake dialect to the QIR API preparation."; + let description = [{ + }]; + let dependentDialects = ["cudaq::cc::CCDialect", "mlir::arith::ArithDialect", + "mlir::cf::ControlFlowDialect", "mlir::func::FuncDialect", + "mlir::LLVM::LLVMDialect", "cudaq::codegen::CodeGenDialect"]; + let options = [ + Option<"api", "api", "std::string", /*default=*/"\"full\"", + "Select the QIR API to use.">, + Option<"opaquePtr", "opaque-pointer", "bool", /*default=*/"false", + "Use opaque pointers."> + ]; +} + + #endif // CUDAQ_OPT_OPTIMIZER_CODEGEN_PASSES diff --git a/include/cudaq/Optimizer/CodeGen/Peephole.h b/include/cudaq/Optimizer/CodeGen/Peephole.h index 22650c79b36..6b73ad39430 100644 --- a/include/cudaq/Optimizer/CodeGen/Peephole.h +++ b/include/cudaq/Optimizer/CodeGen/Peephole.h @@ -9,6 +9,7 @@ #pragma once #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" #include "llvm/ADT/StringRef.h" #include "mlir/IR/ValueRange.h" diff --git a/include/cudaq/Optimizer/CodeGen/Pipelines.h b/include/cudaq/Optimizer/CodeGen/Pipelines.h index e332195299d..afb39831baa 100644 --- a/include/cudaq/Optimizer/CodeGen/Pipelines.h +++ b/include/cudaq/Optimizer/CodeGen/Pipelines.h @@ -21,25 +21,41 @@ namespace cudaq::opt { -/// The common pipeline. -/// Adds the common pipeline (with or without a profile specifier) but without -/// the final QIR profile lowering passes. -void commonPipelineConvertToQIR( +/// Adds the common pipeline. \p codeGenFor specifies which variant of QIR is to +/// be generated: full, base-profile, adaptive-profile, etc. \p passConfigAs +/// specifies which variant of QIR to use with \e other passes, and not the +/// final `codegen`, in the pipeline. Typically, \p codeGenFor and \p +/// passConfigAs will have identical values. +void commonPipelineConvertToQIR(mlir::PassManager &pm, + mlir::StringRef codeGenFor = "qir", + mlir::StringRef passConfigAs = "qir"); + +/// \deprecated{Only for Python, since it can't use the new QIR codegen.} +void commonPipelineConvertToQIR_PythonWorkaround( mlir::PassManager &pm, const std::optional &convertTo); /// \brief Pipeline builder to convert Quake to QIR. /// Does not specify a particular QIR profile. 
inline void addPipelineConvertToQIR(mlir::PassManager &pm) { - commonPipelineConvertToQIR(pm, std::nullopt); + commonPipelineConvertToQIR(pm); +} + +/// \deprecated{Only for Python, since it can't use the new QIR codegen.} +inline void addPipelineConvertToQIR_PythonWorkaround(mlir::PassManager &pm) { + commonPipelineConvertToQIR_PythonWorkaround(pm, std::nullopt); } /// \brief Pipeline builder to convert Quake to QIR. /// Specifies a particular QIR profile in \p convertTo. /// \p pm Pass manager to append passes to /// \p convertTo name of QIR profile (e.g., `qir-base`, `qir-adaptive`, ...) -inline void addPipelineConvertToQIR(mlir::PassManager &pm, - mlir::StringRef convertTo) { - commonPipelineConvertToQIR(pm, convertTo); +void addPipelineConvertToQIR(mlir::PassManager &pm, mlir::StringRef convertTo); + +/// \deprecated{Only for Python, since it can't use the new QIR codegen.} +inline void +addPipelineConvertToQIR_PythonWorkaround(mlir::PassManager &pm, + mlir::StringRef convertTo) { + commonPipelineConvertToQIR_PythonWorkaround(pm, convertTo); addQIRProfilePipeline(pm, convertTo); } diff --git a/include/cudaq/Optimizer/CodeGen/QIRAttributeNames.h b/include/cudaq/Optimizer/CodeGen/QIRAttributeNames.h index e1bc4502eba..9a10e70d294 100644 --- a/include/cudaq/Optimizer/CodeGen/QIRAttributeNames.h +++ b/include/cudaq/Optimizer/CodeGen/QIRAttributeNames.h @@ -23,5 +23,7 @@ static constexpr const char QIRRequiredResultsAttrName[] = "requiredResults"; static constexpr const char QIRIrreversibleFlagName[] = "irreversible"; static constexpr const char StartingOffsetAttrName[] = "StartingOffset"; +static constexpr const char ResultIndexAttrName[] = "ResultIndex"; +static constexpr const char MzAssignedNameAttrName[] = "MzAssignedName"; } // namespace cudaq::opt diff --git a/include/cudaq/Optimizer/CodeGen/QIRFunctionNames.h b/include/cudaq/Optimizer/CodeGen/QIRFunctionNames.h index e25f0e08902..f04428f4ab1 100644 --- a/include/cudaq/Optimizer/CodeGen/QIRFunctionNames.h +++ b/include/cudaq/Optimizer/CodeGen/QIRFunctionNames.h @@ -11,8 +11,6 @@ /// This file provides some common QIR function names for use throughout our /// MLIR lowering infrastructure. 
-#include "mlir/Conversion/LLVMCommon/TypeConverter.h" - namespace cudaq::opt { /// QIS Function name strings @@ -21,14 +19,19 @@ static constexpr const char QIRMeasureBody[] = "__quantum__qis__mz__body"; static constexpr const char QIRMeasure[] = "__quantum__qis__mz"; static constexpr const char QIRMeasureToRegister[] = "__quantum__qis__mz__to__register"; +static constexpr const char QIRResetBody[] = "__quantum__qis__reset__body"; +static constexpr const char QIRReset[] = "__quantum__qis__reset"; -static constexpr const char QIRCnot[] = "__quantum__qis__cnot"; +static constexpr const char QIRCnot[] = "__quantum__qis__cnot__body"; static constexpr const char QIRCphase[] = "__quantum__qis__cphase"; -static constexpr const char QIRCZ[] = "__quantum__qis__cz"; +static constexpr const char QIRCZ[] = "__quantum__qis__cz__body"; static constexpr const char QIRReadResultBody[] = "__quantum__qis__read_result__body"; static constexpr const char QIRCustomOp[] = "__quantum__qis__custom_unitary"; +static constexpr const char QIRCustomAdjOp[] = + "__quantum__qis__custom_unitary__adj"; +static constexpr const char QIRExpPauli[] = "__quantum__qis__exp_pauli"; static constexpr const char NVQIRInvokeWithControlBits[] = "invokeWithControlQubits"; @@ -38,6 +41,8 @@ static constexpr const char NVQIRInvokeU3RotationWithControlBits[] = "invokeU3RotationWithControlQubits"; static constexpr const char NVQIRInvokeWithControlRegisterOrBits[] = "invokeWithControlRegisterOrQubits"; +static constexpr const char NVQIRGeneralizedInvokeAny[] = + "generalizedInvokeWithRotationsControlsTargets"; static constexpr const char NVQIRPackSingleQubitInArray[] = "packSingleQubitInArray"; static constexpr const char NVQIRReleasePackedQubitArray[] = @@ -89,30 +94,4 @@ static constexpr const char QIRRecordOutput[] = static constexpr const char QIRClearResultMaps[] = "__quantum__rt__clear_result_maps"; -inline mlir::Type getQuantumTypeByName(mlir::StringRef type, - mlir::MLIRContext *context) { - return mlir::LLVM::LLVMStructType::getOpaque(type, context); -} - -inline mlir::Type getQubitType(mlir::MLIRContext *context) { - return mlir::LLVM::LLVMPointerType::get( - getQuantumTypeByName("Qubit", context)); -} - -inline mlir::Type getArrayType(mlir::MLIRContext *context) { - return mlir::LLVM::LLVMPointerType::get( - getQuantumTypeByName("Array", context)); -} - -inline mlir::Type getResultType(mlir::MLIRContext *context) { - return mlir::LLVM::LLVMPointerType::get( - getQuantumTypeByName("Result", context)); -} - -inline mlir::Type getCharPointerType(mlir::MLIRContext *context) { - return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); -} - -void initializeTypeConversions(mlir::LLVMTypeConverter &typeConverter); - } // namespace cudaq::opt diff --git a/include/cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h b/include/cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h new file mode 100644 index 00000000000..95099f3bc8a --- /dev/null +++ b/include/cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h @@ -0,0 +1,94 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#pragma once + +/// This file provides the opaque struct types to be used with the obsolete LLVM +/// typed pointer type. + +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Dialect/LLVMIR/LLVMTypes.h" + +namespace cudaq { +inline mlir::Type getQuantumTypeByName(mlir::StringRef type, + mlir::MLIRContext *context) { + return mlir::LLVM::LLVMStructType::getOpaque(type, context); +} + +namespace opt { + +// The following type creators are deprecated and should only be used in the +// older codegen passes. Use the creators in the cg namespace immediately below +// instead. +inline mlir::Type getOpaquePointerType(mlir::MLIRContext *context) { + return mlir::LLVM::LLVMPointerType::get(context); +} + +inline mlir::Type getQubitType(mlir::MLIRContext *context) { + return mlir::LLVM::LLVMPointerType::get( + getQuantumTypeByName("Qubit", context)); +} + +inline mlir::Type getArrayType(mlir::MLIRContext *context) { + return mlir::LLVM::LLVMPointerType::get( + getQuantumTypeByName("Array", context)); +} + +inline mlir::Type getResultType(mlir::MLIRContext *context) { + return mlir::LLVM::LLVMPointerType::get( + getQuantumTypeByName("Result", context)); +} + +inline mlir::Type getCharPointerType(mlir::MLIRContext *context) { + return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); +} + +void initializeTypeConversions(mlir::LLVMTypeConverter &typeConverter); + +} // namespace opt + +namespace cg { + +// The following type creators replace the ones above. They are configurable on +// the fly to either use opaque structs or opaque pointers. The default is to +// use pointers to opaque structs, which is no longer supported in modern LLVM. 
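+// For example (illustrative renderings): `getQubitType(ctx)` produces
+// `!cc.ptr<!llvm.struct<"Qubit", opaque>>`, while
+// `getQubitType(ctx, /*useOpaquePtr=*/true)` produces `!cc.ptr<none>`.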
+ +inline mlir::Type getOpaquePointerType(mlir::MLIRContext *context) { + return cc::PointerType::get(mlir::NoneType::get(context)); +} + +inline mlir::Type getQubitType(mlir::MLIRContext *context, + bool useOpaquePtr = false) { + if (useOpaquePtr) + return getOpaquePointerType(context); + return cc::PointerType::get(getQuantumTypeByName("Qubit", context)); +} + +inline mlir::Type getArrayType(mlir::MLIRContext *context, + bool useOpaquePtr = false) { + if (useOpaquePtr) + return getOpaquePointerType(context); + return cc::PointerType::get(getQuantumTypeByName("Array", context)); +} + +inline mlir::Type getResultType(mlir::MLIRContext *context, + bool useOpaquePtr = false) { + if (useOpaquePtr) + return getOpaquePointerType(context); + return cc::PointerType::get(getQuantumTypeByName("Result", context)); +} + +inline mlir::Type getCharPointerType(mlir::MLIRContext *context, + bool useOpaquePtr = false) { + if (useOpaquePtr) + return getOpaquePointerType(context); + return cc::PointerType::get(mlir::IntegerType::get(context, 8)); +} + +} // namespace cg +} // namespace cudaq diff --git a/include/cudaq/Optimizer/CodeGen/QuakeToCC.h b/include/cudaq/Optimizer/CodeGen/QuakeToExecMgr.h similarity index 100% rename from include/cudaq/Optimizer/CodeGen/QuakeToCC.h rename to include/cudaq/Optimizer/CodeGen/QuakeToExecMgr.h diff --git a/include/cudaq/Optimizer/InitAllPasses.h b/include/cudaq/Optimizer/InitAllPasses.h index a55053bf0d1..cda83274ddf 100644 --- a/include/cudaq/Optimizer/InitAllPasses.h +++ b/include/cudaq/Optimizer/InitAllPasses.h @@ -14,21 +14,27 @@ namespace cudaq { -inline void registerAllPasses() { - // General MLIR passes - mlir::registerTransformsPasses(); - - // NVQPP passes +inline void registerCudaqPassesAndPipelines() { + // CUDA-Q passes opt::registerOptCodeGenPasses(); opt::registerOptTransformsPasses(); - opt::registerAggressiveEarlyInlining(); - // Pipelines + // CUDA-Q pipelines + opt::registerAggressiveEarlyInliningPipeline(); opt::registerUnrollingPipeline(); opt::registerToExecutionManagerCCPipeline(); + opt::registerToQIRAPIPipeline(); opt::registerTargetPipelines(); opt::registerWireSetToProfileQIRPipeline(); opt::registerMappingPipeline(); } +inline void registerAllPasses() { + // General MLIR passes + mlir::registerTransformsPasses(); + + // All the CUDA-Q passes and pipelines. + registerCudaqPassesAndPipelines(); +} + } // namespace cudaq diff --git a/include/cudaq/Optimizer/Transforms/Passes.h b/include/cudaq/Optimizer/Transforms/Passes.h index c3e47a422ce..4bfddf6101d 100644 --- a/include/cudaq/Optimizer/Transforms/Passes.h +++ b/include/cudaq/Optimizer/Transforms/Passes.h @@ -22,7 +22,7 @@ namespace cudaq::opt { /// do not go through the runtime layers, inline all calls, and detect if calls /// to kernels remain in the fully inlined into entry point kernel. 
 void addAggressiveEarlyInlining(mlir::OpPassManager &pm);
-void registerAggressiveEarlyInlining();
+void registerAggressiveEarlyInliningPipeline();
 void registerUnrollingPipeline();
 void registerMappingPipeline();
diff --git a/lib/Optimizer/Builder/Factory.cpp b/lib/Optimizer/Builder/Factory.cpp
index 0f98162b83a..9ed2eab4c22 100644
--- a/lib/Optimizer/Builder/Factory.cpp
+++ b/lib/Optimizer/Builder/Factory.cpp
@@ -9,6 +9,7 @@
 #include "cudaq/Optimizer/Builder/Intrinsics.h"
 #include "cudaq/Optimizer/Builder/Runtime.h"
 #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h"
+#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h"
 #include "cudaq/Optimizer/Dialect/CC/CCOps.h"
 #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h"
 #include "llvm/TargetParser/Host.h"
@@ -249,6 +250,21 @@ Value factory::createLLVMTemporary(Location loc, OpBuilder &builder, Type type,
   return builder.create<LLVM::AllocaOp>(loc, type, ArrayRef<Value>{len});
 }
 
+Value factory::createTemporary(Location loc, OpBuilder &builder, Type type,
+                               std::size_t size) {
+  Operation *op = builder.getBlock()->getParentOp();
+  auto func = dyn_cast<func::FuncOp>(op);
+  if (!func)
+    func = op->getParentOfType<func::FuncOp>();
+  assert(func && "must be in a function");
+  auto *entryBlock = &func.getRegion().front();
+  assert(entryBlock && "function must have an entry block");
+  OpBuilder::InsertionGuard guard(builder);
+  builder.setInsertionPointToStart(entryBlock);
+  Value len = builder.create<arith::ConstantIntOp>(loc, size, 64);
+  return builder.create<cudaq::cc::AllocaOp>(loc, type, len);
+}
+
 // This builder will transform the monotonic loop into an invariant loop during
 // construction. This is meant to save some time in loop analysis and
 // normalization, which would perform a similar transformation.
diff --git a/lib/Optimizer/Builder/Intrinsics.cpp b/lib/Optimizer/Builder/Intrinsics.cpp
index 96227b7fc00..57b8fe0b1bb 100644
--- a/lib/Optimizer/Builder/Intrinsics.cpp
+++ b/lib/Optimizer/Builder/Intrinsics.cpp
@@ -390,6 +390,135 @@ static constexpr IntrinsicCode intrinsicTable[] = {
   {"malloc", {}, "func.func private @malloc(i64) -> !cc.ptr<i8>"},
 
+  // Declarations of QIR functions used by codegen that are common to all
+  // subtargets (full, base profile, or adaptive profile). These include qubit
+  // allocation and management, controlled variants of the gates, some
+  // one-offs, and the controlled-form invocation helper routines.
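+  // Note: these declarations use the !qir_* type aliases. When an entry is
+  // loaded via loadIntrinsicWithAliases, a prefix that defines the aliases
+  // (e.g., the qir_opaque_pointer or qir_opaque_struct entry below) is
+  // prepended to the code before parsing.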
+ {"qir_common", + {}, + R"#( + func.func private @__quantum__rt__qubit_allocate() -> !qir_qubit + func.func private @__quantum__rt__qubit_allocate_array(i64) -> !qir_array + func.func private @__quantum__rt__qubit_allocate_array_with_state_fp64(i64, !cc.ptr) -> !qir_array + func.func private @__quantum__rt__qubit_allocate_array_with_state_fp32(i64, !cc.ptr) -> !qir_array + func.func private @__quantum__rt__qubit_allocate_array_with_state_complex64(i64, !cc.ptr>) -> !qir_array + func.func private @__quantum__rt__qubit_allocate_array_with_state_complex32(i64, !cc.ptr>) -> !qir_array + func.func private @__quantum__rt__qubit_allocate_array_with_state_ptr(!cc.ptr) -> !qir_array + func.func private @__quantum__rt__qubit_allocate_array_with_cudaq_state_ptr(i64, !cc.ptr) -> !qir_array + + func.func private @__quantum__rt__qubit_release_array(!qir_array) + func.func private @__quantum__rt__qubit_release(!qir_qubit) + + func.func private @__quantum__rt__array_create_1d(i32, i64) -> !qir_array + func.func private @__quantum__rt__array_concatenate(!qir_array, !qir_array) -> !qir_array + func.func private @__quantum__rt__array_get_size_1d(!qir_array) -> i64 + func.func private @__quantum__rt__array_slice(!qir_array, i32, i64, i64, i64) -> !qir_array + func.func private @__quantum__rt__array_get_element_ptr_1d(!qir_array, i64) -> !cc.ptr + + func.func private @__quantum__qis__h__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__x__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__y__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__z__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__s__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__t__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__sdg__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__tdg__ctl(!qir_array, !qir_qubit) + func.func private @__quantum__qis__u3__ctl(f64, f64, f64, !qir_array, !qir_qubit) + func.func private @__quantum__qis__swap__ctl(!qir_array, !qir_qubit, !qir_qubit) + func.func private @__quantum__qis__rx__ctl(f64, !qir_array, !qir_qubit) + func.func private @__quantum__qis__ry__ctl(f64, !qir_array, !qir_qubit) + func.func private @__quantum__qis__rz__ctl(f64, !qir_array, !qir_qubit) + func.func private @__quantum__qis__r1__ctl(f64, !qir_array, !qir_qubit) + + func.func private @__quantum__qis__exp_pauli(f64, !qir_array, !qir_charptr) + func.func private @__quantum__qis__custom_unitary(!cc.ptr>, !qir_array, !qir_array, !qir_charptr) + func.func private @__quantum__qis__custom_unitary__adj(!cc.ptr>, !qir_array, !qir_array, !qir_charptr) + + llvm.func @generalizedInvokeWithRotationsControlsTargets(i64, i64, i64, i64, !qir_llvmptr, ...) attributes {sym_visibility = "private"} +)#"}, + + // Declarations for base and adaptive profile QIR functions used by codegen. + // These include gates, adjoint gates, one offs, and dealing with + // measurement results. 
+ {"qir_common_profile", + {"qir_common"}, + R"#( + func.func private @__quantum__qis__h__body(!qir_qubit) + func.func private @__quantum__qis__x__body(!qir_qubit) + func.func private @__quantum__qis__y__body(!qir_qubit) + func.func private @__quantum__qis__z__body(!qir_qubit) + func.func private @__quantum__qis__s__body(!qir_qubit) + func.func private @__quantum__qis__t__body(!qir_qubit) + func.func private @__quantum__qis__sdg__body(!qir_qubit) + func.func private @__quantum__qis__tdg__body(!qir_qubit) + func.func private @__quantum__qis__u3__body(f64, f64, f64, !qir_qubit) + func.func private @__quantum__qis__reset__body(!qir_qubit) + func.func private @__quantum__qis__mz__body(!qir_qubit, !qir_result) attributes {passthrough = ["irreversible"]} + func.func private @__quantum__qis__swap__body(!qir_qubit, !qir_qubit) + func.func private @__quantum__qis__rx__body(f64, !qir_qubit) + func.func private @__quantum__qis__phased_rx__body(f64, f64, !qir_qubit) + func.func private @__quantum__qis__ry__body(f64, !qir_qubit) + func.func private @__quantum__qis__rz__body(f64, !qir_qubit) + func.func private @__quantum__qis__r1__body(f64, !qir_qubit) + + func.func private @__quantum__rt__result_record_output(!qir_result, !qir_charptr) + func.func private @__quantum__qis__cnot__body(!qir_qubit, !qir_qubit) + func.func private @__quantum__qis__cz__body(!qir_qubit, !qir_qubit) + func.func private @__quantum__qis__read_result__body(!qir_result) -> i1 + )#"}, + + // Declarations of all full QIR functions used by codegen. + // These include gates (sans the "__body" suffix) and measurements. + {"qir_full", + {"qir_common"}, + R"#( + func.func private @__quantum__qis__h(!qir_qubit) + func.func private @__quantum__qis__x(!qir_qubit) + func.func private @__quantum__qis__y(!qir_qubit) + func.func private @__quantum__qis__z(!qir_qubit) + func.func private @__quantum__qis__s(!qir_qubit) + func.func private @__quantum__qis__t(!qir_qubit) + func.func private @__quantum__qis__sdg(!qir_qubit) + func.func private @__quantum__qis__tdg(!qir_qubit) + func.func private @__quantum__qis__u3(f64, f64, f64, !qir_qubit) + func.func private @__quantum__qis__reset(!qir_qubit) + func.func private @__quantum__qis__mz(!qir_qubit) -> !qir_result + func.func private @__quantum__qis__mz__to__register(!qir_qubit, !qir_charptr) -> !qir_result + func.func private @__quantum__qis__swap(!qir_qubit, !qir_qubit) + func.func private @__quantum__qis__rx(f64, !qir_qubit) + func.func private @__quantum__qis__phased_rx(f64, f64, !qir_qubit) + func.func private @__quantum__qis__ry(f64, !qir_qubit) + func.func private @__quantum__qis__rz(f64, !qir_qubit) + func.func private @__quantum__qis__r1(f64, !qir_qubit) + )#"}, + + // Choose one of the two QIR typing conventions. Opaque pointers are the + // current LLVM standard. Opaque struct is from an obsolete LLVM version, + // but used by the QIR specification. + + // Use opaque pointers (LLVM's `ptr` type). The type of the referent is + // always implicit and unambiguous from its usage. At the moment, this is + // using i8* instead of ptr, since the latter requires some other changes. + {"qir_opaque_pointer", + {}, + R"#( + !qir_array = !cc.ptr + !qir_qubit = !cc.ptr + !qir_result = !cc.ptr + !qir_charptr = !cc.ptr + !qir_llvmptr = !llvm.ptr + )#"}, + // Use the obsolete LLVM opaque struct type. 
+ {"qir_opaque_struct", + {}, + R"#( + !qir_array = !cc.ptr> + !qir_qubit = !cc.ptr> + !qir_result = !cc.ptr> + !qir_charptr = !cc.ptr + !qir_llvmptr = !llvm.ptr + )#"}, + // streamlinedLaunchKernel(kernelName, vectorArgPtrs) {cudaq::runtime::launchKernelStreamlinedFuncName, {}, @@ -476,6 +605,42 @@ LogicalResult IRBuilder::loadIntrinsic(ModuleOp module, StringRef intrinName) { ParserConfig{module.getContext(), /*verifyAfterParse=*/false}); } +StringRef IRBuilder::getIntrinsicText(StringRef intrinName) { + auto iter = std::lower_bound(&intrinsicTable[0], + &intrinsicTable[intrinsicTableSize], intrinName); + if (iter == &intrinsicTable[intrinsicTableSize]) + return ""; + return iter->code; +} + +LogicalResult IRBuilder::loadIntrinsicWithAliases(ModuleOp module, + StringRef intrinName, + StringRef prefix) { + // Check if this intrinsic was already loaded. + if (module.lookupSymbol(intrinName)) + return success(); + assert(intrinsicTableIsSorted() && "intrinsic table must be sorted"); + auto iter = std::lower_bound(&intrinsicTable[0], + &intrinsicTable[intrinsicTableSize], intrinName); + if (iter == &intrinsicTable[intrinsicTableSize]) { + module.emitError(std::string("intrinsic") + intrinName + " not in table."); + return failure(); + } + assert(iter->name == intrinName); + // First load the prereqs. + for (std::size_t i = 0; i < DefaultPrerequisiteSize; ++i) { + if (iter->preReqs[i].empty()) + break; + if (failed(loadIntrinsicWithAliases(module, iter->preReqs[i], prefix))) + return failure(); + } + // Now load the requested code. + std::string code = prefix.str() + std::string(iter->code); + return parseSourceString( + code, module.getBody(), + ParserConfig{module.getContext(), /*verifyAfterParse=*/false}); +} + template DenseElementsAttr createDenseElementsAttr(const SmallVectorImpl &values, Type eleTy) { diff --git a/lib/Optimizer/CodeGen/CMakeLists.txt b/lib/Optimizer/CodeGen/CMakeLists.txt index 991180b51da..e7e0808ad25 100644 --- a/lib/Optimizer/CodeGen/CMakeLists.txt +++ b/lib/Optimizer/CodeGen/CMakeLists.txt @@ -16,13 +16,14 @@ add_cudaq_library(OptCodeGen CodeGenOps.cpp CodeGenTypes.cpp ConvertCCToLLVM.cpp - ConvertToCC.cpp + ConvertToExecMgr.cpp ConvertToQIRProfile.cpp ConvertToQIR.cpp + ConvertToQIRAPI.cpp Passes.cpp Pipelines.cpp - QuakeToCC.cpp QuakeToCodegen.cpp + QuakeToExecMgr.cpp QuakeToLLVM.cpp RemoveMeasurements.cpp TranslateToIQMJson.cpp diff --git a/lib/Optimizer/CodeGen/ConvertToCC.cpp b/lib/Optimizer/CodeGen/ConvertToExecMgr.cpp similarity index 95% rename from lib/Optimizer/CodeGen/ConvertToCC.cpp rename to lib/Optimizer/CodeGen/ConvertToExecMgr.cpp index 81e956df684..041f57b1d6d 100644 --- a/lib/Optimizer/CodeGen/ConvertToCC.cpp +++ b/lib/Optimizer/CodeGen/ConvertToExecMgr.cpp @@ -11,10 +11,11 @@ #include "cudaq/Optimizer/CodeGen/CudaqFunctionNames.h" #include "cudaq/Optimizer/CodeGen/Passes.h" #include "cudaq/Optimizer/CodeGen/Pipelines.h" -#include "cudaq/Optimizer/CodeGen/QuakeToCC.h" +#include "cudaq/Optimizer/CodeGen/QuakeToExecMgr.h" #include "cudaq/Optimizer/Dialect/CC/CCTypes.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" #include "llvm/Support/Debug.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Pass/PassManager.h" #include "mlir/Transforms/DialectConversion.h" #include "mlir/Transforms/GreedyPatternRewriteDriver.h" @@ -65,7 +66,8 @@ struct QuakeToCCPass : public cudaq::opt::impl::QuakeToCCBase { cudaq::opt::populateQuakeToCCPatterns(quakeTypeConverter, patterns); ConversionTarget target(*context); 
target.addLegalDialect(); + cf::ControlFlowDialect, func::FuncDialect, + LLVM::LLVMDialect>(); target.addIllegalDialect(); LLVM_DEBUG(llvm::dbgs() << "Module before:\n"; op.dump()); diff --git a/lib/Optimizer/CodeGen/ConvertToQIR.cpp b/lib/Optimizer/CodeGen/ConvertToQIR.cpp index 39e67550a8e..52b82ecf2a2 100644 --- a/lib/Optimizer/CodeGen/ConvertToQIR.cpp +++ b/lib/Optimizer/CodeGen/ConvertToQIR.cpp @@ -14,6 +14,7 @@ #include "cudaq/Optimizer/CodeGen/Passes.h" #include "cudaq/Optimizer/CodeGen/Peephole.h" #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" #include "cudaq/Optimizer/CodeGen/QuakeToLLVM.h" #include "cudaq/Optimizer/Dialect/CC/CCOps.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" diff --git a/lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp b/lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp new file mode 100644 index 00000000000..f267655cf74 --- /dev/null +++ b/lib/Optimizer/CodeGen/ConvertToQIRAPI.cpp @@ -0,0 +1,1888 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "CodeGenOps.h" +#include "cudaq/Optimizer/Builder/Intrinsics.h" +#include "cudaq/Optimizer/Builder/Runtime.h" +#include "cudaq/Optimizer/CodeGen/CodeGenDialect.h" +#include "cudaq/Optimizer/CodeGen/Passes.h" +#include "cudaq/Optimizer/CodeGen/Pipelines.h" +#include "cudaq/Optimizer/CodeGen/QIRAttributeNames.h" +#include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" +#include "cudaq/Optimizer/CodeGen/QuakeToExecMgr.h" +#include "cudaq/Optimizer/Dialect/CC/CCDialect.h" +#include "cudaq/Optimizer/Dialect/CC/CCOps.h" +#include "cudaq/Optimizer/Dialect/Quake/QuakeDialect.h" +#include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" +#include "nlohmann/json.hpp" +#include "llvm/Support/Debug.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" +#include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Pass/PassOptions.h" +#include "mlir/Transforms/DialectConversion.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +#define DEBUG_TYPE "convert-to-qir-api" + +namespace cudaq::opt { +#define GEN_PASS_DEF_QUAKETOQIRAPI +#define GEN_PASS_DEF_QUAKETOQIRAPIPREP +#define GEN_PASS_DEF_QUAKETOQIRAPIFINAL +#include "cudaq/Optimizer/CodeGen/Passes.h.inc" +} // namespace cudaq::opt + +using namespace mlir; + +//===----------------------------------------------------------------------===// + +static std::string getGateName(Operation *op) { + return op->getName().stripDialect().str(); +} + +static std::string getGateFunctionPrefix(Operation *op) { + return cudaq::opt::QIRQISPrefix + getGateName(op); +} + +constexpr std::array filterAdjointNames = {"s", "t"}; + +template +std::pair generateGateFunctionName(OP op) { + auto prefix = getGateFunctionPrefix(op.getOperation()); + auto gateName = getGateName(op.getOperation()); + if (op.isAdj()) { + if (std::find(filterAdjointNames.begin(), filterAdjointNames.end(), + gateName) != filterAdjointNames.end()) + prefix += "dg"; + } + if (!op.getControls().empty()) + return {prefix + "__ctl", false}; + return 
{prefix, true};
+}
+
+static Value createGlobalCString(Operation *op, Location loc,
+                                 ConversionPatternRewriter &rewriter,
+                                 StringRef regName) {
+  cudaq::IRBuilder irb(rewriter.getContext());
+  auto mod = op->getParentOfType<ModuleOp>();
+  auto nameObj = irb.genCStringLiteralAppendNul(loc, mod, regName);
+  Value nameVal = rewriter.create<cudaq::cc::AddressOfOp>(
+      loc, cudaq::cc::PointerType::get(nameObj.getType()), nameObj.getName());
+  auto cstrTy = cudaq::cc::PointerType::get(rewriter.getI8Type());
+  return rewriter.create<cudaq::cc::CastOp>(loc, cstrTy, nameVal);
+}
+
+/// Use modifier classes to specialize the QIR API to a particular flavor of
+/// QIR. For example, the names of the actual functions in "full QIR" are
+/// different from the names used by the other API flavors.
+namespace {
+
+//===----------------------------------------------------------------------===//
+// Type converter
+//===----------------------------------------------------------------------===//
+
+/// Type converter for converting quake dialect to one of the QIR APIs. This
+/// class is used for conversions as well as instantiating QIR types in
+/// conversion patterns.
+struct QIRAPITypeConverter : public TypeConverter {
+  using TypeConverter::convertType;
+
+  QIRAPITypeConverter(bool useOpaque) : useOpaque(useOpaque) {
+    addConversion([&](Type ty) { return ty; });
+    addConversion([&](FunctionType ft) { return convertFunctionType(ft); });
+    addConversion([&](cudaq::cc::PointerType ty) {
+      return cudaq::cc::PointerType::get(convertType(ty.getElementType()));
+    });
+    addConversion([&](cudaq::cc::CallableType ty) {
+      auto newSig = cast<FunctionType>(convertType(ty.getSignature()));
+      return cudaq::cc::CallableType::get(newSig);
+    });
+    addConversion([&](cudaq::cc::IndirectCallableType ty) {
+      auto newSig = cast<FunctionType>(convertType(ty.getSignature()));
+      return cudaq::cc::IndirectCallableType::get(newSig);
+    });
+    addConversion(
+        [&](quake::VeqType ty) { return getArrayType(ty.getContext()); });
+    addConversion(
+        [&](quake::RefType ty) { return getQubitType(ty.getContext()); });
+    addConversion(
+        [&](quake::WireType ty) { return getQubitType(ty.getContext()); });
+    addConversion(
+        [&](quake::ControlType ty) { return getQubitType(ty.getContext()); });
+    addConversion(
+        [&](quake::MeasureType ty) { return getResultType(ty.getContext()); });
+    addConversion([&](quake::StruqType ty) { return convertStruqType(ty); });
+  }
+
+  Type convertFunctionType(FunctionType ty) {
+    SmallVector<Type> args;
+    if (failed(convertTypes(ty.getInputs(), args)))
+      return {};
+    SmallVector<Type> res;
+    if (failed(convertTypes(ty.getResults(), res)))
+      return {};
+    return FunctionType::get(ty.getContext(), args, res);
+  }
+
+  Type convertStruqType(quake::StruqType ty) {
+    SmallVector<Type> mems;
+    mems.reserve(ty.getNumMembers());
+    if (failed(convertTypes(ty.getMembers(), mems)))
+      return {};
+    return cudaq::cc::StructType::get(ty.getContext(), mems);
+  }
+
+  Type getQubitType(MLIRContext *ctx) {
+    return cudaq::cg::getQubitType(ctx, useOpaque);
+  }
+  Type getArrayType(MLIRContext *ctx) {
+    return cudaq::cg::getArrayType(ctx, useOpaque);
+  }
+  Type getResultType(MLIRContext *ctx) {
+    return cudaq::cg::getResultType(ctx, useOpaque);
+  }
+
+  bool useOpaque;
+};
+} // namespace
+
+namespace {
+
+//===----------------------------------------------------------------------===//
+// Conversion patterns
+//===----------------------------------------------------------------------===//
+
+template <typename M>
+struct AllocaOpToCallsRewrite : public OpConversionPattern<quake::AllocaOp> {
+  using OpConversionPattern::OpConversionPattern;
+
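+  // Lower quake.alloca to QIR runtime calls: a single qubit (quake.ref)
+  // becomes a call to __quantum__rt__qubit_allocate, while a veq becomes a
+  // call to __quantum__rt__qubit_allocate_array with a 64-bit size operand.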
+  LogicalResult
+  matchAndRewrite(quake::AllocaOp alloc, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    // If this alloc returns a single qubit, lower it to a call that
+    // allocates one qubit.
+    if (auto resultType =
+            dyn_cast_if_present<quake::RefType>(alloc.getType())) {
+      StringRef qirQubitAllocate = cudaq::opt::QIRQubitAllocate;
+      Type qubitTy = M::getQubitType(rewriter.getContext());
+
+      rewriter.replaceOpWithNewOp<func::CallOp>(alloc, TypeRange{qubitTy},
+                                                qirQubitAllocate, ValueRange{});
+      return success();
+    }
+
+    // Create a QIR call to allocate the qubits.
+    StringRef qirQubitArrayAllocate = cudaq::opt::QIRArrayQubitAllocateArray;
+    Type arrayQubitTy = M::getArrayType(rewriter.getContext());
+
+    // AllocaOp could have a size operand, or the size could be compile-time
+    // known and encoded in the veq return type.
+    Value sizeOperand;
+    auto loc = alloc.getLoc();
+    if (adaptor.getOperands().empty()) {
+      auto type = alloc.getType().cast<quake::VeqType>();
+      if (!type.hasSpecifiedSize())
+        return failure();
+      auto constantSize = type.getSize();
+      sizeOperand =
+          rewriter.create<arith::ConstantIntOp>(loc, constantSize, 64);
+    } else {
+      sizeOperand = adaptor.getOperands().front();
+      auto sizeOpTy = cast<IntegerType>(sizeOperand.getType());
+      if (sizeOpTy.getWidth() < 64)
+        sizeOperand = rewriter.create<cudaq::cc::CastOp>(
+            loc, rewriter.getI64Type(), sizeOperand,
+            cudaq::cc::CastOpMode::Unsigned);
+      else if (sizeOpTy.getWidth() > 64)
+        sizeOperand = rewriter.create<cudaq::cc::CastOp>(
+            loc, rewriter.getI64Type(), sizeOperand);
+    }
+
+    // Replace the AllocaOp with the QIR call.
+    rewriter.replaceOpWithNewOp<func::CallOp>(alloc, TypeRange{arrayQubitTy},
+                                              qirQubitArrayAllocate,
+                                              ValueRange{sizeOperand});
+    return success();
+  }
+};
+
+template <typename M>
+struct AllocaOpToIntRewrite : public OpConversionPattern<quake::AllocaOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  // Precondition: every allocation must have been annotated with a starting
+  // index by the preparation phase.
+  LogicalResult
+  matchAndRewrite(quake::AllocaOp alloc, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    if (!alloc->hasAttr(cudaq::opt::StartingOffsetAttrName))
+      return alloc.emitOpError("allocation must be annotated.");
+
+    auto loc = alloc.getLoc();
+    Type ty = alloc.getType();
+    if (!ty)
+      return alloc.emitOpError("quake alloca is malformed");
+    auto startingOffsetAttr =
+        alloc->getAttr(cudaq::opt::StartingOffsetAttrName);
+    auto startingOffset = cast<IntegerAttr>(startingOffsetAttr).getInt();
+
+    // In the case this is allocating a single qubit, we can just substitute
+    // the startingOffset as the qubit value. Voila!
+    if (auto resultType = dyn_cast<quake::RefType>(ty)) {
+      Value index =
+          rewriter.create<arith::ConstantIntOp>(loc, startingOffset, 64);
+      auto qubitTy = M::getQubitType(rewriter.getContext());
+      rewriter.replaceOpWithNewOp<cudaq::cc::CastOp>(alloc, qubitTy, index);
+      return success();
+    }
+
+    auto veqTy = dyn_cast<quake::VeqType>(ty);
+    if (!veqTy)
+      return alloc.emitOpError("quake alloca must be a veq");
+    if (!veqTy.hasSpecifiedSize())
+      return alloc.emitOpError("quake alloca must be a veq with constant size");
+
+    // Otherwise, the allocation is of a sequence of qubits. Here, we allocate
+    // a constant array value with the qubit integral values in an ascending
+    // sequence. These will be accessed by extract_value or used collectively.
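+    // For example (illustrative values): a quake.alloca of !quake.veq<3>
+    // annotated with StartingOffset = 2 becomes the constant array [2, 3, 4].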
+    auto *ctx = rewriter.getContext();
+    const std::int64_t veqSize = veqTy.getSize();
+    auto arrTy = cudaq::cc::ArrayType::get(ctx, rewriter.getI64Type(), veqSize);
+    SmallVector<std::int64_t> data;
+    for (std::int64_t i = 0; i < veqSize; ++i)
+      data.emplace_back(startingOffset + i);
+    auto arr = rewriter.create<cudaq::cc::ConstantArrayOp>(
+        loc, arrTy, rewriter.getI64ArrayAttr(data));
+    Type qirArrTy = M::getArrayType(rewriter.getContext());
+    rewriter.replaceOpWithNewOp<cudaq::codegen::MaterializeConstantArrayOp>(
+        alloc, qirArrTy, arr);
+    return success();
+  }
+};
+
+struct MaterializeConstantArrayOpRewrite
+    : public OpConversionPattern<cudaq::codegen::MaterializeConstantArrayOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  // Rewrite this operation into a stack allocation and a store of the array
+  // value to that stack slot.
+  // TODO: it is more efficient to use a global constant, which is done by the
+  // pass `globalize-array-values`.
+  LogicalResult
+  matchAndRewrite(cudaq::codegen::MaterializeConstantArrayOp mca,
+                  OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto loc = mca.getLoc();
+    auto arr = adaptor.getConstArray();
+    auto veqSize = cast<cudaq::cc::ArrayType>(arr.getType()).getSize();
+    Value stackObj = cudaq::opt::factory::createTemporary(
+        loc, rewriter, rewriter.getI64Type(), veqSize);
+    rewriter.create<cudaq::cc::StoreOp>(loc, arr, stackObj);
+    auto ty = mca.getType();
+    rewriter.replaceOpWithNewOp<cudaq::cc::CastOp>(mca, ty, stackObj);
+    return success();
+  }
+};
+
+template <typename OP, typename M>
+struct QubitHelperConversionPattern : public OpConversionPattern<OP> {
+  using Base = OpConversionPattern<OP>;
+  using Base::Base;
+
+  Value wrapQubitAsArray(Location loc, ConversionPatternRewriter &rewriter,
+                         Value val) const {
+    Type qubitTy = M::getQubitType(rewriter.getContext());
+    if (val.getType() != qubitTy)
+      return val;
+
+    // Create a QIR array container of 1 element.
+    auto ptrTy = cudaq::cc::PointerType::get(rewriter.getNoneType());
+    Value sizeofPtrVal =
+        rewriter.create<cudaq::cc::SizeOfOp>(loc, rewriter.getI32Type(), ptrTy);
+    Value one = rewriter.create<arith::ConstantIntOp>(loc, 1, 64);
+    Type arrayTy = M::getArrayType(rewriter.getContext());
+    auto newArr = rewriter.create<func::CallOp>(
+        loc, TypeRange{arrayTy}, cudaq::opt::QIRArrayCreateArray,
+        ArrayRef<Value>{sizeofPtrVal, one});
+    Value result = newArr.getResult(0);
+
+    // Get a pointer to element 0.
+    Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 64);
+    auto ptrQubitTy = cudaq::cc::PointerType::get(qubitTy);
+    auto elePtr = rewriter.create<func::CallOp>(
+        loc, TypeRange{ptrQubitTy}, cudaq::opt::QIRArrayGetElementPtr1d,
+        ArrayRef<Value>{result, zero});
+
+    // Write the qubit into the array at position 0.
+    auto castVal = rewriter.create<cudaq::cc::CastOp>(loc, qubitTy, val);
+    Value addr = elePtr.getResult(0);
+    rewriter.create<cudaq::cc::StoreOp>(loc, castVal, addr);
+
+    return result;
+  }
+};
+
+template <typename M>
+struct ConcatOpRewrite
+    : public QubitHelperConversionPattern<quake::ConcatOp, M> {
+  using Base = QubitHelperConversionPattern<quake::ConcatOp, M>;
+  using Base::Base;
+
+  // For this rewrite, we walk the list of operands (if any) and for each
+  // operand, $o$, we ensure $o$ is already of type QIR array or convert $o$ to
+  // the array type using QIR functions. Then, we walk the list and pairwise
+  // concatenate each operand. First, take $c$ to be $o_0$ and then update $c$
+  // to be the concat of the previous $c$ and $o_i \forall i \in \{ 1..N \}$.
+  // This algorithm generates a number of concat calls that is linear in the
+  // number of operands.
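+  // For example (illustrative shape): concat(%a, %b, %c) lowers to
+  //   %t0 = call @__quantum__rt__array_concatenate(wrap(%a), wrap(%b))
+  //   %t1 = call @__quantum__rt__array_concatenate(%t0, wrap(%c))
+  // where wrap(%x) is the array-of-one packing done by wrapQubitAsArray.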
+ LogicalResult + matchAndRewrite(quake::ConcatOp concat, Base::OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + if (adaptor.getOperands().empty()) { + rewriter.eraseOp(concat); + return success(); + } + + auto loc = concat.getLoc(); + Type arrayTy = M::getArrayType(rewriter.getContext()); + Value firstOperand = adaptor.getOperands().front(); + Value resultArray = Base::wrapQubitAsArray(loc, rewriter, firstOperand); + for (auto next : adaptor.getOperands().drop_front()) { + Value wrapNext = Base::wrapQubitAsArray(loc, rewriter, next); + auto appended = rewriter.create( + loc, arrayTy, cudaq::opt::QIRArrayConcatArray, + ArrayRef{resultArray, wrapNext}); + resultArray = appended.getResult(0); + } + rewriter.replaceOp(concat, resultArray); + return success(); + } +}; + +struct DeallocOpRewrite : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::DeallocOp dealloc, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto ty = dealloc.getReference().getType(); + StringRef qirFuncName = isa(ty) + ? cudaq::opt::QIRArrayQubitReleaseArray + : cudaq::opt::QIRArrayQubitReleaseQubit; + rewriter.replaceOpWithNewOp(dealloc, TypeRange{}, qirFuncName, + adaptor.getReference()); + return success(); + } +}; + +struct DeallocOpErase : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::DeallocOp dealloc, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.eraseOp(dealloc); + return success(); + } +}; + +struct DiscriminateOpRewrite + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::DiscriminateOp disc, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto loc = disc.getLoc(); + Value m = adaptor.getMeasurement(); + auto i1PtrTy = cudaq::cc::PointerType::get(rewriter.getI1Type()); + auto cast = rewriter.create(loc, i1PtrTy, m); + rewriter.replaceOpWithNewOp(disc, cast); + return success(); + } +}; + +template +struct DiscriminateOpToCallRewrite + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::DiscriminateOp disc, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + if constexpr (M::discriminateToClassical) { + rewriter.replaceOpWithNewOp(disc, rewriter.getI1Type(), + cudaq::opt::QIRReadResultBody, + adaptor.getOperands()); + } else { + rewriter.replaceOpWithNewOp(disc, + rewriter.getI1Type()); + } + return success(); + } +}; + +template +struct ExtractRefOpRewrite : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + // There are two cases depending on which flavor of QIR is being generated. + // For full QIR, we need to generate calls to QIR functions to select the + // qubit from a QIR array. + // For the profile QIRs, we replace this with a `cc.extract_value` operation, + // which will be canonicalized into a constant. 
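+  // Sketch (illustrative) of the two lowered forms:
+  //   full QIR:     %p = call @__quantum__rt__array_get_element_ptr_1d(%v, %i)
+  //                 %q = load %p
+  //   profile QIR:  %x = cc.extract_value from the materialized constant
+  //                 array, followed by a cast to the qubit type.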
+  LogicalResult
+  matchAndRewrite(quake::ExtractRefOp extract, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto loc = extract.getLoc();
+    auto veq = adaptor.getVeq();
+    auto i64Ty = rewriter.getI64Type();
+
+    Value index;
+    if (!adaptor.getIndex()) {
+      index = rewriter.create<arith::ConstantIntOp>(
+          loc, extract.getConstantIndex(), 64);
+    } else {
+      index = adaptor.getIndex();
+      if (index.getType().isIntOrFloat()) {
+        if (cast<IntegerType>(index.getType()).getWidth() < 64)
+          index = rewriter.create<cudaq::cc::CastOp>(
+              loc, i64Ty, index, cudaq::cc::CastOpMode::Unsigned);
+        else if (cast<IntegerType>(index.getType()).getWidth() > 64)
+          index = rewriter.create<cudaq::cc::CastOp>(loc, i64Ty, index);
+      }
+    }
+    auto qubitTy = M::getQubitType(rewriter.getContext());
+
+    if (auto mca =
+            veq.getDefiningOp<cudaq::codegen::MaterializeConstantArrayOp>()) {
+      // This is the profile QIR case.
+      auto ext = rewriter.create<cudaq::cc::ExtractValueOp>(
+          loc, i64Ty, mca.getConstArray(), index);
+      rewriter.replaceOpWithNewOp<cudaq::cc::CastOp>(extract, qubitTy, ext);
+      return success();
+    }
+
+    // Otherwise, this must be full QIR.
+    auto call = rewriter.create<func::CallOp>(
+        loc, cudaq::cc::PointerType::get(qubitTy),
+        cudaq::opt::QIRArrayGetElementPtr1d, ArrayRef<Value>{veq, index});
+    rewriter.replaceOpWithNewOp<cudaq::cc::LoadOp>(extract, call.getResult(0));
+    return success();
+  }
+};
+
+struct GetMemberOpRewrite : public OpConversionPattern<quake::GetMemberOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(quake::GetMemberOp member, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto toTy = getTypeConverter()->convertType(member.getType());
+    std::int32_t position = adaptor.getIndex();
+    rewriter.replaceOpWithNewOp<cudaq::cc::ExtractValueOp>(
+        member, toTy, adaptor.getStruq(), position);
+    return success();
+  }
+};
+
+struct VeqSizeOpRewrite : public OpConversionPattern<quake::VeqSizeOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(quake::VeqSizeOp veqsize, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    rewriter.replaceOpWithNewOp<func::CallOp>(
+        veqsize, TypeRange{veqsize.getType()}, cudaq::opt::QIRArrayGetSize,
+        adaptor.getOperands());
+    return success();
+  }
+};
+
+struct MakeStruqOpRewrite : public OpConversionPattern<quake::MakeStruqOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(quake::MakeStruqOp mkstruq, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto loc = mkstruq.getLoc();
+    auto *ctx = rewriter.getContext();
+    auto toTy = getTypeConverter()->convertType(mkstruq.getType());
+    Value result = rewriter.create<cudaq::cc::UndefOp>(loc, toTy);
+    std::int64_t count = 0;
+    for (auto op : adaptor.getOperands()) {
+      auto off = DenseI64ArrayAttr::get(ctx, ArrayRef<std::int64_t>{count});
+      result =
+          rewriter.create<cudaq::cc::InsertValueOp>(loc, toTy, result, op, off);
+      count++;
+    }
+    rewriter.replaceOp(mkstruq, result);
+    return success();
+  }
+};
+
+template <typename M>
+struct QmemRAIIOpRewrite
+    : public OpConversionPattern<cudaq::codegen::RAIIOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(cudaq::codegen::RAIIOp raii, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto loc = raii.getLoc();
+    auto arrayTy = M::getArrayType(rewriter.getContext());
+
+    // Get the CC pointer for the state.
+    auto ccState = adaptor.getInitState();
+
+    // Inspect the element type of the complex data; we need to know whether
+    // it is f32 or f64.
+    Type eleTy = raii.getInitElementType();
+    if (auto elePtrTy = dyn_cast<cudaq::cc::PointerType>(eleTy))
+      eleTy = elePtrTy.getElementType();
+    if (auto arrayTy = dyn_cast<cudaq::cc::ArrayType>(eleTy))
+      eleTy = arrayTy.getElementType();
+    bool fromComplex = false;
+    if (auto complexTy = dyn_cast<ComplexType>(eleTy)) {
fromComplex = true; + eleTy = complexTy.getElementType(); + } + + // Cascade to set functionName. + StringRef functionName; + Type ptrTy; + if (isa(eleTy)) { + functionName = cudaq::opt::QIRArrayQubitAllocateArrayWithCudaqStatePtr; + ptrTy = cudaq::cc::PointerType::get( + cudaq::cc::StateType::get(rewriter.getContext())); + } else if (eleTy == rewriter.getF64Type()) { + if (fromComplex) { + functionName = cudaq::opt::QIRArrayQubitAllocateArrayWithStateComplex64; + ptrTy = cudaq::cc::PointerType::get( + ComplexType::get(rewriter.getF64Type())); + } else { + functionName = cudaq::opt::QIRArrayQubitAllocateArrayWithStateFP64; + ptrTy = cudaq::cc::PointerType::get(rewriter.getF64Type()); + } + } else if (eleTy == rewriter.getF32Type()) { + if (fromComplex) { + functionName = cudaq::opt::QIRArrayQubitAllocateArrayWithStateComplex32; + ptrTy = cudaq::cc::PointerType::get( + ComplexType::get(rewriter.getF32Type())); + } else { + functionName = cudaq::opt::QIRArrayQubitAllocateArrayWithStateFP32; + ptrTy = cudaq::cc::PointerType::get(rewriter.getF32Type()); + } + } + + if (functionName.empty()) + return raii.emitOpError("initialize state has an invalid element type."); + assert(ptrTy && "argument pointer type must be set"); + + // Get the size of the qubit register + Type allocTy = adaptor.getAllocType(); + auto i64Ty = rewriter.getI64Type(); + + Value sizeOperand; + if (!adaptor.getAllocSize()) { + auto type = cast(allocTy); + auto constantSize = type.getSize(); + sizeOperand = + rewriter.create(loc, constantSize, 64); + } else { + sizeOperand = adaptor.getAllocSize(); + auto sizeTy = cast(sizeOperand.getType()); + if (sizeTy.getWidth() < 64) + sizeOperand = rewriter.create( + loc, i64Ty, sizeOperand, cudaq::cc::CastOpMode::Unsigned); + else if (sizeTy.getWidth() > 64) + sizeOperand = + rewriter.create(loc, i64Ty, sizeOperand); + } + + // Call the allocation function + Value casted = rewriter.create(loc, ptrTy, ccState); + rewriter.replaceOpWithNewOp( + raii, arrayTy, functionName, ArrayRef{sizeOperand, casted}); + return success(); + } +}; + +struct RelaxSizeOpErase : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::RelaxSizeOp relax, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOp(relax, relax.getInputVec()); + return success(); + } +}; + +template +struct SubveqOpRewrite : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::SubVeqOp subveq, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto loc = subveq.getLoc(); + + auto lowArg = [&]() -> Value { + if (!adaptor.getLower()) + return rewriter.create(loc, adaptor.getRawLower(), + 64); + return adaptor.getLower(); + }(); + auto highArg = [&]() -> Value { + if (!adaptor.getUpper()) + return rewriter.create(loc, adaptor.getRawUpper(), + 64); + return adaptor.getUpper(); + }(); + auto i64Ty = rewriter.getI64Type(); + auto extend = [&](Value &v) -> Value { + if (auto intTy = dyn_cast(v.getType())) { + if (intTy.getWidth() < 64) + return rewriter.create( + loc, i64Ty, v, cudaq::cc::CastOpMode::Unsigned); + if (intTy.getWidth() > 64) + return rewriter.create(loc, i64Ty, v); + } + return v; + }; + lowArg = extend(lowArg); + highArg = extend(highArg); + Value inArr = adaptor.getVeq(); + auto i32Ty = rewriter.getI32Type(); + Value one32 = rewriter.create(loc, 1, i32Ty); + Value one64 = rewriter.create(loc, 1, i64Ty); + auto arrayTy = 
M::getArrayType(rewriter.getContext());
+    rewriter.replaceOpWithNewOp<func::CallOp>(
+        subveq, arrayTy, cudaq::opt::QIRArraySlice,
+        ArrayRef<Value>{inArr, one32, lowArg, one64, highArg});
+    return success();
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Custom handling of irregular quantum gates.
+//===----------------------------------------------------------------------===//
+
+template <typename M>
+struct CustomUnitaryOpPattern
+    : public QubitHelperConversionPattern<M, quake::CustomUnitarySymbolOp> {
+  using Base = QubitHelperConversionPattern<M, quake::CustomUnitarySymbolOp>;
+  using Base::Base;
+
+  LogicalResult
+  matchAndRewrite(quake::CustomUnitarySymbolOp unitary,
+                  typename Base::OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    if (!unitary.getParameters().empty())
+      return unitary.emitOpError(
+          "Parameterized custom operations not yet supported.");
+
+    auto loc = unitary.getLoc();
+    auto arrayTy = M::getArrayType(rewriter.getContext());
+
+    if (adaptor.getTargets().empty())
+      return unitary.emitOpError("Custom operations must have targets.");
+
+    // Concat all the targets into an array.
+    auto targetArray =
+        Base::wrapQubitAsArray(loc, rewriter, adaptor.getTargets().front());
+    for (auto next : adaptor.getTargets().drop_front()) {
+      auto wrapNext = Base::wrapQubitAsArray(loc, rewriter, next);
+      auto result = rewriter.create<func::CallOp>(
+          loc, arrayTy, cudaq::opt::QIRArrayConcatArray,
+          ArrayRef<Value>{targetArray, wrapNext});
+      targetArray = result.getResult(0);
+    }
+
+    // Concat all the controls (if any) into an array.
+    Value controlArray;
+    if (adaptor.getControls().empty()) {
+      // Use a nullptr for when 0 control qubits are present.
+      Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 64);
+      controlArray = rewriter.create<cudaq::cc::CastOp>(loc, arrayTy, zero);
+    } else {
+      controlArray =
+          Base::wrapQubitAsArray(loc, rewriter, adaptor.getControls().front());
+      for (auto next : adaptor.getControls().drop_front()) {
+        auto wrapNext = Base::wrapQubitAsArray(loc, rewriter, next);
+        auto result = rewriter.create<func::CallOp>(
+            loc, arrayTy, cudaq::opt::QIRArrayConcatArray,
+            ArrayRef<Value>{controlArray, wrapNext});
+        controlArray = result.getResult(0);
+      }
+    }
+
+    // Fetch the unitary matrix generator for this custom operation.
+    auto generatorSym = unitary.getGenerator();
+    StringRef generatorName = generatorSym.getRootReference();
+    const auto customOpName = extractCustomNamePart(generatorName);
+
+    // Create a global string for the unitary name.
+    auto nameOp = createGlobalCString(unitary, loc, rewriter, customOpName);
+
+    auto complex64Ty = ComplexType::get(rewriter.getF64Type());
+    auto complex64PtrTy = cudaq::cc::PointerType::get(complex64Ty);
+    auto globalObj = cast<cudaq::cc::GlobalOp>(
+        unitary->getParentOfType<ModuleOp>().lookupSymbol(generatorName));
+    auto addrOp = rewriter.create<cudaq::cc::AddressOfOp>(
+        loc, globalObj.getType(), generatorName);
+    auto unitaryData =
+        rewriter.create<cudaq::cc::CastOp>(loc, complex64PtrTy, addrOp);
+
+    StringRef functionName =
+        unitary.isAdj() ?
cudaq::opt::QIRCustomAdjOp : cudaq::opt::QIRCustomOp; + + rewriter.replaceOpWithNewOp( + unitary, TypeRange{}, functionName, + ArrayRef{unitaryData, controlArray, targetArray, nameOp}); + + return success(); + } + + // IMPORTANT: this must match the logic to generate global data globalName = + // f'{nvqppPrefix}{opName}_generator_{numTargets}.rodata' + std::string extractCustomNamePart(StringRef generatorName) const { + auto globalName = generatorName.str(); + if (globalName.starts_with(cudaq::runtime::cudaqGenPrefixName)) { + globalName = globalName.substr(cudaq::runtime::cudaqGenPrefixLength); + const size_t pos = globalName.find("_generator"); + if (pos != std::string::npos) + return globalName.substr(0, pos); + } + return {}; + } +}; + +struct ExpPauliOpPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::ExpPauliOp pauli, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto loc = pauli.getLoc(); + SmallVector operands = adaptor.getOperands(); + + // First need to check the type of the Pauli word. We expect + // a pauli_word directly `{i8*,i64}` or a string literal + // `ptr`. If it is a string literal, we need to map it to + // a pauli word. + auto pauliWord = operands.back(); + auto i8PtrTy = cudaq::cc::PointerType::get(rewriter.getI8Type()); + if (auto ptrTy = dyn_cast(pauliWord.getType())) { + // Make sure we have the right types to extract the + // length of the string literal + auto arrayTy = dyn_cast(ptrTy.getElementType()); + if (!arrayTy) + return pauli.emitOpError( + "exp_pauli string literal must have ptr type."); + if (!arrayTy.getSize()) + return pauli.emitOpError("string literal may not be empty."); + + // We must create the {i8*, i64} struct from the string literal + SmallVector structTys{i8PtrTy, rewriter.getI64Type()}; + auto structTy = + cudaq::cc::StructType::get(rewriter.getContext(), structTys); + + // Allocate the char span struct + Value alloca = + cudaq::opt::factory::createTemporary(loc, rewriter, structTy); + + // Convert the number of elements to a constant op. + auto size = + rewriter.create(loc, arrayTy.getSize() - 1, 64); + + // Set the string literal data + auto castedPauli = + rewriter.create(loc, i8PtrTy, pauliWord); + auto strPtr = rewriter.create( + loc, cudaq::cc::PointerType::get(i8PtrTy), alloca, + ArrayRef{0, 0}); + rewriter.create(loc, castedPauli, strPtr); + + // Set the integer length + auto intPtr = rewriter.create( + loc, cudaq::cc::PointerType::get(rewriter.getI64Type()), alloca, + ArrayRef{0, 1}); + rewriter.create(loc, size, intPtr); + + // Cast to raw opaque pointer + auto castedStore = + rewriter.create(loc, i8PtrTy, alloca); + operands.back() = castedStore; + rewriter.replaceOpWithNewOp( + pauli, TypeRange{}, cudaq::opt::QIRExpPauli, operands); + return success(); + } + + // Here we know we have a pauli word expressed as `{i8*, i64}`. 
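+    // A rough sketch of what this branch emits (illustrative; the exact
+    // runtime symbol behind cudaq::opt::QIRExpPauli is whatever the
+    // intrinsics table declares):
+    //   %tmp = cc.alloca <pauli word type>
+    //   cc.store %word, %tmp
+    //   %raw = cc.cast %tmp -> !cc.ptr<i8>
+    //   call @<exp_pauli>(%angle, ..., %raw)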
+    // Allocate a stack slot for it and store what we have to that pointer,
+    // then pass the pointer to NVQIR.
+    auto newPauliWord = adaptor.getOperands().back();
+    auto newPauliWordTy = newPauliWord.getType();
+    Value alloca =
+        cudaq::opt::factory::createTemporary(loc, rewriter, newPauliWordTy);
+    auto castedVar = rewriter.create<cudaq::cc::CastOp>(
+        loc, cudaq::cc::PointerType::get(newPauliWordTy), alloca);
+    rewriter.create<cudaq::cc::StoreOp>(loc, newPauliWord, castedVar);
+    auto castedPauli = rewriter.create<cudaq::cc::CastOp>(loc, i8PtrTy, alloca);
+    operands.back() = castedPauli;
+    rewriter.replaceOpWithNewOp<func::CallOp>(
+        pauli, TypeRange{}, cudaq::opt::QIRExpPauli, operands);
+    return success();
+  }
+};
+
+template <typename M>
+struct MeasurementOpPattern : public OpConversionPattern<quake::MzOp> {
+  using OpConversionPattern::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(quake::MzOp mz, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto loc = mz.getLoc();
+    auto regNameAttr = dyn_cast<StringAttr>(mz.getRegisterNameAttr());
+    if (!regNameAttr)
+      return mz.emitOpError("mz operation must have a name.");
+    if (regNameAttr.getValue().empty())
+      return mz.emitOpError("mz name may not be an empty string.");
+    SmallVector<Value> args;
+    args.append(adaptor.getTargets().begin(), adaptor.getTargets().end());
+    auto functionName = M::getQIRMeasure();
+
+    // Are we using the measurement that returns a result?
+    if constexpr (M::mzReturnsResultType) {
+      // Yes, the measurement returns the result, so we can use a
+      // straightforward codegen pattern. Use either the mz or the
+      // mz_to_register call (with the name as an extra argument) and forward
+      // the result of the call as the result.
+
+      if (mz->getAttr(cudaq::opt::MzAssignedNameAttrName)) {
+        functionName = cudaq::opt::QIRMeasureToRegister;
+        auto cstringGlobal =
+            createGlobalCString(mz, loc, rewriter, regNameAttr.getValue());
+        args.push_back(cstringGlobal);
+      }
+      auto resultTy = M::getResultType(rewriter.getContext());
+      auto call = rewriter.replaceOpWithNewOp<func::CallOp>(mz, resultTy,
+                                                            functionName, args);
+      call->setAttr(cudaq::opt::QIRRegisterNameAttr, regNameAttr);
+    } else {
+      // No, the measurement doesn't return any result, so use a much more
+      // convoluted pattern.
+      // 1. Cast an integer to a result token and append it to the mz call.
+      //    The token identifies the result; its value will have been attached
+      //    to the MzOp in preprocessing.
+      // 2. Call the mz function.
+      // 3. Call result_record_output to bind the name, which is not folded
+      //    into the mz call. There is always a name in this case.
+
+      auto resultAttr = mz->getAttr(cudaq::opt::ResultIndexAttrName);
+      std::int64_t annInt = cast<IntegerAttr>(resultAttr).getInt();
+      Value intVal = rewriter.create<arith::ConstantIntOp>(loc, annInt, 64);
+      auto resultTy = M::getResultType(rewriter.getContext());
+      Value res = rewriter.create<cudaq::cc::CastOp>(loc, resultTy, intVal);
+      args.push_back(res);
+      auto call =
+          rewriter.create<func::CallOp>(loc, TypeRange{}, functionName, args);
+      call->setAttr(cudaq::opt::QIRRegisterNameAttr, regNameAttr);
+      auto cstringGlobal =
+          createGlobalCString(mz, loc, rewriter, regNameAttr.getValue());
+      if constexpr (!M::discriminateToClassical) {
+        // These QIR profile variants force all record output calls to appear
+        // at the end. In these variants, control flow isn't allowed in the
+        // final LLVM. Therefore, a single basic block is assumed here but not
+        // checked, since the verifier will raise an error if that assumption
+        // is violated.
+ rewriter.setInsertionPoint(rewriter.getBlock()->getTerminator()); + } + auto recOut = rewriter.create( + loc, TypeRange{}, cudaq::opt::QIRRecordOutput, + ArrayRef{res, cstringGlobal}); + recOut->setAttr(cudaq::opt::ResultIndexAttrName, resultAttr); + recOut->setAttr(cudaq::opt::QIRRegisterNameAttr, regNameAttr); + rewriter.replaceOp(mz, res); + } + return success(); + } +}; + +template +struct ResetOpPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(quake::ResetOp reset, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // Get the reset QIR function name + auto qirFunctionName = M::getQIRReset(); + + // Replace the quake op with the new call op. + rewriter.replaceOpWithNewOp( + reset, TypeRange{}, qirFunctionName, adaptor.getOperands()); + return success(); + } +}; + +struct AnnotateKernelsWithMeasurementStringsPattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(func::FuncOp func, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + constexpr const char PassthroughAttr[] = "passthrough"; + if (!func->hasAttr(cudaq::kernelAttrName)) + return failure(); + if (!func->hasAttr(PassthroughAttr)) + return failure(); + auto passthru = cast(func->getAttr(PassthroughAttr)); + for (auto a : passthru) { + if (auto strArrAttr = dyn_cast(a)) { + auto strAttr = dyn_cast(strArrAttr[0]); + if (!strAttr) + continue; + if (strAttr.getValue() == cudaq::opt::QIROutputNamesAttrName) + return failure(); + } + } + + // Lambda to help recover an integer value (the QIR qubit or result as an + // integer). + auto recoverIntValue = [&](Value v) -> std::optional { + auto cast = v.getDefiningOp(); + if (!cast) + return {}; + return cudaq::opt::factory::maybeValueOfIntConstant(cast.getValue()); + }; + + // If we're here, then `func` is a kernel, it has a passthrough attribute, + // and the passthrough attribute does *not* have an output names entry. + // + // OUTPUT-NAME-MAP: At this point, we will try to heroically generate the + // output names attribute for the QIR consumer. The content of the attribute + // is a map from results back to pairs of qubits and names. The map is + // encoded in a JSON string. The map is appended to the passthrough + // attribute array. + + std::map measMap; + std::map> nameMap; + func.walk([&](func::CallOp call) { + auto calleeName = call.getCallee(); + if (calleeName == cudaq::opt::QIRMeasureBody) { + auto qubit = recoverIntValue(call.getOperand(0)); + auto meas = recoverIntValue(call.getOperand(1)); + if (qubit && meas) + measMap[*meas] = *qubit; + } else if (calleeName == cudaq::opt::QIRRecordOutput) { + auto resAttr = call->getAttr(cudaq::opt::ResultIndexAttrName); + std::size_t res = cast(resAttr).getInt(); + auto regNameAttr = call->getAttr(cudaq::opt::QIRRegisterNameAttr); + std::string regName = cast(regNameAttr).getValue().str(); + if (measMap.count(res)) { + std::size_t qubit = measMap[res]; + nameMap[res] = std::pair{qubit, regName}; + } + } + }); + + // If there were no measurements, then nothing to see here. + if (nameMap.empty()) + return failure(); + + // Append the name map. Use a `const T&` to introduce another layer of + // brackets here to maintain backwards compatibility. 
+    const auto &outputNameMapRef = nameMap;
+    nlohmann::json outputNames{outputNameMapRef};
+    std::string outputNamesStr = outputNames.dump();
+    SmallVector<Attribute> funcAttrs(passthru.begin(), passthru.end());
+    funcAttrs.push_back(
+        rewriter.getStrArrayAttr({cudaq::opt::QIROutputNamesAttrName,
+                                  rewriter.getStringAttr(outputNamesStr)}));
+    func->setAttr(PassthroughAttr, rewriter.getArrayAttr(funcAttrs));
+    return success();
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Generic handling of regular quantum gates.
+//===----------------------------------------------------------------------===//
+
+template <typename M, typename OP>
+struct QuantumGatePattern : public OpConversionPattern<OP> {
+  using Base = OpConversionPattern<OP>;
+  using Base::Base;
+
+  LogicalResult
+  matchAndRewrite(OP op, typename Base::OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto forwardOrEraseOp = [&]() {
+      if (op.getResults().empty())
+        rewriter.eraseOp(op);
+      else
+        rewriter.replaceOp(op, adaptor.getTargets());
+      return success();
+    };
+    auto qirFunctionName = M::quakeToFuncName(op);
+
+    // Make sure that the apply-control-negations pass was run.
+    if (adaptor.getNegatedQubitControls())
+      return op.emitOpError("negated control qubits not allowed.");
+
+    // Prepare any floating-point parameters.
+    auto loc = op.getLoc();
+    SmallVector<Value> opParams = adaptor.getParameters();
+    if (!opParams.empty()) {
+      // If this is adjoint, each parameter is negated.
+      if (op.getIsAdj())
+        for (std::size_t i = 0; i < opParams.size(); ++i)
+          opParams[i] = rewriter.create<arith::NegFOp>(loc, opParams[i]);
+
+      // Each parameter must be converted to double-precision.
+      auto f64Ty = rewriter.getF64Type();
+      for (std::size_t i = 0; i < opParams.size(); ++i) {
+        if (opParams[i].getType().getIntOrFloatBitWidth() < 64)
+          opParams[i] = rewriter.create<arith::ExtFOp>(loc, f64Ty, opParams[i]);
+        else if (opParams[i].getType().getIntOrFloatBitWidth() > 64)
+          opParams[i] =
+              rewriter.create<arith::TruncFOp>(loc, f64Ty, opParams[i]);
+      }
+    }
+
+    // If no control qubits or if there is 1 control and it is already a veq,
+    // just add a call and forward the target qubits as needed.
+    auto numControls = adaptor.getControls().size();
+    if (op.getControls().empty() ||
+        conformsToIntendedCall(numControls, op.getControls().front(), op,
+                               qirFunctionName)) {
+      SmallVector<Value> args{opParams.begin(), opParams.end()};
+      args.append(adaptor.getControls().begin(), adaptor.getControls().end());
+      args.append(adaptor.getTargets().begin(), adaptor.getTargets().end());
+      qirFunctionName =
+          specializeFunctionName(op, qirFunctionName, numControls);
+      rewriter.create<func::CallOp>(loc, TypeRange{}, qirFunctionName, args);
+      return forwardOrEraseOp();
+    }
+
+    // Otherwise, we'll use the generalized invoke helper function. This
+    // function takes 4 size_t values, which delimit the different argument
+    // types, a pointer to the function to be invoked, and varargs of all the
+    // arguments being used. This function's signature is tuned so as to reduce
+    // or eliminate the creation of auxiliary temporaries needed to make the
+    // call to the helper.
+    std::size_t numArrayCtrls = 0;
+    SmallVector<Value> opArrCtrls;
+    std::size_t numQubitCtrls = 0;
+    SmallVector<Value> opQubitCtrls;
+    Type i64Ty = rewriter.getI64Type();
+    auto ptrNoneTy = M::getLLVMPointerType(rewriter.getContext());
+
+    // Process the controls, sorting them by type.
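+    // For example, one rotation parameter, one veq control, one qubit
+    // control, and one target produce a call shaped like (a sketch; the
+    // actual callee is cudaq::opt::NVQIRGeneralizedInvokeAny):
+    //   call @<invoke-any>(/*#params=*/1, /*#arrayCtrls=*/1,
+    //                      /*#qubitCtrls=*/1, /*#targets=*/1, %fnPtr,
+    //                      %theta, %arrLen, %arr, %ctl, %tgt)
+    // where each array control contributes a (length, Array*) pair.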
+ for (auto pr : llvm::zip(op.getControls(), adaptor.getControls())) { + if (isa(std::get<0>(pr).getType())) { + numArrayCtrls++; + auto sizeCall = rewriter.create( + loc, i64Ty, cudaq::opt::QIRArrayGetSize, + ValueRange{std::get<1>(pr)}); + // Arrays are encoded as pairs of arguments: length and Array* + opArrCtrls.push_back(sizeCall.getResult(0)); + opArrCtrls.push_back(rewriter.create( + loc, ptrNoneTy, std::get<1>(pr))); + } else { + numQubitCtrls++; + // Qubits are simply the Qubit** + opQubitCtrls.emplace_back(rewriter.create( + loc, ptrNoneTy, std::get<1>(pr))); + } + } + + // Lookup and process the gate operation we're invoking. + auto module = op->template getParentOfType(); + auto symOp = module.lookupSymbol(qirFunctionName); + if (!symOp) + return op.emitError("cannot find QIR function"); + auto funOp = dyn_cast(symOp); + if (!funOp) + return op.emitError("cannot find " + qirFunctionName); + FunctionType qirFunctionTy = funOp.getFunctionType(); + auto funCon = + rewriter.create(loc, qirFunctionTy, qirFunctionName); + auto funPtr = + rewriter.create(loc, ptrNoneTy, funCon); + + // Process the target qubits. + auto numTargets = adaptor.getTargets().size(); + if (numTargets == 0) + return op.emitOpError("quake op must have at least 1 target."); + SmallVector opTargs; + for (auto t : adaptor.getTargets()) + opTargs.push_back(rewriter.create(loc, ptrNoneTy, t)); + + // Build the declared arguments for the helper call (5 total). + SmallVector args; + args.emplace_back( + rewriter.create(loc, opParams.size(), 64)); + args.emplace_back( + rewriter.create(loc, numArrayCtrls, 64)); + args.emplace_back( + rewriter.create(loc, numQubitCtrls, 64)); + args.emplace_back( + rewriter.create(loc, numTargets, 64)); + args.emplace_back(funPtr); + + // Finally, append the varargs to the end of the argument list. + args.append(opParams.begin(), opParams.end()); + args.append(opArrCtrls.begin(), opArrCtrls.end()); + args.append(opQubitCtrls.begin(), opQubitCtrls.end()); + args.append(opTargs.begin(), opTargs.end()); + + // Call the generalized version of the gate invocation. + rewriter.create(loc, TypeRange{}, + cudaq::opt::NVQIRGeneralizedInvokeAny, args); + return forwardOrEraseOp(); + } + + static bool conformsToIntendedCall(std::size_t numControls, Value ctrl, OP op, + StringRef qirFunctionName) { + if (numControls != 1) + return false; + auto ctrlTy = ctrl.getType(); + auto trivialName = specializeFunctionName(op, qirFunctionName, numControls); + const bool nameChanged = trivialName != qirFunctionName; + if (nameChanged && !isa(ctrlTy)) + return true; + return !nameChanged && isa(ctrlTy); + } + + static StringRef specializeFunctionName(OP op, StringRef funcName, + std::size_t numCtrls) { + // Last resort to change the names of particular functions from the general + // scheme to specialized names under the right conditions. + if constexpr (std::is_same_v && M::convertToCNot) { + if (numCtrls == 1) + return cudaq::opt::QIRCnot; + } + if constexpr (std::is_same_v && M::convertToCZ) { + if (numCtrls == 1) + return cudaq::opt::QIRCZ; + } + return funcName; + } +}; + +//===----------------------------------------------------------------------===// +// Handling of functions, calls, and classic memory ops on callables. 
+//===----------------------------------------------------------------------===// + +struct AllocaOpPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(cudaq::cc::AllocaOp alloc, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto eleTy = alloc.getElementType(); + auto newEleTy = getTypeConverter()->convertType(eleTy); + if (eleTy == newEleTy) + return failure(); + Value ss = alloc.getSeqSize(); + if (ss) + rewriter.replaceOpWithNewOp(alloc, newEleTy, ss); + else + rewriter.replaceOpWithNewOp(alloc, newEleTy); + return success(); + } +}; + +/// Convert the quake types in `func::FuncOp` signatures. +struct FuncSignaturePattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(func::FuncOp func, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto funcTy = func.getFunctionType(); + auto newFuncTy = + cast(getTypeConverter()->convertType(funcTy)); + if (funcTy == newFuncTy) + return failure(); + if (funcTy.getNumInputs() && !func.getBody().empty()) { + // Replace the block argument types. + for (auto [blockArg, argTy] : llvm::zip( + func.getBody().front().getArguments(), newFuncTy.getInputs())) + blockArg.setType(argTy); + } + // Replace the signature. + rewriter.updateRootInPlace(func, + [&]() { func.setFunctionType(newFuncTy); }); + return success(); + } +}; + +struct CreateLambdaPattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(cudaq::cc::CreateLambdaOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto sigTy = cast(op.getSignature().getType()); + auto newSigTy = + cast(getTypeConverter()->convertType(sigTy)); + if (sigTy == newSigTy) + return failure(); + if (sigTy.getSignature().getNumInputs() && !op.getInitRegion().empty()) { + // Replace the block argument types. + for (auto [blockArg, argTy] : + llvm::zip(op.getInitRegion().front().getArguments(), + newSigTy.getSignature().getInputs())) + blockArg.setType(argTy); + } + // Replace the signature. 
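+    // E.g., a lambda whose signature mentions (!quake.veq<2>) is rewritten
+    // so that both the block arguments and the callable signature use the
+    // converted QIR types (the exact pointer type depends on the
+    // opaque-pointer setting; illustrative).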
+ rewriter.updateRootInPlace(op, + [&]() { op.getSignature().setType(newSigTy); }); + return success(); + } +}; + +struct CallableFuncPattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(cudaq::cc::CallableFuncOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto funcTy = op.getFunction().getType(); + auto newFuncTy = + cast(getTypeConverter()->convertType(funcTy)); + rewriter.replaceOpWithNewOp(op, newFuncTy, + op.getCallable()); + return success(); + } +}; + +template +struct OpInterfacePattern : public OpConversionPattern { + using Base = OpConversionPattern; + using Base::Base; + using Base::getTypeConverter; + + LogicalResult + matchAndRewrite(OP op, typename Base::OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto newResultTy = getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, newResultTy, adaptor.getOperands(), + op->getAttrs()); + return success(); + } +}; + +using FuncConstantPattern = OpInterfacePattern; +using FuncToPtrPattern = OpInterfacePattern; +using LoadOpPattern = OpInterfacePattern; +using UndefOpPattern = OpInterfacePattern; +using PoisonOpPattern = OpInterfacePattern; +using CastOpPattern = OpInterfacePattern; + +struct InstantiateCallablePattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(cudaq::cc::InstantiateCallableOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto sigTy = cast(op.getSignature().getType()); + auto newSigTy = + cast(getTypeConverter()->convertType(sigTy)); + rewriter.replaceOpWithNewOp( + op, newSigTy, op.getCallee(), op.getClosureData(), + op.getNoCaptureAttr()); + return success(); + } +}; + +struct StoreOpPattern : public OpConversionPattern { + using Base = OpConversionPattern; + using Base::Base; + using Base::getTypeConverter; + + LogicalResult + matchAndRewrite(cudaq::cc::StoreOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, TypeRange{}, adaptor.getOperands(), op->getAttrs()); + return success(); + } +}; + +template +struct CallOpInterfacePattern : public OpConversionPattern { + using Base = OpConversionPattern; + using Base::Base; + using Base::getTypeConverter; + + LogicalResult + matchAndRewrite(CALLOP op, typename Base::OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + SmallVector newResultTys; + for (auto ty : op.getResultTypes()) + newResultTys.emplace_back(getTypeConverter()->convertType(ty)); + rewriter.replaceOpWithNewOp(op, newResultTys, adaptor.getOperands(), + op->getAttrs()); + return success(); + } +}; + +using CallOpPattern = CallOpInterfacePattern; +using CallIndirectOpPattern = CallOpInterfacePattern; +using CallCallableOpPattern = CallOpInterfacePattern; +using CallIndirectCallableOpPattern = + CallOpInterfacePattern; + +//===----------------------------------------------------------------------===// +// Patterns that are common to all QIR conversions. 
+//===----------------------------------------------------------------------===// + +static void commonClassicalHandlingPatterns(RewritePatternSet &patterns, + TypeConverter &typeConverter, + MLIRContext *ctx) { + patterns.insert(typeConverter, ctx); +} + +static void commonQuakeHandlingPatterns(RewritePatternSet &patterns, + TypeConverter &typeConverter, + MLIRContext *ctx) { + patterns.insert(typeConverter, ctx); +} + +//===----------------------------------------------------------------------===// +// Modifier classes +//===----------------------------------------------------------------------===// + +template +Type GetLLVMPointerType(MLIRContext *ctx) { + if constexpr (opaquePtr) { + return LLVM::LLVMPointerType::get(ctx); + } else { + return LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8)); + } +} + +/// The modifier class for the "full QIR" API. +template +struct FullQIR { + using Self = FullQIR; + + template + static std::string quakeToFuncName(QuakeOp op) { + auto [prefix, _] = generateGateFunctionName(op); + return prefix; + } + + static void populateRewritePatterns(RewritePatternSet &patterns, + TypeConverter &typeConverter) { + auto *ctx = patterns.getContext(); + patterns.insert< + /* Rewrites for qubit management and aggregation. */ + AllocaOpToCallsRewrite, ConcatOpRewrite, DeallocOpRewrite, + DiscriminateOpRewrite, ExtractRefOpRewrite, + QmemRAIIOpRewrite, SubveqOpRewrite, + + /* Irregular quantum operators. */ + CustomUnitaryOpPattern, ExpPauliOpPattern, + MeasurementOpPattern, ResetOpPattern, + + /* Regular quantum operators. */ + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern, + QuantumGatePattern>(typeConverter, ctx); + commonQuakeHandlingPatterns(patterns, typeConverter, ctx); + commonClassicalHandlingPatterns(patterns, typeConverter, ctx); + } + + static StringRef getQIRMeasure() { return cudaq::opt::QIRMeasure; } + static StringRef getQIRReset() { return cudaq::opt::QIRReset; } + + static constexpr bool mzReturnsResultType = true; + static constexpr bool convertToCNot = false; + static constexpr bool convertToCZ = false; + + static Type getQubitType(MLIRContext *ctx) { + return cudaq::cg::getQubitType(ctx, opaquePtr); + } + static Type getArrayType(MLIRContext *ctx) { + return cudaq::cg::getArrayType(ctx, opaquePtr); + } + static Type getResultType(MLIRContext *ctx) { + return cudaq::cg::getResultType(ctx, opaquePtr); + } + static Type getCharPointerType(MLIRContext *ctx) { + return cudaq::cg::getCharPointerType(ctx, opaquePtr); + } + static Type getLLVMPointerType(MLIRContext *ctx) { + return GetLLVMPointerType(ctx); + } +}; + +/// The base modifier class for the "profile QIR" APIs. +template +struct AnyProfileQIR { + using Self = AnyProfileQIR; + + template + static std::string quakeToFuncName(QuakeOp op) { + auto [prefix, isBarePrefix] = generateGateFunctionName(op); + return isBarePrefix ? prefix + "__body" : prefix; + } + + static void populateRewritePatterns(RewritePatternSet &patterns, + TypeConverter &typeConverter) { + auto *ctx = patterns.getContext(); + patterns.insert< + /* Rewrites for qubit management and aggregation. */ + AllocaOpToIntRewrite, ConcatOpRewrite, DeallocOpErase, + ExtractRefOpRewrite, QmemRAIIOpRewrite, + SubveqOpRewrite, + + /* Irregular quantum operators. 
*/
+        CustomUnitaryOpPattern<Self>, ExpPauliOpPattern, ResetOpPattern<Self>,
+
+        /* Regular quantum operators. */
+        QuantumGatePattern<Self, quake::HOp>,
+        QuantumGatePattern<Self, quake::PhasedRxOp>,
+        QuantumGatePattern<Self, quake::R1Op>,
+        QuantumGatePattern<Self, quake::RxOp>,
+        QuantumGatePattern<Self, quake::RyOp>,
+        QuantumGatePattern<Self, quake::RzOp>,
+        QuantumGatePattern<Self, quake::SOp>,
+        QuantumGatePattern<Self, quake::SwapOp>,
+        QuantumGatePattern<Self, quake::TOp>,
+        QuantumGatePattern<Self, quake::U2Op>,
+        QuantumGatePattern<Self, quake::U3Op>,
+        QuantumGatePattern<Self, quake::XOp>,
+        QuantumGatePattern<Self, quake::YOp>,
+        QuantumGatePattern<Self, quake::ZOp>>(typeConverter, ctx);
+    commonQuakeHandlingPatterns(patterns, typeConverter, ctx);
+    commonClassicalHandlingPatterns(patterns, typeConverter, ctx);
+  }
+
+  static StringRef getQIRMeasure() { return cudaq::opt::QIRMeasureBody; }
+  static StringRef getQIRReset() { return cudaq::opt::QIRResetBody; }
+
+  static constexpr bool mzReturnsResultType = false;
+  static constexpr bool convertToCNot = true;
+  static constexpr bool convertToCZ = true;
+
+  static Type getQubitType(MLIRContext *ctx) {
+    return cudaq::cg::getQubitType(ctx, opaquePtr);
+  }
+  static Type getArrayType(MLIRContext *ctx) {
+    return cudaq::cg::getArrayType(ctx, opaquePtr);
+  }
+  static Type getResultType(MLIRContext *ctx) {
+    return cudaq::cg::getResultType(ctx, opaquePtr);
+  }
+  static Type getCharPointerType(MLIRContext *ctx) {
+    return cudaq::cg::getCharPointerType(ctx, opaquePtr);
+  }
+  static Type getLLVMPointerType(MLIRContext *ctx) {
+    return GetLLVMPointerType<opaquePtr>(ctx);
+  }
+};
+
+/// The QIR base profile modifier class.
+template <bool opaquePtr>
+struct BaseProfileQIR : public AnyProfileQIR<opaquePtr> {
+  using Self = BaseProfileQIR;
+  using Base = AnyProfileQIR<opaquePtr>;
+
+  static void populateRewritePatterns(RewritePatternSet &patterns,
+                                      TypeConverter &typeConverter) {
+    Base::populateRewritePatterns(patterns, typeConverter);
+    patterns
+        .insert<DiscriminateOpToCallRewrite<Self>, MeasurementOpPattern<Self>>(
+            typeConverter, patterns.getContext());
+  }
+
+  static constexpr bool discriminateToClassical = false;
+};
+
+/// The QIR adaptive profile modifier class.
+template <bool opaquePtr>
+struct AdaptiveProfileQIR : public AnyProfileQIR<opaquePtr> {
+  using Self = AdaptiveProfileQIR;
+  using Base = AnyProfileQIR<opaquePtr>;
+
+  static void populateRewritePatterns(RewritePatternSet &patterns,
+                                      TypeConverter &typeConverter) {
+    Base::populateRewritePatterns(patterns, typeConverter);
+    patterns
+        .insert<DiscriminateOpToCallRewrite<Self>, MeasurementOpPattern<Self>>(
+            typeConverter, patterns.getContext());
+  }
+
+  static constexpr bool discriminateToClassical = true;
+};
+
+//===----------------------------------------------------------------------===//
+// Quake conversion to the QIR API driver pass.
+//
+// This is done in three phases: preparation, conversion, and finalization.
+//===----------------------------------------------------------------------===//
+
+/// Conversion of quake IR to QIR calls for the intended API.
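+///
+/// The preparation, conversion, and finalization passes are registered
+/// together below as the "convert-to-qir-api" pipeline. An illustrative
+/// invocation:
+///   cudaq-opt --convert-to-qir-api="api=base-profile" kernel.qke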
+struct QuakeToQIRAPIPass + : public cudaq::opt::impl::QuakeToQIRAPIBase { + using QuakeToQIRAPIBase::QuakeToQIRAPIBase; + + template + void processOperation(QIRAPITypeConverter &typeConverter) { + auto *op = getOperation(); + LLVM_DEBUG(llvm::dbgs() << "Before QIR API conversion:\n" << *op << '\n'); + auto *ctx = &getContext(); + RewritePatternSet patterns(ctx); + A::populateRewritePatterns(patterns, typeConverter); + ConversionTarget target(*ctx); + target.addLegalDialect(); + target.addIllegalDialect(); + target.addLegalOp(); + target.addDynamicallyLegalOp( + [&](func::FuncOp fn) { return !hasQuakeType(fn.getFunctionType()); }); + target.addDynamicallyLegalOp([&](func::ConstantOp fn) { + return !hasQuakeType(fn.getResult().getType()); + }); + target.addDynamicallyLegalOp( + [&](Operation *op) { + return !hasQuakeType(op->getResult(0).getType()); + }); + target.addDynamicallyLegalOp( + [&](cudaq::cc::CallableFuncOp op) { + return !hasQuakeType(op.getFunction().getType()); + }); + target.addDynamicallyLegalOp( + [&](cudaq::cc::CreateLambdaOp op) { + return !hasQuakeType(op.getSignature().getType()); + }); + target.addDynamicallyLegalOp( + [&](cudaq::cc::InstantiateCallableOp op) { + return !hasQuakeType(op.getSignature().getType()); + }); + target.addDynamicallyLegalOp( + [&](cudaq::cc::AllocaOp op) { + return !hasQuakeType(op.getElementType()); + }); + target.addDynamicallyLegalOp< + func::CallOp, func::CallIndirectOp, cudaq::cc::CallCallableOp, + cudaq::cc::CallIndirectCallableOp, cudaq::cc::CastOp, + cudaq::cc::FuncToPtrOp, cudaq::cc::StoreOp, cudaq::cc::LoadOp>( + [&](Operation *op) { + for (auto opnd : op->getOperands()) + if (hasQuakeType(opnd.getType())) + return false; + for (auto res : op->getResults()) + if (hasQuakeType(res.getType())) + return false; + return true; + }); + target.markUnknownOpDynamicallyLegal([](Operation *) { return true; }); + if (failed(applyPartialConversion(op, target, std::move(patterns)))) + signalPassFailure(); + LLVM_DEBUG(llvm::dbgs() << "After QIR API conversion:\n" << *op << '\n'); + } + + static bool hasQuakeType(Type ty) { + if (auto pty = dyn_cast(ty)) + return hasQuakeType(pty.getElementType()); + if (auto cty = dyn_cast(ty)) + return hasQuakeType(cty.getSignature()); + if (auto cty = dyn_cast(ty)) + return hasQuakeType(cty.getSignature()); + if (auto fty = dyn_cast(ty)) { + for (auto t : fty.getInputs()) + if (hasQuakeType(t)) + return true; + for (auto t : fty.getResults()) + if (hasQuakeType(t)) + return true; + return false; + } + return quake::isQuakeType(ty); + } + + void runOnOperation() override { + LLVM_DEBUG(llvm::dbgs() << "Begin converting to QIR\n"); + QIRAPITypeConverter typeConverter(opaquePtr); + if (api == "full") { + if (opaquePtr) + processOperation>(typeConverter); + processOperation>(typeConverter); + } else if (api == "base-profile") { + if (opaquePtr) + processOperation>(typeConverter); + processOperation>(typeConverter); + } else if (api == "adaptive-profile") { + if (opaquePtr) + processOperation>(typeConverter); + processOperation>(typeConverter); + } else { + getOperation()->emitOpError("The currently supported APIs are: 'full', " + "'base-profile', 'adaptive-profile'."); + signalPassFailure(); + } + } +}; + +struct QuakeToQIRAPIPrepPass + : public cudaq::opt::impl::QuakeToQIRAPIPrepBase { + using QuakeToQIRAPIPrepBase::QuakeToQIRAPIPrepBase; + + void runOnOperation() override { + ModuleOp module = getOperation(); + + { + auto *ctx = &getContext(); + RewritePatternSet patterns(ctx); + QIRAPITypeConverter 
typeConverter(opaquePtr); + cudaq::opt::populateQuakeToCCPrepPatterns(patterns); + if (failed(applyPatternsAndFoldGreedily(module, std::move(patterns)))) { + signalPassFailure(); + return; + } + } + + auto irBuilder = cudaq::IRBuilder::atBlockEnd(module.getBody()); + + // Get the type aliases to use to dynamically configure the prototypes. + StringRef typeAliases; + if (opaquePtr) { + LLVM_DEBUG(llvm::dbgs() << "Using opaque pointers\n"); + typeAliases = irBuilder.getIntrinsicText("qir_opaque_pointer"); + } else { + LLVM_DEBUG(llvm::dbgs() << "Using pointers to opaque structs\n"); + typeAliases = irBuilder.getIntrinsicText("qir_opaque_struct"); + } + + bool usingFullQIR = api == "full"; + if (usingFullQIR) { + if (failed(irBuilder.loadIntrinsicWithAliases(module, "qir_full", + typeAliases))) { + module.emitError("could not load full QIR declarations."); + signalPassFailure(); + return; + } + + } else { + if (failed(irBuilder.loadIntrinsicWithAliases( + module, "qir_common_profile", typeAliases))) { + module.emitError("could not load QIR profile declarations."); + signalPassFailure(); + return; + } + } + + if (usingFullQIR) { + OpBuilder builder(module); + module.walk([&](func::FuncOp func) { + int counter = 0; + func.walk([&](quake::MzOp mz) { + guaranteeMzIsLabeled(mz, counter, builder); + }); + }); + } else { + // If the API is one of the profile variants, we must perform allocation + // and measurement analysis and stick attributes on the Ops as needed. + OpBuilder builder(module); + module.walk([&](func::FuncOp func) { + std::size_t totalQubits = 0; + std::size_t totalResults = 0; + int counter = 0; + + // A map to keep track of wire set usage. + DenseMap> borrowSets; + + // Recursive walk in func. + func.walk([&](Operation *op) { + // Annotate all qubit allocations with the starting qubit index value. + // This ought to handle both reference and value semantics. If the + // value semantics is using wire sets, no (redundant) annotation is + // needed. + if (auto alloc = dyn_cast(op)) { + auto allocTy = alloc.getType(); + if (isa(allocTy)) { + alloc->setAttr(cudaq::opt::StartingOffsetAttrName, + builder.getI64IntegerAttr(totalQubits++)); + return; + } + if (!isa(allocTy)) { + alloc.emitOpError("must be veq type."); + return; + } + auto veqTy = cast(allocTy); + if (!veqTy.hasSpecifiedSize()) { + alloc.emitOpError("must have a constant size."); + return; + } + alloc->setAttr(cudaq::opt::StartingOffsetAttrName, + builder.getI64IntegerAttr(totalQubits)); + totalQubits += veqTy.getSize(); + return; + } + if (auto nw = dyn_cast(op)) { + nw->setAttr(cudaq::opt::StartingOffsetAttrName, + builder.getI64IntegerAttr(totalQubits++)); + return; + } + if (auto bw = dyn_cast(op)) { + [[maybe_unused]] StringRef name = bw.getSetName(); + [[maybe_unused]] std::int32_t wire = bw.getIdentity(); + bw.emitOpError("not implemented."); + return; + } + + // For each mz, we want to assign it a result index. + if (auto mz = dyn_cast(op)) { + // Verify there is exactly one qubit being measured. + if (mz.getTargets().empty() || + std::distance(mz.getTargets().begin(), mz.getTargets().end()) != + 1) { + mz.emitOpError("must measure exactly one qubit."); + return; + } + mz->setAttr(cudaq::opt::ResultIndexAttrName, + builder.getI64IntegerAttr(totalResults++)); + guaranteeMzIsLabeled(mz, counter, builder); + } + }); + + // If the API is one of the profile variants, the QIR consumer expects + // some bonus information by way of attributes. Add most of them here. + // (See also OUTPUT-NAME-MAP.) 
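+      // For instance (illustrative; the exact spellings come from the
+      // attribute-name constants used below), a base-profile entry point
+      // using two qubits and one result ends up with:
+      //   passthrough = ["entry_point",
+      //                  ["qir_profiles", "base_profile"],
+      //                  ["output_labeling_schema", "schema_id"],
+      //                  ["requiredQubits", "2"],
+      //                  ["requiredResults", "1"]]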
+ SmallVector funcAttrs; + if (func->hasAttr(cudaq::kernelAttrName)) { + if (func->getAttr(cudaq::entryPointAttrName)) + funcAttrs.push_back( + builder.getStringAttr(cudaq::opt::QIREntryPointAttrName)); + if (api == "base-profile") { + funcAttrs.push_back(builder.getStrArrayAttr( + {cudaq::opt::QIRProfilesAttrName, "base_profile"})); + funcAttrs.push_back(builder.getStrArrayAttr( + {cudaq::opt::QIROutputLabelingSchemaAttrName, "schema_id"})); + } else if (api == "adaptive-profile") { + funcAttrs.push_back(builder.getStrArrayAttr( + {cudaq::opt::QIRProfilesAttrName, "adaptive_profile"})); + } + if (totalQubits) + funcAttrs.push_back(builder.getStrArrayAttr( + {cudaq::opt::QIRRequiredQubitsAttrName, + builder.getStringAttr(std::to_string(totalQubits))})); + if (totalResults) + funcAttrs.push_back(builder.getStrArrayAttr( + {cudaq::opt::QIRRequiredResultsAttrName, + builder.getStringAttr(std::to_string(totalResults))})); + } + if (!funcAttrs.empty()) + func->setAttr("passthrough", builder.getArrayAttr(funcAttrs)); + }); + } + } + + void guaranteeMzIsLabeled(quake::MzOp mz, int &counter, OpBuilder &builder) { + if (mz.getRegisterNameAttr() && + /* FIXME: issue 2538: the name should never be empty. */ + !mz.getRegisterNameAttr().getValue().empty()) { + mz->setAttr(cudaq::opt::MzAssignedNameAttrName, builder.getUnitAttr()); + return; + } + // Manufacture a bogus name on demand here. + std::string manuName = std::to_string(counter++); + constexpr std::size_t padSize = 5; + manuName = + std::string(padSize - std::min(padSize, manuName.length()), '0') + + manuName; + mz.setRegisterName("r" + manuName); + } +}; + +struct QuakeToQIRAPIFinalPass + : public cudaq::opt::impl::QuakeToQIRAPIFinalBase { + using QuakeToQIRAPIFinalBase::QuakeToQIRAPIFinalBase; + + void runOnOperation() override { + auto *ctx = &getContext(); + ModuleOp module = getOperation(); + RewritePatternSet patterns(ctx); + patterns.insert(ctx); + if (api == "base-profile") + patterns.insert(ctx); + if (failed(applyPatternsAndFoldGreedily(module, std::move(patterns)))) + signalPassFailure(); + } +}; + +} // namespace + +void cudaq::opt::addConvertToQIRAPIPipeline(OpPassManager &pm, StringRef api, + bool opaquePtr) { + QuakeToQIRAPIPrepOptions prepApiOpt{.api = api.str(), .opaquePtr = opaquePtr}; + pm.addPass(cudaq::opt::createQuakeToQIRAPIPrep(prepApiOpt)); + pm.addPass(cudaq::opt::createLowerToCG()); + QuakeToQIRAPIOptions apiOpt{.api = api.str(), .opaquePtr = opaquePtr}; + pm.addPass(cudaq::opt::createQuakeToQIRAPI(apiOpt)); + pm.addPass(createCanonicalizerPass()); + QuakeToQIRAPIFinalOptions finalApiOpt{.api = api.str()}; + pm.addPass(cudaq::opt::createQuakeToQIRAPIFinal(finalApiOpt)); + pm.addPass(cudaq::opt::createGlobalizeArrayValues()); +} + +namespace { +struct QIRAPIPipelineOptions + : public PassPipelineOptions { + PassOptions::Option api{ + *this, "api", + llvm::cl::desc("select the profile to convert to [full, base-profile, " + "adaptive-profile]"), + llvm::cl::init("full")}; + PassOptions::Option opaquePtr{*this, "opaque-pointer", + llvm::cl::desc("use opaque pointers"), + llvm::cl::init(false)}; +}; +} // namespace + +void cudaq::opt::registerToQIRAPIPipeline() { + PassPipelineRegistration( + "convert-to-qir-api", "Convert quake to one of the QIR APIs.", + [](OpPassManager &pm, const QIRAPIPipelineOptions &opt) { + addConvertToQIRAPIPipeline(pm, opt.api, opt.opaquePtr); + }); +} diff --git a/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp b/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp index 827ae61ec35..be95599f34f 100644 
--- a/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp +++ b/lib/Optimizer/CodeGen/ConvertToQIRProfile.cpp @@ -587,14 +587,17 @@ std::unique_ptr cudaq::opt::createQIRProfilePreparationPass() { //===----------------------------------------------------------------------===// // The various passes defined here should be added as a pass pipeline. +void cudaq::opt::addQIRProfileVerify(OpPassManager &pm, + llvm::StringRef convertTo) { + VerifyQIRProfileOptions vqpo = {convertTo.str()}; + pm.addNestedPass(createVerifyQIRProfile(vqpo)); +} + void cudaq::opt::addQIRProfilePipeline(OpPassManager &pm, - llvm::StringRef convertTo, - bool performPrep) { + llvm::StringRef convertTo) { assert(convertTo == "qir-adaptive" || convertTo == "qir-base"); - if (performPrep) - pm.addPass(createQIRProfilePreparationPass()); + pm.addPass(createQIRProfilePreparationPass()); pm.addNestedPass(createConvertToQIRFuncPass(convertTo)); pm.addPass(createQIRToQIRProfilePass(convertTo)); - VerifyQIRProfileOptions vqpo = {convertTo.str()}; - pm.addNestedPass(createVerifyQIRProfile(vqpo)); + addQIRProfileVerify(pm, convertTo); } diff --git a/lib/Optimizer/CodeGen/PassDetails.h b/lib/Optimizer/CodeGen/PassDetails.h index 507c61a19fc..225997c4119 100644 --- a/lib/Optimizer/CodeGen/PassDetails.h +++ b/lib/Optimizer/CodeGen/PassDetails.h @@ -11,6 +11,8 @@ #include "cudaq/Optimizer/CodeGen/CodeGenDialect.h" #include "cudaq/Optimizer/Dialect/CC/CCDialect.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeDialect.h" +#include "mlir/Dialect/Arith/IR/Arith.h" +#include "mlir/Dialect/ControlFlow/IR/ControlFlow.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/OpenACC/OpenACC.h" diff --git a/lib/Optimizer/CodeGen/Pipelines.cpp b/lib/Optimizer/CodeGen/Pipelines.cpp index f1aa357ef85..393dad5c653 100644 --- a/lib/Optimizer/CodeGen/Pipelines.cpp +++ b/lib/Optimizer/CodeGen/Pipelines.cpp @@ -10,31 +10,72 @@ using namespace mlir; -void cudaq::opt::commonPipelineConvertToQIR( +void cudaq::opt::commonPipelineConvertToQIR(PassManager &pm, + StringRef codeGenFor, + StringRef passConfigAs) { + pm.addNestedPass(createApplyControlNegations()); + addAggressiveEarlyInlining(pm); + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createUnwindLoweringPass()); + pm.addNestedPass(createCanonicalizerPass()); + pm.addPass(createApplyOpSpecializationPass()); + pm.addNestedPass(createExpandMeasurementsPass()); + pm.addNestedPass(createClassicalMemToReg()); + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createCSEPass()); + pm.addNestedPass(createQuakeAddDeallocs()); + pm.addNestedPass(createQuakeAddMetadata()); + pm.addNestedPass(createLoopNormalize()); + LoopUnrollOptions luo; + luo.allowBreak = passConfigAs == "qir-adaptive"; + pm.addNestedPass(createLoopUnroll(luo)); + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createCSEPass()); + pm.addNestedPass(createLowerToCFGPass()); + pm.addNestedPass(createCombineQuantumAllocations()); + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createCSEPass()); + if (passConfigAs == "qir-base") + pm.addNestedPass(createDelayMeasurementsPass()); + if (codeGenFor == "qir") + cudaq::opt::addConvertToQIRAPIPipeline(pm, "full"); + else if (codeGenFor == "qir-base") + cudaq::opt::addConvertToQIRAPIPipeline(pm, "base-profile"); + else if (codeGenFor == "qir-adaptive") + cudaq::opt::addConvertToQIRAPIPipeline(pm, "adaptive-profile"); + else + emitError(UnknownLoc::get(pm.getContext()), + "convert to QIR 
must be given a valid specification to use."); + pm.addPass(createConvertMathToFuncs()); + pm.addPass(createSymbolDCEPass()); + pm.addPass(createCCToLLVM()); +} + +void cudaq::opt::commonPipelineConvertToQIR_PythonWorkaround( PassManager &pm, const std::optional &convertTo) { pm.addNestedPass(createApplyControlNegations()); addAggressiveEarlyInlining(pm); - pm.addPass(createCanonicalizerPass()); + pm.addNestedPass(createCanonicalizerPass()); pm.addNestedPass(createUnwindLoweringPass()); - pm.addPass(createCanonicalizerPass()); + pm.addNestedPass(createCanonicalizerPass()); pm.addPass(createApplyOpSpecializationPass()); - pm.addPass(createExpandMeasurementsPass()); + pm.addNestedPass(createExpandMeasurementsPass()); pm.addNestedPass(createClassicalMemToReg()); - pm.addPass(createCanonicalizerPass()); - pm.addPass(createCSEPass()); + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createCSEPass()); pm.addNestedPass(createQuakeAddDeallocs()); pm.addNestedPass(createQuakeAddMetadata()); - pm.addPass(createLoopNormalize()); + pm.addNestedPass(createLoopNormalize()); LoopUnrollOptions luo; - luo.allowBreak = convertTo && convertTo->equals("qir-adaptive"); - pm.addPass(createLoopUnroll(luo)); - pm.addPass(createCanonicalizerPass()); - pm.addPass(createCSEPass()); + luo.allowBreak = convertTo && (*convertTo == "qir-adaptive"); + pm.addNestedPass(createLoopUnroll(luo)); + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createCSEPass()); pm.addNestedPass(createLowerToCFGPass()); pm.addNestedPass(createCombineQuantumAllocations()); - pm.addPass(createCanonicalizerPass()); - pm.addPass(createCSEPass()); - if (convertTo && convertTo->equals("qir-base")) + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createCSEPass()); + if (convertTo && (*convertTo == "qir-base")) pm.addNestedPass(createDelayMeasurementsPass()); pm.addPass(createConvertMathToFuncs()); pm.addPass(createSymbolDCEPass()); @@ -51,13 +92,19 @@ void cudaq::opt::addPipelineTranslateToOpenQASM(PassManager &pm) { void cudaq::opt::addPipelineTranslateToIQMJson(PassManager &pm) { pm.addNestedPass(createUnwindLoweringPass()); - pm.addPass(createExpandMeasurementsPass()); + pm.addNestedPass(createExpandMeasurementsPass()); LoopUnrollOptions luo; - pm.addPass(createLoopUnroll(luo)); - pm.addPass(createCanonicalizerPass()); + pm.addNestedPass(createLoopUnroll(luo)); + pm.addNestedPass(createCanonicalizerPass()); pm.addNestedPass(createLowerToCFGPass()); pm.addNestedPass(createQuakeAddDeallocs()); pm.addNestedPass(createCombineQuantumAllocations()); - pm.addPass(createCanonicalizerPass()); - pm.addPass(createCSEPass()); + pm.addNestedPass(createCanonicalizerPass()); + pm.addNestedPass(createCSEPass()); +} + +void cudaq::opt::addPipelineConvertToQIR(PassManager &pm, StringRef convertTo) { + commonPipelineConvertToQIR(pm, convertTo, convertTo); + if (convertTo != "qir") + addQIRProfileVerify(pm, convertTo); } diff --git a/lib/Optimizer/CodeGen/QuakeToCC.cpp b/lib/Optimizer/CodeGen/QuakeToExecMgr.cpp similarity index 99% rename from lib/Optimizer/CodeGen/QuakeToCC.cpp rename to lib/Optimizer/CodeGen/QuakeToExecMgr.cpp index 4ccb7aea380..cadc65c31f2 100644 --- a/lib/Optimizer/CodeGen/QuakeToCC.cpp +++ b/lib/Optimizer/CodeGen/QuakeToExecMgr.cpp @@ -6,7 +6,7 @@ * the terms of the Apache License 2.0 which accompanies this distribution. 
* ******************************************************************************/ -#include "cudaq/Optimizer/CodeGen/QuakeToCC.h" +#include "cudaq/Optimizer/CodeGen/QuakeToExecMgr.h" #include "cudaq/Optimizer/Builder/Intrinsics.h" #include "cudaq/Optimizer/CodeGen/CudaqFunctionNames.h" #include "cudaq/Optimizer/CodeGen/Passes.h" diff --git a/lib/Optimizer/CodeGen/QuakeToLLVM.cpp b/lib/Optimizer/CodeGen/QuakeToLLVM.cpp index 1ab80750947..8216f81c27e 100644 --- a/lib/Optimizer/CodeGen/QuakeToLLVM.cpp +++ b/lib/Optimizer/CodeGen/QuakeToLLVM.cpp @@ -11,6 +11,8 @@ #include "cudaq/Optimizer/Builder/Intrinsics.h" #include "cudaq/Optimizer/CodeGen/Passes.h" #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" +#include "cudaq/Optimizer/CodeGen/QuakeToExecMgr.h" #include "cudaq/Optimizer/Dialect/CC/CCOps.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" #include "mlir/Conversion/LLVMCommon/ConversionTarget.h" @@ -296,9 +298,9 @@ class ExtractQubitOpRewrite LogicalResult matchAndRewrite(quake::ExtractRefOp extract, OpAdaptor adaptor, ConversionPatternRewriter &rewriter) const override { - auto loc = extract->getLoc(); + auto loc = extract.getLoc(); auto parentModule = extract->getParentOfType(); - auto context = parentModule->getContext(); + auto *context = rewriter.getContext(); auto qir_array_get_element_ptr_1d = cudaq::opt::QIRArrayGetElementPtr1d; @@ -1140,40 +1142,6 @@ class MeasureRewrite : public ConvertOpToLLVMPattern { } }; -/// Convert a MX operation to a sequence H; MZ. -class MxToMz : public OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - LogicalResult - matchAndRewrite(quake::MxOp mx, OpAdaptor adaptor, - ConversionPatternRewriter &rewriter) const override { - rewriter.create(mx.getLoc(), adaptor.getTargets()); - rewriter.replaceOpWithNewOp(mx, mx.getResultTypes(), - adaptor.getTargets(), - mx.getRegisterNameAttr()); - return success(); - } -}; - -/// Convert a MY operation to a sequence S; H; MZ. -class MyToMz : public OpConversionPattern { -public: - using OpConversionPattern::OpConversionPattern; - - LogicalResult - matchAndRewrite(quake::MyOp my, OpAdaptor adaptor, - ConversionPatternRewriter &rewriter) const override { - rewriter.create(my.getLoc(), true, ValueRange{}, ValueRange{}, - adaptor.getTargets()); - rewriter.create(my.getLoc(), adaptor.getTargets()); - rewriter.replaceOpWithNewOp(my, my.getResultTypes(), - adaptor.getTargets(), - my.getRegisterNameAttr()); - return success(); - } -}; - class GetVeqSizeOpRewrite : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; @@ -1407,8 +1375,9 @@ class CustomUnitaryOpRewrite auto unitaryData = rewriter.create(loc, complex64PtrTy, addrOp); - auto qirFunctionName = - std::string{cudaq::opt::QIRCustomOp} + (op.isAdj() ? 
"__adj" : ""); + std::string qirFunctionName = cudaq::opt::QIRCustomOp; + if (op.isAdj()) + qirFunctionName += "__adj"; FlatSymbolRefAttr customSymbolRef = cudaq::opt::factory::createLLVMFunctionSymbol( @@ -1431,8 +1400,10 @@ void cudaq::opt::populateQuakeToLLVMPatterns(LLVMTypeConverter &typeConverter, RewritePatternSet &patterns, unsigned &measureCounter) { auto *context = patterns.getContext(); - patterns.insert(context); + cudaq::opt::populateQuakeToCCPrepPatterns(patterns); + patterns + .insert( + context); patterns .insert NVQIR_FUNCS = { - cudaq::opt::NVQIRInvokeWithControlBits, - cudaq::opt::NVQIRInvokeRotationWithControlBits, - cudaq::opt::NVQIRInvokeWithControlRegisterOrBits, + cudaq::opt::NVQIRInvokeWithControlBits, // obsolete + cudaq::opt::NVQIRInvokeRotationWithControlBits, // obsolete + cudaq::opt::NVQIRInvokeWithControlRegisterOrBits, // obsolete + cudaq::opt::NVQIRGeneralizedInvokeAny, cudaq::opt::NVQIRPackSingleQubitInArray, cudaq::opt::NVQIRReleasePackedQubitArray, cudaq::opt::QIRArrayQubitAllocateArrayWithStateComplex32, @@ -78,7 +79,7 @@ struct VerifyNVQIRCallOpsPass return WalkResult::interrupt(); } else if (!isa(op)) { + LLVM::IntToPtrOp, LLVM::LoadOp, LLVM::StoreOp>(op)) { // No pointers allowed except for the above operations. for (auto oper : op->getOperands()) { if (isa(oper.getType())) { diff --git a/lib/Optimizer/CodeGen/VerifyQIRProfile.cpp b/lib/Optimizer/CodeGen/VerifyQIRProfile.cpp index c3191bd5600..16d15c52ea7 100644 --- a/lib/Optimizer/CodeGen/VerifyQIRProfile.cpp +++ b/lib/Optimizer/CodeGen/VerifyQIRProfile.cpp @@ -10,6 +10,7 @@ #include "cudaq/Optimizer/Builder/Intrinsics.h" #include "cudaq/Optimizer/CodeGen/Passes.h" #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" #include "cudaq/Todo.h" #include "nlohmann/json.hpp" diff --git a/lib/Optimizer/CodeGen/WireSetsToProfileQIR.cpp b/lib/Optimizer/CodeGen/WireSetsToProfileQIR.cpp index cb3e2949087..6ae7fb627c1 100644 --- a/lib/Optimizer/CodeGen/WireSetsToProfileQIR.cpp +++ b/lib/Optimizer/CodeGen/WireSetsToProfileQIR.cpp @@ -14,14 +14,14 @@ #include "cudaq/Optimizer/CodeGen/Pipelines.h" #include "cudaq/Optimizer/CodeGen/QIRAttributeNames.h" #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" -#include "cudaq/Optimizer/CodeGen/QuakeToCC.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" +#include "cudaq/Optimizer/CodeGen/QuakeToExecMgr.h" #include "cudaq/Optimizer/Dialect/CC/CCOps.h" #include "cudaq/Optimizer/Dialect/CC/CCTypes.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" #include "nlohmann/json.hpp" #include "llvm/ADT/DepthFirstIterator.h" #include "llvm/Support/Debug.h" -#include "mlir/Dialect/ControlFlow/IR/ControlFlow.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h" #include "mlir/Pass/PassManager.h" #include "mlir/Pass/PassOptions.h" @@ -676,15 +676,20 @@ void cudaq::opt::addWiresetToProfileQIRPipeline(OpPassManager &pm, // Perform final cleanup for other dialect conversions (like func.func) pm.addPass(cudaq::opt::createConvertToQIR()); if (profile.starts_with("qir")) - cudaq::opt::addQIRProfilePipeline(pm, profile, /*performPrep=*/false); + cudaq::opt::addQIRProfilePipeline(pm, profile); } +namespace { // Pipeline option: let the user specify the profile name. 
 struct WiresetToProfileQIRPipelineOptions
     : public PassPipelineOptions<WiresetToProfileQIRPipelineOptions> {
   PassOptions::Option<std::string> profile{
-      *this, "convert-to", llvm::cl::desc(""), llvm::cl::init("qir-base")};
+      *this, "convert-to",
+      llvm::cl::desc(
+          "select the profile to convert to [qir-base, qir-adaptive]"),
+      llvm::cl::init("qir-base")};
 };
+} // namespace
 
 void cudaq::opt::registerWireSetToProfileQIRPipeline() {
   PassPipelineRegistration<WiresetToProfileQIRPipelineOptions>(
diff --git a/lib/Optimizer/Dialect/CC/CCOps.cpp b/lib/Optimizer/Dialect/CC/CCOps.cpp
index f16d9c1aff6..601292df17b 100644
--- a/lib/Optimizer/Dialect/CC/CCOps.cpp
+++ b/lib/Optimizer/Dialect/CC/CCOps.cpp
@@ -2175,6 +2175,16 @@ struct ReplaceConstantSizes : public OpRewritePattern<cudaq::cc::SizeOfOp> {
     auto inpTy = sizeOp.getInputType();
     if (Value v = cudaq::cc::getByteSizeOfType(rewriter, sizeOp.getLoc(),
                                                inpTy, /*useSizeOf=*/false)) {
+      if (v.getType() != sizeOp.getType()) {
+        auto vSz = v.getType().getIntOrFloatBitWidth();
+        auto sizeOpSz = sizeOp.getType().getIntOrFloatBitWidth();
+        auto loc = sizeOp.getLoc();
+        if (sizeOpSz < vSz)
+          v = rewriter.create<cudaq::cc::CastOp>(loc, sizeOp.getType(), v);
+        else
+          v = rewriter.create<cudaq::cc::CastOp>(
+              loc, sizeOp.getType(), v, cudaq::cc::CastOpMode::Unsigned);
+      }
       rewriter.replaceOp(sizeOp, v);
       return success();
     }
diff --git a/lib/Optimizer/Transforms/AggressiveEarlyInlining.cpp b/lib/Optimizer/Transforms/AggressiveEarlyInlining.cpp
index 45784ca1097..2617b6b4f09 100644
--- a/lib/Optimizer/Transforms/AggressiveEarlyInlining.cpp
+++ b/lib/Optimizer/Transforms/AggressiveEarlyInlining.cpp
@@ -146,7 +146,7 @@ void cudaq::opt::addAggressiveEarlyInlining(OpPassManager &pm) {
   pm.addNestedPass<func::FuncOp>(cudaq::opt::createCheckKernelCalls());
 }
 
-void cudaq::opt::registerAggressiveEarlyInlining() {
+void cudaq::opt::registerAggressiveEarlyInliningPipeline() {
   PassPipelineRegistration<>(
       "aggressive-early-inlining",
       "Convert calls between kernels to direct calls and inline functions.",
diff --git a/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp b/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp
index e2c34ca8f5a..b9728ed0670 100644
--- a/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp
+++ b/lib/Optimizer/Transforms/GlobalizeArrayValues.cpp
@@ -28,7 +28,7 @@ namespace cudaq::opt {
 using namespace mlir;
 
 template <typename A, typename B>
-SmallVector<A> conversion(ArrayAttr seq) {
+SmallVector<A> conversion(ArrayAttr seq, Type) {
   SmallVector<A> result;
   for (auto v : seq) {
     B c = cast<B>(v);
@@ -37,8 +37,21 @@ SmallVector<A> conversion(ArrayAttr seq) {
   return result;
 }
 template <>
+SmallVector<APInt> conversion<APInt, IntegerAttr>(ArrayAttr seq, Type ty) {
+  SmallVector<APInt> result;
+  for (auto v : seq) {
+    auto c = cast<IntegerAttr>(v);
+    APInt ap = c.getValue();
+    if (c.getType() != ty)
+      result.emplace_back(ty.getIntOrFloatBitWidth(), ap.getLimitedValue());
+    else
+      result.emplace_back(ap);
+  }
+  return result;
+}
+template <>
 SmallVector<std::complex<APFloat>>
-conversion<std::complex<APFloat>, ArrayAttr>(ArrayAttr seq) {
+conversion<std::complex<APFloat>, ArrayAttr>(ArrayAttr seq, Type) {
   SmallVector<std::complex<APFloat>> result;
   for (auto v : seq) {
     auto p = cast<ArrayAttr>(v);
@@ -55,15 +68,16 @@ convertArrayAttrToGlobalConstant(MLIRContext *ctx, Location loc,
   cudaq::IRBuilder irBuilder(ctx);
   auto tensorTy = RankedTensorType::get(arrAttr.size(), eleTy);
   if (isa<ComplexType>(eleTy)) {
-    auto blockValues = conversion<std::complex<APFloat>, ArrayAttr>(arrAttr);
+    auto blockValues =
+        conversion<std::complex<APFloat>, ArrayAttr>(arrAttr, eleTy);
     auto dense = DenseElementsAttr::get(tensorTy, blockValues);
     irBuilder.genVectorOfConstants(loc, module, globalName, dense, eleTy);
   } else if (isa<FloatType>(eleTy)) {
-    auto blockValues = conversion<APFloat, FloatAttr>(arrAttr);
+    auto blockValues = conversion<APFloat, FloatAttr>(arrAttr, eleTy);
     auto dense = DenseElementsAttr::get(tensorTy,
blockValues); irBuilder.genVectorOfConstants(loc, module, globalName, dense, eleTy); } else if (isa(eleTy)) { - auto blockValues = conversion(arrAttr); + auto blockValues = conversion(arrAttr, eleTy); auto dense = DenseElementsAttr::get(tensorTy, blockValues); irBuilder.genVectorOfConstants(loc, module, globalName, dense, eleTy); } else { @@ -81,14 +95,18 @@ struct ConstantArrayPattern LogicalResult matchAndRewrite(cudaq::cc::ConstantArrayOp conarr, PatternRewriter &rewriter) const override { - if (!conarr->hasOneUse()) - return failure(); - auto store = dyn_cast(*conarr->getUsers().begin()); - if (!store) - return failure(); - auto alloca = store.getPtrvalue().getDefiningOp(); - if (!alloca) - return failure(); + SmallVector allocas; + SmallVector stores; + for (auto *usr : conarr->getUsers()) { + auto store = dyn_cast(usr); + if (!store) + return failure(); + auto alloca = store.getPtrvalue().getDefiningOp(); + if (!alloca) + return failure(); + stores.push_back(store); + allocas.push_back(alloca); + } auto func = conarr->getParentOfType(); if (!func) return failure(); @@ -100,9 +118,11 @@ struct ConstantArrayPattern if (failed(convertArrayAttrToGlobalConstant(ctx, conarr.getLoc(), valueAttr, module, globalName, eleTy))) return failure(); - rewriter.replaceOpWithNewOp( - alloca, alloca.getType(), globalName); - rewriter.eraseOp(store); + for (auto alloca : allocas) + rewriter.replaceOpWithNewOp( + alloca, alloca.getType(), globalName); + for (auto store : stores) + rewriter.eraseOp(store); rewriter.eraseOp(conarr); return success(); } diff --git a/lib/Optimizer/Transforms/QuakeSynthesizer.cpp b/lib/Optimizer/Transforms/QuakeSynthesizer.cpp index 9eacc50e99c..626b3e6943d 100644 --- a/lib/Optimizer/Transforms/QuakeSynthesizer.cpp +++ b/lib/Optimizer/Transforms/QuakeSynthesizer.cpp @@ -10,6 +10,7 @@ #include "cudaq/Optimizer/Builder/Intrinsics.h" #include "cudaq/Optimizer/Builder/Runtime.h" #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" #include "cudaq/Optimizer/Dialect/CC/CCOps.h" #include "cudaq/Optimizer/Dialect/CC/CCTypes.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" diff --git a/python/cudaq/kernel/ast_bridge.py b/python/cudaq/kernel/ast_bridge.py index ba5e664ea85..d64cbdd79ee 100644 --- a/python/cudaq/kernel/ast_bridge.py +++ b/python/cudaq/kernel/ast_bridge.py @@ -930,6 +930,7 @@ def isQuantumTy(ty): ty) or quake.StruqType.isinstance(ty) areQuantumTypes = [isQuantumTy(ty) for ty in self.argTypes] + f.attributes.__setitem__('cudaq-kernel', UnitAttr.get()) if True not in areQuantumTypes and not self.disableEntryPointTag: f.attributes.__setitem__('cudaq-entrypoint', UnitAttr.get()) diff --git a/python/cudaq/kernel/kernel_builder.py b/python/cudaq/kernel/kernel_builder.py index 3e83f161470..f77b1308306 100644 --- a/python/cudaq/kernel/kernel_builder.py +++ b/python/cudaq/kernel/kernel_builder.py @@ -285,6 +285,7 @@ def __init__(self, argTypeList): loc=self.loc) self.funcOp.attributes.__setitem__('cudaq-entrypoint', UnitAttr.get()) + self.funcOp.attributes.__setitem__('cudaq-kernel', UnitAttr.get()) e = self.funcOp.add_entry_block() self.arguments = [self.__createQuakeValue(b) for b in e.arguments] self.argument_count = len(self.arguments) diff --git a/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp b/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp index 3232e26b11e..9a6e48203fb 100644 --- a/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp +++ 
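Two asides on the changes above. First, in GlobalizeArrayValues.cpp, the new IntegerAttr specialization of conversion re-wraps each APInt because DenseElementsAttr::get insists that every APInt match the bit width of the tensor's element type; an attribute recorded at a different width (say, an i32 literal headed into an i64 array) would otherwise fail that check. The core of the re-wrap, shown as an illustrative standalone helper rather than code from the patch:

    // Illustrative only: coerce an APInt to the width of the target element
    // type. getLimitedValue() reads the payload out as a uint64_t.
    llvm::APInt coerceToWidth(const llvm::APInt &ap, unsigned targetWidth) {
      if (ap.getBitWidth() == targetWidth)
        return ap;
      return llvm::APInt(targetWidth, ap.getLimitedValue());
    }

Second, ConstantArrayPattern previously gave up unless the constant array had exactly one store user; it now collects every (store, alloca) pair and rewrites all of them against one global, still failing if any user is not a store into an alloca. Approximate IR for the newly handled shape, with the cc syntax modeled on FileCheck lines elsewhere in this patch:

    %arr = cc.const_array [1, 2] : !cc.array<i64 x 2>
    %p0 = cc.alloca !cc.array<i64 x 2>
    %p1 = cc.alloca !cc.array<i64 x 2>
    cc.store %arr, %p0 : !cc.ptr<!cc.array<i64 x 2>>
    cc.store %arr, %p1 : !cc.ptr<!cc.array<i64 x 2>>
    // After the rewrite, %p0 and %p1 are both replaced by cc.address_of
    // references to a single module-level constant, and the stores are erased.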
b/python/runtime/cudaq/platform/py_alt_launch_kernel.cpp @@ -104,7 +104,7 @@ jitAndCreateArgs(const std::string &name, MlirModule module, {.startingArgIdx = startingArgIdx})); pm.addPass(cudaq::opt::createLambdaLiftingPass()); pm.addPass(createSymbolDCEPass()); - cudaq::opt::addPipelineConvertToQIR(pm); + cudaq::opt::addPipelineConvertToQIR_PythonWorkaround(pm); DefaultTimingManager tm; tm.setEnabled(cudaq::isTimingTagEnabled(cudaq::TIMING_JIT_PASSES)); @@ -596,9 +596,9 @@ std::string getQIR(const std::string &name, MlirModule module, PassManager pm(context); pm.addPass(cudaq::opt::createLambdaLiftingPass()); if (profile.empty()) - cudaq::opt::addPipelineConvertToQIR(pm); + cudaq::opt::addPipelineConvertToQIR_PythonWorkaround(pm); else - cudaq::opt::addPipelineConvertToQIR(pm, profile); + cudaq::opt::addPipelineConvertToQIR_PythonWorkaround(pm, profile); DefaultTimingManager tm; tm.setEnabled(cudaq::isTimingTagEnabled(cudaq::TIMING_JIT_PASSES)); auto timingScope = tm.getRootScope(); // starts the timer diff --git a/python/runtime/mlir/py_register_dialects.cpp b/python/runtime/mlir/py_register_dialects.cpp index 157a91d9211..2485449165f 100644 --- a/python/runtime/mlir/py_register_dialects.cpp +++ b/python/runtime/mlir/py_register_dialects.cpp @@ -6,8 +6,6 @@ * the terms of the Apache License 2.0 which accompanies this distribution. * ******************************************************************************/ -#include "mlir/Bindings/Python/PybindAdaptors.h" - #include "cudaq/Optimizer/Builder/Intrinsics.h" #include "cudaq/Optimizer/CAPI/Dialects.h" #include "cudaq/Optimizer/CodeGen/Passes.h" @@ -17,7 +15,9 @@ #include "cudaq/Optimizer/Dialect/CC/CCTypes.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeDialect.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeTypes.h" +#include "cudaq/Optimizer/InitAllPasses.h" #include "cudaq/Optimizer/Transforms/Passes.h" +#include "mlir/Bindings/Python/PybindAdaptors.h" #include "mlir/InitAllDialects.h" #include #include @@ -38,17 +38,11 @@ void registerQuakeDialectAndTypes(py::module &m) { [](MlirContext context, bool load) { MlirDialectHandle handle = mlirGetDialectHandle__quake__(); mlirDialectHandleRegisterDialect(handle, context); - if (load) { + if (load) mlirDialectHandleLoadDialect(handle, context); - } if (!registered) { - cudaq::opt::registerOptCodeGenPasses(); - cudaq::opt::registerOptTransformsPasses(); - cudaq::opt::registerAggressiveEarlyInlining(); - cudaq::opt::registerUnrollingPipeline(); - cudaq::opt::registerTargetPipelines(); - cudaq::opt::registerMappingPipeline(); + cudaq::registerCudaqPassesAndPipelines(); registered = true; } }, diff --git a/python/tests/interop/test_interop.py b/python/tests/interop/test_interop.py index f9f1aa0d0e9..4a6b6dec6a9 100644 --- a/python/tests/interop/test_interop.py +++ b/python/tests/interop/test_interop.py @@ -41,7 +41,7 @@ def kernel(i: int): kernel(10) otherMod = '''module attributes {quake.mangled_name_map = {__nvqpp__mlirgen__test = "__nvqpp__mlirgen__test_PyKernelEntryPointRewrite"}} { - func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint"} { + func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint", "cudaq-kernel"} { %0 = quake.alloca !quake.veq<2> %1 = quake.extract_ref %0[0] : (!quake.veq<2>) -> !quake.ref quake.h %1 : (!quake.ref) -> () @@ -64,7 +64,7 @@ def callee(q: cudaq.qview): callee.compile() otherMod = '''module attributes {quake.mangled_name_map = {__nvqpp__mlirgen__caller = "__nvqpp__mlirgen__caller_PyKernelEntryPointRewrite"}} { - func.func 
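Note on the py_register_dialects.cpp hunk above: six individual registration calls collapse into a single cudaq::registerCudaqPassesAndPipelines() entry point pulled in through the newly added InitAllPasses.h include, so the Python bindings register exactly what the core registers. A plausible sketch of such an aggregator, assembled only from registration functions that appear elsewhere in this patch (the actual definition may differ):

    namespace cudaq {
    inline void registerCudaqPassesAndPipelines() {
      // Pass registrations.
      opt::registerOptCodeGenPasses();
      opt::registerOptTransformsPasses();
      // Pipeline registrations.
      opt::registerAggressiveEarlyInliningPipeline();
      opt::registerUnrollingPipeline();
      opt::registerTargetPipelines();
      opt::registerMappingPipeline();
      opt::registerWireSetToProfileQIRPipeline();
    }
    } // namespace cudaq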
@__nvqpp__mlirgen__caller(%arg0: !cc.callable<(!quake.veq) -> ()>) attributes {"cudaq-entrypoint"} { + func.func @__nvqpp__mlirgen__caller(%arg0: !cc.callable<(!quake.veq) -> ()>) attributes {"cudaq-entrypoint", "cudaq-kernel"} { %0 = quake.alloca !quake.veq<2> %1 = quake.relax_size %0 : (!quake.veq<2>) -> !quake.veq %2 = cc.callable_func %arg0 : (!cc.callable<(!quake.veq) -> ()>) -> ((!quake.veq) -> ()) diff --git a/python/tests/mlir/adjoint.py b/python/tests/mlir/adjoint.py index 22f8771e8cf..34a9e775f56 100644 --- a/python/tests/mlir/adjoint.py +++ b/python/tests/mlir/adjoint.py @@ -29,12 +29,14 @@ def test_kernel_adjoint_no_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} : () -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () {{.*}}{ # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: return @@ -54,14 +56,15 @@ def test_kernel_adjoint_qubit_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.ref) {{.*}}{ # CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } @@ -80,15 +83,16 @@ def test_kernel_adjoint_qreg_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<5> # CHECK: %[[VAL_1:.*]] = quake.relax_size %[[VAL_0]] # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_1]] : (!quake.veq) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.veq) {{.*}}{ # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_3:.*]] = quake.veq_size %[[VAL_0]] : (!quake.veq) -> i64 @@ -125,14 +129,14 @@ def test_kernel_adjoint_float_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (f64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# 
CHECK-SAME: %[[VAL_0:.*]]: f64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: quake.x %[[VAL_1]] : (!quake.ref) -> () # CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> () @@ -158,14 +162,14 @@ def test_kernel_adjoint_int_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint" # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (i64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: quake.x %[[VAL_1]] : (!quake.ref) -> () # CHECK: return @@ -187,14 +191,14 @@ def test_kernel_adjoint_list_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint" # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}} %[[VAL_0]] : (!cc.stdvec) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec) -> !cc.ptr> # CHECK: %[[VAL_4:.*]] = cc.cast %[[VAL_2]] : (!cc.ptr>) -> !cc.ptr @@ -231,7 +235,8 @@ def test_sample_adjoint_qubit(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!quake.ref) -> () @@ -240,8 +245,8 @@ def test_sample_adjoint_qubit(): # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.ref) {{.*}}{ # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } @@ -276,8 +281,8 @@ def test_sample_adjoint_qreg(): # CHECK-LABEL: test_sample_adjoint_qreg -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_3:.*]] = quake.alloca !quake.veq[%[[VAL_0]] : i64] @@ -301,8 +306,8 @@ def test_sample_adjoint_qreg(): # CHECK: return # CHECK: } -# CHECK-LABEL: 
func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.veq) {{.*}}{ # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_3:.*]] = quake.veq_size %[[VAL_0]] : (!quake.veq) -> i64 diff --git a/python/tests/mlir/ast_break.py b/python/tests/mlir/ast_break.py index 457ec5076ae..a0b36549864 100644 --- a/python/tests/mlir/ast_break.py +++ b/python/tests/mlir/ast_break.py @@ -27,7 +27,7 @@ def kernel(x: float): # CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 5.000000e+00 : f64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 diff --git a/python/tests/mlir/ast_compute_action.py b/python/tests/mlir/ast_compute_action.py index adaffa09ca5..a59b98e548d 100644 --- a/python/tests/mlir/ast_compute_action.py +++ b/python/tests/mlir/ast_compute_action.py @@ -28,7 +28,7 @@ def compute(): # CHECK-LABEL: func.func @__nvqpp__mlirgen__reflect( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) { +# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 diff --git a/python/tests/mlir/ast_conditionals.py b/python/tests/mlir/ast_conditionals.py index 37ac6d95def..b2445af0372 100644 --- a/python/tests/mlir/ast_conditionals.py +++ b/python/tests/mlir/ast_conditionals.py @@ -27,7 +27,7 @@ def test1(): print(test1) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test1() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test1() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test2(): @@ -40,7 +40,7 @@ def test2(): print(test2) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test2() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test2() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test3(): @@ -53,7 +53,7 @@ def test3(): print(test3) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test3() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test3() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test4(): @@ -66,7 +66,7 @@ def test4(): print(test4) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test4() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test4() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test5(): @@ -79,7 +79,7 @@ def test5(): print(test5) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test5() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test5() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test6(): @@ -93,7 +93,7 @@ def test6(): print(test6) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__test6() attributes {"cudaq-entrypoint", 
qubitMeasurementFeedback = true} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__test6() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { def test_conditional_on_measure(): @@ -108,7 +108,7 @@ def test7(): print(test7) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test7() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test7() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test8(): @@ -120,7 +120,7 @@ def test8(): print(test8) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test8() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test8() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test9(): @@ -132,7 +132,7 @@ def test9(): print(test9) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test9() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test9() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test10(): @@ -144,7 +144,7 @@ def test10(): print(test10) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test10() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test10() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test11(): @@ -157,7 +157,7 @@ def test11(): print(test11) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test11() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test11() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test12(): @@ -171,7 +171,7 @@ def test12(): print(test12) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test12() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test12() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test13(): @@ -186,7 +186,7 @@ def test13(): print(test13) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test13() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test13() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { @cudaq.kernel def test14(): @@ -202,7 +202,7 @@ def test14(): print(test14) - # CHECK-LABEL: func.func @__nvqpp__mlirgen__test14() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { + # CHECK-LABEL: func.func @__nvqpp__mlirgen__test14() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { # leave for gdb debugging diff --git a/python/tests/mlir/ast_continue.py b/python/tests/mlir/ast_continue.py index ce4a290aae7..8c1c03b5ea5 100644 --- a/python/tests/mlir/ast_continue.py +++ b/python/tests/mlir/ast_continue.py @@ -28,7 +28,7 @@ def kernel(x: float): # CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1.000000e+01 : f64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 diff --git a/python/tests/mlir/ast_control_kernel.py 
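A note on the recurring FileCheck idiom in these test updates: FileCheck matches a CHECK-SAME pattern on the same output line as the preceding match, and a pattern only has to match a substring of that line. Splitting the old single-line CHECK-LABEL into a CHECK-LABEL on the symbol name plus a CHECK-SAME on the signature, and leaving the attribute dictionary unterminated in the pattern, therefore turns the check into a prefix match. For example,

    # CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel
    # CHECK-SAME: () attributes {"cudaq-entrypoint"

accepts both attributes {"cudaq-entrypoint"} and attributes {"cudaq-entrypoint", "cudaq-kernel"}, so the same test passes whether or not the builder attaches the new unit attribute. The () {{.*}}{ form used on non-entry-point kernels plays the same role when the old output had no attribute list at all, and fix_1130_bug_bad_arg_checking.py additionally switches the commutative arith.mulf operands to wildcards so canonicalization is free to reorder them.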
b/python/tests/mlir/ast_control_kernel.py index 9d493ebdb4e..508932292ca 100644 --- a/python/tests/mlir/ast_control_kernel.py +++ b/python/tests/mlir/ast_control_kernel.py @@ -27,12 +27,12 @@ def bell(): # CHECK-LABEL: func.func @__nvqpp__mlirgen__applyX( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen__bell() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__bell() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: quake.h %[[VAL_1]] : (!quake.ref) -> () diff --git a/python/tests/mlir/ast_decrementing_range.py b/python/tests/mlir/ast_decrementing_range.py index 16cda2b092d..fa65f383b44 100644 --- a/python/tests/mlir/ast_decrementing_range.py +++ b/python/tests/mlir/ast_decrementing_range.py @@ -25,7 +25,7 @@ def test(q: int, p: int): # CHECK-LABEL: func.func @__nvqpp__mlirgen__test( # CHECK-SAME: %[[VAL_0:.*]]: i64, -# CHECK-SAME: %[[VAL_1:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_1:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_2:.*]] = arith.constant -1 : i64 # CHECK: %[[VAL_3:.*]] = cc.alloca i64 # CHECK: cc.store %[[VAL_0]], %[[VAL_3]] : !cc.ptr diff --git a/python/tests/mlir/ast_elif.py b/python/tests/mlir/ast_elif.py index d7d97e89978..a11b977c336 100644 --- a/python/tests/mlir/ast_elif.py +++ b/python/tests/mlir/ast_elif.py @@ -30,7 +30,7 @@ def cost(thetas: np.ndarray): # can pass 1D ndarray or list # CHECK-LABEL: func.func @__nvqpp__mlirgen__cost( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i64 diff --git a/python/tests/mlir/ast_for_stdvec.py b/python/tests/mlir/ast_for_stdvec.py index 794e4f1b78a..f75a7a9887d 100644 --- a/python/tests/mlir/ast_for_stdvec.py +++ b/python/tests/mlir/ast_for_stdvec.py @@ -28,7 +28,7 @@ def cost(thetas: np.ndarray): # can pass 1D ndarray or list # CHECK-LABEL: func.func @__nvqpp__mlirgen__cost( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_3:.*]] = quake.alloca !quake.veq<4> diff --git a/python/tests/mlir/ast_iterate_loop_init.py b/python/tests/mlir/ast_iterate_loop_init.py index e3bfd8f09bd..80ef4f074e0 100644 --- a/python/tests/mlir/ast_iterate_loop_init.py +++ b/python/tests/mlir/ast_iterate_loop_init.py @@ -25,7 +25,7 @@ def kernel(x: float): # CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 3 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 diff --git a/python/tests/mlir/ast_lambda_tuple_stmts.py b/python/tests/mlir/ast_lambda_tuple_stmts.py index ca2cfad759f..d55c167be95 100644 --- 
a/python/tests/mlir/ast_lambda_tuple_stmts.py +++ b/python/tests/mlir/ast_lambda_tuple_stmts.py @@ -24,7 +24,7 @@ def reflect(qubits: cudaq.qview): # CHECK-LABEL: func.func @__nvqpp__mlirgen__reflect( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) { +# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 diff --git a/python/tests/mlir/ast_list_comprehension.py b/python/tests/mlir/ast_list_comprehension.py index 74bd29b5d70..b6938e2ff9e 100644 --- a/python/tests/mlir/ast_list_comprehension.py +++ b/python/tests/mlir/ast_list_comprehension.py @@ -23,7 +23,7 @@ def kernel(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 6 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 diff --git a/python/tests/mlir/ast_list_init.py b/python/tests/mlir/ast_list_init.py index 6ce32ca263c..7a8b5aae88c 100644 --- a/python/tests/mlir/ast_list_init.py +++ b/python/tests/mlir/ast_list_init.py @@ -24,7 +24,7 @@ def kernel(): kernel() -# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 4 : i64 diff --git a/python/tests/mlir/ast_list_int.py b/python/tests/mlir/ast_list_int.py index ec3770322f9..52001bbea8f 100644 --- a/python/tests/mlir/ast_list_int.py +++ b/python/tests/mlir/ast_list_int.py @@ -26,9 +26,9 @@ def oracle(register: cudaq.qview, auxillary_qubit: cudaq.qubit, # CHECK-LABEL: func.func @__nvqpp__mlirgen__oracle( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq, -# CHECK-SAME: %[[VAL_1:.*]]: !quake.ref, -# CHECK-SAME: %[[VAL_2:.*]]: !cc.stdvec) { +# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq, +# CHECK-SAME: %[[VAL_1:.*]]: !quake.ref, +# CHECK-SAME: %[[VAL_2:.*]]: !cc.stdvec) # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_5:.*]] = cc.stdvec_size %[[VAL_2]] : (!cc.stdvec) -> i64 diff --git a/python/tests/mlir/ast_qreg_slice.py b/python/tests/mlir/ast_qreg_slice.py index 10bf1140a9f..51992068577 100644 --- a/python/tests/mlir/ast_qreg_slice.py +++ b/python/tests/mlir/ast_qreg_slice.py @@ -50,7 +50,7 @@ def slice(): slice() -# CHECK-LABEL: func.func @__nvqpp__mlirgen__slice() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__slice() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 3 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 4 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : i64 diff --git a/python/tests/mlir/ast_veq_tuple_target.py b/python/tests/mlir/ast_veq_tuple_target.py index 4980ef4e247..79d143f72b4 100644 --- a/python/tests/mlir/ast_veq_tuple_target.py +++ b/python/tests/mlir/ast_veq_tuple_target.py @@ -24,7 +24,7 @@ def foo(): print(foo) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__foo() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__foo() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<3> # CHECK: 
%[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<3>) -> !quake.ref # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_0]][1] : (!quake.veq<3>) -> !quake.ref diff --git a/python/tests/mlir/ast_while_loop.py b/python/tests/mlir/ast_while_loop.py index d19532ed8bd..bb553d5a6f6 100644 --- a/python/tests/mlir/ast_while_loop.py +++ b/python/tests/mlir/ast_while_loop.py @@ -27,7 +27,7 @@ def cost(): # cost() -# CHECK-LABEL: func.func @__nvqpp__mlirgen__cost() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__cost() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = arith.constant 1 : i64 # CHECK: %[[VAL_1:.*]] = arith.constant 3.1415926535897931 : f64 # CHECK: %[[VAL_2:.*]] = arith.constant 0 : i64 @@ -65,7 +65,7 @@ def cost(): print(cost) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__cost() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__cost() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 3.1415926535897931 : f64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 14 : i64 diff --git a/python/tests/mlir/bool_var_scope.py b/python/tests/mlir/bool_var_scope.py index 55560d778ab..b52bf99cf86 100644 --- a/python/tests/mlir/bool_var_scope.py +++ b/python/tests/mlir/bool_var_scope.py @@ -22,7 +22,7 @@ def kernel(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = arith.constant true # CHECK: %[[VAL_1:.*]] = arith.constant 42 : i64 # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<2> diff --git a/python/tests/mlir/bug_1775.py b/python/tests/mlir/bug_1775.py index 089be3ca1d3..ed65e3f06ea 100644 --- a/python/tests/mlir/bug_1775.py +++ b/python/tests/mlir/bug_1775.py @@ -44,7 +44,7 @@ def test(): 'false_res') and '1' not in result.get_register_counts('false_res') -# CHECK-LABEL: func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { # CHECK: %[[VAL_0:.*]] = arith.constant true # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_2:.*]] = quake.mz %[[VAL_1]] name "res" : (!quake.ref) -> !quake.measure diff --git a/python/tests/mlir/bug_1777.py b/python/tests/mlir/bug_1777.py index f5144eee281..fd5b2f75385 100644 --- a/python/tests/mlir/bug_1777.py +++ b/python/tests/mlir/bug_1777.py @@ -31,7 +31,7 @@ def test(): print(result) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { # CHECK: %[[VAL_0:.*]] = arith.constant false # CHECK: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK: %[[VAL_2:.*]] = arith.constant 0 : i64 diff --git a/python/tests/mlir/bug_1871.py b/python/tests/mlir/bug_1871.py index 5d6ec68b43b..cab605f65a6 100644 --- a/python/tests/mlir/bug_1871.py +++ b/python/tests/mlir/bug_1871.py @@ -39,7 +39,7 @@ def kernel(theta: float): # CHECK-LABEL: func.func @__nvqpp__mlirgen__my_func( # CHECK-SAME: %[[VAL_0:.*]]: !quake.ref, -# CHECK-SAME: %[[VAL_1:.*]]: f64) { +# CHECK-SAME: %[[VAL_1:.*]]: f64) # CHECK: %[[VAL_2:.*]] = cc.alloca f64 # CHECK: 
cc.store %[[VAL_1]], %[[VAL_2]] : !cc.ptr # CHECK: %[[VAL_3:.*]] = cc.load %[[VAL_2]] : !cc.ptr @@ -51,7 +51,7 @@ def kernel(theta: float): # CHECK-LABEL: func.func @__nvqpp__mlirgen__adj_func( # CHECK-SAME: %[[VAL_0:.*]]: !quake.ref, -# CHECK-SAME: %[[VAL_1:.*]]: f64) { +# CHECK-SAME: %[[VAL_1:.*]]: f64) # CHECK: %[[VAL_2:.*]] = cc.alloca f64 # CHECK: cc.store %[[VAL_1]], %[[VAL_2]] : !cc.ptr # CHECK: %[[VAL_3:.*]] = cc.load %[[VAL_2]] : !cc.ptr @@ -60,7 +60,7 @@ def kernel(theta: float): # CHECK: } # CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_1:.*]] = cc.alloca f64 # CHECK: cc.store %[[VAL_0]], %[[VAL_1]] : !cc.ptr # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.ref diff --git a/python/tests/mlir/bug_1875.py b/python/tests/mlir/bug_1875.py index 1d1fee00e86..3e3ff78122b 100644 --- a/python/tests/mlir/bug_1875.py +++ b/python/tests/mlir/bug_1875.py @@ -35,7 +35,7 @@ def kernel_break(): assert 'a' in result.register_names -# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel_break() attributes {"cudaq-entrypoint", qubitMeasurementFeedback = true} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel_break() attributes {"cudaq-entrypoint", "cudaq-kernel", qubitMeasurementFeedback = true} { # CHECK: %[[VAL_0:.*]] = arith.constant false # CHECK: %[[VAL_1:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.ref diff --git a/python/tests/mlir/builderBug_332.py b/python/tests/mlir/builderBug_332.py index 387a54474b5..bfd7c7e88d7 100644 --- a/python/tests/mlir/builderBug_332.py +++ b/python/tests/mlir/builderBug_332.py @@ -27,17 +27,21 @@ def test_recursive_calls(): print(kernel3) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<1> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<1>) -> !quake.ref # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_1]]) : (!quake.ref) -> () # CHECK: return # CHECK: } -# CHECK: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%arg0: !quake.ref) { + +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%arg0: !quake.ref) {{.*}}{ # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%arg0) : (!quake.ref) -> () # CHECK: return # CHECK: } -# CHECK: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%arg0: !quake.ref) { + +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%arg0: !quake.ref) {{.*}}{ # CHECK: return # CHECK: } -# CHECK:} diff --git a/python/tests/mlir/call.py b/python/tests/mlir/call.py index df19e4e8bec..1eb480c963f 100644 --- a/python/tests/mlir/call.py +++ b/python/tests/mlir/call.py @@ -29,12 +29,14 @@ def test_kernel_apply_call_no_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() : () -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# 
CHECK-SAME: () {{.*}}{ # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: return @@ -55,14 +57,15 @@ def test_kernel_apply_call_qubit_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!quake.ref) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.ref) {{.*}}{ # CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } @@ -82,15 +85,16 @@ def test_kernel_apply_call_qreg_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<5> # CHECK: %[[VAL_1:.*]] = quake.relax_size %[[VAL_0]] : (!quake.veq<5>) -> !quake.veq # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_1]]) : (!quake.veq) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.veq) {{.*}}{ # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_3:.*]] = quake.veq_size %[[VAL_0]] : (!quake.veq) -> i64 @@ -125,14 +129,14 @@ def test_kernel_apply_call_float_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (f64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> () # CHECK: return @@ -155,14 +159,14 @@ def test_kernel_apply_call_int_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint" # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (i64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: return # CHECK: } @@ -182,14 +186,14 @@ def test_kernel_apply_call_list_args(): print(kernel) -# 
CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint" # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%[[VAL_0]]) : (!cc.stdvec) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec) -> !cc.ptr> # CHECK: %[[VAL_4:.*]] = cc.cast %[[VAL_2]] : (!cc.ptr>) -> !cc.ptr diff --git a/python/tests/mlir/conditional.py b/python/tests/mlir/conditional.py index 79f6dd37abf..d2d46e29343 100644 --- a/python/tests/mlir/conditional.py +++ b/python/tests/mlir/conditional.py @@ -47,7 +47,7 @@ def test_function(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 @@ -114,7 +114,7 @@ def then_function(): # CHECK-LABEL: test_kernel_conditional_with_sample -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: %[[VAL_1:.*]] = quake.mz %[[VAL_0]] name "" : (!quake.ref) -> !quake.measure @@ -157,7 +157,7 @@ def then(): # CHECK-LABEL: test_cif_extract_ref_bug -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: quake.x %[[VAL_1]] : (!quake.ref) -> () diff --git a/python/tests/mlir/control.py b/python/tests/mlir/control.py index 176a4d8cb87..feace59c9f8 100644 --- a/python/tests/mlir/control.py +++ b/python/tests/mlir/control.py @@ -36,13 +36,15 @@ def test_kernel_control_no_args(qubit_count): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<1> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_0]]] : (!quake.veq<1>) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () {{.*}}{ # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<1> @@ -62,13 +64,15 @@ def test_kernel_control_no_args(qubit_count): # CHECK: return # CHECK: } -# CHECK-LABEL: func.func 
@__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<5> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_0]]] : (!quake.veq<5>) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () {{.*}}{ # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 5 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 @@ -110,29 +114,29 @@ def test_kernel_control_float_args(qubit_count): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<1>, f64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<5>, f64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> () # CHECK: return @@ -158,28 +162,28 @@ def test_kernel_control_int_args(qubit_count): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<1>, i64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1> # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func 
@__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<5>, i64) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: i64) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5> # CHECK: return # CHECK: } @@ -205,15 +209,15 @@ def test_kernel_control_list_args(qubit_count): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<1> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<1>, !cc.stdvec) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec) -> !cc.ptr> # CHECK: %[[VAL_4:.*]] = cc.cast %[[VAL_2]] : (!cc.ptr>) -> !cc.ptr @@ -222,15 +226,15 @@ def test_kernel_control_list_args(qubit_count): # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<5> # CHECK: quake.apply @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}{{\[}}%[[VAL_1]]] %[[VAL_0]] : (!quake.veq<5>, !cc.stdvec) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) {{.*}}{ # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_2:.*]] = cc.stdvec_data %[[VAL_0]] : (!cc.stdvec) -> !cc.ptr> # CHECK: %[[VAL_4:.*]] = cc.cast %[[VAL_2]] : (!cc.ptr>) -> !cc.ptr @@ -276,7 +280,8 @@ def test_sample_control_qubit_args(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %{{[0-2]}} = quake.alloca !quake.ref # CHECK: %{{[0-2]}} = quake.alloca !quake.ref # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%{{[0-2]}}) : (!quake.ref) -> () @@ -287,8 +292,8 @@ def test_sample_control_qubit_args(): # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.ref) {{.*}}{ # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } @@ -332,7 
+337,8 @@ def test_sample_control_qreg_args(): # CHECK-LABEL: test_sample_control_qreg_args -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_5:.*]] = quake.alloca !quake.veq<2> # CHECK-DAG: %[[VAL_6:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_7:.*]] = quake.extract_ref %[[VAL_5]][0] : (!quake.veq<2>) -> !quake.ref @@ -343,8 +349,8 @@ def test_sample_control_qreg_args(): # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: !quake.ref) {{.*}}{ # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } @@ -389,7 +395,8 @@ def test_sample_apply_call_control(): # CHECK-LABEL: test_sample_apply_call_control -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: () attributes {"cudaq-entrypoint" # CHECK: %{{[0-2]}} = quake.alloca !quake.ref # CHECK: %{{[0-2]}} = quake.alloca !quake.ref # CHECK: call @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%{{[0-2]}}) : (!quake.ref) -> () @@ -400,14 +407,14 @@ def test_sample_apply_call_control(): # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {{.*}}{ # CHECK: quake.x %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: %[[VAL_0:.*]]: !quake.ref) {{.*}}{ # CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> () # CHECK: return # CHECK: } diff --git a/python/tests/mlir/control_toffoli.py b/python/tests/mlir/control_toffoli.py index 6875c50d9e0..f3aff17bd3b 100644 --- a/python/tests/mlir/control_toffoli.py +++ b/python/tests/mlir/control_toffoli.py @@ -27,7 +27,7 @@ def toffoli(): print(toffoli) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__toffoli() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__toffoli() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<3> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<3>) -> !quake.ref # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_0]][2] : (!quake.veq<3>) -> !quake.ref diff --git a/python/tests/mlir/ctrl_gates.py b/python/tests/mlir/ctrl_gates.py index be5392fe45e..e804c20b005 100644 --- a/python/tests/mlir/ctrl_gates.py +++ b/python/tests/mlir/ctrl_gates.py @@ -43,7 +43,7 @@ def test_kernel_2q_ctrl(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_0]][1] : (!quake.veq<2>) -> !quake.ref @@ -84,7 +84,7 @@ def 
test_kernel_ctrl_rotation(): # CHECK-LABEL: test_kernel_ctrl_rotation # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 3.000000e+00 : f64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1.000000e+00 : f64 @@ -139,7 +139,7 @@ def test_kernel_multi_ctrl(): # CHECK-LABEL: test_kernel_multi_ctrl -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<3> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<3>) -> !quake.ref # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_0]][1] : (!quake.veq<3>) -> !quake.ref @@ -178,7 +178,7 @@ def test_kernel_ctrl_register(): # CHECK-LABEL: test_kernel_ctrl_register -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_0:.*]] = quake.alloca !quake.veq<3> # CHECK-DAG: %[[VAL_1:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_1]][0] : (!quake.veq<2>) -> !quake.ref @@ -226,7 +226,7 @@ def test_kernel_rotation_ctrl_register(): # CHECK-LABEL: test_kernel_rotation_ctrl_register # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 3 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 3.000000e+00 : f64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f64 @@ -291,7 +291,7 @@ def test_ctrl_swap(): # CHECK-LABEL: test_ctrl_swap -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK-DAG: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK-DAG: %[[VAL_2:.*]] = quake.alloca !quake.ref diff --git a/python/tests/mlir/custom_op_builder.py b/python/tests/mlir/custom_op_builder.py index 9e8f18573f6..32fced02a82 100644 --- a/python/tests/mlir/custom_op_builder.py +++ b/python/tests/mlir/custom_op_builder.py @@ -43,7 +43,7 @@ def register_custom_operations(matrix): counts = cudaq.sample(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = arith.constant 2 : i64 # CHECK: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK: %[[VAL_2:.*]] = arith.constant 0 : i64 diff --git a/python/tests/mlir/custom_operation.py b/python/tests/mlir/custom_operation.py index 5a6605537d0..675b8ee68be 100644 --- a/python/tests/mlir/custom_operation.py +++ b/python/tests/mlir/custom_operation.py @@ -27,7 +27,7 @@ def bell(): print(bell) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__bell() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__bell() 
attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: quake.custom_op @__nvqpp__mlirgen__custom_h_generator_1.rodata %[[VAL_1]] : (!quake.ref) -> () @@ -56,7 +56,7 @@ def kernel(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__kernel() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> () # CHECK: quake.custom_op @__nvqpp__mlirgen__custom_s_generator_1.rodata %[[VAL_0]] : (!quake.ref) -> () diff --git a/python/tests/mlir/fix_1130_bug_bad_arg_checking.py b/python/tests/mlir/fix_1130_bug_bad_arg_checking.py index c3a78fafa0e..5c8f53cf271 100644 --- a/python/tests/mlir/fix_1130_bug_bad_arg_checking.py +++ b/python/tests/mlir/fix_1130_bug_bad_arg_checking.py @@ -23,8 +23,8 @@ def test_bad_arg_checking_fix_1130(): print(qernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_ +# CHECK-SAME: (%[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_1]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: %[[VAL_3:.*]] = quake.extract_ref %[[VAL_1]][1] : (!quake.veq<2>) -> !quake.ref @@ -32,11 +32,12 @@ def test_bad_arg_checking_fix_1130(): # CHECK: return # CHECK: } -# CHECK: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}(%arg0: !quake.ref, %arg1: !quake.ref, %arg2: f64) { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel +# CHECK-SAME: (%[[arg0:.*]]: !quake.ref, %[[arg1:.*]]: !quake.ref, %[[arg2:.*]]: f64) # CHECK: %cst = arith.constant 2.000000e+00 : f64 -# CHECK: quake.x [%arg0] %arg1 : (!quake.ref, !quake.ref) -> () -# CHECK: %0 = arith.mulf {{%arg2, %cst|%cst, %arg2}} : f64 -# CHECK: quake.rz (%0) %arg1 : (f64, !quake.ref) -> () -# CHECK: quake.x [%arg0] %arg1 : (!quake.ref, !quake.ref) -> () +# CHECK: quake.x [%[[arg0]]] %[[arg1]] : (!quake.ref, !quake.ref) -> () +# CHECK: %0 = arith.mulf %{{.*}}, %{{.*}} : f64 +# CHECK: quake.rz (%0) %[[arg1]] : (f64, !quake.ref) -> () +# CHECK: quake.x [%[[arg0]]] %[[arg1]] : (!quake.ref, !quake.ref) -> () # CHECK: return # CHECK: } diff --git a/python/tests/mlir/float.py b/python/tests/mlir/float.py index da737d9869d..a76c041a37c 100644 --- a/python/tests/mlir/float.py +++ b/python/tests/mlir/float.py @@ -30,7 +30,7 @@ def test_make_kernel_float(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: return # CHECK: } diff --git a/python/tests/mlir/ghz.py b/python/tests/mlir/ghz.py index b91d9aed695..3129bf0734f 100644 --- a/python/tests/mlir/ghz.py +++ b/python/tests/mlir/ghz.py @@ -23,7 +23,7 @@ def ghz(N: int): print(ghz) # CHECK-LABEL: func.func @__nvqpp__mlirgen__ghz( - # CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { + # CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_3:.*]] = cc.alloca i64 @@ -63,7 +63,7 @@ def 
simple(numQubits: int): # CHECK-LABEL: func.func @__nvqpp__mlirgen__simple( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i64 diff --git a/python/tests/mlir/list.py b/python/tests/mlir/list.py index 5d6f054c115..c85f19085c0 100644 --- a/python/tests/mlir/list.py +++ b/python/tests/mlir/list.py @@ -30,7 +30,7 @@ def test_make_kernel_list(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec) attributes {"cudaq-entrypoint" # CHECK: return # CHECK: } diff --git a/python/tests/mlir/measure.py b/python/tests/mlir/measure.py index 9065727fc76..c1bb76b3323 100644 --- a/python/tests/mlir/measure.py +++ b/python/tests/mlir/measure.py @@ -37,7 +37,7 @@ def test_kernel_measure_1q(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_3:.*]] = quake.extract_ref %[[VAL_2]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: %[[VAL_4:.*]] = quake.extract_ref %[[VAL_2]][1] : (!quake.veq<2>) -> !quake.ref @@ -68,7 +68,7 @@ def test_kernel_measure_qreg(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<3> # CHECK: %[[VAL_1:.*]] = quake.mx %[[VAL_0]] name "" : (!quake.veq<3>) -> !cc.stdvec # CHECK: %[[VAL_2:.*]] = quake.my %[[VAL_0]] name "" : (!quake.veq<3>) -> !cc.stdvec diff --git a/python/tests/mlir/mixed_args.py b/python/tests/mlir/mixed_args.py index 27648192706..e82c0d9c06f 100644 --- a/python/tests/mlir/mixed_args.py +++ b/python/tests/mlir/mixed_args.py @@ -30,7 +30,7 @@ def test_make_kernel_mixed_args(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( # CHECK-SAME: %[[VAL_0:.*]]: !cc.stdvec, -# CHECK-SAME: %[[VAL_1:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_1:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: return # CHECK: } diff --git a/python/tests/mlir/multi_qubit.py b/python/tests/mlir/multi_qubit.py index 2a7c4b13593..9692b8e96cb 100644 --- a/python/tests/mlir/multi_qubit.py +++ b/python/tests/mlir/multi_qubit.py @@ -43,7 +43,7 @@ def test_kernel_2q(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_3:.*]] = quake.extract_ref %[[VAL_2]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: %[[VAL_4:.*]] = quake.extract_ref %[[VAL_2]][1] : (!quake.veq<2>) -> !quake.ref @@ -82,7 +82,7 @@ def test_kernel_3q(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_3:.*]] = 
quake.alloca !quake.veq<3> # CHECK: %[[VAL_4:.*]] = quake.extract_ref %[[VAL_3]][0] : (!quake.veq<3>) -> !quake.ref # CHECK: %[[VAL_5:.*]] = quake.extract_ref %[[VAL_3]][1] : (!quake.veq<3>) -> !quake.ref diff --git a/python/tests/mlir/multiple_floats.py b/python/tests/mlir/multiple_floats.py index ab921754ad7..3adb76a8167 100644 --- a/python/tests/mlir/multiple_floats.py +++ b/python/tests/mlir/multiple_floats.py @@ -30,7 +30,7 @@ def test_make_kernel_multiple_floats(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( # CHECK-SAME: %[[VAL_0:.*]]: f64, -# CHECK: %[[VAL_1:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK: %[[VAL_1:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: return # CHECK: } diff --git a/python/tests/mlir/no_input.py b/python/tests/mlir/no_input.py index 96ad501989e..0ca8dee28a4 100644 --- a/python/tests/mlir/no_input.py +++ b/python/tests/mlir/no_input.py @@ -30,7 +30,7 @@ def test_make_kernel_no_input(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK-NEXT: return # CHECK-NEXT: } diff --git a/python/tests/mlir/one_qubit.py b/python/tests/mlir/one_qubit.py index 16ea5955b70..6e7f432c4f9 100644 --- a/python/tests/mlir/one_qubit.py +++ b/python/tests/mlir/one_qubit.py @@ -42,7 +42,7 @@ def test_kernel_non_param_1q(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %0 = quake.alloca !quake.veq<1> # CHECK: %[[VAL_0:.*]] = quake.extract_ref %0[0] : (!quake.veq<1>) -> !quake.ref # CHECK: quake.h %[[VAL_0]] : (!quake.ref) -> () @@ -83,7 +83,7 @@ def test_kernel_param_1q(): # CHECK-LABEL: test_kernel_param_1q # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK: %0 = quake.alloca !quake.veq<1> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %0[0] : (!quake.veq<1>) -> !quake.ref # CHECK: quake.rx (%[[VAL_0]]) %[[VAL_1]] : (f64, !quake.ref) -> () diff --git a/python/tests/mlir/qalloc.py b/python/tests/mlir/qalloc.py index d2a68737bba..90ca53b091b 100644 --- a/python/tests/mlir/qalloc.py +++ b/python/tests/mlir/qalloc.py @@ -24,7 +24,7 @@ def test_kernel_qalloc_empty(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: return # CHECK: } @@ -42,7 +42,7 @@ def test_kernel_qalloc_qreg(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq<10> # CHECK: return # CHECK: } @@ -60,7 +60,7 @@ def test_kernel_qalloc_qreg_keyword(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] 
= quake.alloca !quake.veq<10> # CHECK: return # CHECK: } @@ -78,7 +78,7 @@ def test_kernel_qalloc_quake_val(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint" # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.veq[%[[VAL_0]] : i64] # CHECK: return # CHECK: } @@ -96,7 +96,7 @@ def test_kernel_qalloc_qubit(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<1> # CHECK: return # CHECK: } @@ -113,7 +113,7 @@ def test_kernel_qalloc_qubit_keyword(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<1> # CHECK: return # CHECK: } diff --git a/python/tests/mlir/qft.py b/python/tests/mlir/qft.py index 2ec4767052c..1429bdd06f9 100644 --- a/python/tests/mlir/qft.py +++ b/python/tests/mlir/qft.py @@ -33,7 +33,7 @@ def iqft(qubits: cudaq.qview): # CHECK-LABEL: func.func @__nvqpp__mlirgen__iqft( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) { +# CHECK-SAME: %[[VAL_0:.*]]: !quake.veq) # CHECK-DAG: %[[VAL_1:.*]] = arith.constant -3.1415926535897931 : f64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant -1 : i64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 diff --git a/python/tests/mlir/qreg_apply.py b/python/tests/mlir/qreg_apply.py index cb32afb099c..3a992401091 100644 --- a/python/tests/mlir/qreg_apply.py +++ b/python/tests/mlir/qreg_apply.py @@ -37,7 +37,7 @@ def test_kernel_qreg(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_0:.*]] = arith.constant 2 : i64 # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 diff --git a/python/tests/mlir/qreg_iterable.py b/python/tests/mlir/qreg_iterable.py index 47ba2d3d646..8ece9fafe23 100644 --- a/python/tests/mlir/qreg_iterable.py +++ b/python/tests/mlir/qreg_iterable.py @@ -23,7 +23,7 @@ def foo(N: int): # CHECK-LABEL: func.func @__nvqpp__mlirgen__foo( -# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: i64) attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 1 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : i64 # CHECK: %[[VAL_3:.*]] = cc.alloca i64 diff --git a/python/tests/mlir/quantum_type.py b/python/tests/mlir/quantum_type.py index 3cee0f30cd1..c48cad50cbb 100644 --- a/python/tests/mlir/quantum_type.py +++ b/python/tests/mlir/quantum_type.py @@ -53,21 +53,21 @@ def run(): # NAUGHTY-LABEL: func.func @__nvqpp__mlirgen__logicalH( -# NAUGHTY-SAME: %[[VAL_0:.*]]: !quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) { +# NAUGHTY-SAME: %[[VAL_0:.*]]: !quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) # NAUGHTY: %[[VAL_3:.*]] = quake.get_member %[[VAL_0]][0] : (!quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) -> !quake.veq # NAUGHTY: %[[VAL_4:.*]] = quake.veq_size %[[VAL_3]] : (!quake.veq) -> i64 # 
NAUGHTY: return # NAUGHTY: } # NICE-LABEL: func.func @__nvqpp__mlirgen__logicalX( -# NICE-SAME: %[[VAL_0:.*]]: !quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) { +# NICE-SAME: %[[VAL_0:.*]]: !quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) # NICE: %[[VAL_3:.*]] = quake.get_member %[[VAL_0]][1] : (!quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) -> !quake.veq # NICE: %[[VAL_4:.*]] = quake.veq_size %[[VAL_3]] : (!quake.veq) -> i64 # NICE: return # NICE: } # CHECK-LABEL: func.func @__nvqpp__mlirgen__logicalZ( -# CHECK-SAME: %[[VAL_0:.*]]: !quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) { +# CHECK-SAME: %[[VAL_0:.*]]: !quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) # CHECK: %[[VAL_3:.*]] = quake.get_member %[[VAL_0]][2] : (!quake.struq<"patch": !quake.veq, !quake.veq, !quake.veq>) -> !quake.veq # CHECK: %[[VAL_4:.*]] = quake.veq_size %[[VAL_3]] : (!quake.veq) -> i64 # CHECK: return diff --git a/python/tests/mlir/rotation_gates.py b/python/tests/mlir/rotation_gates.py index 5a6caf6c24e..785160c31f7 100644 --- a/python/tests/mlir/rotation_gates.py +++ b/python/tests/mlir/rotation_gates.py @@ -39,7 +39,7 @@ def test_control_list_rotation(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 4.000000e+00 : f64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 3.000000e+00 : f64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2.000000e+00 : f64 @@ -80,7 +80,7 @@ def test_rotation_qreg(): # CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}( -# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint"} { +# CHECK-SAME: %[[VAL_0:.*]]: f64) attributes {"cudaq-entrypoint" # CHECK-DAG: %[[VAL_1:.*]] = arith.constant 3 : i64 # CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1.000000e+00 : f64 # CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : i64 diff --git a/python/tests/mlir/swap.py b/python/tests/mlir/swap.py index 6d8d1123719..aeae75340b6 100644 --- a/python/tests/mlir/swap.py +++ b/python/tests/mlir/swap.py @@ -33,7 +33,7 @@ def test_swap_2q(): print(kernel) -# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen____nvqppBuilderKernel_{{.*}}() attributes {"cudaq-entrypoint" # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.veq<2> # CHECK: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<2>) -> !quake.ref # CHECK: %[[VAL_2:.*]] = quake.extract_ref %[[VAL_0]][1] : (!quake.veq<2>) -> !quake.ref diff --git a/python/tests/mlir/tuple_assign.py b/python/tests/mlir/tuple_assign.py index d3083e4872b..2703565199a 100644 --- a/python/tests/mlir/tuple_assign.py +++ b/python/tests/mlir/tuple_assign.py @@ -22,7 +22,7 @@ def test(): print(test) -# CHECK-LABEL: func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint"} { +# CHECK-LABEL: func.func @__nvqpp__mlirgen__test() attributes {"cudaq-entrypoint", "cudaq-kernel"} { # CHECK: %[[VAL_0:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_1:.*]] = quake.alloca !quake.ref # CHECK: %[[VAL_2:.*]] = quake.alloca !quake.ref diff --git a/python/utils/OpaqueArguments.h b/python/utils/OpaqueArguments.h index dd76e6fcc52..26389bbf2eb 100644 --- a/python/utils/OpaqueArguments.h +++ b/python/utils/OpaqueArguments.h @@ -12,6 +12,7 @@ #include "common/FmtCore.h" #include "cudaq/Optimizer/Builder/Runtime.h" #include 
"cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" #include "cudaq/Optimizer/Dialect/CC/CCTypes.h" #include "cudaq/builder/kernel_builder.h" #include "cudaq/qis/pauli_word.h" diff --git a/runtime/common/RuntimeMLIRCommonImpl.h b/runtime/common/RuntimeMLIRCommonImpl.h index 657de8f4cf8..63f43488b24 100644 --- a/runtime/common/RuntimeMLIRCommonImpl.h +++ b/runtime/common/RuntimeMLIRCommonImpl.h @@ -19,6 +19,7 @@ #include "cudaq/Optimizer/CodeGen/Pipelines.h" #include "cudaq/Optimizer/CodeGen/QIRAttributeNames.h" #include "cudaq/Optimizer/CodeGen/QIRFunctionNames.h" +#include "cudaq/Optimizer/CodeGen/QIROpaqueStructTypes.h" #include "cudaq/Optimizer/Dialect/CC/CCDialect.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeDialect.h" #include "cudaq/Optimizer/Dialect/Quake/QuakeOps.h" @@ -678,7 +679,7 @@ mlir::ExecutionEngine *createQIRJITEngine(mlir::ModuleOp &moduleOp, if (containsWireSet) cudaq::opt::addWiresetToProfileQIRPipeline(pm, convertTo); else - cudaq::opt::commonPipelineConvertToQIR(pm, convertTo); + cudaq::opt::commonPipelineConvertToQIR(pm, "qir", convertTo); auto enablePrintMLIREachPass = getEnvBool("CUDAQ_MLIR_PRINT_EACH_PASS", false); diff --git a/runtime/cudaq/builder/kernel_builder.cpp b/runtime/cudaq/builder/kernel_builder.cpp index 6bebbf72786..c5a8a15c82e 100644 --- a/runtime/cudaq/builder/kernel_builder.cpp +++ b/runtime/cudaq/builder/kernel_builder.cpp @@ -887,6 +887,8 @@ void tagEntryPoint(ImplicitLocOpBuilder &builder, ModuleOp &module, module.walk([&](func::FuncOp function) { if (function.empty()) return WalkResult::advance(); + if (!function->hasAttr(cudaq::kernelAttrName)) + function->setAttr(cudaq::kernelAttrName, builder.getUnitAttr()); if (!function->hasAttr(cudaq::entryPointAttrName) && !hasAnyQubitTypes(function.getFunctionType()) && (symbolName.empty() || function.getSymName().equals(symbolName))) diff --git a/runtime/nvqir/NVQIR.cpp b/runtime/nvqir/NVQIR.cpp index 75ac9188dd3..f06c967e119 100644 --- a/runtime/nvqir/NVQIR.cpp +++ b/runtime/nvqir/NVQIR.cpp @@ -127,12 +127,14 @@ Array *vectorSizetToArray(std::vector &idxs) { /// @brief Utility function mapping a QIR Array pointer to a vector of ids std::vector arrayToVectorSizeT(Array *arr) { + assert(arr && "array must not be null"); std::vector ret; - for (std::size_t i = 0; i < arr->size(); i++) { + const auto arrSize = arr->size(); + for (std::size_t i = 0; i < arrSize; ++i) { auto arrayPtr = (*arr)[i]; Qubit *idxVal = *reinterpret_cast(arrayPtr); if (qubitPtrIsIndex) - ret.push_back((intptr_t)idxVal); + ret.push_back(reinterpret_cast(idxVal)); else ret.push_back(idxVal->idx); } @@ -143,7 +145,7 @@ std::vector arrayToVectorSizeT(Array *arr) { std::size_t qubitToSizeT(Qubit *q) { if (qubitPtrIsIndex) return (intptr_t)q; - + assert(q && "qubit must not be null"); return q->idx; } @@ -257,7 +259,7 @@ void __quantum__rt__resetExecutionContext() { } /// @brief QIR function for allocated a qubit array -Array *__quantum__rt__qubit_allocate_array(uint64_t numQubits) { +Array *__quantum__rt__qubit_allocate_array(std::uint64_t numQubits) { ScopedTraceWithContext("NVQIR::qubit_allocate_array", numQubits); __quantum__rt__initialize(0, nullptr); auto qubitIdxs = @@ -266,10 +268,10 @@ Array *__quantum__rt__qubit_allocate_array(uint64_t numQubits) { } Array *__quantum__rt__qubit_allocate_array_with_state_complex32( - uint64_t numQubits, std::complex *data); + std::uint64_t numQubits, std::complex *data); Array *__quantum__rt__qubit_allocate_array_with_state_complex64( - 
@@ -257,7 +259,7 @@ void __quantum__rt__resetExecutionContext() {
 }
 
 /// @brief QIR function for allocating a qubit array
-Array *__quantum__rt__qubit_allocate_array(uint64_t numQubits) {
+Array *__quantum__rt__qubit_allocate_array(std::uint64_t numQubits) {
   ScopedTraceWithContext("NVQIR::qubit_allocate_array", numQubits);
   __quantum__rt__initialize(0, nullptr);
   auto qubitIdxs =
@@ -266,10 +268,10 @@
 }
 
 Array *__quantum__rt__qubit_allocate_array_with_state_complex32(
-    uint64_t numQubits, std::complex<float> *data);
+    std::uint64_t numQubits, std::complex<float> *data);
 
 Array *__quantum__rt__qubit_allocate_array_with_state_complex64(
-    uint64_t numQubits, std::complex<double> *data) {
+    std::uint64_t numQubits, std::complex<double> *data) {
   ScopedTraceWithContext("NVQIR::qubit_allocate_array_with_data_complex64",
                          numQubits);
   __quantum__rt__initialize(0, nullptr);
@@ -284,8 +286,9 @@
   return vectorSizetToArray(qubitIdxs);
 }
 
-Array *__quantum__rt__qubit_allocate_array_with_state_fp64(uint64_t numQubits,
-                                                           double *data) {
+Array *
+__quantum__rt__qubit_allocate_array_with_state_fp64(std::uint64_t numQubits,
+                                                    double *data) {
   ScopedTraceWithContext("NVQIR::qubit_allocate_array_with_data_fp64",
                          numQubits);
   if (nvqir::getCircuitSimulatorInternal()->isDoublePrecision()) {
@@ -314,7 +317,7 @@ Array *__quantum__rt__qubit_allocate_array_with_state_ptr(
 }
 
 Array *
-__quantum__rt__qubit_allocate_array_with_cudaq_state_ptr(int _,
+__quantum__rt__qubit_allocate_array_with_cudaq_state_ptr(std::uint64_t,
                                                          cudaq::state *state) {
   if (!state)
     throw std::invalid_argument("[NVQIR] Invalid state encountered "
@@ -610,10 +613,16 @@ void __quantum__rt__result_record_output(Result *r, int8_t *name) {
                                reinterpret_cast<const char *>(name));
 }
 
+static std::vector<std::size_t> safeArrayToVectorSizeT(Array *arr) {
+  if (!arr)
+    return {};
+  return arrayToVectorSizeT(arr);
+}
+
 void __quantum__qis__custom_unitary(std::complex<double> *unitary,
                                     Array *controls, Array *targets,
                                     const char *name) {
-  auto ctrlsVec = arrayToVectorSizeT(controls);
+  auto ctrlsVec = safeArrayToVectorSizeT(controls);
   auto tgtsVec = arrayToVectorSizeT(targets);
   auto numQubits = tgtsVec.size();
   if (numQubits >= 64)
@@ -629,8 +638,7 @@
 void __quantum__qis__custom_unitary__adj(std::complex<double> *unitary,
                                          Array *controls, Array *targets,
                                          const char *name) {
-
-  auto ctrlsVec = arrayToVectorSizeT(controls);
+  auto ctrlsVec = safeArrayToVectorSizeT(controls);
   auto tgtsVec = arrayToVectorSizeT(targets);
   auto numQubits = tgtsVec.size();
   if (numQubits >= 64)
@@ -1001,6 +1009,38 @@ static void commonInvokeWithRotationsControlsTargets(
   }
 }
 
+void generalizedInvokeWithRotationsControlsTargets(
+    std::size_t numRotationOperands, std::size_t numControlArrayOperands,
+    std::size_t numControlQubitOperands, std::size_t numTargetOperands,
+    void (*QISFunction)(...), ...) {
+  const std::size_t totalControls =
+      numControlArrayOperands + numControlQubitOperands;
+  double parameters[numRotationOperands];
+  std::size_t arrayAndLength[totalControls];
+  Qubit *controls[totalControls];
+  Qubit *targets[numTargetOperands];
+  std::size_t i;
+  va_list args;
+  va_start(args, QISFunction);
+  for (i = 0; i < numRotationOperands; ++i)
+    parameters[i] = va_arg(args, double);
+  for (i = 0; i < numControlArrayOperands; ++i) {
+    arrayAndLength[i] = va_arg(args, std::size_t);
+    controls[i] = va_arg(args, Qubit *);
+  }
+  for (i = 0; i < numControlQubitOperands; ++i) {
+    arrayAndLength[numControlArrayOperands + i] = 0;
+    controls[numControlArrayOperands + i] = va_arg(args, Qubit *);
+  }
+  for (i = 0; i < numTargetOperands; ++i)
+    targets[i] = va_arg(args, Qubit *);
+  va_end(args);
+
+  commonInvokeWithRotationsControlsTargets(
+      numRotationOperands, parameters, totalControls, arrayAndLength, controls,
+      numTargetOperands, targets, reinterpret_cast(QISFunction));
+}
+
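+// The variadic payload above is packed as: rotation doubles first, then each
+// control Array* preceded by its element count, then lone control Qubit*s
+// (recorded with length 0 in arrayAndLength), then the target Qubit*s. For
+// illustration, a minimal sketch (`c0`, `c1`, and `t` are hypothetical
+// Qubit* values): a doubly-controlled X lowers to
+//   generalizedInvokeWithRotationsControlsTargets(
+//       /*numRotationOperands=*/0, /*numControlArrayOperands=*/0,
+//       /*numControlQubitOperands=*/2, /*numTargetOperands=*/1,
+//       (void (*)(...))__quantum__qis__x__ctl, c0, c1, t);
+// matching the generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0,
+// i64 2, i64 1, ...) calls in the FULL FileCheck lines of base_profile-1.cpp
+// below.
+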
 /// @brief Utility function used by Quake->QIR to invoke a QIR QIS function
 /// with a variadic list of control qubits.
 void invokeWithControlQubits(const std::size_t numControlOperands,
diff --git a/targettests/execution/qir_string_labels.cpp b/targettests/execution/qir_string_labels.cpp
index a0ebc86a2f2..aa109e4397b 100644
--- a/targettests/execution/qir_string_labels.cpp
+++ b/targettests/execution/qir_string_labels.cpp
@@ -6,11 +6,13 @@
  * the terms of the Apache License 2.0 which accompanies this distribution.  *
  ******************************************************************************/
 
-// Note: change |& to 2>&1 if running in bash
-// RUN: nvq++ %cpp_std -v %s -o %t --target quantinuum --emulate && CUDAQ_DUMP_JIT_IR=1 %t |& FileCheck %s
-// RUN: nvq++ %cpp_std -v %s -o %t --target ionq --emulate && CUDAQ_DUMP_JIT_IR=1 %t |& FileCheck --check-prefix IONQ %s
+// clang-format off
+// RUN: nvq++ %cpp_std -v %s -o %t --target quantinuum --emulate && CUDAQ_DUMP_JIT_IR=1 %t |& FileCheck --check-prefixes=CHECK,QUANTINUUM %s
+// RUN: nvq++ %cpp_std -v %s -o %t --target ionq --emulate && CUDAQ_DUMP_JIT_IR=1 %t |& FileCheck --check-prefixes=CHECK,IONQ %s
 // RUN: nvq++ -std=c++17 --enable-mlir %s -o %t
-// Note: iqm not currently tested because it does not currently use QIR
+// clang-format on
+
+// Note: iqm (and others) that don't use QIR should not be included in this test.
 
 #include
 #include
@@ -29,24 +31,11 @@ int main() {
   return 0;
 }
 
-// CHECK: @cstr.[[ADDRESS:[A-Z0-9]+]] = private constant [14 x i8] c"measureResult\00"
-// CHECK-DAG: declare void @__quantum__qis__mz__body(%Qubit*, %Result* writeonly) local_unnamed_addr #[[ATTR_0:[0-9]+]]
-// CHECK-DAG: define void @__nvqpp__mlirgen__function_qir_test.{{.*}}() local_unnamed_addr #[[ATTR_1:[0-9]+]]
-// CHECK: call void @__quantum__rt__result_record_output(%Result* null, i8* nonnull getelementptr inbounds ([14 x i8], [14 x i8]* @cstr.[[ADDRESS]], i64 0, i64 0))
-// CHECK-DAG: attributes #[[ATTR_0]] = { "irreversible" }
-// CHECK-DAG: attributes #[[ATTR_1]] = { "entry_point" {{.*}} "qir_profiles"="adaptive_profile" "requiredQubits"="1" "requiredResults"="1" }
-// CHECK-DAG: !llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12}
-// CHECK-DAG: !0 = !{i32 2, !"Debug Info Version", i32 3}
-// CHECK-DAG: !1 = !{i32 1, !"qir_major_version", i32 1}
-// CHECK-DAG: !2 = !{i32 7, !"qir_minor_version", i32 0}
-// CHECK-DAG: !3 = !{i32 1, !"dynamic_qubit_management", i1 false}
-// CHECK-DAG: !4 = !{i32 1, !"dynamic_result_management", i1 false}
+// clang-format off
+// QUANTINUUM: @cstr.[[ADDRESS:[A-Z0-9]+]] = private constant [14 x i8] c"measureResult\00"
+// CHECK-LABEL: define void @__nvqpp__mlirgen__function_qir_test.
+// CHECK-SAME: () local_unnamed_addr #[[ATTR_1:[0-9]+]] { +// QUANTINUUM: call void @__quantum__rt__result_record_output(%Result* null, i8* nonnull getelementptr inbounds ([14 x i8], [14 x i8]* @cstr.[[ADDRESS]], i64 0, i64 0)) +// IONQ: tail call void @__quantum__qis__x__body( +// CHECK: attributes #[[ATTR_1]] = { "entry_point" {{.*}}"qir_profiles"="{{.*}}_profile" "requiredQubits"="1" "requiredResults"="1" } -// IONQ: define void @__nvqpp__mlirgen__function_qir_test.{{.*}}() local_unnamed_addr #[[ATTR_1:[0-9]+]] -// IONQ-DAG: attributes #[[ATTR_1]] = { "entry_point" {{.*}} "output_names"={{.*}} "qir_profiles"="base_profile" "requiredQubits"="1" "requiredResults"="1" } -// IONQ-DAG: !llvm.module.flags = !{!0, !1, !2, !3, !4} -// IONQ-DAG: !0 = !{i32 2, !"Debug Info Version", i32 3} -// IONQ-DAG: !1 = !{i32 1, !"qir_major_version", i32 1} -// IONQ-DAG: !2 = !{i32 7, !"qir_minor_version", i32 0} -// IONQ-DAG: !3 = !{i32 1, !"dynamic_qubit_management", i1 false} -// IONQ-DAG: !4 = !{i32 1, !"dynamic_result_management", i1 false} diff --git a/test/AST-Quake/base_profile-1.cpp b/test/AST-Quake/base_profile-1.cpp index ba417eecd3d..8ea3f3024fd 100644 --- a/test/AST-Quake/base_profile-1.cpp +++ b/test/AST-Quake/base_profile-1.cpp @@ -44,6 +44,7 @@ struct comprehensive { z(q0); z(q3); T(z(!q2[1], q3[2], q1[0])); + cz(q2[0], q3[0]); t(q0); t(q3); T(t(!q2[1], q3[2], q1[0])); @@ -113,12 +114,12 @@ struct comprehensive { // BASE: tail call void @__quantum__qis__t__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) // BASE: tail call void @__quantum__qis__t__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // BASE: tail call void @__quantum__qis__t__body(%Qubit* nonnull inttoptr (i64 6 to %Qubit*)) -// BASE: tail call void @__quantum__qis__t__adj(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) +// BASE: tail call void @__quantum__qis__tdg__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // BASE: tail call void @__quantum__qis__s__body(%Qubit* null) // BASE: tail call void @__quantum__qis__s__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) // BASE: tail call void @__quantum__qis__s__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // BASE: tail call void @__quantum__qis__s__body(%Qubit* nonnull inttoptr (i64 6 to %Qubit*)) -// BASE: tail call void @__quantum__qis__s__adj(%Qubit* null) +// BASE: tail call void @__quantum__qis__sdg__body(%Qubit* null) // BASE: tail call void @__quantum__qis__rx__body(double 5.612300e+00, %Qubit* null) // BASE: tail call void @__quantum__qis__rx__body(double 5.612300e+00, %Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // BASE: tail call void @__quantum__qis__rx__body(double -5.612300e+00, %Qubit* null) @@ -172,12 +173,12 @@ struct comprehensive { // ADAPT: tail call void @__quantum__qis__t__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) // ADAPT: tail call void @__quantum__qis__t__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // ADAPT: tail call void @__quantum__qis__t__body(%Qubit* nonnull inttoptr (i64 6 to %Qubit*)) -// ADAPT: tail call void @__quantum__qis__t__adj(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) +// ADAPT: tail call void @__quantum__qis__tdg__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // ADAPT: tail call void @__quantum__qis__s__body(%Qubit* null) // ADAPT: tail call void @__quantum__qis__s__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) // ADAPT: tail call void @__quantum__qis__s__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // ADAPT: tail call void @__quantum__qis__s__body(%Qubit* nonnull inttoptr (i64 6 to %Qubit*)) -// ADAPT: 
tail call void @__quantum__qis__s__adj(%Qubit* null) +// ADAPT: tail call void @__quantum__qis__sdg__body(%Qubit* null) // ADAPT: tail call void @__quantum__qis__rx__body(double 5.612300e+00, %Qubit* null) // ADAPT: tail call void @__quantum__qis__rx__body(double 5.612300e+00, %Qubit* nonnull inttoptr (i64 5 to %Qubit*)) // ADAPT: tail call void @__quantum__qis__rx__body(double -5.612300e+00, %Qubit* null) @@ -216,146 +217,130 @@ struct comprehensive { // ADAPT: } // FULL-LABEL: define void @__nvqpp__mlirgen__comprehensive() -// FULL: %[[VAL_0:.*]] = alloca i64, align 8 -// FULL: %[[VAL_1:.*]] = alloca i64, align 8 -// FULL: %[[VAL_2:.*]] = alloca i64, align 8 -// FULL: %[[VAL_3:.*]] = alloca i64, align 8 -// FULL: %[[VAL_4:.*]] = alloca i64, align 8 -// FULL: %[[VAL_5:.*]] = alloca i64, align 8 -// FULL: %[[VAL_6:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 9) -// FULL: %[[VAL_8:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 0) -// FULL: %[[VAL_9:.*]] = bitcast i8* %[[VAL_8]] to %Qubit** -// FULL: %[[VAL_11:.*]] = load %Qubit*, %Qubit** %[[VAL_9]], align 8 -// FULL: %[[VAL_12:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 1) -// FULL: %[[VAL_13:.*]] = bitcast i8* %[[VAL_12]] to %Qubit** +// FULL: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 9) +// FULL: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// FULL: %[[VAL_4:.*]] = bitcast %Qubit** %[[VAL_2]] to i8** +// FULL: %[[VAL_5:.*]] = load i8*, i8** %[[VAL_4]], align 8 +// FULL: %[[VAL_6:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) +// FULL: %[[VAL_7:.*]] = bitcast %Qubit** %[[VAL_6]] to i8** +// FULL: %[[VAL_8:.*]] = load i8*, i8** %[[VAL_7]], align 8 +// FULL: %[[VAL_9:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// FULL: %[[VAL_10:.*]] = load %Qubit*, %Qubit** %[[VAL_9]], align 8 +// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_10]]) +// FULL: %[[VAL_11:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 6) +// FULL: %[[VAL_12:.*]] = load %Qubit*, %Qubit** %[[VAL_11]], align 8 +// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_12]]) +// FULL: %[[VAL_13:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 7) // FULL: %[[VAL_14:.*]] = load %Qubit*, %Qubit** %[[VAL_13]], align 8 -// FULL: %[[VAL_15:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 2) -// FULL: %[[VAL_16:.*]] = bitcast i8* %[[VAL_15]] to %Qubit** -// FULL: %[[VAL_17:.*]] = load %Qubit*, %Qubit** %[[VAL_16]], align 8 -// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_17]]) -// FULL: %[[VAL_18:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 6) -// FULL: %[[VAL_19:.*]] = bitcast i8* %[[VAL_18]] to %Qubit** +// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_14]]) +// FULL: %[[VAL_15:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 8) +// FULL: %[[VAL_16:.*]] = load %Qubit*, %Qubit** %[[VAL_15]], align 8 +// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_16]]) +// FULL: %[[VAL_17:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 4) +// FULL: %[[VAL_18:.*]] = load %Qubit*, %Qubit** %[[VAL_17]], align 8 +// FULL: %[[VAL_19:.*]] = 
tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 5) // FULL: %[[VAL_20:.*]] = load %Qubit*, %Qubit** %[[VAL_19]], align 8 -// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_20]]) -// FULL: %[[VAL_21:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 7) -// FULL: %[[VAL_22:.*]] = bitcast i8* %[[VAL_21]] to %Qubit** -// FULL: %[[VAL_23:.*]] = load %Qubit*, %Qubit** %[[VAL_22]], align 8 -// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_23]]) -// FULL: %[[VAL_24:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 8) -// FULL: %[[VAL_25:.*]] = bitcast i8* %[[VAL_24]] to %Qubit** -// FULL: %[[VAL_26:.*]] = load %Qubit*, %Qubit** %[[VAL_25]], align 8 -// FULL: tail call void @__quantum__qis__h(%Qubit* %[[VAL_26]]) -// FULL: %[[VAL_27:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 4) -// FULL: %[[VAL_28:.*]] = bitcast i8* %[[VAL_27]] to %Qubit** -// FULL: %[[VAL_29:.*]] = load %Qubit*, %Qubit** %[[VAL_28]], align 8 -// FULL: %[[VAL_30:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 5) -// FULL: %[[VAL_31:.*]] = bitcast i8* %[[VAL_30]] to %Qubit** -// FULL: %[[VAL_32:.*]] = load %Qubit*, %Qubit** %[[VAL_31]], align 8 -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_29]], %Qubit* %[[VAL_32]], %Qubit* %[[VAL_14]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__h__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_17]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_29]], %Qubit* %[[VAL_32]], %Qubit* %[[VAL_14]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_17]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_18]], %Qubit* %[[VAL_20]], i8* %[[VAL_8]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__h__ctl to i8*), i8* %[[VAL_8]], %Qubit* %[[VAL_10]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_18]], %Qubit* %[[VAL_20]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_12]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_16]]) +// FULL: %[[VAL_21:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 3) +// FULL: %[[VAL_22:.*]] = load %Qubit*, %Qubit** %[[VAL_21]], align 8 // FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_23]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_26]]) -// FULL: %[[VAL_33:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_6]], i64 3) -// FULL: %[[VAL_34:.*]] = bitcast i8* %[[VAL_33]] to %Qubit** -// FULL: %[[VAL_35:.*]] = load %Qubit*, %Qubit** %[[VAL_34]], align 8 -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_35]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_29]], %Qubit* %[[VAL_20]]) -// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_17]]) -// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_20]]) -// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_23]]) -// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_26]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_20]], %Qubit* %[[VAL_23]], %Qubit* %[[VAL_14]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]], %Qubit* %[[VAL_11]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__y__ctl, %Qubit* %[[VAL_11]], %Qubit* %[[VAL_35]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]], %Qubit* %[[VAL_11]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_20]], %Qubit* %[[VAL_23]], %Qubit* %[[VAL_14]]) -// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_17]]) -// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_20]]) -// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_23]]) -// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_26]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) 
@invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__z__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_35]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_17]]) -// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_20]]) -// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_23]]) -// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_26]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__t__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_35]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void @__quantum__qis__t__adj(%Qubit* %[[VAL_23]]) -// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_17]]) -// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_20]]) -// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_23]]) -// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_26]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__s__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_35]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void @__quantum__qis__s__adj(%Qubit* %[[VAL_17]]) -// FULL: tail call void @__quantum__qis__rx(double 5.612300e+00, %Qubit* %[[VAL_17]]) -// FULL: tail call void @__quantum__qis__rx(double 5.612300e+00, %Qubit* %[[VAL_23]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: store i64 0, i64* %[[VAL_5]], align 8 -// FULL: call void (double, i64, i64*, void (double, %Array*, %Qubit*)*, ...) 
@invokeRotationWithControlQubits(double 5.612300e+00, i64 1, i64* nonnull %[[VAL_5]], void (double, %Array*, %Qubit*)* nonnull @__quantum__qis__rx__ctl, double 5.612300e+00, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_35]]) -// FULL: call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: call void @__quantum__qis__rx(double -5.612300e+00, %Qubit* %[[VAL_17]]) -// FULL: call void @__quantum__qis__ry(double 6.612300e+00, %Qubit* %[[VAL_17]]) -// FULL: call void @__quantum__qis__ry(double 6.612300e+00, %Qubit* %[[VAL_20]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_20]], %Qubit* %[[VAL_14]]) -// FULL: store i64 0, i64* %[[VAL_4]], align 8 -// FULL: call void (double, i64, i64*, void (double, %Array*, %Qubit*)*, ...) @invokeRotationWithControlQubits(double 6.612300e+00, i64 1, i64* nonnull %[[VAL_4]], void (double, %Array*, %Qubit*)* nonnull @__quantum__qis__ry__ctl, double 6.612300e+00, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_35]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_20]], %Qubit* %[[VAL_14]]) -// FULL: call void @__quantum__qis__ry(double -6.612300e+00, %Qubit* %[[VAL_17]]) -// FULL: call void @__quantum__qis__rz(double 7.612300e+00, %Qubit* %[[VAL_17]]) -// FULL: call void @__quantum__qis__rz(double 0x4021397F62B6AE7E, %Qubit* %[[VAL_23]]) -// FULL: call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: store i64 0, i64* %[[VAL_3]], align 8 -// FULL: call void (double, i64, i64*, void (double, %Array*, %Qubit*)*, ...) @invokeRotationWithControlQubits(double 0x4023397F62B6AE7E, i64 1, i64* nonnull %[[VAL_3]], void (double, %Array*, %Qubit*)* nonnull @__quantum__qis__rz__ctl, double 0x4023397F62B6AE7E, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_35]]) -// FULL: call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_32]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: call void @__quantum__qis__x(%Qubit* %[[VAL_32]]) -// FULL: call void @__quantum__qis__rz(double 0xC025397F62B6AE7E, %Qubit* %[[VAL_17]]) -// FULL: call void @__quantum__qis__r1(double 4.612300e+00, %Qubit* %[[VAL_17]]) -// FULL: call void @__quantum__qis__r1(double 0x400CE5FD8ADAB9F6, %Qubit* %[[VAL_26]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_23]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: store i64 0, i64* %[[VAL_2]], align 8 -// FULL: call void (double, i64, i64*, void (double, %Array*, %Qubit*)*, ...) 
@invokeRotationWithControlQubits(double 0x4004E5FD8ADAB9F6, i64 1, i64* nonnull %[[VAL_2]], void (double, %Array*, %Qubit*)* nonnull @__quantum__qis__r1__ctl, double 0x4004E5FD8ADAB9F6, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_35]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_23]], %Qubit* %[[VAL_26]], %Qubit* %[[VAL_14]]) -// FULL: call void @__quantum__qis__r1(double 0xBFF9CBFB15B573EC, %Qubit* %[[VAL_17]]) -// FULL: call void @__quantum__qis__swap(%Qubit* %[[VAL_17]], %Qubit* %[[VAL_26]]) -// FULL: store i64 0, i64* %[[VAL_1]], align 8 -// FULL: call void (i64, i64*, i64, void (%Array*, %Qubit*, %Qubit*)*, ...) @invokeWithControlRegisterOrQubits(i64 1, i64* nonnull %[[VAL_1]], i64 2, void (%Array*, %Qubit*, %Qubit*)* nonnull @__quantum__qis__swap__ctl, %Qubit* %[[VAL_17]], %Qubit* %[[VAL_35]], %Qubit* %[[VAL_32]]) -// FULL: call void @__quantum__qis__u3(double 8.000000e-01, double 5.000000e-01, double -1.000000e+00, %Qubit* %[[VAL_32]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_23]], %Qubit* %[[VAL_20]], %Qubit* %[[VAL_14]]) -// FULL: store i64 0, i64* %[[VAL_0]], align 8 -// FULL: call void (double, double, double, i64, i64*, void (double, double, double, %Array*, %Qubit*)*, ...) @invokeU3RotationWithControlQubits(double 6.200000e+00, double -3.100000e+00, double 0x401F333333333333, i64 1, i64* nonnull %[[VAL_0]], void (double, double, double, %Array*, %Qubit*)* nonnull @__quantum__qis__u3__ctl, double 6.200000e+00, double -3.100000e+00, double 0x401F333333333333, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_17]]) -// FULL: call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_23]], %Qubit* %[[VAL_20]], %Qubit* %[[VAL_14]]) -// FULL: %[[VAL_36:.*]] = call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_17]], i8* nonnull getelementptr inbounds ([10 x i8], [10 x i8]* @cstr.73696E676C65746F6E00, i64 0, i64 0)) -// FULL: %[[VAL_38:.*]] = call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_35]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.65696E7300, i64 0, i64 0)) -// FULL: %[[VAL_39:.*]] = call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_29]], i8* nonnull getelementptr inbounds ([4 x i8], [4 x i8]* @cstr.64756200, i64 0, i64 0)) -// FULL: %[[VAL_40:.*]] = call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_32]], i8* nonnull getelementptr inbounds ([4 x i8], [4 x i8]* @cstr.64756200, i64 0, i64 0)) -// FULL: %[[VAL_41:.*]] = call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_20]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.7472697000, i64 0, i64 0)) -// FULL: %[[VAL_42:.*]] = call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_23]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.7472697000, i64 0, i64 0)) -// FULL: %[[VAL_43:.*]] = call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_26]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.7472697000, i64 0, i64 0)) -// FULL: call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_6]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], %Qubit* %[[VAL_22]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_18]], %Qubit* %[[VAL_12]]) +// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_12]]) +// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__y(%Qubit* %[[VAL_16]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_12]], %Qubit* %[[VAL_14]], i8* %[[VAL_8]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_16]], i8* %[[VAL_8]], i8* %[[VAL_5]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__y__ctl to i8*), i8* %[[VAL_5]], %Qubit* %[[VAL_22]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_16]], i8* %[[VAL_8]], i8* %[[VAL_5]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_12]], %Qubit* %[[VAL_14]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_12]]) +// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__z(%Qubit* %[[VAL_16]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__z__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], %Qubit* %[[VAL_22]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__z__ctl to i8*), %Qubit* %[[VAL_18]], %Qubit* %[[VAL_12]]) +// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_12]]) +// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__t(%Qubit* %[[VAL_16]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__t__ctl to i8*), i8* %[[VAL_8]], %Qubit* %[[VAL_22]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void @__quantum__qis__tdg(%Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_12]]) +// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__s(%Qubit* %[[VAL_16]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__s__ctl to i8*), i8* %[[VAL_8]], %Qubit* %[[VAL_22]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void @__quantum__qis__sdg(%Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__rx(double 5.612300e+00, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__rx(double 5.612300e+00, %Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 1, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (double, %Array*, %Qubit*)* @__quantum__qis__rx__ctl to i8*), double 5.612300e+00, i8* %[[VAL_8]], %Qubit* %[[VAL_22]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void @__quantum__qis__rx(double -5.612300e+00, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__ry(double 6.612300e+00, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__ry(double 6.612300e+00, %Qubit* %[[VAL_12]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_12]], i8* %[[VAL_8]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 1, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (double, %Array*, %Qubit*)* @__quantum__qis__ry__ctl to i8*), double 6.612300e+00, i8* %[[VAL_8]], %Qubit* %[[VAL_22]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_12]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__ry(double -6.612300e+00, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__rz(double 7.612300e+00, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__rz(double 0x4021397F62B6AE7E, %Qubit* %[[VAL_14]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 1, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (double, %Array*, %Qubit*)* @__quantum__qis__rz__ctl to i8*), double 0x4023397F62B6AE7E, i8* %[[VAL_8]], %Qubit* %[[VAL_22]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_20]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__x(%Qubit* %[[VAL_20]]) +// FULL: tail call void @__quantum__qis__rz(double 0xC025397F62B6AE7E, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__r1(double 4.612300e+00, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__r1(double 0x400CE5FD8ADAB9F6, %Qubit* %[[VAL_16]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_14]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 1, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (double, %Array*, %Qubit*)* @__quantum__qis__r1__ctl to i8*), double 0x4004E5FD8ADAB9F6, i8* %[[VAL_8]], %Qubit* %[[VAL_22]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_14]], %Qubit* %[[VAL_16]], i8* %[[VAL_8]]) +// FULL: tail call void @__quantum__qis__r1(double 0xBFF9CBFB15B573EC, %Qubit* %[[VAL_10]]) +// FULL: tail call void @__quantum__qis__swap(%Qubit* %[[VAL_10]], %Qubit* %[[VAL_16]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 2, i8* nonnull bitcast (void (%Array*, %Qubit*, %Qubit*)* @__quantum__qis__swap__ctl to i8*), %Qubit* %[[VAL_10]], %Qubit* %[[VAL_22]], %Qubit* %[[VAL_20]]) +// FULL: tail call void @__quantum__qis__u3(double 8.000000e-01, double 5.000000e-01, double -1.000000e+00, %Qubit* %[[VAL_20]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_14]], %Qubit* %[[VAL_12]], i8* %[[VAL_8]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 3, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (double, double, double, %Array*, %Qubit*)* @__quantum__qis__u3__ctl to i8*), double 6.200000e+00, double -3.100000e+00, double 0x401F333333333333, i8* %[[VAL_8]], %Qubit* %[[VAL_10]]) +// FULL: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_14]], %Qubit* %[[VAL_12]], i8* %[[VAL_8]]) +// FULL: %[[VAL_23:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_10]], i8* nonnull getelementptr inbounds ([10 x i8], [10 x i8]* @cstr.73696E676C65746F6E00, i64 0, i64 0)) +// FULL: %[[VAL_25:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_22]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.65696E7300, i64 0, i64 0)) +// FULL: %[[VAL_26:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_18]], i8* nonnull getelementptr inbounds ([4 x i8], [4 x i8]* @cstr.64756200, i64 0, i64 0)) +// FULL: %[[VAL_27:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_20]], i8* nonnull getelementptr inbounds ([4 x i8], [4 x i8]* @cstr.64756200, i64 0, i64 0)) +// FULL: %[[VAL_28:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_12]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.7472697000, i64 0, i64 0)) +// FULL: %[[VAL_29:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_14]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.7472697000, i64 0, i64 0)) +// FULL: %[[VAL_30:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_16]], i8* nonnull getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.7472697000, i64 0, i64 0)) +// FULL: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // FULL: ret void // FULL: } +// FULL: ret void + diff --git a/test/AST-Quake/negated_control.cpp b/test/AST-Quake/negated_control.cpp index 49fe0d360c4..a668046e23d 
100644 --- a/test/AST-Quake/negated_control.cpp +++ b/test/AST-Quake/negated_control.cpp @@ -21,24 +21,27 @@ struct Stuart { // CHECK-LABEL: define void @__nvqpp__mlirgen__Stuart() // CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 5) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %Qubit** -// CHECK: %[[VAL_5:.*]] = load %Qubit*, %Qubit** %[[VAL_3]], align 8 -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %Qubit** -// CHECK: %[[VAL_8:.*]] = load %Qubit*, %Qubit** %[[VAL_7]], align 8 -// CHECK: %[[VAL_9:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 4) -// CHECK: %[[VAL_10:.*]] = bitcast i8* %[[VAL_9]] to %Qubit** -// CHECK: %[[VAL_11:.*]] = load %Qubit*, %Qubit** %[[VAL_10]], align 8 -// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_5]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__y__ctl, %Qubit* %[[VAL_5]], %Qubit* %[[VAL_8]], %Qubit* %[[VAL_11]]) -// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_5]]) -// CHECK: %[[VAL_12:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) -// CHECK: %[[VAL_13:.*]] = bitcast i8* %[[VAL_12]] to %Qubit** -// CHECK: %[[VAL_14:.*]] = load %Qubit*, %Qubit** %[[VAL_13]], align 8 -// CHECK: %[[VAL_15:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 3) -// CHECK: %[[VAL_16:.*]] = bitcast i8* %[[VAL_15]] to %Qubit** -// CHECK: %[[VAL_17:.*]] = load %Qubit*, %Qubit** %[[VAL_16]], align 8 -// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_17]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 2, void (%Array*, %Qubit*)* nonnull @__quantum__qis__z__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_17]], %Qubit* %[[VAL_11]]) -// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_17]]) +// CHECK: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// CHECK: %[[VAL_5:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_6:.*]] = bitcast %Qubit** %[[VAL_5]] to i8** +// CHECK: %[[VAL_7:.*]] = load i8*, i8** %[[VAL_6]], align 8 +// CHECK: %[[VAL_8:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 4) +// CHECK: %[[VAL_9:.*]] = bitcast %Qubit** %[[VAL_8]] to i8** +// CHECK: %[[VAL_10:.*]] = load i8*, i8** %[[VAL_9]], align 8 +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_4]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__y__ctl to i8*), %Qubit* %[[VAL_4]], i8* %[[VAL_7]], i8* %[[VAL_10]]) +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_11:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// CHECK: %[[VAL_12:.*]] = bitcast %Qubit** %[[VAL_11]] to i8** +// CHECK: %[[VAL_13:.*]] = load i8*, i8** %[[VAL_12]], align 8 +// CHECK: %[[VAL_14:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 3) +// CHECK: %[[VAL_15:.*]] = load %Qubit*, %Qubit** %[[VAL_14]], align 8 +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_15]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 2, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__z__ctl to i8*), i8* %[[VAL_13]], %Qubit* %[[VAL_15]], i8* %[[VAL_10]]) +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_15]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) +// CHECK: ret void +// CHECK: } +// CHECK: ret void + diff --git a/test/AST-Quake/pure_quantum_struct.cpp b/test/AST-Quake/pure_quantum_struct.cpp index 7e5dc24c35c..464df3cafe7 100644 --- a/test/AST-Quake/pure_quantum_struct.cpp +++ b/test/AST-Quake/pure_quantum_struct.cpp @@ -81,77 +81,68 @@ __qpu__ void entry_ctor() { // clang-format off // QIR-LABEL: define void @__nvqpp__mlirgen__function_kernel._Z6kernel4test({ -// QIR-SAME: %[[VAL_0:.*]]*, %[[VAL_0]]* } %[[VAL_1:.*]]) local_unnamed_addr { -// QIR: %[[VAL_2:.*]] = extractvalue { %[[VAL_0]]*, %[[VAL_0]]* } %[[VAL_1]], 0 -// QIR: %[[VAL_3:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_0]]* %[[VAL_2]]) +// QIR-SAME: %Array*, %Array* } %[[VAL_1:.*]]) local_unnamed_addr { +// QIR: %[[VAL_2:.*]] = extractvalue { %Array*, %Array* } %[[VAL_1]], 0 +// QIR: %[[VAL_3:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%Array* %[[VAL_2]]) // QIR: %[[VAL_4:.*]] = icmp sgt i64 %[[VAL_3]], 0 // QIR: br i1 %[[VAL_4]], label %[[VAL_5:.*]], label %[[VAL_6:.*]] // QIR: .lr.ph: ; preds = %[[VAL_7:.*]], %[[VAL_5]] // QIR: %[[VAL_8:.*]] = phi i64 [ %[[VAL_9:.*]], %[[VAL_5]] ], [ 0, %[[VAL_7]] ] -// QIR: %[[VAL_10:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_0]]* %[[VAL_2]], i64 %[[VAL_8]]) -// QIR: %[[VAL_11:.*]] = bitcast i8* %[[VAL_10]] to %[[VAL_12:.*]]** -// QIR: %[[VAL_13:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_11]], align 8 -// QIR: tail call void @__quantum__qis__h(%[[VAL_12]]* %[[VAL_13]]) +// QIR: %[[VAL_10:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_2]], i64 %[[VAL_8]]) +// QIR: %[[VAL_13:.*]] = load %Qubit*, %Qubit** %[[VAL_10]], align 8 +// QIR: tail call void @__quantum__qis__h(%Qubit* %[[VAL_13]]) // QIR: %[[VAL_9]] = add nuw nsw i64 %[[VAL_8]], 1 // QIR: %[[VAL_14:.*]] = icmp eq i64 %[[VAL_9]], %[[VAL_3]] // QIR: br i1 %[[VAL_14]], label %[[VAL_6]], label %[[VAL_5]] // QIR: ._crit_edge: ; preds = %[[VAL_5]], %[[VAL_7]] -// QIR: %[[VAL_15:.*]] = extractvalue { %[[VAL_0]]*, %[[VAL_0]]* } %[[VAL_1]], 1 -// QIR: %[[VAL_16:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_0]]* %[[VAL_15]]) +// QIR: %[[VAL_15:.*]] = extractvalue { %Array*, %Array* } %[[VAL_1]], 1 +// QIR: %[[VAL_16:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%Array* %[[VAL_15]]) // QIR: %[[VAL_17:.*]] = icmp 
sgt i64 %[[VAL_16]], 0 // QIR: br i1 %[[VAL_17]], label %[[VAL_18:.*]], label %[[VAL_19:.*]] // QIR: .lr.ph3: ; preds = %[[VAL_6]], %[[VAL_18]] // QIR: %[[VAL_20:.*]] = phi i64 [ %[[VAL_21:.*]], %[[VAL_18]] ], [ 0, %[[VAL_6]] ] -// QIR: %[[VAL_22:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_0]]* %[[VAL_15]], i64 %[[VAL_20]]) -// QIR: %[[VAL_23:.*]] = bitcast i8* %[[VAL_22]] to %[[VAL_12]]** -// QIR: %[[VAL_24:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_23]], align 8 -// QIR: tail call void @__quantum__qis__s(%[[VAL_12]]* %[[VAL_24]]) +// QIR: %[[VAL_22:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_15]], i64 %[[VAL_20]]) +// QIR: %[[VAL_24:.*]] = load %Qubit*, %Qubit** %[[VAL_22]] +// QIR: tail call void @__quantum__qis__s(%Qubit* %[[VAL_24]]) // QIR: %[[VAL_21]] = add nuw nsw i64 %[[VAL_20]], 1 // QIR: %[[VAL_25:.*]] = icmp eq i64 %[[VAL_21]], %[[VAL_16]] // QIR: br i1 %[[VAL_25]], label %[[VAL_19]], label %[[VAL_18]] // QIR: ._crit_edge4: ; preds = %[[VAL_18]], %[[VAL_6]] -// QIR: %[[VAL_26:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_0]]* %[[VAL_2]], i64 0) -// QIR: %[[VAL_27:.*]] = bitcast i8* %[[VAL_26]] to %[[VAL_12]]** -// QIR: %[[VAL_28:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_27]], align 8 -// QIR: tail call void @__quantum__qis__h(%[[VAL_12]]* %[[VAL_28]]) -// QIR: %[[VAL_29:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_0]]* %[[VAL_15]], i64 0) -// QIR: %[[VAL_30:.*]] = bitcast i8* %[[VAL_29]] to %[[VAL_12]]** -// QIR: %[[VAL_31:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_30]], align 8 -// QIR: tail call void @__quantum__qis__x(%[[VAL_12]]* %[[VAL_31]]) +// QIR: %[[VAL_26:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_2]], i64 0) +// QIR: %[[VAL_28:.*]] = load %Qubit*, %Qubit** %[[VAL_26]] +// QIR: tail call void @__quantum__qis__h(%Qubit* %[[VAL_28]]) +// QIR: %[[VAL_29:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_15]], i64 0) +// QIR: %[[VAL_31:.*]] = load %Qubit*, %Qubit** %[[VAL_29]] +// QIR: tail call void @__quantum__qis__x(%Qubit* %[[VAL_31]]) // QIR: ret void // QIR: } // QIR-LABEL: define void @__nvqpp__mlirgen__function_entry_initlist._Z14entry_initlistv() local_unnamed_addr { -// QIR: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 4) -// QIR: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// QIR: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// QIR: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// QIR: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) -// QIR: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) -// QIR: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %[[VAL_4]]** -// QIR: %[[VAL_8:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_7]], align 8 -// QIR: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_8]]) -// QIR: %[[VAL_9:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 2) -// QIR: %[[VAL_10:.*]] = bitcast i8* %[[VAL_9]] to %[[VAL_4]]** -// QIR: %[[VAL_11:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_10]], align 8 -// QIR: tail call void @__quantum__qis__s(%[[VAL_4]]* %[[VAL_11]]) -// QIR: %[[VAL_12:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 3) -// QIR: %[[VAL_13:.*]] = bitcast i8* %[[VAL_12]] to 
%[[VAL_4]]** -// QIR: %[[VAL_14:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_13]], align 8 -// QIR: tail call void @__quantum__qis__s(%[[VAL_4]]* %[[VAL_14]]) -// QIR: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) -// QIR: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_11]]) -// QIR: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// QIR: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 4) +// QIR: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// QIR: %[[VAL_5:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// QIR: tail call void @__quantum__qis__h(%Qubit* %[[VAL_5]]) +// QIR: %[[VAL_6:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) +// QIR: %[[VAL_8:.*]] = load %Qubit*, %Qubit** %[[VAL_6]] +// QIR: tail call void @__quantum__qis__h(%Qubit* %[[VAL_8]]) +// QIR: %[[VAL_9:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// QIR: %[[VAL_11:.*]] = load %Qubit*, %Qubit** %[[VAL_9]] +// QIR: tail call void @__quantum__qis__s(%Qubit* %[[VAL_11]]) +// QIR: %[[VAL_12:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 3) +// QIR: %[[VAL_14:.*]] = load %Qubit*, %Qubit** %[[VAL_12]] +// QIR: tail call void @__quantum__qis__s(%Qubit* %[[VAL_14]]) +// QIR: tail call void @__quantum__qis__h(%Qubit* %[[VAL_5]]) +// QIR: tail call void @__quantum__qis__x(%Qubit* %[[VAL_11]]) +// QIR: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // QIR: ret void // QIR: } // QIR-LABEL: define void @__nvqpp__mlirgen__function_entry_ctor._Z10entry_ctorv() local_unnamed_addr { -// QIR: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 4) -// QIR: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 2) -// QIR: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// QIR: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// QIR: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) -// QIR: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// QIR: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 4) +// QIR: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// QIR: %[[VAL_5:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// QIR: tail call void @__quantum__qis__h(%Qubit* %[[VAL_5]]) +// QIR: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // QIR: ret void // QIR: } // clang-format on diff --git a/test/AST-Quake/qalloc_initialization.cpp b/test/AST-Quake/qalloc_initialization.cpp index 6a35f73b7d4..a0694e216fb 100644 --- a/test/AST-Quake/qalloc_initialization.cpp +++ b/test/AST-Quake/qalloc_initialization.cpp @@ -278,47 +278,119 @@ __qpu__ bool Peppermint() { //===----------------------------------------------------------------------===// // clang-format off -// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__Vanilla() local_unnamed_addr { -// QIR: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 2, i8* nonnull bitcast ([4 x double]* @__nvqpp__rodata_init_0 to i8*)) + +// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__Vanilla() +// QIR: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 2, double* nonnull 
getelementptr inbounds ([4 x double], [4 x double]* @__nvqpp__rodata_init_0, i64 0, i64 0)) +// QIR: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) +// QIR: %[[VAL_4:.*]] = load %[[VAL_3]]*, %[[VAL_3]]** %[[VAL_2]], align 8 +// QIR: tail call void @__quantum__qis__h(%[[VAL_3]]* %[[VAL_4]]) +// QIR: %[[VAL_5:.*]] = tail call %[[VAL_3]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) +// QIR: %[[VAL_6:.*]] = load %[[VAL_3]]*, %[[VAL_3]]** %[[VAL_5]], align 8 +// QIR: tail call void @__quantum__qis__h(%[[VAL_3]]* %[[VAL_6]]) +// QIR: %[[VAL_7:.*]] = tail call %[[VAL_8:.*]]* @__quantum__qis__mz(%[[VAL_3]]* %[[VAL_4]]) +// QIR: %[[VAL_9:.*]] = bitcast %[[VAL_8]]* %[[VAL_7]] to i1* +// QIR: %[[VAL_10:.*]] = load i1, i1* %[[VAL_9]], align 1 +// QIR: %[[VAL_11:.*]] = zext i1 %[[VAL_10]] to i8 +// QIR: %[[VAL_12:.*]] = tail call %[[VAL_8]]* @__quantum__qis__mz(%[[VAL_3]]* %[[VAL_6]]) +// QIR: %[[VAL_13:.*]] = bitcast %[[VAL_8]]* %[[VAL_12]] to i1* +// QIR: %[[VAL_14:.*]] = load i1, i1* %[[VAL_13]], align 1 +// QIR: %[[VAL_15:.*]] = zext i1 %[[VAL_14]] to i8 +// QIR: %[[VAL_16:.*]] = tail call dereferenceable_or_null(2) i8* @malloc(i64 2) +// QIR: store i8 %[[VAL_11]], i8* %[[VAL_16]], align 1 +// QIR: %[[VAL_17:.*]] = getelementptr inbounds i8, i8* %[[VAL_16]], i64 1 +// QIR: store i8 %[[VAL_15]], i8* %[[VAL_17]], align 1 +// QIR: %[[VAL_18:.*]] = bitcast i8* %[[VAL_16]] to i1* +// QIR: %[[VAL_19:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_18]], 0 +// QIR: %[[VAL_20:.*]] = insertvalue { i1*, i64 } %[[VAL_19]], i64 2, 1 +// QIR: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// QIR: ret { i1*, i64 } %[[VAL_20]] // QIR: } -// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__Cherry() local_unnamed_addr { +// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__Cherry() // QIR: %[[VAL_0:.*]] = alloca [4 x { double, double }], align 8 -// QIR: %[[VAL_1:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 0 -// QIR: store double 0.000000e+00, double* %[[VAL_1]], align 8 -// QIR: %[[VAL_2:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 1 -// QIR: store double 1.000000e+00, double* %[[VAL_2]], align 8 -// QIR: %[[VAL_3:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 0 -// QIR: store double 6.000000e-01, double* %[[VAL_3]], align 8 -// QIR: %[[VAL_4:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 1 -// QIR: store double 4.000000e-01, double* %[[VAL_4]], align 8 -// QIR: %[[VAL_5:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 0 -// QIR: store double 1.000000e+00, double* %[[VAL_5]], align 8 -// QIR: %[[VAL_6:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 1 -// QIR: %[[VAL_7:.*]] = bitcast [4 x { double, double }]* %[[VAL_0]] to i8* -// QIR: call void @llvm.memset -// QIR: %[[VAL_8:.*]] = call %[[VAL_9:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 2, i8* nonnull %[[VAL_7]]) +// QIR: %[[VAL_1:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0 +// QIR: %[[VAL_2:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double 
}]* %[[VAL_0]], i64 0, i64 0, i32 0 +// QIR: store double 0.000000e+00, double* %[[VAL_2]], align 8 +// QIR: %[[VAL_3:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 1 +// QIR: store double 1.000000e+00, double* %[[VAL_3]], align 8 +// QIR: %[[VAL_4:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 0 +// QIR: store double 6.000000e-01, double* %[[VAL_4]], align 8 +// QIR: %[[VAL_5:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 1 +// QIR: store double 4.000000e-01, double* %[[VAL_5]], align 8 +// QIR: %[[VAL_6:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 0 +// QIR: store double 1.000000e+00, double* %[[VAL_6]], align 8 +// QIR: %[[VAL_7:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 1 +// QIR: %[[VAL_8:.*]] = bitcast double* %[[VAL_7]] to i8* +// QIR: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(24) %[[VAL_8]], i8 0, i64 24, i1 false) +// QIR: %[[VAL_9:.*]] = call %[[VAL_10:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 2, { double, double }* nonnull %[[VAL_1]]) +// QIR: %[[VAL_11:.*]] = call %[[VAL_12:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_10]]* %[[VAL_9]], i64 0) +// QIR: %[[VAL_13:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_11]], align 8 +// QIR: call void @__quantum__qis__h(%[[VAL_12]]* %[[VAL_13]]) +// QIR: %[[VAL_14:.*]] = call %[[VAL_12]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_10]]* %[[VAL_9]], i64 1) +// QIR: %[[VAL_15:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_14]], align 8 +// QIR: call void @__quantum__qis__h(%[[VAL_12]]* %[[VAL_15]]) +// QIR: %[[VAL_16:.*]] = call %[[VAL_17:.*]]* @__quantum__qis__mz(%[[VAL_12]]* %[[VAL_13]]) +// QIR: %[[VAL_18:.*]] = bitcast %[[VAL_17]]* %[[VAL_16]] to i1* +// QIR: %[[VAL_19:.*]] = load i1, i1* %[[VAL_18]], align 1 +// QIR: %[[VAL_20:.*]] = zext i1 %[[VAL_19]] to i8 +// QIR: %[[VAL_21:.*]] = call %[[VAL_17]]* @__quantum__qis__mz(%[[VAL_12]]* %[[VAL_15]]) +// QIR: %[[VAL_22:.*]] = bitcast %[[VAL_17]]* %[[VAL_21]] to i1* +// QIR: %[[VAL_23:.*]] = load i1, i1* %[[VAL_22]], align 1 +// QIR: %[[VAL_24:.*]] = zext i1 %[[VAL_23]] to i8 +// QIR: %[[VAL_25:.*]] = call dereferenceable_or_null(2) i8* @malloc(i64 2) +// QIR: store i8 %[[VAL_20]], i8* %[[VAL_25]], align 1 +// QIR: %[[VAL_26:.*]] = getelementptr inbounds i8, i8* %[[VAL_25]], i64 1 +// QIR: store i8 %[[VAL_24]], i8* %[[VAL_26]], align 1 +// QIR: %[[VAL_27:.*]] = bitcast i8* %[[VAL_25]] to i1* +// QIR: %[[VAL_28:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_27]], 0 +// QIR: %[[VAL_29:.*]] = insertvalue { i1*, i64 } %[[VAL_28]], i64 2, 1 +// QIR: call void @__quantum__rt__qubit_release_array(%[[VAL_10]]* %[[VAL_9]]) +// QIR: ret { i1*, i64 } %[[VAL_29]] // QIR: } -// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__MooseTracks() local_unnamed_addr { +// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__MooseTracks() // QIR: %[[VAL_0:.*]] = alloca [4 x { double, double }], align 8 -// QIR: %[[VAL_1:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 0 -// QIR: store double 0.000000e+00, double* %[[VAL_1]], align 8 -// QIR: %[[VAL_2:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, 
i64 0, i32 1 -// QIR: store double 1.000000e+00, double* %[[VAL_2]], align 8 -// QIR: %[[VAL_3:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 0 -// QIR: store double 7.500000e-01, double* %[[VAL_3]], align 8 -// QIR: %[[VAL_4:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 1 -// QIR: store double 2.500000e-01, double* %[[VAL_4]], align 8 -// QIR: %[[VAL_5:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 0 -// QIR: store double 1.000000e+00, double* %[[VAL_5]], align 8 -// QIR: %[[VAL_6:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 1 -// QIR: %[[VAL_7:.*]] = bitcast [4 x { double, double }]* %[[VAL_0]] to i8* -// QIR: call void @llvm.memset -// QIR: %[[VAL_8:.*]] = call %[[VAL_9:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 2, i8* nonnull %[[VAL_7]]) +// QIR: %[[VAL_1:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0 +// QIR: %[[VAL_2:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 0 +// QIR: store double 0.000000e+00, double* %[[VAL_2]], align 8 +// QIR: %[[VAL_3:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 1 +// QIR: store double 1.000000e+00, double* %[[VAL_3]], align 8 +// QIR: %[[VAL_4:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 0 +// QIR: store double 7.500000e-01, double* %[[VAL_4]], align 8 +// QIR: %[[VAL_5:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 1 +// QIR: store double 2.500000e-01, double* %[[VAL_5]], align 8 +// QIR: %[[VAL_6:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 0 +// QIR: store double 1.000000e+00, double* %[[VAL_6]], align 8 +// QIR: %[[VAL_7:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_0]], i64 0, i64 2, i32 1 +// QIR: %[[VAL_8:.*]] = bitcast double* %[[VAL_7]] to i8* +// QIR: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(24) %[[VAL_8]], i8 0, i64 24, i1 false) +// QIR: %[[VAL_9:.*]] = call %[[VAL_10:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 2, { double, double }* nonnull %[[VAL_1]]) +// QIR: %[[VAL_11:.*]] = call %[[VAL_12:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_10]]* %[[VAL_9]], i64 0) +// QIR: %[[VAL_13:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_11]], align 8 +// QIR: call void @__quantum__qis__h(%[[VAL_12]]* %[[VAL_13]]) +// QIR: %[[VAL_14:.*]] = call %[[VAL_12]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_10]]* %[[VAL_9]], i64 1) +// QIR: %[[VAL_15:.*]] = load %[[VAL_12]]*, %[[VAL_12]]** %[[VAL_14]], align 8 +// QIR: call void @__quantum__qis__h(%[[VAL_12]]* %[[VAL_15]]) +// QIR: %[[VAL_16:.*]] = call %[[VAL_17:.*]]* @__quantum__qis__mz(%[[VAL_12]]* %[[VAL_13]]) +// QIR: %[[VAL_18:.*]] = bitcast %[[VAL_17]]* %[[VAL_16]] to i1* +// QIR: %[[VAL_19:.*]] = load i1, i1* %[[VAL_18]], align 1 +// QIR: %[[VAL_20:.*]] = zext i1 %[[VAL_19]] to i8 +// QIR: %[[VAL_21:.*]] = call %[[VAL_17]]* @__quantum__qis__mz(%[[VAL_12]]* %[[VAL_15]]) +// QIR: %[[VAL_22:.*]] = bitcast %[[VAL_17]]* 
%[[VAL_21]] to i1* +// QIR: %[[VAL_23:.*]] = load i1, i1* %[[VAL_22]], align 1 +// QIR: %[[VAL_24:.*]] = zext i1 %[[VAL_23]] to i8 +// QIR: %[[VAL_25:.*]] = call dereferenceable_or_null(2) i8* @malloc(i64 2) +// QIR: store i8 %[[VAL_20]], i8* %[[VAL_25]], align 1 +// QIR: %[[VAL_26:.*]] = getelementptr inbounds i8, i8* %[[VAL_25]], i64 1 +// QIR: store i8 %[[VAL_24]], i8* %[[VAL_26]], align 1 +// QIR: %[[VAL_27:.*]] = bitcast i8* %[[VAL_25]] to i1* +// QIR: %[[VAL_28:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_27]], 0 +// QIR: %[[VAL_29:.*]] = insertvalue { i1*, i64 } %[[VAL_28]], i64 2, 1 +// QIR: call void @__quantum__rt__qubit_release_array(%[[VAL_10]]* %[[VAL_9]]) +// QIR: ret { i1*, i64 } %[[VAL_29]] // QIR: } -// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__RockyRoad() local_unnamed_addr { +// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__RockyRoad() // QIR: %[[VAL_0:.*]] = alloca double, align 8 // QIR: store double 0.000000e+00, double* %[[VAL_0]], align 8 // QIR: %[[VAL_1:.*]] = tail call { double, double } @_ZNSt{{.*}}8literals16complex_literalsli1i{{.*}}Ee( @@ -329,7 +401,7 @@ __qpu__ bool Peppermint() { // QIR: %[[VAL_5:.*]] = extractvalue { double, double } %[[VAL_1]], 1 // QIR: %[[VAL_6:.*]] = getelementptr inbounds { double, double }, { double, double }* %[[VAL_2]], i64 0, i32 1 // QIR: store double %[[VAL_5]], double* %[[VAL_6]], align 8 -// QIR: %[[VAL_7:.*]] = call { double, double } @_Z{{.*}}7complexIT_{{.*}}_(double* nonnull %[[VAL_0]], { double, double }* nonnull %[[VAL_2]]) +// QIR: %[[VAL_7:.*]] = call { double, double } @_ZStplIdESt7complexIT_ERKS1_RKS2_(double* nonnull %[[VAL_0]], { double, double }* nonnull %[[VAL_2]]) // QIR: %[[VAL_8:.*]] = alloca double, align 8 // QIR: store double 1.000000e+00, double* %[[VAL_8]], align 8 // QIR: %[[VAL_9:.*]] = call { double, double } @_ZNSt{{.*}}8literals16complex_literalsli1i{{.*}}Ee( @@ -340,61 +412,239 @@ __qpu__ bool Peppermint() { // QIR: %[[VAL_13:.*]] = extractvalue { double, double } %[[VAL_9]], 1 // QIR: %[[VAL_14:.*]] = getelementptr inbounds { double, double }, { double, double }* %[[VAL_10]], i64 0, i32 1 // QIR: store double %[[VAL_13]], double* %[[VAL_14]], align 8 -// QIR: %[[VAL_15:.*]] = call { double, double } @_Z{{.*}}7complexIT_{{.*}}_(double* nonnull %[[VAL_8]], { double, double }* nonnull %[[VAL_10]]) +// QIR: %[[VAL_15:.*]] = call { double, double } @_ZStplIdESt7complexIT_ERKS1_RKS2_(double* nonnull %[[VAL_8]], { double, double }* nonnull %[[VAL_10]]) // QIR: %[[VAL_16:.*]] = alloca [4 x { double, double }], align 8 -// QIR: %[[VAL_17:.*]] = extractvalue { double, double } %[[VAL_7]], 0 -// QIR: %[[VAL_18:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 0, i32 0 -// QIR: store double %[[VAL_17]], double* %[[VAL_18]], align 8 -// QIR: %[[VAL_19:.*]] = extractvalue { double, double } %[[VAL_7]], 1 -// QIR: %[[VAL_20:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 0, i32 1 -// QIR: store double %[[VAL_19]], double* %[[VAL_20]], align 8 -// QIR: %[[VAL_21:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 1, i32 0 -// QIR: store double 8.000000e-01, double* %[[VAL_21]], align 8 -// QIR: %[[VAL_22:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 1, i32 1 -// QIR: store double 2.000000e-01, double* %[[VAL_22]], align 8 -// QIR: %[[VAL_23:.*]] = extractvalue { double, 
double } %[[VAL_15]], 0 -// QIR: %[[VAL_24:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 2, i32 0 -// QIR: store double %[[VAL_23]], double* %[[VAL_24]], align 8 -// QIR: %[[VAL_25:.*]] = extractvalue { double, double } %[[VAL_15]], 1 -// QIR: %[[VAL_26:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 2, i32 1 -// QIR: store double %[[VAL_25]], double* %[[VAL_26]], align 8 -// QIR: %[[VAL_27:.*]] = bitcast [4 x { double, double }]* %[[VAL_16]] to i8* -// QIR: %[[VAL_28:.*]] = call %[[VAL_29:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 2, i8* nonnull %[[VAL_27]]) +// QIR: %[[VAL_17:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 0 +// QIR: %[[VAL_18:.*]] = extractvalue { double, double } %[[VAL_7]], 0 +// QIR: %[[VAL_19:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 0, i32 0 +// QIR: store double %[[VAL_18]], double* %[[VAL_19]], align 8 +// QIR: %[[VAL_20:.*]] = extractvalue { double, double } %[[VAL_7]], 1 +// QIR: %[[VAL_21:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 0, i32 1 +// QIR: store double %[[VAL_20]], double* %[[VAL_21]], align 8 +// QIR: %[[VAL_22:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 1, i32 0 +// QIR: store double 8.000000e-01, double* %[[VAL_22]], align 8 +// QIR: %[[VAL_23:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 1, i32 1 +// QIR: store double 2.000000e-01, double* %[[VAL_23]], align 8 +// QIR: %[[VAL_24:.*]] = extractvalue { double, double } %[[VAL_15]], 0 +// QIR: %[[VAL_25:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 2, i32 0 +// QIR: store double %[[VAL_24]], double* %[[VAL_25]], align 8 +// QIR: %[[VAL_26:.*]] = extractvalue { double, double } %[[VAL_15]], 1 +// QIR: %[[VAL_27:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 2, i32 1 +// QIR: store double %[[VAL_26]], double* %[[VAL_27]], align 8 +// QIR: %[[VAL_28:.*]] = getelementptr inbounds [4 x { double, double }], [4 x { double, double }]* %[[VAL_16]], i64 0, i64 3, i32 0 +// QIR: %[[VAL_29:.*]] = bitcast double* %[[VAL_28]] to i8* +// QIR: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) %[[VAL_29]], i8 0, i64 16, i1 false) +// QIR: %[[VAL_30:.*]] = call %[[VAL_31:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 2, { double, double }* nonnull %[[VAL_17]]) +// QIR: %[[VAL_32:.*]] = call %[[VAL_33:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_31]]* %[[VAL_30]], i64 0) +// QIR: %[[VAL_34:.*]] = load %[[VAL_33]]*, %[[VAL_33]]** %[[VAL_32]], align 8 +// QIR: call void @__quantum__qis__h(%[[VAL_33]]* %[[VAL_34]]) +// QIR: %[[VAL_35:.*]] = call %[[VAL_33]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_31]]* %[[VAL_30]], i64 1) +// QIR: %[[VAL_36:.*]] = load %[[VAL_33]]*, %[[VAL_33]]** %[[VAL_35]], align 8 +// QIR: call void @__quantum__qis__h(%[[VAL_33]]* %[[VAL_36]]) +// QIR: %[[VAL_37:.*]] = call %[[VAL_38:.*]]* @__quantum__qis__mz(%[[VAL_33]]* %[[VAL_34]]) +// QIR: %[[VAL_39:.*]] = bitcast %[[VAL_38]]* %[[VAL_37]] to i1* +// QIR: %[[VAL_40:.*]] = load i1, i1* %[[VAL_39]], align 1 +// QIR: 
%[[VAL_41:.*]] = zext i1 %[[VAL_40]] to i8 +// QIR: %[[VAL_42:.*]] = call %[[VAL_38]]* @__quantum__qis__mz(%[[VAL_33]]* %[[VAL_36]]) +// QIR: %[[VAL_43:.*]] = bitcast %[[VAL_38]]* %[[VAL_42]] to i1* +// QIR: %[[VAL_44:.*]] = load i1, i1* %[[VAL_43]], align 1 +// QIR: %[[VAL_45:.*]] = zext i1 %[[VAL_44]] to i8 +// QIR: %[[VAL_46:.*]] = call dereferenceable_or_null(2) i8* @malloc(i64 2) +// QIR: store i8 %[[VAL_41]], i8* %[[VAL_46]], align 1 +// QIR: %[[VAL_47:.*]] = getelementptr inbounds i8, i8* %[[VAL_46]], i64 1 +// QIR: store i8 %[[VAL_45]], i8* %[[VAL_47]], align 1 +// QIR: %[[VAL_48:.*]] = bitcast i8* %[[VAL_46]] to i1* +// QIR: %[[VAL_49:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_48]], 0 +// QIR: %[[VAL_50:.*]] = insertvalue { i1*, i64 } %[[VAL_49]], i64 2, 1 +// QIR: call void @__quantum__rt__qubit_release_array(%[[VAL_31]]* %[[VAL_30]]) +// QIR: ret { i1*, i64 } %[[VAL_50]] // QIR: } -// QIR-LABEL: define i1 @__nvqpp__mlirgen__Pistachio() local_unnamed_addr { +// QIR-LABEL: define i1 @__nvqpp__mlirgen__Pistachio() // QIR: %[[VAL_0:.*]] = tail call { double*, i64 } @_Z15getTwoTimesRankv() -// QIR: %[[VAL_I:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 1 -// QIR: %[[VAL_1:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_I]], i1 false) -// QIR: %[[VAL_2:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 0 -// QIR: %[[VAL_3:.*]] = bitcast double* %[[VAL_2]] to i8* -// QIR: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 %[[VAL_1]], i8* %[[VAL_3]]) +// QIR: %[[VAL_1:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 1 +// QIR: %[[VAL_2:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_1]], i1 false), !range !1 +// QIR: %[[VAL_3:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 0 +// QIR: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 %[[VAL_2]], double* %[[VAL_3]]) +// QIR: %[[VAL_6:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_5]]* %[[VAL_4]]) +// QIR: %[[VAL_7:.*]] = icmp sgt i64 %[[VAL_6]], 0 +// QIR: br i1 %[[VAL_7]], label %[[VAL_8:.*]], label %[[VAL_9:.*]] +// QIR: .lr.ph: ; preds = %[[VAL_10:.*]], %[[VAL_8]] +// QIR: %[[VAL_11:.*]] = phi i64 [ %[[VAL_12:.*]], %[[VAL_8]] ], [ 0, %[[VAL_10]] ] +// QIR: %[[VAL_13:.*]] = tail call %[[VAL_14:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 %[[VAL_11]]) +// QIR: %[[VAL_15:.*]] = load %[[VAL_14]]*, %[[VAL_14]]** %[[VAL_13]], align 8 +// QIR: tail call void @__quantum__qis__h(%[[VAL_14]]* %[[VAL_15]]) +// QIR: %[[VAL_12]] = add nuw nsw i64 %[[VAL_11]], 1 +// QIR: %[[VAL_16:.*]] = icmp eq i64 %[[VAL_12]], %[[VAL_6]] +// QIR: br i1 %[[VAL_16]], label %[[VAL_9]], label %[[VAL_8]] +// QIR: ._crit_edge: ; preds = %[[VAL_8]], %[[VAL_10]] +// QIR: %[[VAL_17:.*]] = tail call %[[VAL_14]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 0) +// QIR: %[[VAL_18:.*]] = load %[[VAL_14]]*, %[[VAL_14]]** %[[VAL_17]], align 8 +// QIR: %[[VAL_19:.*]] = tail call %[[VAL_20:.*]]* @__quantum__qis__mz(%[[VAL_14]]* %[[VAL_18]]) +// QIR: %[[VAL_21:.*]] = bitcast %[[VAL_20]]* %[[VAL_19]] to i1* +// QIR: %[[VAL_22:.*]] = load i1, i1* %[[VAL_21]], align 1 +// QIR: tail call void @__quantum__rt__qubit_release_array(%[[VAL_5]]* %[[VAL_4]]) +// QIR: ret i1 %[[VAL_22]] // QIR: } -// QIR-LABEL: define i1 @__nvqpp__mlirgen__ChocolateMint() local_unnamed_addr { +// QIR-LABEL: define i1 @__nvqpp__mlirgen__ChocolateMint() // QIR: %[[VAL_0:.*]] = tail call { double*, i64 } @_Z15getTwoTimesRankv() -// QIR: 
%[[VAL_I:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 1 -// QIR: %[[VAL_1:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_I]], i1 false) -// QIR: %[[VAL_2:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 0 -// QIR: %[[VAL_3:.*]] = bitcast double* %[[VAL_2]] to i8* -// QIR: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 %[[VAL_1]], i8* %[[VAL_3]]) +// QIR: %[[VAL_1:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 1 +// QIR: %[[VAL_2:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_1]], i1 false), !range !1 +// QIR: %[[VAL_3:.*]] = extractvalue { double*, i64 } %[[VAL_0]], 0 +// QIR: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 %[[VAL_2]], double* %[[VAL_3]]) +// QIR: %[[VAL_6:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_5]]* %[[VAL_4]]) +// QIR: %[[VAL_7:.*]] = icmp sgt i64 %[[VAL_6]], 0 +// QIR: br i1 %[[VAL_7]], label %[[VAL_8:.*]], label %[[VAL_9:.*]] +// QIR: .lr.ph: ; preds = %[[VAL_10:.*]], %[[VAL_8]] +// QIR: %[[VAL_11:.*]] = phi i64 [ %[[VAL_12:.*]], %[[VAL_8]] ], [ 0, %[[VAL_10]] ] +// QIR: %[[VAL_13:.*]] = tail call %[[VAL_14:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 %[[VAL_11]]) +// QIR: %[[VAL_15:.*]] = load %[[VAL_14]]*, %[[VAL_14]]** %[[VAL_13]], align 8 +// QIR: tail call void @__quantum__qis__h(%[[VAL_14]]* %[[VAL_15]]) +// QIR: %[[VAL_12]] = add nuw nsw i64 %[[VAL_11]], 1 +// QIR: %[[VAL_16:.*]] = icmp eq i64 %[[VAL_12]], %[[VAL_6]] +// QIR: br i1 %[[VAL_16]], label %[[VAL_9]], label %[[VAL_8]] +// QIR: ._crit_edge: ; preds = %[[VAL_8]], %[[VAL_10]] +// QIR: %[[VAL_17:.*]] = tail call %[[VAL_14]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 0) +// QIR: %[[VAL_18:.*]] = load %[[VAL_14]]*, %[[VAL_14]]** %[[VAL_17]], align 8 +// QIR: %[[VAL_19:.*]] = tail call %[[VAL_20:.*]]* @__quantum__qis__mz(%[[VAL_14]]* %[[VAL_18]]) +// QIR: %[[VAL_21:.*]] = bitcast %[[VAL_20]]* %[[VAL_19]] to i1* +// QIR: %[[VAL_22:.*]] = load i1, i1* %[[VAL_21]], align 1 +// QIR: tail call void @__quantum__rt__qubit_release_array(%[[VAL_5]]* %[[VAL_4]]) +// QIR: ret i1 %[[VAL_22]] // QIR: } -// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__Neapolitan() local_unnamed_addr { +// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__Neapolitan() // QIR: %[[VAL_0:.*]] = tail call { { double, double }*, i64 } @_Z14getComplexInitv() -// QIR: %[[VAL_I:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 1 -// QIR: %[[VAL_1:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_I]], i1 false) -// QIR: %[[VAL_2:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 0 -// QIR: %[[VAL_3:.*]] = bitcast { double, double }* %[[VAL_2]] to i8* -// QIR: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 %[[VAL_1]], i8* %[[VAL_3]]) +// QIR: %[[VAL_1:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 1 +// QIR: %[[VAL_2:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_1]], i1 false), !range !1 +// QIR: %[[VAL_3:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 0 +// QIR: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 %[[VAL_2]], { double, double }* %[[VAL_3]]) +// QIR: %[[VAL_6:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_5]]* %[[VAL_4]]) +// QIR: %[[VAL_7:.*]] = icmp sgt i64 %[[VAL_6]], 0 +// QIR: br i1 %[[VAL_7]], label %[[VAL_8:.*]], label %[[VAL_9:.*]] +// QIR: ._crit_edge.thread: ; preds = 
%[[VAL_10:.*]] +// QIR: %[[VAL_11:.*]] = alloca i8, i64 %[[VAL_6]], align 1 +// QIR: br label %[[VAL_12:.*]] +// QIR: .lr.ph: ; preds = %[[VAL_10]], %[[VAL_8]] +// QIR: %[[VAL_13:.*]] = phi i64 [ %[[VAL_14:.*]], %[[VAL_8]] ], [ 0, %[[VAL_10]] ] +// QIR: %[[VAL_15:.*]] = tail call %[[VAL_16:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 %[[VAL_13]]) +// QIR: %[[VAL_17:.*]] = load %[[VAL_16]]*, %[[VAL_16]]** %[[VAL_15]], align 8 +// QIR: tail call void @__quantum__qis__h(%[[VAL_16]]* %[[VAL_17]]) +// QIR: %[[VAL_14]] = add nuw nsw i64 %[[VAL_13]], 1 +// QIR: %[[VAL_18:.*]] = icmp eq i64 %[[VAL_14]], %[[VAL_6]] +// QIR: br i1 %[[VAL_18]], label %[[VAL_19:.*]], label %[[VAL_8]] +// QIR: ._crit_edge: ; preds = %[[VAL_8]] +// QIR: %[[VAL_20:.*]] = alloca i8, i64 %[[VAL_6]], align 1 +// QIR: br i1 %[[VAL_7]], label %[[VAL_21:.*]], label %[[VAL_12]] +// QIR: .lr.ph4: ; preds = %[[VAL_19]], %[[VAL_21]] +// QIR: %[[VAL_22:.*]] = phi i64 [ %[[VAL_23:.*]], %[[VAL_21]] ], [ 0, %[[VAL_19]] ] +// QIR: %[[VAL_24:.*]] = tail call %[[VAL_16]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 %[[VAL_22]]) +// QIR: %[[VAL_25:.*]] = load %[[VAL_16]]*, %[[VAL_16]]** %[[VAL_24]], align 8 +// QIR: %[[VAL_26:.*]] = tail call %[[VAL_27:.*]]* @__quantum__qis__mz(%[[VAL_16]]* %[[VAL_25]]) +// QIR: %[[VAL_28:.*]] = bitcast %[[VAL_27]]* %[[VAL_26]] to i1* +// QIR: %[[VAL_29:.*]] = load i1, i1* %[[VAL_28]], align 1 +// QIR: %[[VAL_30:.*]] = getelementptr i8, i8* %[[VAL_20]], i64 %[[VAL_22]] +// QIR: %[[VAL_31:.*]] = zext i1 %[[VAL_29]] to i8 +// QIR: store i8 %[[VAL_31]], i8* %[[VAL_30]], align 1 +// QIR: %[[VAL_23]] = add nuw nsw i64 %[[VAL_22]], 1 +// QIR: %[[VAL_32:.*]] = icmp eq i64 %[[VAL_23]], %[[VAL_6]] +// QIR: br i1 %[[VAL_32]], label %[[VAL_12]], label %[[VAL_21]] +// QIR: ._crit_edge5: ; preds = %[[VAL_21]], %[[VAL_9]], %[[VAL_19]] +// QIR: %[[VAL_33:.*]] = phi i8* [ %[[VAL_11]], %[[VAL_9]] ], [ %[[VAL_20]], %[[VAL_19]] ], [ %[[VAL_20]], %[[VAL_21]] ] +// QIR: %[[VAL_34:.*]] = tail call i8* @malloc(i64 %[[VAL_6]]) +// QIR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %[[VAL_34]], i8* nonnull align 1 %[[VAL_33]], i64 %[[VAL_6]], i1 false) +// QIR: %[[VAL_35:.*]] = bitcast i8* %[[VAL_34]] to i1* +// QIR: %[[VAL_36:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_35]], 0 +// QIR: %[[VAL_37:.*]] = insertvalue { i1*, i64 } %[[VAL_36]], i64 %[[VAL_6]], 1 +// QIR: tail call void @__quantum__rt__qubit_release_array(%[[VAL_5]]* %[[VAL_4]]) +// QIR: ret { i1*, i64 } %[[VAL_37]] // QIR: } -// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__ButterPecan() local_unnamed_addr { +// QIR-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__ButterPecan() // QIR: %[[VAL_0:.*]] = tail call { { double, double }*, i64 } @_Z14getComplexInitv() -// QIR: %[[VAL_I:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 1 -// QIR: %[[VAL_1:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_I]], i1 false) -// QIR: %[[VAL_2:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 0 -// QIR: %[[VAL_3:.*]] = bitcast { double, double }* %[[VAL_2]] to i8* -// QIR: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 %[[VAL_1]], i8* %[[VAL_3]]) +// QIR: %[[VAL_1:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 1 +// QIR: %[[VAL_2:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_1]], i1 false), !range !1 +// QIR: %[[VAL_3:.*]] = extractvalue { { double, double }*, i64 } %[[VAL_0]], 0 +// QIR: %[[VAL_4:.*]] = tail call 
%[[VAL_5:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 %[[VAL_2]], { double, double }* %[[VAL_3]]) +// QIR: %[[VAL_6:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_5]]* %[[VAL_4]]) +// QIR: %[[VAL_7:.*]] = icmp sgt i64 %[[VAL_6]], 0 +// QIR: br i1 %[[VAL_7]], label %[[VAL_8:.*]], label %[[VAL_9:.*]] +// QIR: ._crit_edge.thread: ; preds = %[[VAL_10:.*]] +// QIR: %[[VAL_11:.*]] = alloca i8, i64 %[[VAL_6]], align 1 +// QIR: br label %[[VAL_12:.*]] +// QIR: .lr.ph: ; preds = %[[VAL_10]], %[[VAL_8]] +// QIR: %[[VAL_13:.*]] = phi i64 [ %[[VAL_14:.*]], %[[VAL_8]] ], [ 0, %[[VAL_10]] ] +// QIR: %[[VAL_15:.*]] = tail call %[[VAL_16:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 %[[VAL_13]]) +// QIR: %[[VAL_17:.*]] = load %[[VAL_16]]*, %[[VAL_16]]** %[[VAL_15]], align 8 +// QIR: tail call void @__quantum__qis__h(%[[VAL_16]]* %[[VAL_17]]) +// QIR: %[[VAL_14]] = add nuw nsw i64 %[[VAL_13]], 1 +// QIR: %[[VAL_18:.*]] = icmp eq i64 %[[VAL_14]], %[[VAL_6]] +// QIR: br i1 %[[VAL_18]], label %[[VAL_19:.*]], label %[[VAL_8]] +// QIR: ._crit_edge: ; preds = %[[VAL_8]] +// QIR: %[[VAL_20:.*]] = alloca i8, i64 %[[VAL_6]], align 1 +// QIR: br i1 %[[VAL_7]], label %[[VAL_21:.*]], label %[[VAL_12]] +// QIR: .lr.ph4: ; preds = %[[VAL_19]], %[[VAL_21]] +// QIR: %[[VAL_22:.*]] = phi i64 [ %[[VAL_23:.*]], %[[VAL_21]] ], [ 0, %[[VAL_19]] ] +// QIR: %[[VAL_24:.*]] = tail call %[[VAL_16]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_5]]* %[[VAL_4]], i64 %[[VAL_22]]) +// QIR: %[[VAL_25:.*]] = load %[[VAL_16]]*, %[[VAL_16]]** %[[VAL_24]], align 8 +// QIR: %[[VAL_26:.*]] = tail call %[[VAL_27:.*]]* @__quantum__qis__mz(%[[VAL_16]]* %[[VAL_25]]) +// QIR: %[[VAL_28:.*]] = bitcast %[[VAL_27]]* %[[VAL_26]] to i1* +// QIR: %[[VAL_29:.*]] = load i1, i1* %[[VAL_28]], align 1 +// QIR: %[[VAL_30:.*]] = getelementptr i8, i8* %[[VAL_20]], i64 %[[VAL_22]] +// QIR: %[[VAL_31:.*]] = zext i1 %[[VAL_29]] to i8 +// QIR: store i8 %[[VAL_31]], i8* %[[VAL_30]], align 1 +// QIR: %[[VAL_23]] = add nuw nsw i64 %[[VAL_22]], 1 +// QIR: %[[VAL_32:.*]] = icmp eq i64 %[[VAL_23]], %[[VAL_6]] +// QIR: br i1 %[[VAL_32]], label %[[VAL_12]], label %[[VAL_21]] +// QIR: ._crit_edge5: ; preds = %[[VAL_21]], %[[VAL_9]], %[[VAL_19]] +// QIR: %[[VAL_33:.*]] = phi i8* [ %[[VAL_11]], %[[VAL_9]] ], [ %[[VAL_20]], %[[VAL_19]] ], [ %[[VAL_20]], %[[VAL_21]] ] +// QIR: %[[VAL_34:.*]] = tail call i8* @malloc(i64 %[[VAL_6]]) +// QIR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %[[VAL_34]], i8* nonnull align 1 %[[VAL_33]], i64 %[[VAL_6]], i1 false) +// QIR: %[[VAL_35:.*]] = bitcast i8* %[[VAL_34]] to i1* +// QIR: %[[VAL_36:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_35]], 0 +// QIR: %[[VAL_37:.*]] = insertvalue { i1*, i64 } %[[VAL_36]], i64 %[[VAL_6]], 1 +// QIR: tail call void @__quantum__rt__qubit_release_array(%[[VAL_5]]* %[[VAL_4]]) +// QIR: ret { i1*, i64 } %[[VAL_37]] +// QIR: } + +// QIR-LABEL: define i1 @__nvqpp__mlirgen__function_Strawberry._Z10Strawberryv() +// QIR: %[[VAL_0:.*]] = alloca [2 x { double, double }], align 8 +// QIR: %[[VAL_1:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 0 +// QIR: %[[VAL_2:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 0 +// QIR: %[[VAL_3:.*]] = bitcast [2 x { double, double }]* %[[VAL_0]] to i8* +// QIR: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) %[[VAL_3]], i8 0, i64 16, i1 false) +// QIR: 
store double 1.000000e+00, double* %[[VAL_2]], align 8 +// QIR: %[[VAL_4:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 1 +// QIR: store double 0.000000e+00, double* %[[VAL_4]], align 8 +// QIR: %[[VAL_5:.*]] = call %[[VAL_6:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 1, { double, double }* nonnull %[[VAL_1]]) +// QIR: %[[VAL_7:.*]] = call %[[VAL_8:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_6]]* %[[VAL_5]], i64 0) +// QIR: %[[VAL_9:.*]] = load %[[VAL_8]]*, %[[VAL_8]]** %[[VAL_7]], align 8 +// QIR: %[[VAL_10:.*]] = call %[[VAL_11:.*]]* @__quantum__qis__mz(%[[VAL_8]]* %[[VAL_9]]) +// QIR: %[[VAL_12:.*]] = bitcast %[[VAL_11]]* %[[VAL_10]] to i1* +// QIR: %[[VAL_13:.*]] = load i1, i1* %[[VAL_12]], align 1 +// QIR: call void @__quantum__rt__qubit_release_array(%[[VAL_6]]* %[[VAL_5]]) +// QIR: ret i1 %[[VAL_13]] +// QIR: } + +// QIR-LABEL: define i1 @__nvqpp__mlirgen__function_Peppermint._Z10Peppermintv() +// QIR: %[[VAL_0:.*]] = alloca [2 x { double, double }], align 8 +// QIR: %[[VAL_1:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 0 +// QIR: %[[VAL_2:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 0 +// QIR: store double 0x3FE6A09E667F3BCD, double* %[[VAL_2]], align 8 +// QIR: %[[VAL_3:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 0, i32 1 +// QIR: store double 0.000000e+00, double* %[[VAL_3]], align 8 +// QIR: %[[VAL_4:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 0 +// QIR: store double 0x3FE6A09E667F3BCD, double* %[[VAL_4]], align 8 +// QIR: %[[VAL_5:.*]] = getelementptr inbounds [2 x { double, double }], [2 x { double, double }]* %[[VAL_0]], i64 0, i64 1, i32 1 +// QIR: store double 0.000000e+00, double* %[[VAL_5]], align 8 +// QIR: %[[VAL_6:.*]] = call %[[VAL_7:.*]]* @__quantum__rt__qubit_allocate_array_with_state_complex64(i64 1, { double, double }* nonnull %[[VAL_1]]) +// QIR: %[[VAL_8:.*]] = call %[[VAL_9:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_7]]* %[[VAL_6]], i64 0) +// QIR: %[[VAL_10:.*]] = load %[[VAL_9]]*, %[[VAL_9]]** %[[VAL_8]], align 8 +// QIR: %[[VAL_11:.*]] = call %[[VAL_12:.*]]* @__quantum__qis__mz(%[[VAL_9]]* %[[VAL_10]]) +// QIR: %[[VAL_13:.*]] = bitcast %[[VAL_12]]* %[[VAL_11]] to i1* +// QIR: %[[VAL_14:.*]] = load i1, i1* %[[VAL_13]], align 1 +// QIR: call void @__quantum__rt__qubit_release_array(%[[VAL_7]]* %[[VAL_6]]) +// QIR: ret i1 %[[VAL_14]] // QIR: } diff --git a/test/AST-Quake/to_qir.cpp b/test/AST-Quake/to_qir.cpp index 367f9dcf6d5..ae424688e15 100644 --- a/test/AST-Quake/to_qir.cpp +++ b/test/AST-Quake/to_qir.cpp @@ -7,39 +7,61 @@ ******************************************************************************/ // REQUIRES: c++20 -// RUN: cudaq-quake %s | cudaq-opt --lower-to-cfg | cudaq-translate --convert-to=qir -o - | FileCheck %s +// clang-format off +// RUN: cudaq-quake %s | cudaq-opt --lower-to-cfg | cudaq-translate --convert-to=qir | FileCheck %s +// clang-format on #include struct kernel { - void operator()() __qpu__ { - cudaq::qarray<3> q; - h(q[1]); - x(q[1], q[2]); + void operator()() __qpu__ { + cudaq::qarray<3> q; + h(q[1]); + x(q[1], q[2]); - x(q[0], q[1]); - h(q[0]); + x(q[0], q[1]); + h(q[0]); - auto b0 = mz(q[0]); - auto b1 = mz(q[1]); + auto b0 = mz(q[0]); + auto b1 = mz(q[1]); - if (b1) 
x(q[2]); - if (b0) z(q[2]); - } + if (b1) + x(q[2]); + if (b0) + z(q[2]); + } }; +// clang-format off // CHECK-LABEL: define void @__nvqpp__mlirgen__kernel() -// CHECK: tail call %{{.*}}* @__quantum__rt__qubit_allocate_array(i64 3) -// CHECK: tail call i8* @__quantum__rt__array_get_element_ptr_1d(%{{.*}}* %{{.*}}, i64 1) -// CHECK: tail call void @__quantum__qis__h(%{{.*}}* %{{.*}}) -// CHECK: tail call i8* @__quantum__rt__array_get_element_ptr_1d(%{{.*}}* %{{.*}}, i64 2) -// CHECK: tail call void (i64, void (%{{.*}}*, %{{.*}}*)*, ...) @invokeWithControlQubits(i64 1, void (%{{.*}}*, %{{.*}}*)* nonnull @__quantum__qis__x__ctl, %{{.*}}* %{{.*}}, %{{.*}}* %{{.*}}) -// CHECK: tail call i8* @__quantum__rt__array_get_element_ptr_1d(%{{.*}}* %{{.*}}, i64 0) -// CHECK: tail call void (i64, void (%{{.*}}*, %{{.*}}*)*, ...) @invokeWithControlQubits(i64 1, void (%{{.*}}*, %{{.*}}*)* nonnull @__quantum__qis__x__ctl, %{{.*}}* %{{.*}}, %{{.*}}* %{{.*}}) -// CHECK: tail call void @__quantum__qis__h(%{{.*}}* %{{.*}}) -// CHECK: tail call %{{.*}}* @__quantum__qis__mz__to__register(%{{.*}}* %{{.*}}, i8* nonnull getelementptr inbounds ([3 x i8], [3 x i8]* @cstr.623000, i64 0, i64 0)) -// CHECK: tail call %{{.*}}* @__quantum__qis__mz__to__register(%{{.*}}* %{{.*}}, i8* nonnull getelementptr inbounds ([3 x i8], [3 x i8]* @cstr.623100, i64 0, i64 0)) -// CHECK: tail call void @__quantum__rt__qubit_release_array(%{{.*}}* %{{.*}}) +// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 3) +// CHECK: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_4:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_5:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// CHECK: %[[VAL_6:.*]] = load %Qubit*, %Qubit** %[[VAL_5]], align 8 +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_4]], %Qubit* %[[VAL_6]]) +// CHECK: %[[VAL_7:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_8:.*]] = load %Qubit*, %Qubit** %[[VAL_7]], align 8 +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_8]], %Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_8]]) +// CHECK: %[[VAL_9:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_8]], i8* nonnull getelementptr inbounds ([3 x i8], [3 x i8]* @cstr.623000, i64 0, i64 0)) +// CHECK: %[[VAL_11:.*]] = bitcast %Result* %[[VAL_9]] to i1* +// CHECK: %[[VAL_12:.*]] = load i1, i1* %[[VAL_11]], align 1 +// CHECK: %[[VAL_13:.*]] = tail call %Result* @__quantum__qis__mz__to__register(%Qubit* %[[VAL_4]], i8* nonnull getelementptr inbounds ([3 x i8], [3 x i8]* @cstr.623100, i64 0, i64 0)) +// CHECK: %[[VAL_14:.*]] = bitcast %Result* %[[VAL_13]] to i1* +// CHECK: %[[VAL_15:.*]] = load i1, i1* %[[VAL_14]], align 1 +// CHECK: br i1 %[[VAL_15]], label %[[VAL_16:.*]], label %[[VAL_17:.*]] +// CHECK: 14: ; preds = %[[VAL_18:.*]] +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_6]]) +// CHECK: br label %[[VAL_17]] +// CHECK: 15: ; preds = %[[VAL_16]], %[[VAL_18]] +// CHECK: br i1 %[[VAL_12]], label %[[VAL_19:.*]], label %[[VAL_20:.*]] +// CHECK: 16: ; preds = %[[VAL_17]] +// CHECK: tail call void @__quantum__qis__z(%Qubit* %[[VAL_6]]) +// CHECK: br label %[[VAL_20]] +// CHECK: 17: ; preds = %[[VAL_19]], %[[VAL_17]] +// CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // CHECK: ret void // CHECK: } diff --git a/test/NVQPP/struct_arg.cpp b/test/NVQPP/struct_arg.cpp index c663cc81115..a3f6e5fbd69 100644 --- a/test/NVQPP/struct_arg.cpp +++ b/test/NVQPP/struct_arg.cpp @@ -27,7 +27,7 @@ struct foo { // clang-format off // CHECK-LABEL: define void @_ZN3fooclI3bazEEvOT_i -// CHECK-SAME: (i8* nocapture readnone %{{.*}}, {}* nocapture readnone %{{.*}}, i32 %{{.*}}) +// CHECK-SAME: (i8* nocapture readnone %[[ARG0:[0-9]*]], {}* {{.*}}%[[ARG1:[0-9]*]], i32 %[[ARG2:[0-9]*]]) // clang-format on int main() { diff --git a/test/Translate/alloca_no_operand.qke b/test/Translate/alloca_no_operand.qke index b5c078b8e85..caa7f0624cd 100644 --- a/test/Translate/alloca_no_operand.qke +++ b/test/Translate/alloca_no_operand.qke @@ -61,47 +61,43 @@ func.func @adder_n4() { return } -// CHECK-LABEL: @adder_n4() -// CHECK: %[[VAL_0:.*]] = tail call -// CHECK-SAME: %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 4) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %[[VAL_4]]** -// CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_7]], align 8 -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: %[[VAL_9:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 3) -// CHECK: %[[VAL_10:.*]] = bitcast i8* %[[VAL_9]] to %[[VAL_4]]** -// CHECK: %[[VAL_11:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_10]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_11]]) -// CHECK: %[[VAL_12:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 2) -// CHECK: %[[VAL_13:.*]] = 
bitcast i8* %[[VAL_12]] to %[[VAL_4]]** -// CHECK: %[[VAL_14:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_13]], align 8 -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_11]]) -// CHECK: tail call void @__quantum__qis__t(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: tail call void @__quantum__qis__t(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: tail call void @__quantum__qis__t(%[[VAL_4]]* %[[VAL_14]]) -// CHECK: tail call void @__quantum__qis__t__adj(%[[VAL_4]]* %[[VAL_11]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_5]], %Qubit* %[[VAL_8]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_11]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_11]], %Qubit* %[[VAL_5]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_8]], %Qubit* %[[VAL_14]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_5]], %Qubit* %[[VAL_8]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_11]]) -// CHECK: tail call void @__quantum__qis__t__adj(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: tail call void @__quantum__qis__t__adj(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: tail call void @__quantum__qis__t__adj(%[[VAL_4]]* %[[VAL_14]]) -// CHECK: tail call void @__quantum__qis__t(%[[VAL_4]]* %[[VAL_11]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_5]], %Qubit* %[[VAL_8]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) @invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_14]], %Qubit* %[[VAL_11]]) -// CHECK: tail call void @__quantum__qis__s(%[[VAL_4]]* %[[VAL_11]]) -// CHECK: tail call void (i64, void (%Array*, %Qubit*)*, ...) 
@invokeWithControlQubits(i64 1, void (%Array*, %Qubit*)* nonnull @__quantum__qis__x__ctl, %Qubit* %[[VAL_11]], %Qubit* %[[VAL_5]]) -// CHECK: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_11]]) -// CHECK: %[[VAL_15:.*]] = tail call %Result* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_17:.*]] = tail call %Result* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: %[[VAL_18:.*]] = tail call %Result* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_14]]) -// CHECK: %[[VAL_19:.*]] = tail call %Result* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_11]]) -// CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// CHECK-LABEL: define void @adder_n4() local_unnamed_addr { +// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 4) +// CHECK: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_5:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_6:.*]] = load %Qubit*, %Qubit** %[[VAL_5]], align 8 +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_6]]) +// CHECK: %[[VAL_7:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 3) +// CHECK: %[[VAL_8:.*]] = load %Qubit*, %Qubit** %[[VAL_7]], align 8 +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_8]]) +// CHECK: %[[VAL_9:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// CHECK: %[[VAL_10:.*]] = load %Qubit*, %Qubit** %[[VAL_9]], align 8 +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_10]], %Qubit* %[[VAL_8]]) +// CHECK: tail call void @__quantum__qis__t(%Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__qis__t(%Qubit* %[[VAL_6]]) +// CHECK: tail call void @__quantum__qis__t(%Qubit* %[[VAL_10]]) +// CHECK: tail call void @__quantum__qis__tdg(%Qubit* %[[VAL_8]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_4]], %Qubit* %[[VAL_6]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_10]], %Qubit* %[[VAL_8]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_8]], %Qubit* %[[VAL_4]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_6]], %Qubit* %[[VAL_10]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_4]], %Qubit* %[[VAL_6]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_10]], %Qubit* %[[VAL_8]]) +// CHECK: tail call void @__quantum__qis__tdg(%Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__qis__tdg(%Qubit* %[[VAL_6]]) +// CHECK: tail call void @__quantum__qis__tdg(%Qubit* %[[VAL_10]]) +// CHECK: tail call void @__quantum__qis__t(%Qubit* %[[VAL_8]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_4]], %Qubit* %[[VAL_6]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_10]], %Qubit* %[[VAL_8]]) +// CHECK: tail call void @__quantum__qis__s(%Qubit* %[[VAL_8]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), %Qubit* %[[VAL_8]], %Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_8]]) +// CHECK: %[[VAL_11:.*]] = tail call %Result* @__quantum__qis__mz(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_13:.*]] = tail call %Result* @__quantum__qis__mz(%Qubit* %[[VAL_6]]) +// CHECK: %[[VAL_14:.*]] = tail call %Result* @__quantum__qis__mz(%Qubit* %[[VAL_10]]) +// CHECK: %[[VAL_15:.*]] = tail call %Result* @__quantum__qis__mz(%Qubit* %[[VAL_8]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // CHECK: ret void +// CHECK: } diff --git a/test/Translate/base_profile-1.qke b/test/Translate/base_profile-1.qke index 685bd559953..0e6f2f91855 100644 --- a/test/Translate/base_profile-1.qke +++ b/test/Translate/base_profile-1.qke @@ -8,7 +8,7 @@ // RUN: cudaq-translate --convert-to=qir-base %s | FileCheck %s -func.func @__nvqpp__mlirgen__ghz() { +func.func @__nvqpp__mlirgen__ghz() attributes {"cudaq-kernel"} { %c2_i32 = arith.constant 2 : i32 %c1_i32 = arith.constant 1 : i32 %c1 = arith.constant 1 : index @@ -30,17 +30,17 @@ func.func @__nvqpp__mlirgen__ghz() { return } -// CHECK-LABEL: __nvqpp__mlirgen__ghz -// CHECK: tail call void @__quantum__qis__h__body(%[[VAL_0:.*]]* null) -// CHECK: tail call void @__quantum__qis__cnot__body(%Qubit* null, %Qubit* nonnull inttoptr (i64 1 to %Qubit*)) -// CHECK: tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Qubit* nonnull inttoptr (i64 2 to %Qubit*)) -// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0]]* null, %[[VAL_1:.*]]* null) -// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*), %[[VAL_1]]* nonnull inttoptr (i64 1 to %[[VAL_1]]*)) -// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0]]* nonnull inttoptr (i64 2 to %[[VAL_0]]*), %[[VAL_1]]* nonnull inttoptr (i64 2 to %[[VAL_1]]*)) -// CHECK: tail call void @__quantum__rt__result_record_output(%Result* null, i8* nonnull -// CHECK: tail call void @__quantum__rt__result_record_output(%Result* nonnull 
inttoptr (i64 1 to %Result*), i8* nonnull -// CHECK: tail call void @__quantum__rt__result_record_output(%Result* nonnull inttoptr (i64 2 to %Result*), i8* nonnull -// CHECK: ret void +// CHECK-LABEL: define void @__nvqpp__mlirgen__ghz() +// CHECK: tail call void @__quantum__qis__h__body(%[[VAL_0:.*]]* null) +// CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* null, %[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*)) +// CHECK: tail call void @__quantum__qis__cnot__body(%[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*), %[[VAL_0]]* nonnull inttoptr (i64 2 to %[[VAL_0]]*)) +// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0]]* null, %[[VAL_1:.*]]* null) +// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*), %[[VAL_1]]* nonnull inttoptr (i64 1 to %[[VAL_1]]*)) +// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0]]* nonnull inttoptr (i64 2 to %[[VAL_0]]*), %[[VAL_1]]* nonnull inttoptr (i64 2 to %[[VAL_1]]*)) +// CHECK: tail call void @__quantum__rt__result_record_output(%[[VAL_1]]* null, i8* nonnull getelementptr inbounds ([7 x i8], [7 x i8]* @cstr.72303030303000, i64 0, i64 0)) +// CHECK: tail call void @__quantum__rt__result_record_output(%[[VAL_1]]* nonnull inttoptr (i64 1 to %[[VAL_1]]*), i8* nonnull getelementptr inbounds ([7 x i8], [7 x i8]* @cstr.72303030303100, i64 0, i64 0)) +// CHECK: tail call void @__quantum__rt__result_record_output(%[[VAL_1]]* nonnull inttoptr (i64 2 to %[[VAL_1]]*), i8* nonnull getelementptr inbounds ([7 x i8], [7 x i8]* @cstr.72303030303200, i64 0, i64 0)) +// CHECK: ret void +// CHECK: } -// CHECK: attributes #0 = { "irreversible" } -// CHECK: attributes #1 = { "entry_point" "output_labeling_schema"="schema_id" "output_names"="{{.*}}" "qir_profiles"="base_profile" "requiredQubits"="3" "requiredResults"="3" } +// CHECK: = { "output_labeling_schema"="schema_id" "output_names"="{{\[\[\[}}0,[0,\22r00000\22]],[1,[1,\22r00001\22]],[2,[2,\22r00002\22]]]]" "qir_profiles"="base_profile" "requiredQubits"="3" "requiredResults"="3" } diff --git a/test/Translate/base_profile-2.qke b/test/Translate/base_profile-2.qke index 69129c22f5e..92226ee6bbc 100644 --- a/test/Translate/base_profile-2.qke +++ b/test/Translate/base_profile-2.qke @@ -8,7 +8,7 @@ // RUN: cudaq-translate --convert-to=qir-base %s | FileCheck %s -func.func @__nvqpp__mlirgen__t1() { +func.func @__nvqpp__mlirgen__t1() attributes {"cudaq-kernel"} { %c2_i32 = arith.constant 2 : i32 %0 = arith.extsi %c2_i32 : i32 to i64 %c2_i64 = arith.constant 2 : i64 @@ -21,8 +21,9 @@ func.func @__nvqpp__mlirgen__t1() { } // CHECK-LABEL: define void @__nvqpp__mlirgen__t1() -// CHECK: tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Result* null) -// CHECK: tail call void @__quantum__rt__result_record_output(%Result* null, i8* nonnull -// CHECK: ret void +// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0:.*]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*), %[[VAL_1:.*]]* null) +// CHECK: tail call void @__quantum__rt__result_record_output(%[[VAL_1]]* null, i8* nonnull getelementptr inbounds ([7 x i8], [7 x i8]* @cstr.72303030303000, i64 0, i64 0)) +// CHECK: ret void +// CHECK: } -// CHECK: { "entry_point" "output_labeling_schema"="schema_id" "output_names"="{{.*}}" "qir_profiles"="base_profile" "requiredQubits"="2" "requiredResults"="1" } +// CHECK: = { "output_labeling_schema"="schema_id" "output_names"="{{\[\[\[}}0,[1,\22r00000\22]]]]" "qir_profiles"="base_profile" "requiredQubits"="2" 
"requiredResults"="1" } diff --git a/test/Translate/base_profile-3.qke b/test/Translate/base_profile-3.qke index 924a6ac6a7b..d08b687a9af 100644 --- a/test/Translate/base_profile-3.qke +++ b/test/Translate/base_profile-3.qke @@ -8,7 +8,7 @@ // RUN: cudaq-translate --convert-to=qir-base %s | FileCheck %s -func.func @__nvqpp__mlirgen__t1() { +func.func @__nvqpp__mlirgen__t1() attributes {"cudaq-kernel"} { %c2_i32 = arith.constant 2 : i32 %0 = arith.extsi %c2_i32 : i32 to i64 %c2_i64 = arith.constant 2 : i64 @@ -21,8 +21,9 @@ func.func @__nvqpp__mlirgen__t1() { } // CHECK-LABEL: define void @__nvqpp__mlirgen__t1() -// CHECK: tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Result* null) -// CHECK: tail call void @__quantum__rt__result_record_output(%Result* null, i8* nonnull getelementptr inbounds ([4 x i8], [4 x i8]* @cstr.426F6200, i64 0, i64 0) -// CHECK: ret void +// CHECK: tail call void @__quantum__qis__mz__body(%[[VAL_0:.*]]* nonnull inttoptr (i64 1 to %[[VAL_0]]*), %[[VAL_1:.*]]* null) +// CHECK: tail call void @__quantum__rt__result_record_output(%[[VAL_1]]* null, i8* nonnull getelementptr inbounds ([4 x i8], [4 x i8]* @cstr.426F6200, i64 0, i64 0)) +// CHECK: ret void +// CHECK: } -// CHECK: { "entry_point" "output_labeling_schema"="schema_id" "output_names"="{{.*}}" "qir_profiles"="base_profile" "requiredQubits"="2" "requiredResults"="1" } +// CHECK: = { "output_labeling_schema"="schema_id" "output_names"="{{\[\[\[}}0,[1,\22Bob\22]]]]" "qir_profiles"="base_profile" "requiredQubits"="2" "requiredResults"="1" } diff --git a/test/Translate/basic.qke b/test/Translate/basic.qke index 28a12750240..12077b90cb5 100644 --- a/test/Translate/basic.qke +++ b/test/Translate/basic.qke @@ -16,16 +16,16 @@ func.func @test_func(%p : i32) { } // CHECK-LABEL: define void @test_func(i32 -// CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { +// CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { // CHECK: %[[VAL_1:.*]] = zext i32 %[[VAL_0]] to i64 -// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_1]]) -// CHECK: %[[VAL_4:.*]] = tail call %[[VAL_3]]* @__quantum__rt__qubit_allocate_array(i64 2) -// CHECK-DAG: tail call void @__quantum__rt__qubit_release_array(%[[VAL_3]]* %[[VAL_4]]) -// CHECK-DAG: tail call void @__quantum__rt__qubit_release_array(%[[VAL_3]]* %[[VAL_2]]) +// CHECK: %[[VAL_2:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_1]]) +// CHECK: %[[VAL_4:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 2) +// CHECK-DAG: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_4]]) +// CHECK-DAG: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_2]]) // CHECK: ret void // CHECK: } -func.func @test_func2(){ +func.func @test_func2() { %zero = arith.constant 0 : i32 %one = arith.constant 1 : i32 %neg = arith.constant -5 : i32 @@ -51,16 +51,15 @@ func.func @test_func2(){ // CHECK-LABEL: define void @test_func2() local_unnamed_addr { // CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 5) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 3) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to 
%[[VAL_4]]** -// CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_7]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: tail call void (i64, void (%[[VAL_1]]*, %[[VAL_4]]*)*, ...) @invokeWithControlQubits(i64 1, void (%[[VAL_1]]*, %[[VAL_4]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]* %[[VAL_8]]) -// CHECK: tail call void @__quantum__qis__rx(double 4.300000e-01, %[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_9:.*]] = tail call %[[VAL_10:.*]]* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_5]]) +// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %[[VAL_3]]*, %[[VAL_3]]** %[[VAL_2]], align 8 +// CHECK: %[[VAL_5:.*]] = tail call %[[VAL_3]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 3) +// CHECK: %[[VAL_6:.*]] = bitcast %[[VAL_3]]** %[[VAL_5]] to i8** +// CHECK: %[[VAL_7:.*]] = load i8*, i8** %[[VAL_6]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_3]]* %[[VAL_4]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%[[VAL_1]]*, %[[VAL_3]]*)* @__quantum__qis__x__ctl to i8*), %[[VAL_3]]* %[[VAL_4]], i8* %[[VAL_7]]) +// CHECK: tail call void @__quantum__qis__rx(double 4.300000e-01, %[[VAL_3]]* %[[VAL_4]]) +// CHECK: %[[VAL_8:.*]] = tail call %[[VAL_9:.*]]* @__quantum__qis__mz(%[[VAL_3]]* %[[VAL_4]]) // CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) // CHECK: ret void // CHECK: } @@ -74,20 +73,18 @@ func.func @test_ctrl_swap_basic() { } // CHECK-LABEL: define void @test_ctrl_swap_basic() local_unnamed_addr { -// CHECK: %[[VAL_12:.*]] = alloca i64, align 8 -// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 3) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %Qubit** -// CHECK: %[[VAL_5:.*]] = load %Qubit*, %Qubit** %[[VAL_3]], align 8 -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %Qubit** -// CHECK: %[[VAL_8:.*]] = load %Qubit*, %Qubit** %[[VAL_7]], align 8 -// CHECK: %[[VAL_9:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) -// CHECK: %[[VAL_10:.*]] = bitcast i8* %[[VAL_9]] to %Qubit** -// CHECK: %[[VAL_11:.*]] = load %Qubit*, %Qubit** %[[VAL_10]], align 8 -// CHECK: store i64 0, i64* %[[VAL_12]], align 8 -// CHECK: call void (i64, i64*, i64, void (%Array*, %Qubit*, %Qubit*)*, ...) 
@invokeWithControlRegisterOrQubits(i64 1, i64* nonnull %[[VAL_12]], i64 2, void (%Array*, %Qubit*, %Qubit*)* nonnull @__quantum__qis__swap__ctl, %Qubit* %[[VAL_5]], %Qubit* %[[VAL_8]], %Qubit* %[[VAL_11]]) -// CHECK: call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) +// CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 3) +// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = bitcast %[[VAL_3]]** %[[VAL_2]] to i8** +// CHECK: %[[VAL_5:.*]] = load i8*, i8** %[[VAL_4]], align 8 +// CHECK: %[[VAL_6:.*]] = tail call %[[VAL_3]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_7:.*]] = bitcast %[[VAL_3]]** %[[VAL_6]] to i8** +// CHECK: %[[VAL_8:.*]] = load i8*, i8** %[[VAL_7]], align 8 +// CHECK: %[[VAL_9:.*]] = tail call %[[VAL_3]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 2) +// CHECK: %[[VAL_10:.*]] = bitcast %[[VAL_3]]** %[[VAL_9]] to i8** +// CHECK: %[[VAL_11:.*]] = load i8*, i8** %[[VAL_10]], align 8 +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 2, i8* nonnull bitcast (void (%[[VAL_1]]*, %[[VAL_3]]*, %[[VAL_3]]*)* @__quantum__qis__swap__ctl to i8*), i8* %[[VAL_5]], i8* %[[VAL_8]], i8* %[[VAL_11]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) // CHECK: ret void // CHECK: } @@ -103,33 +100,20 @@ func.func @test_ctrl_swap_complex() { } // CHECK-LABEL: define void @test_ctrl_swap_complex() local_unnamed_addr { -// CHECK: %[[VAL_17:.*]] = alloca [2 x i64], align 8 -// CHECK: %[[VAL_13:.*]] = alloca [2 x i64], align 8 -// CHECK: %[[VAL_14:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[VAL_13]], i64 0, i64 0 -// CHECK: %[[VAL_18:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[VAL_17]], i64 0, i64 0 -// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 7) -// CHECK: %[[VAL_2:.*]] = tail call %Array* @__quantum__rt__array_slice(%Array* %[[VAL_0]], i32 1, i64 0, i64 1, i64 3) -// CHECK: %[[VAL_3:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 4) -// CHECK: %[[VAL_4:.*]] = bitcast i8* %[[VAL_3]] to %Qubit** -// CHECK: %[[VAL_6:.*]] = load %Qubit*, %Qubit** %[[VAL_4]], align 8 -// CHECK: %[[VAL_7:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 5) -// CHECK: %[[VAL_8:.*]] = bitcast i8* %[[VAL_7]] to %Qubit** -// CHECK: %[[VAL_9:.*]] = load %Qubit*, %Qubit** %[[VAL_8]], align 8 -// CHECK: %[[VAL_10:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 6) -// CHECK: %[[VAL_11:.*]] = bitcast i8* %[[VAL_10]] to %Qubit** -// CHECK: %[[VAL_12:.*]] = load %Qubit*, %Qubit** %[[VAL_11]], align 8 -// CHECK: tail call void @__quantum__qis__swap__ctl(%Array* %[[VAL_2]], %Qubit* %[[VAL_6]], %Qubit* %[[VAL_9]]) -// CHECK: store i64 0, i64* %[[VAL_14]], align 8 -// CHECK: %[[VAL_15:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[VAL_13]], i64 0, i64 1 -// CHECK: %[[VAL_16:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%Array* %[[VAL_2]]) -// CHECK: store i64 %[[VAL_16]], i64* %[[VAL_15]], align 8 -// CHECK: call void (i64, i64*, i64, void (%Array*, %Qubit*, %Qubit*)*, ...) 
@invokeWithControlRegisterOrQubits(i64 2, i64* nonnull %[[VAL_14]], i64 2, void (%Array*, %Qubit*, %Qubit*)* nonnull @__quantum__qis__swap__ctl, %Qubit* %[[VAL_6]], %Array* %[[VAL_2]], %Qubit* %[[VAL_9]], %Qubit* %[[VAL_12]]) -// CHECK: %[[VAL_19:.*]] = call i64 @__quantum__rt__array_get_size_1d(%Array* %[[VAL_2]]) -// CHECK: store i64 %[[VAL_19]], i64* %[[VAL_18]], align 8 -// CHECK: %[[VAL_20:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[VAL_17]], i64 0, i64 1 -// CHECK: store i64 0, i64* %[[VAL_20]], align 8 -// CHECK: call void (i64, i64*, i64, void (%Array*, %Qubit*, %Qubit*)*, ...) @invokeWithControlRegisterOrQubits(i64 2, i64* nonnull %[[VAL_18]], i64 2, void (%Array*, %Qubit*, %Qubit*)* nonnull @__quantum__qis__swap__ctl, %Array* %[[VAL_2]], %Qubit* %[[VAL_9]], %Qubit* %[[VAL_6]], %Qubit* %[[VAL_12]]) -// CHECK: call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) +// CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 7) +// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_slice(%[[VAL_1]]* %[[VAL_0]], i32 1, i64 0, i64 1, i64 3) +// CHECK: %[[VAL_3:.*]] = tail call %[[VAL_4:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 4) +// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 +// CHECK: %[[VAL_6:.*]] = tail call %[[VAL_4]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 5) +// CHECK: %[[VAL_7:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_6]], align 8 +// CHECK: %[[VAL_8:.*]] = tail call %[[VAL_4]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 6) +// CHECK: %[[VAL_9:.*]] = bitcast %[[VAL_4]]** %[[VAL_8]] to i8** +// CHECK: %[[VAL_10:.*]] = load i8*, i8** %[[VAL_9]], align 8 +// CHECK: tail call void @__quantum__qis__swap__ctl(%[[VAL_1]]* %[[VAL_2]], %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]* %[[VAL_7]]) +// CHECK: %[[VAL_11:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_1]]* %[[VAL_2]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 1, i64 1, i64 2, i8* nonnull bitcast (void (%[[VAL_1]]*, %[[VAL_4]]*, %[[VAL_4]]*)* @__quantum__qis__swap__ctl to i8*), i64 %[[VAL_11]], %[[VAL_1]]* %[[VAL_2]], %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]* %[[VAL_7]], i8* %[[VAL_10]]) +// CHECK: %[[VAL_12:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_1]]* %[[VAL_2]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 1, i64 1, i64 2, i8* nonnull bitcast (void (%[[VAL_1]]*, %[[VAL_4]]*, %[[VAL_4]]*)* @__quantum__qis__swap__ctl to i8*), i64 %[[VAL_12]], %[[VAL_1]]* %[[VAL_2]], %[[VAL_4]]* %[[VAL_7]], %[[VAL_4]]* %[[VAL_5]], i8* %[[VAL_10]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) // CHECK: ret void // CHECK: } - diff --git a/test/Translate/const_array.qke b/test/Translate/const_array.qke index 6c3808789ca..3b1ba642c09 100644 --- a/test/Translate/const_array.qke +++ b/test/Translate/const_array.qke @@ -22,16 +22,10 @@ func.func @f() { return } +// CHECK-LABEL: @f.rodata_0 = private constant [3 x i32] [i32 0, i32 1, i32 0] + // CHECK-LABEL: define void @f() local_unnamed_addr { -// CHECK: %[[VAL_0:.*]] = alloca [3 x i32], align 4 -// CHECK: %[[VAL_1:.*]] = getelementptr inbounds [3 x i32], [3 x i32]* %[[VAL_0]], i64 0, i64 0 -// CHECK: store i32 0, i32* %[[VAL_1]], align 4 -// CHECK: %[[VAL_2:.*]] = getelementptr inbounds [3 x i32], [3 x i32]* %[[VAL_0]], i64 0, i64 1 -// CHECK: store i32 1, i32* %[[VAL_2]], align 4 -// CHECK: %[[VAL_3:.*]] = getelementptr inbounds [3 x i32], [3 x i32]* %[[VAL_0]], i64 0, i64 2 -// CHECK: store i32 0, i32* %[[VAL_3]], align 4 -// CHECK: %[[VAL_4:.*]] = insertvalue { i32*, i64 } undef, i32* %[[VAL_1]], 0 -// CHECK: %[[VAL_5:.*]] = insertvalue { i32*, i64 } %[[VAL_4]], i64 3, 1 -// CHECK: call void @g({ i32*, i64 } %[[VAL_5]]) +// CHECK: tail call void @g({ i32*, i64 } { i32* getelementptr inbounds ([3 x i32], [3 x i32]* @f.rodata_0, i32 0, i32 0), i64 3 }) // CHECK: ret void // CHECK: } + diff --git a/test/Translate/custom_operation.qke b/test/Translate/custom_operation.qke index 115c24b89b3..8a826dbeb0d 100644 --- a/test/Translate/custom_operation.qke +++ b/test/Translate/custom_operation.qke @@ -13,6 +13,8 @@ module { func.func @__nvqpp__mlirgen__function_kernel._Z6kernelv() attributes {"cudaq-entrypoint", "cudaq-kernel", no_this} { %0 = quake.alloca !quake.ref quake.h %0 : (!quake.ref) -> () + // NB: the custom sans "adj" in the name is called with the modifier, + // while the custom with "adj" in the name is called with no modifiers. 
quake.custom_op @__nvqpp__mlirgen__function_custom_s_generator_1._Z20custom_s_generator_1RKSt6vectorIdSaIdEE.rodata_0 %0 : (!quake.ref) -> () quake.custom_op @__nvqpp__mlirgen__function_custom_s_adj_generator_1._Z24custom_s_adj_generator_1RKSt6vectorIdSaIdEE.rodata_1 %0 : (!quake.ref) -> () quake.h %0 : (!quake.ref) -> () @@ -24,25 +26,22 @@ module { } -// CHECK: %[[VAL_0:.*]] = tail call -// CHECK: %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 1) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_6:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 1) -// CHECK: %[[VAL_7:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_6]], i64 0) -// CHECK: %[[VAL_8:.*]] = bitcast i8* %[[VAL_7]] to %[[VAL_4]]** -// CHECK: store %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]** %[[VAL_8]], align 8 -// CHECK: %[[VAL_9:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 0) -// CHECK: tail call void @__quantum__qis__custom_unitary__adj({ double, double }* nonnull getelementptr inbounds ([4 x { double, double }], [4 x { double, double }]* @__nvqpp__mlirgen__function_custom_s_generator_1._Z20custom_s_generator_1RKSt6vectorIdSaIdEE.rodata_0, i64 0, i64 0), %[[VAL_1]]* %[[VAL_9]], %[[VAL_1]]* %[[VAL_6]], i8* nonnull getelementptr inbounds ([18 x i8], [18 x i8]* @cstr.{{.*}}, i64 0, i64 0)) -// CHECK: %[[VAL_10:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 1) -// CHECK: %[[VAL_11:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_10]], i64 0) -// CHECK: %[[VAL_12:.*]] = bitcast i8* %[[VAL_11]] to %[[VAL_4]]** -// CHECK: store %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]** %[[VAL_12]], align 8 -// CHECK: %[[VAL_13:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 0) -// CHECK: tail call void @__quantum__qis__custom_unitary({ double, double }* nonnull getelementptr inbounds ([4 x { double, double }], [4 x { double, double }]* @__nvqpp__mlirgen__function_custom_s_adj_generator_1._Z24custom_s_adj_generator_1RKSt6vectorIdSaIdEE.rodata_1, i64 0, i64 0), %[[VAL_1]]* %[[VAL_13]], %[[VAL_1]]* %[[VAL_10]], i8* nonnull getelementptr inbounds ([22 x i8], [22 x i8]* @cstr.{{.*}}, i64 0, i64 0)) -// CHECK: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_14:.*]] = tail call %[[VAL_15:.*]]* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// CHECK-LABEL: define void @__nvqpp__mlirgen__function_kernel._Z6kernelv() +// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 1) +// CHECK: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_5:.*]] = tail call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) +// CHECK: %[[VAL_6:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_5]], i64 0) +// CHECK: store %Qubit* %[[VAL_4]], %Qubit** %[[VAL_6]], align 8 +// CHECK: tail call void @__quantum__qis__custom_unitary__adj({ double, double }* 
nonnull getelementptr inbounds ([4 x { double, double }], [4 x { double, double }]* @__nvqpp__mlirgen__function_custom_s_generator_1.{{.*}}, i64 0, i64 0), %Array* null, %Array* %[[VAL_5]], i8* nonnull getelementptr inbounds ([18 x i8], [18 x i8]* @cstr.66756E6374696F6E5F637573746F6D5F7300, i64 0, i64 0)) +// CHECK: %[[VAL_7:.*]] = tail call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) +// CHECK: %[[VAL_8:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_7]], i64 0) +// CHECK: store %Qubit* %[[VAL_4]], %Qubit** %[[VAL_8]], align 8 +// CHECK: tail call void @__quantum__qis__custom_unitary({ double, double }* nonnull getelementptr inbounds ([4 x { double, double }], [4 x { double, double }]* @__nvqpp__mlirgen__function_custom_s_adj_generator_1.{{.*}}, i64 0, i64 0), %Array* null, %Array* %[[VAL_7]], i8* nonnull getelementptr inbounds ([22 x i8], [22 x i8]* @cstr.66756E6374696F6E5F637573746F6D5F735F61646A00, i64 0, i64 0)) +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_9:.*]] = tail call %[[VAL_10:.*]]* @__quantum__qis__mz(%Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // CHECK: ret void +// CHECK: } + diff --git a/test/Translate/emit-mlir.qke b/test/Translate/emit-mlir.qke index 968b217764c..a43259d83ae 100644 --- a/test/Translate/emit-mlir.qke +++ b/test/Translate/emit-mlir.qke @@ -8,16 +8,7 @@ // RUN: cudaq-translate --convert-to=qir --emit-llvm=false %s | FileCheck %s -// CHECK-LABEL: llvm.func @test_func( -// CHECK-SAME: %[[ARG_0:.*]]: i32) { -// CHECK: %[[VAL_0:.*]] = llvm.zext %[[ARG_0]] : i32 to i64 -// CHECK: %[[VAL_1:.*]] = llvm.call @__quantum__rt__qubit_allocate_array(%[[VAL_0]]) : (i64) -> ![[ARRAY_TYPE:.*]] -// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(2 : i64) : i64 -// CHECK: %[[VAL_3:.*]] = llvm.call @__quantum__rt__qubit_allocate_array(%[[VAL_2]]) : (i64) -> ![[ARRAY_TYPE]] -// CHECK-DAG: llvm.call @__quantum__rt__qubit_release_array(%[[VAL_1]]) : (![[ARRAY_TYPE]]) -> () -// CHECK-DAG: llvm.call @__quantum__rt__qubit_release_array(%[[VAL_3]]) : (![[ARRAY_TYPE]]) -> () -// CHECK: llvm.return -// CHECK: } +// Test that the output is MLIR's LLVM-IR dialect and not LLVM IR itself. 
func.func @test_func(%p : i32) { %qv = quake.alloca !quake.veq<?>[%p : i32] @@ -25,3 +16,17 @@ %v = quake.alloca !quake.veq<?>[%t : i32] return } + +// CHECK-LABEL: llvm.func @test_func( +// CHECK-SAME: %[[VAL_0:.*]]: i32) { +// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(2 : i64) : i64 +// CHECK: %[[VAL_2:.*]] = llvm.zext %[[VAL_0]] : i32 to i64 +// CHECK: %[[VAL_3:.*]] = llvm.call @__quantum__rt__qubit_allocate_array(%[[VAL_2]]) : (i64) -> !llvm.ptr<struct<"Array", opaque>> +// CHECK: %[[VAL_4:.*]] = llvm.call @__quantum__rt__qubit_allocate_array(%[[VAL_1]]) : (i64) -> !llvm.ptr<struct<"Array", opaque>> +// CHECK-DAG: llvm.call @__quantum__rt__qubit_release_array(%[[VAL_4]]) : (!llvm.ptr<struct<"Array", opaque>>) -> () +// CHECK-DAG: llvm.call @__quantum__rt__qubit_release_array(%[[VAL_3]]) : (!llvm.ptr<struct<"Array", opaque>>) -> () +// CHECK: llvm.return +// CHECK: } + +// CHECK: llvm.func @__quantum__rt__qubit_allocate_array(i64) -> !llvm.ptr<struct<"Array", opaque>> attributes {sym_visibility = "private"} +// CHECK: llvm.func @__quantum__rt__qubit_release_array(!llvm.ptr<struct<"Array", opaque>>) attributes {sym_visibility = "private"} diff --git a/test/Translate/exp_pauli-1.qke b/test/Translate/exp_pauli-1.qke index c04278774ee..9d96cf32bf8 100644 --- a/test/Translate/exp_pauli-1.qke +++ b/test/Translate/exp_pauli-1.qke @@ -24,21 +24,22 @@ module attributes {quake.mangled_name_map = {__nvqpp__mlirgen__Z4mainE3$_0 = "_Z } } -// CHECK: %[[VAL_9:.*]] = alloca { i8*, i64 }, align 8 -// CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 4) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %[[VAL_4]]** -// CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_7]], align 8 -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: %[[VAL_10:.*]] = getelementptr inbounds { i8*, i64 }, { i8*, i64 }* %[[VAL_9]], i64 0, i32 0 -// CHECK: store i8* getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.5858585900, i64 0, i64 0), i8** %[[VAL_10]], align 8 -// CHECK: %[[VAL_11:.*]] = getelementptr inbounds { i8*, i64 }, { i8*, i64 }* %[[VAL_9]], i64 0, i32 1 -// CHECK: store i64 4, i64* %[[VAL_11]], align 8 -// CHECK: %[[VAL_12:.*]] = bitcast { i8*, i64 }* %[[VAL_9]] to i8* -// CHECK: call void @__quantum__qis__exp_pauli(double %[[VAL_13:.*]], %[[VAL_1]]* %[[VAL_0]], i8* nonnull %[[VAL_12]]) -// CHECK: call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// CHECK-LABEL: define void @"__nvqpp__mlirgen__Z4mainE3$_0"(double +// CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { +// CHECK: %[[VAL_1:.*]] = alloca [1 x { i8*, i64 }], align 8 +// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]* @__quantum__rt__qubit_allocate_array(i64 4) +// CHECK: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 0) +// CHECK: %[[VAL_6:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_4]], align 8 +// CHECK: tail call void @__quantum__qis__x(%[[VAL_5]]* %[[VAL_6]]) +// CHECK: %[[VAL_7:.*]] = tail call %[[VAL_5]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 1) +// CHECK: %[[VAL_8:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_7]], align 8 +//
CHECK: tail call void @__quantum__qis__x(%[[VAL_5]]* %[[VAL_8]]) +// CHECK: %[[VAL_9:.*]] = getelementptr inbounds [1 x { i8*, i64 }], [1 x { i8*, i64 }]* %[[VAL_1]], i64 0, i64 0, i32 0 +// CHECK: store i8* getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.5858585900, i64 0, i64 0), i8** %[[VAL_9]], align 8 +// CHECK: %[[VAL_10:.*]] = getelementptr inbounds [1 x { i8*, i64 }], [1 x { i8*, i64 }]* %[[VAL_1]], i64 0, i64 0, i32 1 +// CHECK: store i64 4, i64* %[[VAL_10]], align 8 +// CHECK: %[[VAL_11:.*]] = bitcast [1 x { i8*, i64 }]* %[[VAL_1]] to i8* +// CHECK: call void @__quantum__qis__exp_pauli(double %[[VAL_0]], %[[VAL_3]]* %[[VAL_2]], i8* nonnull %[[VAL_11]]) +// CHECK: call void @__quantum__rt__qubit_release_array(%[[VAL_3]]* %[[VAL_2]]) // CHECK: ret void +// CHECK: } diff --git a/test/Translate/exp_pauli-3.qke b/test/Translate/exp_pauli-3.qke index fdd2f3c3a58..5a6dd37fcc2 100644 --- a/test/Translate/exp_pauli-3.qke +++ b/test/Translate/exp_pauli-3.qke @@ -30,46 +30,39 @@ module attributes {quake.mangled_name_map = {__nvqpp__mlirgen__Z4mainE3$_0 = "_Z } } -// CHECK: %[[VAL_30:.*]] = alloca { i8*, i64 }, align 8 -// CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 4) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %[[VAL_4]]** -// CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_7]], align 8 -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: %[[VAL_9:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 2) -// CHECK: %[[VAL_10:.*]] = bitcast i8* %[[VAL_9]] to i8** -// CHECK: %[[VAL_11:.*]] = load i8*, i8** %[[VAL_10]], align 8 -// CHECK: %[[VAL_12:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 3) -// CHECK: %[[VAL_13:.*]] = bitcast i8* %[[VAL_12]] to i8** -// CHECK: %[[VAL_14:.*]] = load i8*, i8** %[[VAL_13]], align 8 -// CHECK: %[[VAL_15:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 1) -// CHECK: %[[VAL_16:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_15]], i64 0) -// CHECK: %[[VAL_17:.*]] = bitcast i8* %[[VAL_16]] to %[[VAL_4]]** -// CHECK: store %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]** %[[VAL_17]], align 8 -// CHECK: %[[VAL_18:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 1) -// CHECK: %[[VAL_19:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_18]], i64 0) -// CHECK: %[[VAL_20:.*]] = bitcast i8* %[[VAL_19]] to %[[VAL_4]]** -// CHECK: store %[[VAL_4]]* %[[VAL_8]], %[[VAL_4]]** %[[VAL_20]], align 8 -// CHECK: %[[VAL_21:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_concatenate(%[[VAL_1]]* %[[VAL_15]], %[[VAL_1]]* %[[VAL_18]]) -// CHECK: %[[VAL_22:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 1) -// CHECK: %[[VAL_23:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_22]], i64 0) -// CHECK: %[[VAL_24:.*]] = bitcast i8* %[[VAL_23]] to i8** -// CHECK: store i8* %[[VAL_11]], i8** %[[VAL_24]], align 8 -// CHECK: 
%[[VAL_25:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_concatenate(%[[VAL_1]]* %[[VAL_21]], %[[VAL_1]]* %[[VAL_22]]) -// CHECK: %[[VAL_26:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 1) -// CHECK: %[[VAL_27:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_26]], i64 0) -// CHECK: %[[VAL_28:.*]] = bitcast i8* %[[VAL_27]] to i8** -// CHECK: store i8* %[[VAL_14]], i8** %[[VAL_28]], align 8 -// CHECK: %[[VAL_29:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_concatenate(%[[VAL_1]]* %[[VAL_25]], %[[VAL_1]]* %[[VAL_26]]) -// CHECK: %[[VAL_31:.*]] = getelementptr inbounds { i8*, i64 }, { i8*, i64 }* %[[VAL_30]], i64 0, i32 0 +// CHECK-LABEL: __nvqpp__mlirgen__Z +// CHECK: %[[VAL_30:.*]] = alloca [1 x { i8*, i64 }], align 8 +// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 4) +// CHECK: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_5:.*]] = load %Qubit*, %Qubit** %[[VAL_2]] +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_5]]) +// CHECK: %[[VAL_6:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_8:.*]] = load %Qubit*, %Qubit** %[[VAL_6]] +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_8]]) +// CHECK: %[[VAL_9:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// CHECK: %[[VAL_11:.*]] = load %Qubit*, %Qubit** %[[VAL_9]] +// CHECK: %[[VAL_12:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 3) +// CHECK: %[[VAL_14:.*]] = load %Qubit*, %Qubit** %[[VAL_12]] +// CHECK: %[[VAL_15:.*]] = tail call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) +// CHECK: %[[VAL_16:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_15]], i64 0) +// CHECK: store %Qubit* %[[VAL_5]], %Qubit** %[[VAL_16]] +// CHECK: %[[VAL_18:.*]] = tail call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) +// CHECK: %[[VAL_19:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_18]], i64 0) +// CHECK: store %Qubit* %[[VAL_8]], %Qubit** %[[VAL_19]] +// CHECK: %[[VAL_21:.*]] = tail call %Array* @__quantum__rt__array_concatenate(%Array* %[[VAL_15]], %Array* %[[VAL_18]]) +// CHECK: %[[VAL_22:.*]] = tail call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) +// CHECK: %[[VAL_23:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_22]], i64 0) +// CHECK: store %Qubit* %[[VAL_11]], %Qubit** %[[VAL_23]] +// CHECK: %[[VAL_25:.*]] = tail call %Array* @__quantum__rt__array_concatenate(%Array* %[[VAL_21]], %Array* %[[VAL_22]]) +// CHECK: %[[VAL_26:.*]] = tail call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) +// CHECK: %[[VAL_27:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_26]], i64 0) +// CHECK: store %Qubit* %[[VAL_14]], %Qubit** %[[VAL_27]], align 8 +// CHECK: %[[VAL_29:.*]] = tail call %Array* @__quantum__rt__array_concatenate(%Array* %[[VAL_25]], %Array* %[[VAL_26]]) +// CHECK: %[[VAL_31:.*]] = getelementptr inbounds [1 x { i8*, i64 }], [1 x { i8*, i64 }]* %[[VAL_30]], i64 0, i64 0, i32 0 // CHECK: store i8* getelementptr inbounds ([5 x i8], [5 x i8]* @cstr.5858585900, i64 0, i64 0), i8** %[[VAL_31]], align 8 -// CHECK: %[[VAL_32:.*]] = getelementptr inbounds { i8*, i64 }, { i8*, i64 }* %[[VAL_30]], i64 0, i32 1 +// CHECK: %[[VAL_32:.*]] = getelementptr 
inbounds [1 x { i8*, i64 }], [1 x { i8*, i64 }]* %[[VAL_30]], i64 0, i64 0, i32 1 // CHECK: store i64 4, i64* %[[VAL_32]], align 8 -// CHECK: %[[VAL_33:.*]] = bitcast { i8*, i64 }* %[[VAL_30]] to i8* -// CHECK: call void @__quantum__qis__exp_pauli(double %[[VAL_34:.*]], %[[VAL_1]]* %[[VAL_29]], i8* nonnull %[[VAL_33]]) -// CHECK: call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// CHECK: %[[VAL_33:.*]] = bitcast [1 x { i8*, i64 }]* %[[VAL_30]] to i8* +// CHECK: call void @__quantum__qis__exp_pauli(double %[[VAL_34:.*]], %Array* %[[VAL_29]], i8* nonnull %[[VAL_33]]) +// CHECK: call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // CHECK: ret void diff --git a/test/Translate/ghz.qke b/test/Translate/ghz.qke index 702f971af79..e7e7c15884c 100644 --- a/test/Translate/ghz.qke +++ b/test/Translate/ghz.qke @@ -7,38 +7,6 @@ // ========================================================================== // // RUN: cudaq-opt %s --canonicalize --add-dealloc | cudaq-translate --convert-to=qir | FileCheck %s -module { -// CHECK: %[[VAL_0:.*]] = zext i32 -// CHECK-SAME: %[[VAL_1:.*]] to i64 -// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_0]]) -// CHECK: %[[VAL_4:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 0) -// CHECK: %[[VAL_5:.*]] = bitcast i8* %[[VAL_4]] to %[[VAL_6:.*]]** -// CHECK: %[[VAL_7:.*]] = load %[[VAL_6]]*, %[[VAL_6]]** %[[VAL_5]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_6]]* %[[VAL_7]]) -// CHECK: %[[VAL_8:.*]] = add i32 %[[VAL_1]], -1 -// CHECK: %[[VAL_9:.*]] = icmp eq i32 %[[VAL_8]], 0 -// CHECK: br i1 %[[VAL_9]], label %[[VAL_10:.*]], label %[[VAL_11:.*]] -// CHECK: .lr.ph.preheader: -// CHECK-SAME: ; preds = %[[VAL_12:.*]] -// CHECK: %[[VAL_13:.*]] = zext i32 %[[VAL_8]] to i64 -// CHECK: br label %[[VAL_14:.*]] -// CHECK: .lr.ph: -// CHECK-SAME: ; preds = %[[VAL_11]], %[[VAL_14]] -// CHECK: %[[VAL_15:.*]] = phi i64 [ 0, %[[VAL_11]] ], [ %[[VAL_16:.*]], %[[VAL_14]] ] -// CHECK: %[[VAL_17:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_15]]) -// CHECK: %[[VAL_18:.*]] = bitcast i8* %[[VAL_17]] to %[[VAL_6]]** -// CHECK: %[[VAL_19:.*]] = load %[[VAL_6]]*, %[[VAL_6]]** %[[VAL_18]], align 8 -// CHECK: %[[VAL_16]] = add nuw nsw i64 %[[VAL_15]], 1 -// CHECK: %[[VAL_20:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_16]]) -// CHECK: %[[VAL_21:.*]] = bitcast i8* %[[VAL_20]] to %[[VAL_6]]** -// CHECK: %[[VAL_22:.*]] = load %[[VAL_6]]*, %[[VAL_6]]** %[[VAL_21]], align 8 -// CHECK: tail call void (i64, void (%[[VAL_3]]*, %[[VAL_6]]*)*, ...) 
@invokeWithControlQubits(i64 1, void (%[[VAL_3]]*, %[[VAL_6]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_6]]* %[[VAL_19]], %[[VAL_6]]* %[[VAL_22]]) -// CHECK: %[[VAL_23:.*]] = icmp eq i64 %[[VAL_16]], %[[VAL_13]] -// CHECK: br i1 %[[VAL_23]], label %[[VAL_10]], label %[[VAL_14]] -// CHECK: ._crit_edge: -// CHECK-SAME: ; preds = %[[VAL_14]], %[[VAL_12]] -// CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_3]]* %[[VAL_2]]) -// CHECK: ret void func.func @ghz(%arg0: i32){ %c1_i32 = arith.constant 1 : i32 @@ -64,4 +32,34 @@ module { } return } -} + +// CHECK-LABEL: define void @ghz(i32 +// CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { +// CHECK: %[[VAL_1:.*]] = zext i32 %[[VAL_0]] to i64 +// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_1]]) +// CHECK: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 0) +// CHECK: %[[VAL_6:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_4]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_5]]* %[[VAL_6]]) +// CHECK: %[[VAL_7:.*]] = add i32 %[[VAL_0]], -1 +// CHECK: %[[VAL_8:.*]] = icmp eq i32 %[[VAL_7]], 0 +// CHECK: br i1 %[[VAL_8]], label %[[VAL_9:.*]], label %[[VAL_10:.*]] +// CHECK: : ; preds = %[[VAL_11:.*]] +// CHECK: %[[VAL_12:.*]] = zext i32 %[[VAL_7]] to i64 +// CHECK: br label %[[VAL_13:.*]] +// CHECK: : ; preds = %[[VAL_10]], %[[VAL_13]] +// CHECK: %[[VAL_14:.*]] = phi i64 [ 0, %[[VAL_10]] ], [ %[[VAL_15:.*]], %[[VAL_13]] ] +// CHECK: %[[VAL_16:.*]] = tail call %[[VAL_5]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_14]]) +// CHECK: %[[VAL_17:.*]] = bitcast %[[VAL_5]]** %[[VAL_16]] to i8** +// CHECK: %[[VAL_18:.*]] = load i8*, i8** %[[VAL_17]], align 8 +// CHECK: %[[VAL_15]] = add nuw nsw i64 %[[VAL_14]], 1 +// CHECK: %[[VAL_19:.*]] = tail call %[[VAL_5]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_15]]) +// CHECK: %[[VAL_20:.*]] = bitcast %[[VAL_5]]** %[[VAL_19]] to i8** +// CHECK: %[[VAL_21:.*]] = load i8*, i8** %[[VAL_20]], align 8 +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%[[VAL_3]]*, %[[VAL_5]]*)* @__quantum__qis__x__ctl to i8*), i8* %[[VAL_18]], i8* %[[VAL_21]]) +// CHECK: %[[VAL_22:.*]] = icmp eq i64 %[[VAL_15]], %[[VAL_12]] +// CHECK: br i1 %[[VAL_22]], label %[[VAL_9]], label %[[VAL_13]] +// CHECK: : ; preds = %[[VAL_13]], %[[VAL_11]] +// CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_3]]* %[[VAL_2]]) +// CHECK: ret void +// CHECK: } + diff --git a/test/Translate/measure.qke b/test/Translate/measure.qke index 7f38e3d0a7d..b6d6dfd4a14 100644 --- a/test/Translate/measure.qke +++ b/test/Translate/measure.qke @@ -22,15 +22,15 @@ func.func @test_func2(){ return } -// CHECK-LABEL: define void @test_func2 -// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 2) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %Qubit** -// CHECK: %[[VAL_5:.*]] = load %Qubit*, %Qubit** %[[VAL_3]], align 8 -// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_5]]) -// CHECK: %[[VAL_7:.*]] = tail call %Result* @__quantum__qis__mz(%Qubit* %[[VAL_5]]) -// CHECK: tail call void @__quantum__qis__s__adj(%Qubit* %[[VAL_5]]) -// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_5]]) -// CHECK: %[[VAL_9:.*]] = tail call %Result* @__quantum__qis__mz(%Qubit* %[[VAL_5]]) -// CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) -// CHECK: ret void +// CHECK-LABEL: define void @test_func2() +// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 2) +// CHECK: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_5:.*]] = tail call %[[VAL_6:.*]]* @__quantum__qis__mz(%Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__qis__sdg(%Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__qis__h(%Qubit* %[[VAL_4]]) +// CHECK: %[[VAL_7:.*]] = tail call %[[VAL_6]]* @__quantum__qis__mz(%Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) +// CHECK: ret void +// CHECK: } diff --git a/test/Translate/qalloc_initfloat.qke b/test/Translate/qalloc_initfloat.qke index 98636bc2320..a5fb071a604 100644 --- a/test/Translate/qalloc_initfloat.qke +++ b/test/Translate/qalloc_initfloat.qke @@ -22,8 +22,7 @@ func.func @__nvqpp__mlirgen__function_test._Z4testSt6vectorIfSaIfEE(%arg0: !cc.s // CHECK: %[[VAL_1:.*]] = extractvalue { float*, i64 } %[[VAL_0]], 1 // CHECK: %[[VAL_2:.*]] = tail call i64 @llvm.cttz.i64(i64 %[[VAL_1]], i1 false), !range !1 // CHECK: %[[VAL_3:.*]] = extractvalue { float*, i64 } %[[VAL_0]], 0 -// CHECK: %[[VAL_4:.*]] = bitcast float* %[[VAL_3]] to i8* -// CHECK: %[[VAL_5:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array_with_state_fp32(i64 %[[VAL_2]], i8* %[[VAL_4]]) +// CHECK: %[[VAL_5:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array_with_state_fp32(i64 %[[VAL_2]], float* %[[VAL_3]]) // CHECK: tail call void @__quantum__rt__qubit_release_array( // CHECK: ret void // CHECK: } diff --git a/test/Translate/qalloc_initialization.qke b/test/Translate/qalloc_initialization.qke index 8c784133b02..438ff0a841a 100644 --- a/test/Translate/qalloc_initialization.qke +++ 
b/test/Translate/qalloc_initialization.qke @@ -51,27 +51,16 @@ module attributes { } } -// CHECK-LABEL: define void @__nvqpp__mlirgen__function_test._Z4testSt6vectorIdSaIdEE() local_unnamed_addr { -// CHECK: %[[VAL_0:.*]] = alloca [4 x double], align 8 -// CHECK: %[[VAL_1:.*]] = getelementptr inbounds [4 x double], [4 x double]* %[[VAL_0]], i64 0, i64 0 -// CHECK: store double 1.000000e+00, double* %[[VAL_1]], align 8 -// CHECK: %[[VAL_2:.*]] = getelementptr inbounds [4 x double], [4 x double]* %[[VAL_0]], i64 0, i64 1 -// CHECK: %[[VAL_3:.*]] = getelementptr inbounds [4 x double], [4 x double]* %[[VAL_0]], i64 0, i64 3 -// CHECK: %[[VAL_4:.*]] = bitcast double* %[[VAL_2]] to i8* -// CHECK: call void @llvm.memset.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(16) %[[VAL_4]], i8 0, i64 16, i1 false) -// CHECK: store double 1.000000e+00, double* %[[VAL_3]], align 8 -// CHECK: %[[VAL_5:.*]] = bitcast [4 x double]* %[[VAL_0]] to i8* -// CHECK: %[[VAL_6:.*]] = call %[[VAL_7:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 2, i8* nonnull %[[VAL_5]]) -// CHECK: %[[VAL_8:.*]] = call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_7]]* %[[VAL_6]], i64 0) -// CHECK: %[[VAL_9:.*]] = bitcast i8* %[[VAL_8]] to %[[VAL_10:.*]]** -// CHECK: %[[VAL_11:.*]] = load %[[VAL_10]]*, %[[VAL_10]]** %[[VAL_9]], align 8 -// CHECK: call void @__quantum__qis__h(%[[VAL_10]]* %[[VAL_11]]) -// CHECK: %[[VAL_12:.*]] = call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_7]]* %[[VAL_6]], i64 1) -// CHECK: %[[VAL_13:.*]] = bitcast i8* %[[VAL_12]] to %[[VAL_10]]** -// CHECK: %[[VAL_14:.*]] = load %[[VAL_10]]*, %[[VAL_10]]** %[[VAL_13]], align 8 -// CHECK: call void @__quantum__qis__h(%[[VAL_10]]* %[[VAL_14]]) -// CHECK: call void (i64, void (%[[VAL_7]]*, %[[VAL_10]]*)*, ...) @invokeWithControlQubits(i64 1, void (%[[VAL_7]]*, %[[VAL_10]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_10]]* %[[VAL_14]], %[[VAL_10]]* %[[VAL_11]]) -// CHECK: call void @__quantum__rt__qubit_release_array(%[[VAL_7]]* %[[VAL_6]]) +// CHECK-LABEL: define void @__nvqpp__mlirgen__function_test. +// CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array_with_state_fp64(i64 2, double* nonnull getelementptr inbounds ([4 x double], [4 x double]* @__nvqpp__mlirgen__function_test._Z4testSt6vectorIdSaIdEE.rodata_0, i64 0, i64 0)) +// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %[[VAL_3]]*, %[[VAL_3]]** %[[VAL_2]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_3]]* %[[VAL_4]]) +// CHECK: %[[VAL_5:.*]] = tail call %[[VAL_3]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_6:.*]] = load %[[VAL_3]]*, %[[VAL_3]]** %[[VAL_5]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_3]]* %[[VAL_6]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%[[VAL_1]]*, %[[VAL_3]]*)* @__quantum__qis__x__ctl to i8*), %[[VAL_3]]* %[[VAL_6]], %[[VAL_3]]* %[[VAL_4]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) // CHECK: ret void // CHECK: } diff --git a/test/Translate/return_values.qke b/test/Translate/return_values.qke index 7db2d6d8472..5a8e6a11986 100644 --- a/test/Translate/return_values.qke +++ b/test/Translate/return_values.qke @@ -58,8 +58,9 @@ func.func @__nvqpp__mlirgen__test_0(%arg0: i32) -> !cc.stdvec { func.func @test_0(%1: !cc.ptr, !cc.ptr, !cc.ptr}>> {llvm.sret = !cc.struct<{!cc.ptr, !cc.ptr, !cc.ptr}>}, %this: !cc.ptr, %2: i32) { return } + // CHECK-LABEL: define { i1*, i64 } @__nvqpp__mlirgen__test_0(i32 -// CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { +// CHECK-SAME: %[[VAL_0:.*]]) local_unnamed_addr { // CHECK: %[[VAL_1:.*]] = sext i32 %[[VAL_0]] to i64 // CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]* @__quantum__rt__qubit_allocate_array(i64 %[[VAL_1]]) // CHECK: %[[VAL_4:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_3]]* %[[VAL_2]]) @@ -70,38 +71,36 @@ func.func @test_0(%1: !cc.ptr, !cc.ptr, !cc.ptr} // CHECK: br label %[[VAL_10:.*]] // CHECK: .lr.ph: ; preds = %[[VAL_8]], %[[VAL_6]] // CHECK: %[[VAL_11:.*]] = phi i64 [ %[[VAL_12:.*]], %[[VAL_6]] ], [ 0, %[[VAL_8]] ] -// CHECK: %[[VAL_13:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_11]]) -// CHECK: %[[VAL_14:.*]] = bitcast i8* %[[VAL_13]] to %[[VAL_15:.*]]** -// CHECK: %[[VAL_16:.*]] = load %[[VAL_15]]*, %[[VAL_15]]** %[[VAL_14]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_15]]* %[[VAL_16]]) +// CHECK: %[[VAL_13:.*]] = tail call %[[VAL_14:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_11]]) +// CHECK: %[[VAL_15:.*]] = load %[[VAL_14]]*, %[[VAL_14]]** %[[VAL_13]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_14]]* %[[VAL_15]]) // CHECK: %[[VAL_12]] = add nuw nsw i64 %[[VAL_11]], 1 -// CHECK: %[[VAL_17:.*]] = icmp eq i64 %[[VAL_12]], %[[VAL_4]] -// CHECK: br i1 %[[VAL_17]], label %[[VAL_18:.*]], label %[[VAL_6]] +// CHECK: %[[VAL_16:.*]] = icmp eq i64 %[[VAL_12]], %[[VAL_4]] +// CHECK: br i1 %[[VAL_16]], label %[[VAL_17:.*]], label %[[VAL_6]] // CHECK: ._crit_edge: ; preds = %[[VAL_6]] -// CHECK: %[[VAL_19:.*]] = alloca i8, i64 %[[VAL_4]], align 1 -// CHECK: br i1 %[[VAL_5]], label %[[VAL_20:.*]], label %[[VAL_10]] -// CHECK: .lr.ph4: ; preds = %[[VAL_18]], %[[VAL_20]] -// CHECK: %[[VAL_21:.*]] = phi i64 [ %[[VAL_22:.*]], %[[VAL_20]] ], [ 0, %[[VAL_18]] ] -// CHECK: %[[VAL_23:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_21]]) -// CHECK: %[[VAL_24:.*]] = bitcast i8* %[[VAL_23]] to %[[VAL_15]]** -// CHECK: %[[VAL_25:.*]] = load %[[VAL_15]]*, %[[VAL_15]]** %[[VAL_24]], align 8 -// CHECK: %[[VAL_26:.*]] = tail call %[[VAL_27:.*]]* @__quantum__qis__mz(%[[VAL_15]]* %[[VAL_25]]) -// CHECK: %[[VAL_28:.*]] = bitcast %[[VAL_27]]* %[[VAL_26]] to i1* -// CHECK: %[[VAL_29:.*]] = load i1, i1* %[[VAL_28]], align 1 -// CHECK: %[[VAL_30:.*]] = getelementptr i8, i8* %[[VAL_19]], i64 %[[VAL_21]] -// CHECK: %[[VAL_31:.*]] = zext i1 %[[VAL_29]] to i8 -// CHECK: store i8 %[[VAL_31]], i8* %[[VAL_30]], align 1 -// CHECK: %[[VAL_22]] = add nuw nsw i64 %[[VAL_21]], 1 -// CHECK: %[[VAL_32:.*]] = icmp eq i64 %[[VAL_22]], %[[VAL_4]] -// CHECK: br i1 %[[VAL_32]], label 
%[[VAL_10]], label %[[VAL_20]] -// CHECK: ._crit_edge5: ; preds = %[[VAL_20]], %[[VAL_7]], %[[VAL_18]] -// CHECK: %[[VAL_33:.*]] = phi i8* [ %[[VAL_9]], %[[VAL_7]] ], [ %[[VAL_19]], %[[VAL_18]] ], [ %[[VAL_19]], %[[VAL_20]] ] -// CHECK: %[[VAL_34:.*]] = call i8* @__nvqpp_vectorCopyCtor(i8* nonnull %[[VAL_33]], i64 %[[VAL_4]], i64 1) -// CHECK: %[[VAL_35:.*]] = bitcast i8* %[[VAL_34]] to i1* -// CHECK: %[[VAL_36:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_35]], 0 -// CHECK: %[[VAL_37:.*]] = insertvalue { i1*, i64 } %[[VAL_36]], i64 %[[VAL_4]], 1 +// CHECK: %[[VAL_18:.*]] = alloca i8, i64 %[[VAL_4]], align 1 +// CHECK: br i1 %[[VAL_5]], label %[[VAL_19:.*]], label %[[VAL_10]] +// CHECK: .lr.ph4: ; preds = %[[VAL_17]], %[[VAL_19]] +// CHECK: %[[VAL_20:.*]] = phi i64 [ %[[VAL_21:.*]], %[[VAL_19]] ], [ 0, %[[VAL_17]] ] +// CHECK: %[[VAL_22:.*]] = tail call %[[VAL_14]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 %[[VAL_20]]) +// CHECK: %[[VAL_23:.*]] = load %[[VAL_14]]*, %[[VAL_14]]** %[[VAL_22]], align 8 +// CHECK: %[[VAL_24:.*]] = tail call %[[VAL_25:.*]]* @__quantum__qis__mz(%[[VAL_14]]* %[[VAL_23]]) +// CHECK: %[[VAL_26:.*]] = bitcast %[[VAL_25]]* %[[VAL_24]] to i1* +// CHECK: %[[VAL_27:.*]] = load i1, i1* %[[VAL_26]], align 1 +// CHECK: %[[VAL_28:.*]] = getelementptr i8, i8* %[[VAL_18]], i64 %[[VAL_20]] +// CHECK: %[[VAL_29:.*]] = zext i1 %[[VAL_27]] to i8 +// CHECK: store i8 %[[VAL_29]], i8* %[[VAL_28]], align 1 +// CHECK: %[[VAL_21]] = add nuw nsw i64 %[[VAL_20]], 1 +// CHECK: %[[VAL_30:.*]] = icmp eq i64 %[[VAL_21]], %[[VAL_4]] +// CHECK: br i1 %[[VAL_30]], label %[[VAL_10]], label %[[VAL_19]] +// CHECK: ._crit_edge5: ; preds = %[[VAL_19]], %[[VAL_7]], %[[VAL_17]] +// CHECK: %[[VAL_31:.*]] = phi i8* [ %[[VAL_9]], %[[VAL_7]] ], [ %[[VAL_18]], %[[VAL_17]] ], [ %[[VAL_18]], %[[VAL_19]] ] +// CHECK: %[[VAL_32:.*]] = call i8* @__nvqpp_vectorCopyCtor(i8* nonnull %[[VAL_31]], i64 %[[VAL_4]], i64 1) +// CHECK: %[[VAL_33:.*]] = bitcast i8* %[[VAL_32]] to i1* +// CHECK: %[[VAL_34:.*]] = insertvalue { i1*, i64 } undef, i1* %[[VAL_33]], 0 +// CHECK: %[[VAL_35:.*]] = insertvalue { i1*, i64 } %[[VAL_34]], i64 %[[VAL_4]], 1 // CHECK: call void @__quantum__rt__qubit_release_array(%[[VAL_3]]* %[[VAL_2]]) -// CHECK: ret { i1*, i64 } %[[VAL_37]] +// CHECK: ret { i1*, i64 } %[[VAL_35]] // CHECK: } // CHECK-LABEL: define void @test_0({ i8*, i8*, i8* }* sret({ i8*, i8*, i8* }) @@ -171,26 +170,25 @@ func.func @test_1(%this: !cc.ptr) -> i16 { return %0 : i16 } + // CHECK-LABEL: define { i1, i1 } @__nvqpp__mlirgen__test_1() local_unnamed_addr { // CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 2) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %[[VAL_4]]** -// CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_7]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: tail call void (i64, void (%[[VAL_1]]*, %[[VAL_4]]*)*, ...) 
@invokeWithControlQubits(i64 1, void (%[[VAL_1]]*, %[[VAL_4]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]* %[[VAL_8]]) -// CHECK: %[[VAL_9:.*]] = tail call %[[VAL_10:.*]]* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: %[[VAL_11:.*]] = bitcast %[[VAL_10]]* %[[VAL_9]] to i1* -// CHECK: %[[VAL_12:.*]] = load i1, i1* %[[VAL_11]], align 1 -// CHECK: %[[VAL_13:.*]] = tail call %[[VAL_10]]* @__quantum__qis__mz(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: %[[VAL_14:.*]] = bitcast %[[VAL_10]]* %[[VAL_13]] to i1* -// CHECK: %[[VAL_15:.*]] = load i1, i1* %[[VAL_14]], align 1 -// CHECK: %[[VAL_16:.*]] = insertvalue { i1, i1 } undef, i1 %[[VAL_12]], 0 -// CHECK: %[[VAL_17:.*]] = insertvalue { i1, i1 } %[[VAL_16]], i1 %[[VAL_15]], 1 +// CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %[[VAL_3]]*, %[[VAL_3]]** %[[VAL_2]], align 8 +// CHECK: %[[VAL_5:.*]] = tail call %[[VAL_3]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_6:.*]] = load %[[VAL_3]]*, %[[VAL_3]]** %[[VAL_5]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_3]]* %[[VAL_4]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%[[VAL_1]]*, %[[VAL_3]]*)* @__quantum__qis__x__ctl to i8*), %[[VAL_3]]* %[[VAL_4]], %[[VAL_3]]* %[[VAL_6]]) +// CHECK: %[[VAL_7:.*]] = tail call %[[VAL_8:.*]]* @__quantum__qis__mz(%[[VAL_3]]* %[[VAL_4]]) +// CHECK: %[[VAL_9:.*]] = tail call %[[VAL_8]]* @__quantum__qis__mz(%[[VAL_3]]* %[[VAL_6]]) +// CHECK: %[[VAL_10:.*]] = bitcast %[[VAL_8]]* %[[VAL_7]] to i1* +// CHECK: %[[VAL_11:.*]] = load i1, i1* %[[VAL_10]], align 1 +// CHECK: %[[VAL_12:.*]] = insertvalue { i1, i1 } undef, i1 %[[VAL_11]], 0 +// CHECK: %[[VAL_13:.*]] = bitcast %[[VAL_8]]* %[[VAL_9]] to i1* +// CHECK: %[[VAL_14:.*]] = load i1, i1* %[[VAL_13]], align 1 +// CHECK: %[[VAL_15:.*]] = insertvalue { i1, i1 } %[[VAL_12]], i1 %[[VAL_14]], 1 // CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) -// CHECK: ret { i1, i1 } %[[VAL_17]] +// CHECK: ret { i1, i1 } %[[VAL_15]] // CHECK: } // CHECK-LABEL: define i16 @test_1(i8* nocapture readnone @@ -379,8 +377,8 @@ func.func @test_5(%sret: !cc.ptr> {llvm.sret = !cc.struct // CHECK: } // CHECK-LABEL: define { i8*, i64 } @test_0.thunk(i8* nocapture -// CHECK-SAME: %[[VAL_0:.*]], i1 -// CHECK-SAME: %[[VAL_1:.*]]) { +// CHECK-SAME: %[[VAL_0:.*]], i1 +// CHECK-SAME: %[[VAL_1:.*]]) { // CHECK: %[[VAL_2:.*]] = bitcast i8* %[[VAL_0]] to i32* // CHECK: %[[VAL_3:.*]] = load i32, i32* %[[VAL_2]], align 4 // CHECK: %[[VAL_4:.*]] = sext i32 %[[VAL_3]] to i64 @@ -393,56 +391,54 @@ func.func @test_5(%sret: !cc.ptr> {llvm.sret = !cc.struct // CHECK: br label %[[VAL_13:.*]] // CHECK: .lr.ph: ; preds = %[[VAL_11]], %[[VAL_9]] // CHECK: %[[VAL_14:.*]] = phi i64 [ %[[VAL_15:.*]], %[[VAL_9]] ], [ 0, %[[VAL_11]] ] -// CHECK: %[[VAL_16:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_6]]* %[[VAL_5]], i64 %[[VAL_14]]) -// CHECK: %[[VAL_17:.*]] = bitcast i8* %[[VAL_16]] to %[[VAL_18:.*]]** -// CHECK: %[[VAL_19:.*]] = load %[[VAL_18]]*, %[[VAL_18]]** %[[VAL_17]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_18]]* %[[VAL_19]]) +// CHECK: %[[VAL_16:.*]] = tail call %[[VAL_17:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_6]]* %[[VAL_5]], i64 %[[VAL_14]]) +// CHECK: %[[VAL_18:.*]] = 
load %[[VAL_17]]*, %[[VAL_17]]** %[[VAL_16]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_17]]* %[[VAL_18]]) // CHECK: %[[VAL_15]] = add nuw nsw i64 %[[VAL_14]], 1 -// CHECK: %[[VAL_20:.*]] = icmp eq i64 %[[VAL_15]], %[[VAL_7]] -// CHECK: br i1 %[[VAL_20]], label %[[VAL_21:.*]], label %[[VAL_9]] +// CHECK: %[[VAL_19:.*]] = icmp eq i64 %[[VAL_15]], %[[VAL_7]] +// CHECK: br i1 %[[VAL_19]], label %[[VAL_20:.*]], label %[[VAL_9]] // CHECK: ._crit_edge: ; preds = %[[VAL_9]] -// CHECK: %[[VAL_22:.*]] = alloca i8, i64 %[[VAL_7]], align 1 -// CHECK: br i1 %[[VAL_8]], label %[[VAL_23:.*]], label %[[VAL_13]] -// CHECK: .lr.ph6: ; preds = %[[VAL_21]], %[[VAL_23]] -// CHECK: %[[VAL_24:.*]] = phi i64 [ %[[VAL_25:.*]], %[[VAL_23]] ], [ 0, %[[VAL_21]] ] -// CHECK: %[[VAL_26:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_6]]* %[[VAL_5]], i64 %[[VAL_24]]) -// CHECK: %[[VAL_27:.*]] = bitcast i8* %[[VAL_26]] to %[[VAL_18]]** -// CHECK: %[[VAL_28:.*]] = load %[[VAL_18]]*, %[[VAL_18]]** %[[VAL_27]], align 8 -// CHECK: %[[VAL_29:.*]] = tail call %[[VAL_30:.*]]* @__quantum__qis__mz(%[[VAL_18]]* %[[VAL_28]]) -// CHECK: %[[VAL_31:.*]] = bitcast %[[VAL_30]]* %[[VAL_29]] to i1* -// CHECK: %[[VAL_32:.*]] = load i1, i1* %[[VAL_31]], align 1 -// CHECK: %[[VAL_33:.*]] = getelementptr i8, i8* %[[VAL_22]], i64 %[[VAL_24]] -// CHECK: %[[VAL_34:.*]] = zext i1 %[[VAL_32]] to i8 -// CHECK: store i8 %[[VAL_34]], i8* %[[VAL_33]], align 1 -// CHECK: %[[VAL_25]] = add nuw nsw i64 %[[VAL_24]], 1 -// CHECK: %[[VAL_35:.*]] = icmp eq i64 %[[VAL_25]], %[[VAL_7]] -// CHECK: br i1 %[[VAL_35]], label %[[VAL_13]], label %[[VAL_23]] -// CHECK: ._crit_edge7: ; preds = %[[VAL_23]], %[[VAL_10]], %[[VAL_21]] -// CHECK: %[[VAL_36:.*]] = phi i8* [ %[[VAL_12]], %[[VAL_10]] ], [ %[[VAL_22]], %[[VAL_21]] ], [ %[[VAL_22]], %[[VAL_23]] ] -// CHECK: %[[VAL_37:.*]] = call i8* @__nvqpp_vectorCopyCtor(i8* nonnull %[[VAL_36]], i64 %[[VAL_7]], i64 1) +// CHECK: %[[VAL_21:.*]] = alloca i8, i64 %[[VAL_7]], align 1 +// CHECK: br i1 %[[VAL_8]], label %[[VAL_22:.*]], label %[[VAL_13]] +// CHECK: .lr.ph6: ; preds = %[[VAL_20]], %[[VAL_22]] +// CHECK: %[[VAL_23:.*]] = phi i64 [ %[[VAL_24:.*]], %[[VAL_22]] ], [ 0, %[[VAL_20]] ] +// CHECK: %[[VAL_25:.*]] = tail call %[[VAL_17]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_6]]* %[[VAL_5]], i64 %[[VAL_23]]) +// CHECK: %[[VAL_26:.*]] = load %[[VAL_17]]*, %[[VAL_17]]** %[[VAL_25]], align 8 +// CHECK: %[[VAL_27:.*]] = tail call %[[VAL_28:.*]]* @__quantum__qis__mz(%[[VAL_17]]* %[[VAL_26]]) +// CHECK: %[[VAL_29:.*]] = bitcast %[[VAL_28]]* %[[VAL_27]] to i1* +// CHECK: %[[VAL_30:.*]] = load i1, i1* %[[VAL_29]], align 1 +// CHECK: %[[VAL_31:.*]] = getelementptr i8, i8* %[[VAL_21]], i64 %[[VAL_23]] +// CHECK: %[[VAL_32:.*]] = zext i1 %[[VAL_30]] to i8 +// CHECK: store i8 %[[VAL_32]], i8* %[[VAL_31]], align 1 +// CHECK: %[[VAL_24]] = add nuw nsw i64 %[[VAL_23]], 1 +// CHECK: %[[VAL_33:.*]] = icmp eq i64 %[[VAL_24]], %[[VAL_7]] +// CHECK: br i1 %[[VAL_33]], label %[[VAL_13]], label %[[VAL_22]] +// CHECK: ._crit_edge7: ; preds = %[[VAL_22]], %[[VAL_10]], %[[VAL_20]] +// CHECK: %[[VAL_34:.*]] = phi i8* [ %[[VAL_12]], %[[VAL_10]] ], [ %[[VAL_21]], %[[VAL_20]] ], [ %[[VAL_21]], %[[VAL_22]] ] +// CHECK: %[[VAL_35:.*]] = call i8* @__nvqpp_vectorCopyCtor(i8* nonnull %[[VAL_34]], i64 %[[VAL_7]], i64 1) // CHECK: call void @__quantum__rt__qubit_release_array(%[[VAL_6]]* %[[VAL_5]]) -// CHECK: %[[VAL_38:.*]] = getelementptr i8, i8* %[[VAL_0]], i64 8 -// CHECK: %[[VAL_39:.*]] = bitcast i8* 
%[[VAL_38]] to i8** -// CHECK: store i8* %[[VAL_37]], i8** %[[VAL_39]], align 8 -// CHECK: %[[VAL_40:.*]] = getelementptr i8, i8* %[[VAL_0]], i64 16 -// CHECK: %[[VAL_41:.*]] = bitcast i8* %[[VAL_40]] to i64* -// CHECK: store i64 %[[VAL_7]], i64* %[[VAL_41]], align 8 -// CHECK: br i1 %[[VAL_1]], label %[[VAL_42:.*]], label %[[VAL_43:.*]] -// CHECK: common.ret: ; preds = %[[VAL_13]], %[[VAL_42]] -// CHECK: %[[VAL_44:.*]] = phi { i8*, i64 } [ %[[VAL_45:.*]], %[[VAL_42]] ], [ zeroinitializer, %[[VAL_13]] ] -// CHECK: ret { i8*, i64 } %[[VAL_44]] -// CHECK: 31: ; preds = %[[VAL_13]] -// CHECK: %[[VAL_46:.*]] = add i64 %[[VAL_7]], 24 -// CHECK: %[[VAL_47:.*]] = call i8* @malloc(i64 %[[VAL_46]]) -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(24) %[[VAL_47]], i8* noundef nonnull align 1 dereferenceable(24) %[[VAL_0]], i64 24, i1 false) -// CHECK: %[[VAL_48:.*]] = getelementptr i8, i8* %[[VAL_47]], i64 24 -// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %[[VAL_48]], i8* align 1 %[[VAL_37]], i64 %[[VAL_7]], i1 false) -// CHECK: %[[VAL_49:.*]] = insertvalue { i8*, i64 } undef, i8* %[[VAL_47]], 0 -// CHECK: %[[VAL_45]] = insertvalue { i8*, i64 } %[[VAL_49]], i64 %[[VAL_46]], 1 -// CHECK: %[[VAL_50:.*]] = getelementptr i8, i8* %[[VAL_47]], i64 8 -// CHECK: %[[VAL_51:.*]] = bitcast i8* %[[VAL_50]] to i8** -// CHECK: store i8* %[[VAL_48]], i8** %[[VAL_51]], align 8 -// CHECK: br label %[[VAL_43]] +// CHECK: %[[VAL_36:.*]] = getelementptr i8, i8* %[[VAL_0]], i64 8 +// CHECK: %[[VAL_37:.*]] = bitcast i8* %[[VAL_36]] to i8** +// CHECK: store i8* %[[VAL_35]], i8** %[[VAL_37]], align 8 +// CHECK: %[[VAL_38:.*]] = getelementptr i8, i8* %[[VAL_0]], i64 16 +// CHECK: %[[VAL_39:.*]] = bitcast i8* %[[VAL_38]] to i64* +// CHECK: store i64 %[[VAL_7]], i64* %[[VAL_39]], align 8 +// CHECK: br i1 %[[VAL_1]], label %[[VAL_40:.*]], label %[[VAL_41:.*]] +// CHECK: common.ret: ; preds = %[[VAL_13]], %[[VAL_40]] +// CHECK: %[[VAL_42:.*]] = phi { i8*, i64 } [ %[[VAL_43:.*]], %[[VAL_40]] ], [ zeroinitializer, %[[VAL_13]] ] +// CHECK: ret { i8*, i64 } %[[VAL_42]] +// CHECK: 29: ; preds = %[[VAL_13]] +// CHECK: %[[VAL_44:.*]] = add i64 %[[VAL_7]], 24 +// CHECK: %[[VAL_45:.*]] = call i8* @malloc(i64 %[[VAL_44]]) +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 1 dereferenceable(24) %[[VAL_45]], i8* noundef nonnull align 1 dereferenceable(24) %[[VAL_0]], i64 24, i1 false) +// CHECK: %[[VAL_46:.*]] = getelementptr i8, i8* %[[VAL_45]], i64 24 +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %[[VAL_46]], i8* align 1 %[[VAL_35]], i64 %[[VAL_7]], i1 false) +// CHECK: %[[VAL_47:.*]] = insertvalue { i8*, i64 } undef, i8* %[[VAL_45]], 0 +// CHECK: %[[VAL_43]] = insertvalue { i8*, i64 } %[[VAL_47]], i64 %[[VAL_44]], 1 +// CHECK: %[[VAL_48:.*]] = getelementptr i8, i8* %[[VAL_45]], i64 8 +// CHECK: %[[VAL_49:.*]] = bitcast i8* %[[VAL_48]] to i8** +// CHECK: store i8* %[[VAL_46]], i8** %[[VAL_49]], align 8 +// CHECK: br label %[[VAL_41]] // CHECK: } // CHECK-LABEL: define i64 @test_0.argsCreator(i8** nocapture readonly @@ -472,25 +468,23 @@ func.func @test_5(%sret: !cc.ptr> {llvm.sret = !cc.struct // CHECK-SAME: %[[VAL_0:.*]], i1 // CHECK-SAME: %[[VAL_1:.*]]) { // CHECK: %[[VAL_2:.*]] = tail call %[[VAL_3:.*]]* @__quantum__rt__qubit_allocate_array(i64 2) -// CHECK: %[[VAL_4:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 0) -// CHECK: %[[VAL_5:.*]] = bitcast i8* %[[VAL_4]] to %[[VAL_6:.*]]** -// CHECK: 
%[[VAL_7:.*]] = load %[[VAL_6]]*, %[[VAL_6]]** %[[VAL_5]], align 8 -// CHECK: %[[VAL_8:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 1) -// CHECK: %[[VAL_9:.*]] = bitcast i8* %[[VAL_8]] to %[[VAL_6]]** -// CHECK: %[[VAL_10:.*]] = load %[[VAL_6]]*, %[[VAL_6]]** %[[VAL_9]], align 8 -// CHECK: tail call void @__quantum__qis__h(%[[VAL_6]]* %[[VAL_7]]) -// CHECK: tail call void (i64, void (%[[VAL_3]]*, %[[VAL_6]]*)*, ...) @invokeWithControlQubits(i64 1, void (%[[VAL_3]]*, %[[VAL_6]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_6]]* %[[VAL_7]], %[[VAL_6]]* %[[VAL_10]]) -// CHECK: %[[VAL_11:.*]] = tail call %[[VAL_12:.*]]* @__quantum__qis__mz(%[[VAL_6]]* %[[VAL_7]]) -// CHECK: %[[VAL_13:.*]] = bitcast %[[VAL_12]]* %[[VAL_11]] to i1* -// CHECK: %[[VAL_14:.*]] = load i1, i1* %[[VAL_13]], align 1 -// CHECK: %[[VAL_15:.*]] = tail call %[[VAL_12]]* @__quantum__qis__mz(%[[VAL_6]]* %[[VAL_10]]) -// CHECK: %[[VAL_16:.*]] = bitcast %[[VAL_12]]* %[[VAL_15]] to i1* -// CHECK: %[[VAL_17:.*]] = load i1, i1* %[[VAL_16]], align 1 -// CHECK: %[[VAL_18:.*]] = bitcast i8* %[[VAL_0]] to i1* -// CHECK: store i1 %[[VAL_14]], i1* %[[VAL_18]], align 1 -// CHECK: %[[VAL_19:.*]] = getelementptr inbounds i8, i8* %[[VAL_0]], i64 1 -// CHECK: %[[VAL_20:.*]] = bitcast i8* %[[VAL_19]] to i1* -// CHECK: store i1 %[[VAL_17]], i1* %[[VAL_20]], align 1 +// CHECK: %[[VAL_4:.*]] = tail call %[[VAL_5:.*]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 0) +// CHECK: %[[VAL_6:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_4]], align 8 +// CHECK: %[[VAL_7:.*]] = tail call %[[VAL_5]]** @__quantum__rt__array_get_element_ptr_1d(%[[VAL_3]]* %[[VAL_2]], i64 1) +// CHECK: %[[VAL_8:.*]] = load %[[VAL_5]]*, %[[VAL_5]]** %[[VAL_7]], align 8 +// CHECK: tail call void @__quantum__qis__h(%[[VAL_5]]* %[[VAL_6]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) 
@generalizedInvokeWithRotationsControlsTargets(i64 0, i64 0, i64 1, i64 1, i8* nonnull bitcast (void (%[[VAL_3]]*, %[[VAL_5]]*)* @__quantum__qis__x__ctl to i8*), %[[VAL_5]]* %[[VAL_6]], %[[VAL_5]]* %[[VAL_8]]) +// CHECK: %[[VAL_9:.*]] = tail call %[[VAL_10:.*]]* @__quantum__qis__mz(%[[VAL_5]]* %[[VAL_6]]) +// CHECK: %[[VAL_11:.*]] = tail call %[[VAL_10]]* @__quantum__qis__mz(%[[VAL_5]]* %[[VAL_8]]) +// CHECK: %[[VAL_12:.*]] = bitcast %[[VAL_10]]* %[[VAL_9]] to i1* +// CHECK: %[[VAL_13:.*]] = load i1, i1* %[[VAL_12]], align 1 +// CHECK: %[[VAL_14:.*]] = bitcast %[[VAL_10]]* %[[VAL_11]] to i1* +// CHECK: %[[VAL_15:.*]] = load i1, i1* %[[VAL_14]], align 1 +// CHECK: %[[VAL_16:.*]] = bitcast i8* %[[VAL_0]] to i1* +// CHECK: store i1 %[[VAL_13]], i1* %[[VAL_16]], align 1 +// CHECK: %[[VAL_17:.*]] = getelementptr inbounds i8, i8* %[[VAL_0]], i64 1 +// CHECK: %[[VAL_18:.*]] = bitcast i8* %[[VAL_17]] to i1* +// CHECK: store i1 %[[VAL_15]], i1* %[[VAL_18]], align 1 // CHECK: tail call void @__quantum__rt__qubit_release_array(%[[VAL_3]]* %[[VAL_2]]) // CHECK: ret { i8*, i64 } zeroinitializer // CHECK: } diff --git a/test/Translate/veq_or_qubit_control_args.qke b/test/Translate/veq_or_qubit_control_args.qke index 07f3ad8c9be..7769e127d42 100644 --- a/test/Translate/veq_or_qubit_control_args.qke +++ b/test/Translate/veq_or_qubit_control_args.qke @@ -27,28 +27,23 @@ module attributes {quake.mangled_name_map = {__nvqpp__mlirgen__function_fancyCno } } -// CHECK: %[[VAL_15:.*]] = alloca [2 x i64], align 8 -// CHECK: %[[VAL_16:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[VAL_15]], i64 0, i64 0 -// CHECK: %[[VAL_0:.*]] = tail call %[[VAL_1:.*]]* @__quantum__rt__qubit_allocate_array(i64 3) -// CHECK: %[[VAL_2:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 0) -// CHECK: %[[VAL_3:.*]] = bitcast i8* %[[VAL_2]] to %[[VAL_4:.*]]** -// CHECK: %[[VAL_5:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_3]], align 8 -// CHECK: %[[VAL_6:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 2) -// CHECK: %[[VAL_7:.*]] = bitcast i8* %[[VAL_6]] to %[[VAL_4]]** -// CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_7]], align 8 -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_5]]) -// CHECK: tail call void @__quantum__qis__x(%[[VAL_4]]* %[[VAL_8]]) -// CHECK: %[[VAL_9:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_0]], i64 1) -// CHECK: %[[VAL_10:.*]] = bitcast i8* %[[VAL_9]] to %[[VAL_4]]** -// CHECK: %[[VAL_11:.*]] = load %[[VAL_4]]*, %[[VAL_4]]** %[[VAL_10]], align 8 -// CHECK: %[[VAL_12:.*]] = tail call %[[VAL_1]]* @__quantum__rt__array_create_1d(i32 8, i64 1) -// CHECK: %[[VAL_13:.*]] = tail call i8* @__quantum__rt__array_get_element_ptr_1d(%[[VAL_1]]* %[[VAL_12]], i64 0) -// CHECK: %[[VAL_14:.*]] = bitcast i8* %[[VAL_13]] to %[[VAL_4]]** -// CHECK: store %[[VAL_4]]* %[[VAL_5]], %[[VAL_4]]** %[[VAL_14]], align 8 -// CHECK: %[[VAL_17:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%[[VAL_1]]* %[[VAL_12]]) -// CHECK: store i64 %[[VAL_17]], i64* %[[VAL_16]], align 8 -// CHECK: %[[VAL_18:.*]] = getelementptr inbounds [2 x i64], [2 x i64]* %[[VAL_15]], i64 0, i64 1 -// CHECK: store i64 0, i64* %[[VAL_18]], align 8 -// CHECK: call void (i64, i64*, i64, void (%[[VAL_1]]*, %[[VAL_4]]*)*, ...) 
@invokeWithControlRegisterOrQubits(i64 2, i64* nonnull %[[VAL_16]], i64 1, void (%[[VAL_1]]*, %[[VAL_4]]*)* nonnull @__quantum__qis__x__ctl, %[[VAL_1]]* %[[VAL_12]], %[[VAL_4]]* %[[VAL_11]], %[[VAL_4]]* %[[VAL_8]]) -// CHECK: call void @__quantum__rt__qubit_release_array(%[[VAL_1]]* %[[VAL_0]]) +// CHECK-LABEL: define void @__nvqpp__mlirgen__function_toffoli +// CHECK: %[[VAL_0:.*]] = tail call %Array* @__quantum__rt__qubit_allocate_array(i64 3) +// CHECK: %[[VAL_2:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 0) +// CHECK: %[[VAL_4:.*]] = load %Qubit*, %Qubit** %[[VAL_2]], align 8 +// CHECK: %[[VAL_5:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 2) +// CHECK: %[[VAL_6:.*]] = load %Qubit*, %Qubit** %[[VAL_5]], align 8 +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_4]]) +// CHECK: tail call void @__quantum__qis__x(%Qubit* %[[VAL_6]]) +// CHECK: %[[VAL_7:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_0]], i64 1) +// CHECK: %[[VAL_8:.*]] = bitcast %Qubit** %[[VAL_7]] to i8** +// CHECK: %[[VAL_9:.*]] = load i8*, i8** %[[VAL_8]], align 8 +// CHECK: %[[VAL_10:.*]] = tail call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) +// CHECK: %[[VAL_11:.*]] = tail call %Qubit** @__quantum__rt__array_get_element_ptr_1d(%Array* %[[VAL_10]], i64 0) +// CHECK: store %Qubit* %[[VAL_4]], %Qubit** %[[VAL_11]], align 8 +// CHECK: %[[VAL_12:.*]] = tail call i64 @__quantum__rt__array_get_size_1d(%Array* %[[VAL_10]]) +// CHECK: tail call void (i64, i64, i64, i64, i8*, ...) @generalizedInvokeWithRotationsControlsTargets(i64 0, i64 1, i64 1, i64 1, i8* nonnull bitcast (void (%Array*, %Qubit*)* @__quantum__qis__x__ctl to i8*), i64 %[[VAL_12]], %Array* %[[VAL_10]], i8* %[[VAL_9]], %Qubit* %[[VAL_6]]) +// CHECK: tail call void @__quantum__rt__qubit_release_array(%Array* %[[VAL_0]]) // CHECK: ret void +// CHECK: } + diff --git a/tools/cudaq-qpud/RestServerMain.cpp b/tools/cudaq-qpud/RestServerMain.cpp index 8b2f0f0bfe1..9065e6bc871 100644 --- a/tools/cudaq-qpud/RestServerMain.cpp +++ b/tools/cudaq-qpud/RestServerMain.cpp @@ -5,6 +5,7 @@ * This source code and the accompanying materials are made available under * * the terms of the Apache License 2.0 which accompanies this distribution. 
* ******************************************************************************/ + #include "common/GPUInfo.h" #include "common/Registry.h" #include "common/RemoteKernelExecutor.h" diff --git a/unittests/integration/builder_tester.cpp b/unittests/integration/builder_tester.cpp index 008371164bd..f70be9f4d87 100644 --- a/unittests/integration/builder_tester.cpp +++ b/unittests/integration/builder_tester.cpp @@ -1060,7 +1060,7 @@ CUDAQ_TEST(BuilderTester, checkEntryPointAttribute) { std::cout << quake; std::regex functionDecleration( - R"(func\.func @__nvqpp__mlirgen\w+\(\) attributes \{"cudaq-entrypoint"\})"); + R"(func\.func @__nvqpp__mlirgen\w+\(\) attributes \{"cudaq-entrypoint")"); EXPECT_TRUE(std::regex_search(quake, functionDecleration)); } diff --git a/utils/mock_qpu/anyon/__init__.py b/utils/mock_qpu/anyon/__init__.py index 5699387c142..2da298366eb 100644 --- a/utils/mock_qpu/anyon/__init__.py +++ b/utils/mock_qpu/anyon/__init__.py @@ -51,7 +51,7 @@ def getNumRequiredQubits(function): if "requiredQubits" in str(a): return int( str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace( - "\"", "")) + "\"", "").replace("'", "")) # Here we test that the login endpoint works diff --git a/utils/mock_qpu/braket/__init__.py b/utils/mock_qpu/braket/__init__.py index f9b084764d8..9b9081187f4 100644 --- a/utils/mock_qpu/braket/__init__.py +++ b/utils/mock_qpu/braket/__init__.py @@ -51,7 +51,7 @@ def getNumRequiredQubits(function): if "requiredQubits" in str(a): return int( str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace( - "\"", "")) + "\"", "").replace("'", "")) # Here we test that the login endpoint works diff --git a/utils/mock_qpu/ionq/__init__.py b/utils/mock_qpu/ionq/__init__.py index 2eb9b0f52d4..cb86a6acaae 100644 --- a/utils/mock_qpu/ionq/__init__.py +++ b/utils/mock_qpu/ionq/__init__.py @@ -60,7 +60,7 @@ def getNumRequiredQubits(function): if "requiredQubits" in str(a): return int( str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace( - "\"", "")) + "\"", "").replace("'", "")) # Here we test that the login endpoint works diff --git a/utils/mock_qpu/oqc/__init__.py b/utils/mock_qpu/oqc/__init__.py index deb9896a930..fba9adf0884 100644 --- a/utils/mock_qpu/oqc/__init__.py +++ b/utils/mock_qpu/oqc/__init__.py @@ -72,7 +72,7 @@ def getNumRequiredQubits(function): if "requiredQubits" in str(a): return int( str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace( - "\"", "")) + "\"", "").replace("'", "")) # Here we expose a way to post jobs, diff --git a/utils/mock_qpu/quantinuum/__init__.py b/utils/mock_qpu/quantinuum/__init__.py index 805f445f6a5..60e92d815eb 100644 --- a/utils/mock_qpu/quantinuum/__init__.py +++ b/utils/mock_qpu/quantinuum/__init__.py @@ -51,7 +51,7 @@ def getNumRequiredQubits(function): if "requiredQubits" in str(a): return int( str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace( - "\"", "")) + "\"", "").replace("'", "")) # Here we test that the login endpoint works
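---

The updated FileCheck expectations throughout these test files all funnel through a single variadic runtime entry point, `generalizedInvokeWithRotationsControlsTargets`, in place of the old `invokeWithControlQubits` and `invokeWithControlRegisterOrQubits` helpers. The C++ sketch below is a reading of its apparent calling convention inferred solely from the CHECK lines above; the operand-count interpretation, parameter names, and the `example` caller are assumptions for illustration, not the authoritative NVQIR declaration.

// Sketch inferred from the CHECK lines above; the argument layout is an
// assumption, not the runtime's documented contract.
#include <cstdint>

struct Array; // opaque QIR runtime types
struct Qubit;

extern "C" {
// The four leading counts appear to describe the trailing variadic operands,
// in order: rotation angles (double), control arrays (each passed as an i64
// length followed by an Array*), individual control qubits (Qubit*), and
// target qubits (Qubit*). The i8* selects the quantum instruction to invoke.
void generalizedInvokeWithRotationsControlsTargets(
    std::int64_t numRotationOperands, std::int64_t numControlArrayOperands,
    std::int64_t numControlQubitOperands, std::int64_t numTargetOperands,
    void *quantumInstruction, ...);

void __quantum__qis__x__ctl(Array *, Qubit *);
}

// Hypothetical caller mirroring the two call shapes seen in the tests.
void example(Qubit *ctl, Qubit *tgt, Array *ctlReg, std::int64_t regSize) {
  // CX with one loose control qubit and one target, as in return_values.qke:
  generalizedInvokeWithRotationsControlsTargets(
      0, 0, 1, 1, reinterpret_cast<void *>(&__quantum__qis__x__ctl), ctl, tgt);
  // X controlled on a register plus a loose qubit, as in the
  // veq_or_qubit_control_args.qke toffoli test: the (length, Array*) pair
  // comes first, then the loose control, then the target.
  generalizedInvokeWithRotationsControlsTargets(
      0, 1, 1, 1, reinterpret_cast<void *>(&__quantum__qis__x__ctl), regSize,
      ctlReg, ctl, tgt);
}

Under this reading, collapsing the old family of invoke helpers into one counted, variadic entry point is what lets a single lowering pattern handle any mix of rotations, register controls, loose controls, and targets, which is consistent with the test churn above being purely mechanical.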