diff --git a/.prettierignore b/.prettierignore index 373f496d81..05b0846ade 100644 --- a/.prettierignore +++ b/.prettierignore @@ -5,3 +5,4 @@ coverage* gasReporterOutput.json dist typechain* +addresses.json diff --git a/src/common/ContractAddresses.ts b/src/common/ContractAddresses.ts index bed274ad19..41dd1b10d4 100644 --- a/src/common/ContractAddresses.ts +++ b/src/common/ContractAddresses.ts @@ -34,6 +34,8 @@ import BLAST_OPTIMISM_PORTAL_ABI from "./abi/BlastOptimismPortal.json"; import SCROLL_GATEWAY_ROUTER_L1_ABI from "./abi/ScrollGatewayRouterL1.json"; import SCROLL_GATEWAY_ROUTER_L2_ABI from "./abi/ScrollGatewayRouterL2.json"; import SCROLL_GAS_PRICE_ORACLE_ABI from "./abi/ScrollGasPriceOracle.json"; +import HUB_POOL_STORE_ABI from "./abi/HubPoolStore.json"; +import SP1_HELIOS_ABI from "./abi/SP1Helios.json"; // Constants file exporting hardcoded contract addresses per chain. export const CONTRACT_ADDRESSES: { @@ -199,6 +201,10 @@ export const CONTRACT_ADDRESSES: { address: "0xc186fA914353c44b2E33eBE05f21846F1048bEda", abi: HUB_POOL_ABI, }, + hubPoolStore: { + address: "0x1Ace3BbD69b63063F859514Eca29C9BDd8310E61", + abi: HUB_POOL_STORE_ABI, + }, blastBridge: { address: "0x3a05E5d33d7Ab3864D53aaEc93c8301C1Fa49115", abi: BLAST_BRIDGE_ABI, @@ -249,6 +255,12 @@ export const CONTRACT_ADDRESSES: { abi: CCTP_TOKEN_MESSENGER_ABI, }, }, + [CHAIN_IDs.BSC]: { + sp1Helios: { + address: "0x3BED21dAe767e4Df894B31b14aD32369cE4bad8b", + abi: SP1_HELIOS_ABI, + }, + }, [CHAIN_IDs.POLYGON]: { withdrawableErc20: { abi: POLYGON_WITHDRAWABLE_ERC20_ABI, diff --git a/src/common/abi/HubPoolStore.json b/src/common/abi/HubPoolStore.json new file mode 100644 index 0000000000..c7282ecaad --- /dev/null +++ b/src/common/abi/HubPoolStore.json @@ -0,0 +1,12 @@ +[ + { + "anonymous": false, + "inputs": [ + { "indexed": true, "internalType": "address", "name": "target", "type": "address" }, + { "indexed": false, "internalType": "bytes", "name": "data", "type": "bytes" }, + { "indexed": true, "internalType": "uint256", "name": "nonce", "type": "uint256" } + ], + "name": "StoredCallData", + "type": "event" + } +] diff --git a/src/common/abi/SP1Helios.json b/src/common/abi/SP1Helios.json new file mode 100644 index 0000000000..098b4f7e55 --- /dev/null +++ b/src/common/abi/SP1Helios.json @@ -0,0 +1,37 @@ +[ + { + "anonymous": false, + "inputs": [ + { "indexed": true, "internalType": "uint256", "name": "head", "type": "uint256" }, + { "indexed": true, "internalType": "bytes32", "name": "key", "type": "bytes32" }, + { "indexed": false, "internalType": "bytes32", "name": "value", "type": "bytes32" }, + { "indexed": false, "internalType": "address", "name": "contractAddress", "type": "address" } + ], + "name": "StorageSlotVerified", + "type": "event" + }, + { + "inputs": [], + "name": "head", + "outputs": [{ "internalType": "uint256", "name": "", "type": "uint256" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [{ "internalType": "uint256", "name": "beaconSlot", "type": "uint256" }], + "name": "headers", + "outputs": [{ "internalType": "bytes32", "name": "beaconHeaderRoot", "type": "bytes32" }], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { "internalType": "bytes", "name": "proof", "type": "bytes" }, + { "internalType": "bytes", "name": "publicValues", "type": "bytes" } + ], + "name": "update", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/src/common/abi/Universal_SpokePool.json b/src/common/abi/Universal_SpokePool.json 
new file mode 100644 index 0000000000..aff7beee3d --- /dev/null +++ b/src/common/abi/Universal_SpokePool.json @@ -0,0 +1,22 @@ +[ + { + "anonymous": false, + "inputs": [ + { "indexed": true, "internalType": "uint256", "name": "nonce", "type": "uint256" }, + { "indexed": false, "internalType": "address", "name": "caller", "type": "address" } + ], + "name": "RelayedCallData", + "type": "event" + }, + { + "inputs": [ + { "internalType": "uint256", "name": "_messageNonce", "type": "uint256" }, + { "internalType": "bytes", "name": "_message", "type": "bytes" }, + { "internalType": "uint256", "name": "_blockNumber", "type": "uint256" } + ], + "name": "executeMessage", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/src/finalizer/index.ts b/src/finalizer/index.ts index 48a9b03c5f..296afdc349 100644 --- a/src/finalizer/index.ts +++ b/src/finalizer/index.ts @@ -32,13 +32,14 @@ import { Profiler, stringifyThrownValue, } from "../utils"; -import { ChainFinalizer, CrossChainMessage } from "./types"; +import { ChainFinalizer, CrossChainMessage, isAugmentedTransaction } from "./types"; import { arbStackFinalizer, binanceL1ToL2Finalizer, binanceL2ToL1Finalizer, cctpL1toL2Finalizer, cctpL2toL1Finalizer, + heliosL1toL2Finalizer, lineaL1ToL2Finalizer, lineaL2ToL1Finalizer, opStackFinalizer, @@ -124,7 +125,7 @@ const chainFinalizers: { [chainId: number]: { finalizeOnL2: ChainFinalizer[]; fi }, [CHAIN_IDs.BSC]: { finalizeOnL1: [binanceL2ToL1Finalizer], - finalizeOnL2: [binanceL1ToL2Finalizer], + finalizeOnL2: [binanceL1ToL2Finalizer, heliosL1toL2Finalizer], }, [CHAIN_IDs.SONEIUM]: { finalizeOnL1: [opStackFinalizer], @@ -190,7 +191,8 @@ export async function finalize( // Note: Could move this into a client in the future to manage # of calls and chunk calls based on // input byte length. - const finalizationsToBatch: { txn: Multicall2Call; crossChainMessage?: CrossChainMessage }[] = []; + const finalizerResponseTxns: { txn: Multicall2Call | AugmentedTransaction; crossChainMessage?: CrossChainMessage }[] = + []; // For each chain, delegate to a handler to look up any TokensBridged events and attempt finalization. await sdkUtils.mapAsync(configuredChainIds, async (chainId) => { @@ -264,7 +266,7 @@ export async function finalize( ); callData.forEach((txn, idx) => { - finalizationsToBatch.push({ txn, crossChainMessage: crossChainMessages[idx] }); + finalizerResponseTxns.push({ txn, crossChainMessage: crossChainMessages[idx] }); }); totalWithdrawalsForChain += crossChainMessages.filter(({ type }) => type === "withdrawal").length; @@ -313,25 +315,39 @@ export async function finalize( // counter of the approximate gas estimation and cut off the list of finalizations if it gets too high. // Ensure each transaction would succeed in isolation. - const finalizations = await sdkUtils.filterAsync(finalizationsToBatch, async ({ txn: _txn, crossChainMessage }) => { - const txnToSubmit: AugmentedTransaction = { - contract: multicall2Lookup[crossChainMessage.destinationChainId], - chainId: crossChainMessage.destinationChainId, - method: "aggregate", - // aggregate() takes an array of tuples: [calldata: bytes, target: address]. - args: [[_txn]], - }; - const [{ reason, succeed, transaction }] = await txnClient.simulate([txnToSubmit]); - - if (succeed) { - // Increase running counter of estimated gas cost for batch finalization. - // gasLimit should be defined if succeed is True. 
- const updatedGasEstimation = gasEstimation.add(transaction.gasLimit); - if (updatedGasEstimation.lt(batchGasLimit)) { - gasEstimation = updatedGasEstimation; + const finalizations = await sdkUtils.filterAsync(finalizerResponseTxns, async ({ txn: _txn, crossChainMessage }) => { + let simErrorReason: string; + if (!isAugmentedTransaction(_txn)) { + // Multicall transaction simulation flow + const txnToSubmit: AugmentedTransaction = { + contract: multicall2Lookup[crossChainMessage.destinationChainId], + chainId: crossChainMessage.destinationChainId, + method: "aggregate", + // aggregate() takes an array of tuples: [calldata: bytes, target: address]. + args: [[_txn]], + }; + const [{ reason, succeed, transaction }] = await txnClient.simulate([txnToSubmit]); + + if (succeed) { + // Increase running counter of estimated gas cost for batch finalization. + // gasLimit should be defined if succeed is True. + const updatedGasEstimation = gasEstimation.add(transaction.gasLimit); + if (updatedGasEstimation.lt(batchGasLimit)) { + gasEstimation = updatedGasEstimation; + return true; + } else { + return false; + } + } else { + simErrorReason = reason; + } + } else { + // Individual transaction simulation flow + const [{ reason, succeed }] = await txnClient.simulate([_txn]); + if (succeed) { return true; } else { - return false; + simErrorReason = reason; } } @@ -346,7 +362,7 @@ export async function finalize( // @dev Likely to be the 2nd part of a 2-stage withdrawal (i.e. retrieve() on the Polygon bridge adapter). message = "Unknown finalizer simulation failure."; } - logger.warn({ at: "finalizer", message, reason, txn: _txn }); + logger.warn({ at: "finalizer", message, simErrorReason, txn: _txn }); return false; }); @@ -362,20 +378,36 @@ export async function finalize( finalizations, ({ crossChainMessage }) => crossChainMessage.destinationChainId ); + + // @dev Here, we enqueueTransaction individual transactions right away, and we batch all multicalls into `multicallTxns` to enqueue as a single tx right after for (const [chainId, finalizations] of Object.entries(finalizationsByChain)) { - const finalizerTxns = finalizations.map(({ txn }) => txn); - const txnToSubmit: AugmentedTransaction = { - contract: multicall2Lookup[Number(chainId)], - chainId: Number(chainId), - method: "aggregate", - args: [finalizerTxns], - gasLimit: gasEstimation, - gasLimitMultiplier: 2, - unpermissioned: true, - message: `Batch finalized ${finalizerTxns.length} txns`, - mrkdwn: `Batch finalized ${finalizerTxns.length} txns`, - }; - multicallerClient.enqueueTransaction(txnToSubmit); + const multicallTxns: Multicall2Call[] = []; + + finalizations.forEach(({ txn }) => { + if (isAugmentedTransaction(txn)) { + // It's an AugmentedTransaction, enqueue directly + txn.nonMulticall = true; // cautiously enforce an invariant that should already be present + multicallerClient.enqueueTransaction(txn); + } else { + // It's a Multicall2Call, collect for batching + multicallTxns.push(txn); + } + }); + + if (multicallTxns.length > 0) { + const txnToSubmit: AugmentedTransaction = { + contract: multicall2Lookup[Number(chainId)], + chainId: Number(chainId), + method: "aggregate", + args: [multicallTxns], + gasLimit: gasEstimation, + gasLimitMultiplier: 2, + unpermissioned: true, + message: `Batch finalized ${multicallTxns.length} txns`, + mrkdwn: `Batch finalized ${multicallTxns.length} txns`, + }; + multicallerClient.enqueueTransaction(txnToSubmit); + } } txnRefLookup = await 
multicallerClient.executeTxnQueues(!submitFinalizationTransactions); } catch (_error) { diff --git a/src/finalizer/types.ts b/src/finalizer/types.ts index 781e59b472..110279629c 100644 --- a/src/finalizer/types.ts +++ b/src/finalizer/types.ts @@ -1,5 +1,5 @@ import { Signer } from "ethers"; -import { HubPoolClient, SpokePoolClient } from "../clients"; +import { AugmentedTransaction, HubPoolClient, SpokePoolClient } from "../clients"; import { Multicall2Call, winston } from "../utils"; /** @@ -28,7 +28,10 @@ export type CrossChainMessage = { } ); -export type FinalizerPromise = { callData: Multicall2Call[]; crossChainMessages: CrossChainMessage[] }; +export type FinalizerPromise = { + callData: (Multicall2Call | AugmentedTransaction)[]; + crossChainMessages: CrossChainMessage[]; +}; export interface ChainFinalizer { ( @@ -40,3 +43,13 @@ export interface ChainFinalizer { l1ToL2AddressesToFinalize: string[] ): Promise; } + +/** + * Type guard to check if a transaction object is an AugmentedTransaction. + * @param txn The transaction object to check. + * @returns True if the object is an AugmentedTransaction, false otherwise. + */ +export function isAugmentedTransaction(txn: Multicall2Call | AugmentedTransaction): txn is AugmentedTransaction { + // Check for the presence of the 'contract' property, unique to AugmentedTransaction + return txn != null && typeof txn === "object" && "contract" in txn; +} diff --git a/src/finalizer/utils/helios.ts b/src/finalizer/utils/helios.ts new file mode 100644 index 0000000000..5a4ea8e6db --- /dev/null +++ b/src/finalizer/utils/helios.ts @@ -0,0 +1,582 @@ +import { HubPoolClient, SpokePoolClient, AugmentedTransaction } from "../../clients"; +import { + EventSearchConfig, + Signer, + winston, + paginatedEventQuery, + compareAddressesSimple, + ethers, + BigNumber, +} from "../../utils"; +import { spreadEventWithBlockNumber } from "../../utils/EventUtils"; +import { FinalizerPromise, CrossChainMessage } from "../types"; +import { CONTRACT_ADDRESSES } from "../../common"; +import axios from "axios"; +import UNIVERSAL_SPOKE_ABI from "../../common/abi/Universal_SpokePool.json"; +import { RelayedCallDataEvent, StoredCallDataEvent } from "../../interfaces/Universal"; +import { ApiProofRequest, ProofOutputs, ProofStateResponse, SP1HeliosProofData } from "../../interfaces/ZkApi"; +import { StorageSlotVerifiedEvent } from "../../interfaces/Helios"; +import { calculateProofId, decodeProofOutputs } from "../../utils/ZkApiUtils"; +import { calculateHubPoolStoreStorageSlot, getHubPoolStoreContract } from "../../utils/UniversalUtils"; +import { stringifyThrownValue } from "../../utils/LogUtils"; +import { getSp1HeliosContract } from "../../utils/HeliosUtils"; + +type CrossChainMessageStatus = "NeedsProofAndExecution" | "NeedsExecutionOnly"; + +interface PendingCrosschainMessage { + l1Event: StoredCallDataEvent; // The original HubPoolStore event triggering the flow + status: CrossChainMessageStatus; + verifiedHead?: ethers.BigNumber; // Head from the StorageSlotVerified event, only present if status is NeedsExecutionOnly +} +// --------------------------------------- + +// Type for successful proof data, augmented with HubPoolStore event info. 
+type SuccessfulProof = { + proofData: SP1HeliosProofData; + sourceNonce: ethers.BigNumber; + target: string; + sourceMessageData: string; +}; + +export async function heliosL1toL2Finalizer( + logger: winston.Logger, + _signer: Signer, + hubPoolClient: HubPoolClient, + l2SpokePoolClient: SpokePoolClient, + l1SpokePoolClient: SpokePoolClient, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + _senderAddresses: string[] +): Promise<FinalizerPromise> { + const l1ChainId = hubPoolClient.chainId; + const l2ChainId = l2SpokePoolClient.chainId; + + // --- Step 1: Identify Pending Messages --- + const pendingMessages = await identifyPendingHeliosMessages( + logger, + hubPoolClient, + l1SpokePoolClient, + l2SpokePoolClient, + l1ChainId, + l2ChainId + ); + + if (pendingMessages.length === 0) { + logger.debug({ + at: `Finalizer#heliosL1toL2Finalizer:${l2ChainId}`, + message: "No pending Helios messages found requiring action.", + }); + return { callData: [], crossChainMessages: [] }; + } + + // Separate messages based on required action + const needsProofAndExecution = pendingMessages.filter((m) => m.status === "NeedsProofAndExecution"); + const needsExecutionOnly = pendingMessages.filter((m) => m.status === "NeedsExecutionOnly"); + + logger.debug({ + at: `Finalizer#heliosL1toL2Finalizer:${l2ChainId}`, + message: `Identified ${pendingMessages.length} total pending messages.`, + counts: { + needsProofAndExecution: needsProofAndExecution.length, + needsExecutionOnly: needsExecutionOnly.length, + }, + needsExecutionNonces: needsExecutionOnly.map((m) => m.l1Event.nonce.toString()), // Log nonces needing only execution + }); + + // --- Step 2: Get Proofs for Messages Needing Full Finalization --- + let proofsToSubmit: SuccessfulProof[] = []; + if (needsProofAndExecution.length > 0) { + // Pass only the messages that need proofs + proofsToSubmit = await processUnfinalizedHeliosMessages( + logger, + needsProofAndExecution, // Pass PendingCrosschainMessage[] here + l2SpokePoolClient, + l1ChainId + ); + if (proofsToSubmit.length === 0) { + logger.debug({ + at: `Finalizer#heliosL1toL2Finalizer:${l2ChainId}`, + message: "No successful proofs retrieved for messages needing full finalization.", + }); + // Don't return yet, might still have needsExecutionOnly messages + } + } + + // --- Step 3: Generate Multicall Data from Proofs and Partially Finalized Messages --- + if (proofsToSubmit.length === 0 && needsExecutionOnly.length === 0) { + logger.debug({ + at: `Finalizer#heliosL1toL2Finalizer:${l2ChainId}`, + message: "No proofs obtained and no messages need execution only. Nothing to submit.", + }); + return { callData: [], crossChainMessages: [] }; + } + + return generateHeliosTxns(logger, proofsToSubmit, needsExecutionOnly, l1ChainId, l2ChainId, l2SpokePoolClient); +} + +// ================================== +// Step-by-step Helper Functions +// ================================== + +/** --- Step 1 --- + * Identifies messages stored on L1 HubPoolStore that require action on L2. + * Fetches L1 StoredCallData events, L2 StorageSlotVerified events, and L2 RelayedCallData events. + * Determines whether a message needs both proof and execution, just execution, or no action at all.
+ */ +async function identifyPendingHeliosMessages( + logger: winston.Logger, + hubPoolClient: HubPoolClient, + l1SpokePoolClient: SpokePoolClient, + l2SpokePoolClient: SpokePoolClient, + l1ChainId: number, + l2ChainId: number +): Promise<PendingCrosschainMessage[]> { + // --- Substep 1: Query and Filter L1 Events --- + const relevantStoredCallDataEvents = await getRelevantL1Events( + logger, + hubPoolClient, + l1SpokePoolClient, + l1ChainId, + l2ChainId, + l2SpokePoolClient.spokePool.address + ); + + if (relevantStoredCallDataEvents.length === 0) { + logger.debug({ + at: `Finalizer#identifyPendingHeliosMessages:${l2ChainId}`, + message: "No relevant StoredCallData events found on L1.", + }); + return []; + } + + // --- Substep 2: Query L2 Verification Events (StorageSlotVerified) --- + const verifiedSlotsMap = await getL2VerifiedSlotsMap(l2SpokePoolClient, l2ChainId); + + // --- Substep 3: Query L2 Execution Events (RelayedCallData) --- + const relayedNonces = await getL2RelayedNonces(l2SpokePoolClient); + + // --- Determine Status for each L1 Event --- + const pendingMessages: PendingCrosschainMessage[] = []; + for (const l1Event of relevantStoredCallDataEvents) { + const expectedStorageSlot = calculateHubPoolStoreStorageSlot(l1Event.nonce); + const nonce = l1Event.nonce; + + const isExecuted = relayedNonces.has(nonce.toString()); // Use nonce string as key + + if (!isExecuted) { + if (verifiedSlotsMap.has(expectedStorageSlot) /* isVerified */) { + // Verified but not executed -> Needs Execution Only + const verifiedEvent = verifiedSlotsMap.get(expectedStorageSlot); + pendingMessages.push({ + l1Event: l1Event, + status: "NeedsExecutionOnly", + verifiedHead: verifiedEvent.head, // set verifiedHead as it's needed for execution + }); + // Log a warning for partially finalized messages + logger.warn({ + at: `Finalizer#identifyPendingHeliosMessages:${l2ChainId}`, + message: + "Message requires execution only (already verified in SP1Helios). Will generate SpokePool.executeMessage tx.", + l1TxRef: l1Event.txnRef, + nonce: nonce.toString(), + storageSlot: expectedStorageSlot, + verifiedOnL2TxnRef: verifiedEvent.txnRef, + verifiedHead: verifiedEvent.head.toString(), + }); + } else { + // Not verified and not executed -> Needs Proof and Execution + pendingMessages.push({ + l1Event: l1Event, + status: "NeedsProofAndExecution", + // verifiedHead is undefined here + }); + } + } + // If `isExecuted` is true, the message is fully finalized, do nothing. + } + + logger.debug({ + at: `Finalizer#identifyPendingHeliosMessages:${l2ChainId}`, + message: "Finished identifying pending messages.", + totalL1StoredCallData: relevantStoredCallDataEvents.length, + totalL2VerifiedSlots: verifiedSlotsMap.size, + totalL2RelayedNonces: relayedNonces.size, + pendingMessagesCount: pendingMessages.length, + needsProofCount: pendingMessages.filter((m) => m.status === "NeedsProofAndExecution").length, + needsExecutionOnlyCount: pendingMessages.filter((m) => m.status === "NeedsExecutionOnly").length, + }); + + return pendingMessages; +} + +/** Query and Filter L1 Events */ +async function getRelevantL1Events( + _logger: winston.Logger, + hubPoolClient: HubPoolClient, + l1SpokePoolClient: SpokePoolClient, + l1ChainId: number, + _l2ChainId: number, + l2SpokePoolAddress: string +): Promise<StoredCallDataEvent[]> { + const l1Provider = hubPoolClient.hubPool.provider; + const hubPoolStoreContract = getHubPoolStoreContract(l1ChainId, l1Provider); + + /** + * @dev We artificially shorten the lookback time period for L1 events by a factor of 2.
We want to avoid race conditions where + * we see an old event on L1, but not look back far enough on L2 to see that the event has been executed successfully. + */ + const toBlock = l1SpokePoolClient.latestBlockSearched; + const fromBlock = Math.floor((l1SpokePoolClient.eventSearchConfig.fromBlock + toBlock) / 2); + const l1SearchConfig: EventSearchConfig = { + fromBlock: fromBlock, + toBlock: toBlock, + maxBlockLookBack: l1SpokePoolClient.eventSearchConfig.maxBlockLookBack, + }; + + const storedCallDataFilter = hubPoolStoreContract.filters.StoredCallData(); + + const rawLogs = await paginatedEventQuery(hubPoolStoreContract, storedCallDataFilter, l1SearchConfig); + + const events: StoredCallDataEvent[] = rawLogs.map((log) => spreadEventWithBlockNumber(log) as StoredCallDataEvent); + + const relevantStoredCallDataEvents = events.filter( + (event) => + compareAddressesSimple(event.target, l2SpokePoolAddress) || + compareAddressesSimple(event.target, ethers.constants.AddressZero) + ); + + return relevantStoredCallDataEvents; +} + +/** Query L2 Verification Events and return verified slots map */ +async function getL2VerifiedSlotsMap( + l2SpokePoolClient: SpokePoolClient, + l2ChainId: number +): Promise<Map<string, StorageSlotVerifiedEvent>> { + const l2Provider = l2SpokePoolClient.spokePool.provider; + const sp1HeliosContract = getSp1HeliosContract(l2ChainId, l2Provider); + + const l2SearchConfig: EventSearchConfig = { + fromBlock: l2SpokePoolClient.eventSearchConfig.fromBlock, + toBlock: l2SpokePoolClient.latestBlockSearched, + maxBlockLookBack: l2SpokePoolClient.eventSearchConfig.maxBlockLookBack, + }; + const storageVerifiedFilter = sp1HeliosContract.filters.StorageSlotVerified(); + + const rawLogs = await paginatedEventQuery(sp1HeliosContract, storageVerifiedFilter, l2SearchConfig); + + // Use spreadEventWithBlockNumber and cast to the flattened type + const events: StorageSlotVerifiedEvent[] = rawLogs.map( + (log) => spreadEventWithBlockNumber(log) as StorageSlotVerifiedEvent + ); + + // Store events in a map keyed by the storage slot (key) + const verifiedSlotsMap = new Map<string, StorageSlotVerifiedEvent>(); + events.forEach((event) => { + // Handle potential duplicates (though unlikely with paginated query): favour latest block/logIndex + const existing = verifiedSlotsMap.get(event.key); + if ( + !existing || + event.blockNumber > existing.blockNumber || + (event.blockNumber === existing.blockNumber && event.logIndex > existing.logIndex) + ) { + verifiedSlotsMap.set(event.key, event); + } + }); + return verifiedSlotsMap; +} + +/** --- Query L2 Execution Events (RelayedCallData) */ +async function getL2RelayedNonces(l2SpokePoolClient: SpokePoolClient): Promise<Set<string>> { + const l2Provider = l2SpokePoolClient.spokePool.provider; + const l2SpokePoolAddress = l2SpokePoolClient.spokePool.address; + const universalSpokePoolContract = new ethers.Contract(l2SpokePoolAddress, UNIVERSAL_SPOKE_ABI, l2Provider); + + const l2SearchConfig: EventSearchConfig = { + fromBlock: l2SpokePoolClient.eventSearchConfig.fromBlock, + toBlock: l2SpokePoolClient.latestBlockSearched, + maxBlockLookBack: l2SpokePoolClient.eventSearchConfig.maxBlockLookBack, + }; + const relayedCallDataFilter = universalSpokePoolContract.filters.RelayedCallData(); + + const rawLogs = await paginatedEventQuery(universalSpokePoolContract, relayedCallDataFilter, l2SearchConfig); + + // Use spreadEventWithBlockNumber and cast to the flattened type + const events: RelayedCallDataEvent[] = rawLogs.map((log) => spreadEventWithBlockNumber(log) as RelayedCallDataEvent); + + // Return a Set of nonces (as strings for
easy comparison) + return new Set(events.map((event) => event.nonce.toString())); +} + +/** + * --- Get Proofs for Unfinalized Messages --- + * Processes messages needing proof+execution by interacting with the ZK Proof API. + * Returns a list of successfully retrieved proofs. + */ +async function processUnfinalizedHeliosMessages( + logger: winston.Logger, + messagesToProcess: PendingCrosschainMessage[], + l2SpokePoolClient: SpokePoolClient, + l1ChainId: number +): Promise { + // Filter within the function just in case, though the caller should have already filtered + const unfinalizedMessages = messagesToProcess.filter((m) => m.status === "NeedsProofAndExecution"); + if (unfinalizedMessages.length === 0) { + return []; + } + + const l2ChainId = l2SpokePoolClient.chainId; + const l2Provider = l2SpokePoolClient.spokePool.provider; + const apiBaseUrl = process.env.HELIOS_PROOF_API_URL; + + if (!apiBaseUrl) { + logger.error({ + at: `Finalizer#heliosL1toL2Finalizer:processUnfinalizedHeliosMessages:${l2ChainId}`, + message: "HELIOS_PROOF_API_URL environment variable not set. Cannot process Helios messages.", + }); + return []; + } + + const hubPoolStoreInfo = CONTRACT_ADDRESSES[l1ChainId]?.hubPoolStore; + if (!hubPoolStoreInfo?.address) { + throw new Error(`HubPoolStore address not available for chain: ${l1ChainId}. Cannot process Helios messages.`); + } + const hubPoolStoreAddress = hubPoolStoreInfo.address; + const sp1HeliosContract = getSp1HeliosContract(l2ChainId, l2Provider); + + const headBn: ethers.BigNumber = await sp1HeliosContract.head(); + // todo: well, currently we're taking currentHead to use as prevHead in our ZK proof. There's a particular scenario where we could speed up proofs + // todo: (by not making them to wait for finality longer than needed) if our blockNumber that we need a proved slot for is older than this head. + const currentHead = headBn.toNumber(); + const currentHeader = await sp1HeliosContract.headers(headBn); + if (!currentHeader || currentHeader === ethers.constants.HashZero) { + throw new Error(`Invalid header found for head ${currentHead}`); + } + + const successfulProofs: SuccessfulProof[] = []; + + // todo? 
Can use Promise.All if we really want to + // Process messages one by one + for (const pendingMessage of unfinalizedMessages) { + const l1Event = pendingMessage.l1Event; // Extract the L1 event + const logContext = { + at: `Finalizer#heliosL1toL2Finalizer:processUnfinalizedHeliosMessages:${l2ChainId}`, + l1TxHash: l1Event.txnRef, + nonce: l1Event.nonce.toString(), + target: l1Event.target, + }; + + const storageSlot = calculateHubPoolStoreStorageSlot(l1Event.nonce); + + const apiRequest: ApiProofRequest = { + src_chain_contract_address: hubPoolStoreAddress, + src_chain_storage_slot: storageSlot, + src_chain_block_number: l1Event.blockNumber, // Use block number from L1 event + dst_chain_contract_from_head: currentHead, + dst_chain_contract_from_header: currentHeader, + }; + + const proofId = calculateProofId(apiRequest); + const getProofUrl = `${apiBaseUrl}/api/proofs/${proofId}`; + + logger.debug({ ...logContext, message: "Attempting to get proof", proofId, getProofUrl, storageSlot }); + + let proofState: ProofStateResponse | null = null; + + // @dev We need try - catch here because of how API responds to non-existing proofs: with NotFound status + let getError: any = null; + try { + const response = await axios.get(getProofUrl); + proofState = response.data; + logger.debug({ ...logContext, message: "Proof state received", proofId, status: proofState.status }); + } catch (error: any) { + getError = error; + } + + // Axios error. Handle based on whether was a NOTFOUND or another error + if (getError) { + const isNotFoundError = axios.isAxiosError(getError) && getError.response?.status === 404; + if (isNotFoundError) { + // NOTFOUND error -> Request proof + logger.debug({ ...logContext, message: "Proof not found (404), requesting...", proofId }); + await axios.post(`${apiBaseUrl}/api/proofs`, apiRequest); + logger.debug({ ...logContext, message: "Proof requested successfully.", proofId }); + continue; + } else { + // If other error is returned -- throw and alert PD; this shouldn't happen + throw new Error(`Failed to get proof state for proofId ${proofId}: ${stringifyThrownValue(getError)}`); + } + } + + // No axios error, process `proofState` + switch (proofState.status) { + case "pending": + // If proof generation is pending -- there's nothing for us to do yet. Will check this proof next run + logger.debug({ ...logContext, message: "Proof generation is pending.", proofId }); + break; + case "errored": + // Proof generation errored on the API side. This is concerning, so we log an error. But nothing to do for us other than to re-request + logger.error({ + ...logContext, + message: "Proof generation errored on ZK API side. 
Requesting again.", + proofId, + errorMessage: proofState.error_message, + }); + + await axios.post(`${apiBaseUrl}/api/proofs`, apiRequest); + logger.debug({ ...logContext, message: "Errored proof requested again successfully.", proofId }); + break; + case "success": + if (!proofState.update_calldata) { + throw new Error(`Proof status is success but update_calldata is missing for proofId ${proofId}`); + } + logger.debug({ ...logContext, message: "Proof successfully retrieved.", proofId }); + successfulProofs.push({ + // @dev `proofData` should exist if proofState.status is "success" + proofData: proofState.update_calldata, + sourceNonce: l1Event.nonce, + target: l1Event.target, + sourceMessageData: l1Event.data, + }); + break; + default: + throw new Error(`Received unexpected proof status for proof ${proofId}`); + } + } // end loop over messages + + return successfulProofs; +} + +/** --- Generate Multicall Data --- */ +async function generateHeliosTxns( + logger: winston.Logger, + successfulProofs: SuccessfulProof[], + needsExecutionOnlyMessages: PendingCrosschainMessage[], + l1ChainId: number, + l2ChainId: number, + l2SpokePoolClient: SpokePoolClient +): Promise { + const transactions: AugmentedTransaction[] = []; + const crossChainMessages: CrossChainMessage[] = []; + + const sp1HeliosContract = getSp1HeliosContract(l2ChainId, l2SpokePoolClient.spokePool.signer); + const spokePoolAddress = l2SpokePoolClient.spokePool.address; + const universalSpokePoolContract = new ethers.Contract( + spokePoolAddress, + [...UNIVERSAL_SPOKE_ABI], + l2SpokePoolClient.spokePool.signer + ); + + // --- Process messages needing only execution --- + for (const message of needsExecutionOnlyMessages) { + const { l1Event, verifiedHead } = message; + const nonce = l1Event.nonce; + const l1Target = l1Event.target; // Get target from L1 event + const l1Data = l1Event.data; // Get data from L1 event + + if (!verifiedHead) { + // @dev This shouldn't happen. If it does, there's a bug that needs fixing. + throw new Error(`Logic error: Message ${nonce.toString()} needs execution only but verifiedHead is missing.`); + } + + // @dev Warn about messages that require only half of finalization. Means that either a tx from prev. 
run got stuck or failed or something else weird happened + logger.warn({ + at: `Finalizer#heliosL1toL2Finalizer:generateTxnItem:${l2ChainId}`, + message: "Generating SpokePool.executeMessage ONLY for partially finalized message.", + nonce: nonce.toString(), + l1TxHash: l1Event.txnRef, + verifiedHead: verifiedHead.toString(), + }); + + // --- Encode the message parameter --- + const encodedMessage = ethers.utils.defaultAbiCoder.encode(["address", "bytes"], [l1Target, l1Data]); + // ------------------------------------ + + const executeArgs = [nonce, encodedMessage, verifiedHead]; // Use encodedMessage + const executeTx: AugmentedTransaction = { + contract: universalSpokePoolContract, + chainId: l2ChainId, + method: "executeMessage", + args: executeArgs, + unpermissioned: true, + canFailInSimulation: true, + message: `Finalize Helios msg (HubPoolStore nonce ${nonce.toString()}) - Step 2 ONLY: Execute on SpokePool`, + }; + transactions.push(executeTx); + crossChainMessages.push({ + type: "misc", + miscReason: "ZK bridge finalization (Execute Message Only)", + originationChainId: l1ChainId, + destinationChainId: l2ChainId, + }); + } + + // --- Process messages needing proof and execution --- + for (const proof of successfulProofs) { + // Ensure the hex strings have the '0x' prefix, adding it only if missing. + const proofBytes = proof.proofData.proof.startsWith("0x") ? proof.proofData.proof : "0x" + proof.proofData.proof; + const publicValuesBytes = proof.proofData.public_values.startsWith("0x") + ? proof.proofData.public_values + : "0x" + proof.proofData.public_values; + + // @dev Will throw on decode errors here. + const decodedOutputs: ProofOutputs = decodeProofOutputs(publicValuesBytes); + + // 1. SP1Helios.update transaction + const updateArgs = [proofBytes, publicValuesBytes]; + const updateTx: AugmentedTransaction = { + contract: sp1HeliosContract, + chainId: l2ChainId, + method: "update", + args: updateArgs, + unpermissioned: false, + canFailInSimulation: false, + nonMulticall: true, + message: `Finalize Helios msg (HubPoolStore nonce ${proof.sourceNonce.toString()}) - Step 1: Update SP1Helios`, + }; + transactions.push(updateTx); + crossChainMessages.push({ + type: "misc", + miscReason: "ZK bridge finalization (Helios Update)", + originationChainId: l1ChainId, + destinationChainId: l2ChainId, + }); + + // 2. SpokePool.executeMessage transaction + // --- Encode the message parameter --- + const l1Target = proof.target; // Get target from SuccessfulProof + const l1Data = proof.sourceMessageData; // Get data from SuccessfulProof + const encodedMessage = ethers.utils.defaultAbiCoder.encode(["address", "bytes"], [l1Target, l1Data]); + // ------------------------------------ + + const executeArgs = [proof.sourceNonce, encodedMessage, decodedOutputs.newHead]; // Use encodedMessage + const executeTx: AugmentedTransaction = { + contract: universalSpokePoolContract, + chainId: l2ChainId, + method: "executeMessage", + args: executeArgs, + unpermissioned: true, + // @dev Simulation of `executeMessage` depends on prior state update via SP1Helios.update + canFailInSimulation: true, + // todo? 
this hardcoded gas limit of 2 mil could be improved if we were able to simulate this tx on top of blockchain state created by the tx above + gasLimit: BigNumber.from(2000000), + message: `Finalize Helios msg (HubPoolStore nonce ${proof.sourceNonce.toString()}) - Step 2: Execute on SpokePool`, + }; + transactions.push(executeTx); + crossChainMessages.push({ + type: "misc", + miscReason: "ZK bridge finalization (Execute Message)", + originationChainId: l1ChainId, + destinationChainId: l2ChainId, + }); + } + + const totalFinalizations = successfulProofs.length + needsExecutionOnlyMessages.length; + logger.debug({ + at: `Finalizer#heliosL1toL2Finalizer:generateHeliosTxns:${l2ChainId}`, + message: `Generated ${transactions.length} transactions for ${totalFinalizations} finalizations (${successfulProofs.length} full, ${needsExecutionOnlyMessages.length} exec only).`, + proofNoncesFinalized: successfulProofs.map((p) => p.sourceNonce.toString()), + execOnlyNoncesFinalized: needsExecutionOnlyMessages.map((m) => m.l1Event.nonce.toString()), + }); + + return { callData: transactions, crossChainMessages: crossChainMessages }; +} diff --git a/src/finalizer/utils/index.ts b/src/finalizer/utils/index.ts index a6bd53a567..4f04d411df 100644 --- a/src/finalizer/utils/index.ts +++ b/src/finalizer/utils/index.ts @@ -6,3 +6,4 @@ export * from "./scroll"; export * from "./cctp"; export * from "./binance"; export * from "./linea"; +export * from "./helios"; diff --git a/src/interfaces/Helios.ts b/src/interfaces/Helios.ts new file mode 100644 index 0000000000..f6888d87e7 --- /dev/null +++ b/src/interfaces/Helios.ts @@ -0,0 +1,10 @@ +import { SortableEvent } from "."; +import { BigNumber } from "../utils"; + +// Event type for Sp1Helios used in v4 messaging (Flattened) +export interface StorageSlotVerifiedEvent extends SortableEvent { + head: BigNumber; + key: string; // bytes32 + value: string; // bytes32 + contractAddress: string; +} diff --git a/src/interfaces/Universal.ts b/src/interfaces/Universal.ts new file mode 100644 index 0000000000..eec124cb33 --- /dev/null +++ b/src/interfaces/Universal.ts @@ -0,0 +1,15 @@ +import { SortableEvent } from "."; +import { BigNumber } from "../utils"; + +// Flattened HubPoolStore Event +export interface StoredCallDataEvent extends SortableEvent { + target: string; + data: string; + nonce: BigNumber; +} + +// Flattened Universal SpokePool Event +export interface RelayedCallDataEvent extends SortableEvent { + nonce: BigNumber; + caller: string; +} diff --git a/src/interfaces/ZkApi.ts b/src/interfaces/ZkApi.ts new file mode 100644 index 0000000000..068f15c6f8 --- /dev/null +++ b/src/interfaces/ZkApi.ts @@ -0,0 +1,49 @@ +import { BigNumber } from "../utils"; + +// --- API Interaction Types --- +export interface ApiProofRequest { + src_chain_contract_address: string; + src_chain_storage_slot: string; + src_chain_block_number: number; // u64 on Rust API side + dst_chain_contract_from_head: number; // u64 on Rust API side + dst_chain_contract_from_header: string; +} + +export type ProofStatus = "pending" | "success" | "errored"; + +export interface SP1HeliosProofData { + proof: string; + public_values: string; +} + +export interface ProofStateResponse { + proof_id: string; + status: ProofStatus; + update_calldata?: SP1HeliosProofData; // Present only if status is "success" + error_message?: string; // Present only if status is "errored" +} + +// ABI for `public_values` returned from ZK API as part of `SP1HeliosProofData` +export const PROOF_OUTPUTS_ABI_TUPLE = `tuple( + bytes32 
executionStateRoot, + bytes32 newHeader, + bytes32 nextSyncCommitteeHash, + uint256 newHead, + bytes32 prevHeader, + uint256 prevHead, + bytes32 syncCommitteeHash, + bytes32 startSyncCommitteeHash, + tuple(bytes32 key, bytes32 value, address contractAddress)[] slots + )`; + +export type ProofOutputs = { + executionStateRoot: string; + newHeader: string; + nextSyncCommitteeHash: string; + newHead: BigNumber; + prevHeader: string; + prevHead: BigNumber; + syncCommitteeHash: string; + startSyncCommitteeHash: string; + slots: { key: string; value: string; contractAddress: string }[]; +}; diff --git a/src/utils/HeliosUtils.ts b/src/utils/HeliosUtils.ts new file mode 100644 index 0000000000..c8c45b80f0 --- /dev/null +++ b/src/utils/HeliosUtils.ts @@ -0,0 +1,14 @@ +import { ethers, Provider, Signer } from "."; +import { CONTRACT_ADDRESSES } from "../common/ContractAddresses"; + +/** + * Retrieves an ethers.Contract instance for the SP1Helios contract on the specified chain. + * @throws {Error} If the SP1Helios contract address or ABI is not found for the given chainId in CONTRACT_ADDRESSES. + */ +export function getSp1HeliosContract(chainId: number, signerOrProvider: Signer | Provider): ethers.Contract { + const sp1HeliosInfo = CONTRACT_ADDRESSES[chainId]?.sp1Helios; + if (!sp1HeliosInfo?.address || !sp1HeliosInfo.abi) { + throw new Error(`SP1Helios contract not found for chain ${chainId}. Cannot verify Helios messages.`); + } + return new ethers.Contract(sp1HeliosInfo.address, sp1HeliosInfo.abi as any, signerOrProvider); +} diff --git a/src/utils/UniversalUtils.ts b/src/utils/UniversalUtils.ts new file mode 100644 index 0000000000..757b3206c5 --- /dev/null +++ b/src/utils/UniversalUtils.ts @@ -0,0 +1,38 @@ +import { ethers } from "ethers"; +import { BigNumber, Signer, Provider } from "."; +import { CONTRACT_ADDRESSES } from "../common"; + +/** + * Calculates the storage slot in the HubPoolStore contract for a given nonce. + * This assumes the data is stored in a mapping at slot 0, keyed by nonce. + * storage_slot = keccak256(pad32(k) . pad32(p)) where k = nonce, p = mapping slot position (0) + */ +export function calculateHubPoolStoreStorageSlot(nonce: BigNumber): string { + const mappingSlotPosition = 0; // The relayMessageCallData mapping is at slot 0 + + // Ensure nonce and slot position are correctly padded to 32 bytes (64 hex chars + 0x prefix) + const paddedNonce = ethers.utils.hexZeroPad(nonce.toHexString(), 32); + const paddedSlot = ethers.utils.hexZeroPad(BigNumber.from(mappingSlotPosition).toHexString(), 32); + + // Concatenate the padded key (nonce) and slot position + // ethers.utils.concat expects Uint8Array or hex string inputs + const concatenated = ethers.utils.concat([paddedNonce, paddedSlot]); + + // Calculate the Keccak256 hash + const storageSlot = ethers.utils.keccak256(concatenated); + + return storageSlot; +} + +/** + * Retrieves an ethers.Contract instance for the HubPoolStore contract on the specified chain. + * @throws {Error} If the HubPoolStore contract address or ABI is not found for the given chainId in CONTRACT_ADDRESSES.
+ */ +export function getHubPoolStoreContract(chainId: number, signerOrProvider: Signer | Provider): ethers.Contract { + const hubPoolStoreInfo = CONTRACT_ADDRESSES[chainId]?.hubPoolStore; + if (!hubPoolStoreInfo?.address || !hubPoolStoreInfo.abi) { + throw new Error(`HubPoolStore contract address or ABI not found for chain ${chainId}.`); + } + + return new ethers.Contract(hubPoolStoreInfo.address, hubPoolStoreInfo.abi as any, signerOrProvider); +} diff --git a/src/utils/ZkApiUtils.ts b/src/utils/ZkApiUtils.ts new file mode 100644 index 0000000000..b3543fed76 --- /dev/null +++ b/src/utils/ZkApiUtils.ts @@ -0,0 +1,47 @@ +import { BigNumber, ethers } from "."; +import { ApiProofRequest, PROOF_OUTPUTS_ABI_TUPLE, ProofOutputs } from "../interfaces/ZkApi"; + +/** + * Calculates the deterministic Proof ID based on the request parameters. + * Matches the Rust implementation using RLP encoding and Keccak256. + */ +export function calculateProofId(request: ApiProofRequest): string { + const encoded = ethers.utils.RLP.encode([ + request.src_chain_contract_address, + request.src_chain_storage_slot, + BigNumber.from(request.src_chain_block_number).toHexString(), // Ensure block number is hex encoded for RLP + BigNumber.from(request.dst_chain_contract_from_head).toHexString(), // Ensure head is hex encoded for RLP + request.dst_chain_contract_from_header, + ]); + return ethers.utils.keccak256(encoded); +} + +/** + * Decodes the ABI-encoded public_values string from the ZK Proof API into a structured ProofOutputs object. + * @param publicValuesBytes The ABI-encoded hex string (with or without 0x prefix) containing the proof outputs. + * @returns The decoded ProofOutputs object. + * @throws {Error} If the decoding fails (e.g., invalid format). + */ +export function decodeProofOutputs(publicValuesBytes: string): ProofOutputs { + // Ensure 0x prefix for decoder + const prefixedBytes = publicValuesBytes.startsWith("0x") ? publicValuesBytes : "0x" + publicValuesBytes; + const decodedResult = ethers.utils.defaultAbiCoder.decode([PROOF_OUTPUTS_ABI_TUPLE], prefixedBytes)[0]; + + // Map the decoded array elements to the ProofOutputs type properties + // @dev Notice, if `decodedResult` is not what we expect, this will implicitly throw an error. + return { + executionStateRoot: decodedResult[0], + newHeader: decodedResult[1], + nextSyncCommitteeHash: decodedResult[2], + newHead: decodedResult[3], // Already a BigNumber from decoder + prevHeader: decodedResult[4], + prevHead: decodedResult[5], // Already a BigNumber from decoder + syncCommitteeHash: decodedResult[6], + startSyncCommitteeHash: decodedResult[7], + slots: decodedResult[8].map((slot: any[]) => ({ + key: slot[0], + value: slot[1], + contractAddress: slot[2], + })), + }; +}
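Reviewer note (not part of the patch): because `FinalizerPromise.callData` is now a union of `Multicall2Call` and `AugmentedTransaction`, any consumer of finalizer output must branch on the transaction shape before batching, as `finalize()` now does. A minimal sketch of that split, assuming the `isAugmentedTransaction` guard from `src/finalizer/types.ts` and import paths relative to a file under `src/finalizer/`; the helper name is illustrative only.

```ts
import { AugmentedTransaction } from "../clients";
import { Multicall2Call } from "../utils";
import { isAugmentedTransaction } from "./types";

// Split a finalizer's callData into txns that must be sent standalone vs. ones
// that can be wrapped into a single Multicall2.aggregate() batch.
export function splitFinalizerCallData(callData: (Multicall2Call | AugmentedTransaction)[]): {
  standalone: AugmentedTransaction[];
  batchable: Multicall2Call[];
} {
  const standalone: AugmentedTransaction[] = [];
  const batchable: Multicall2Call[] = [];
  for (const txn of callData) {
    if (isAugmentedTransaction(txn)) {
      standalone.push(txn); // e.g. SP1Helios.update, which is marked nonMulticall
    } else {
      batchable.push(txn);
    }
  }
  return { standalone, batchable };
}
```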
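Reviewer note: `calculateHubPoolStoreStorageSlot` follows Solidity's mapping-slot rule, `slot = keccak256(abi.encode(key, mappingSlotPosition))`. A small equivalence sketch under that assumption (nonce-keyed mapping at slot 0); the import path assumes a test file outside `src/` and could seed a unit test for `src/utils/UniversalUtils.ts`.

```ts
import { BigNumber, ethers } from "ethers";
import { calculateHubPoolStoreStorageSlot } from "../src/utils/UniversalUtils";

// Same derivation expressed via abi.encode: keccak256(abi.encode(uint256 nonce, uint256 0)).
function hubPoolStoreSlotViaAbiEncode(nonce: BigNumber): string {
  return ethers.utils.keccak256(ethers.utils.defaultAbiCoder.encode(["uint256", "uint256"], [nonce, 0]));
}

// Both forms should agree for any nonce:
const nonce = BigNumber.from(42);
console.log(hubPoolStoreSlotViaAbiEncode(nonce) === calculateHubPoolStoreStorageSlot(nonce)); // true
```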
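Reviewer note: a short usage sketch of the ZK proof API flow implemented in `processUnfinalizedHeliosMessages` — compute the deterministic proof id, GET its state, and POST a generation request on 404 — assuming the `${HELIOS_PROOF_API_URL}/api/proofs` endpoints used in the diff and test-style import paths; the helper name is hypothetical.

```ts
import axios from "axios";
import { calculateProofId } from "../src/utils/ZkApiUtils";
import { ApiProofRequest, ProofStateResponse } from "../src/interfaces/ZkApi";

// Returns the current proof state, or undefined if generation was just requested.
async function getOrRequestProof(
  apiBaseUrl: string,
  request: ApiProofRequest
): Promise<ProofStateResponse | undefined> {
  const proofId = calculateProofId(request); // keccak256 of the RLP-encoded request fields
  try {
    // Known proof id: returns { status: "pending" | "success" | "errored", ... }.
    const { data } = await axios.get<ProofStateResponse>(`${apiBaseUrl}/api/proofs/${proofId}`);
    return data;
  } catch (error) {
    if (axios.isAxiosError(error) && error.response?.status === 404) {
      // Unknown proof id: request generation and poll again on the next finalizer run.
      await axios.post(`${apiBaseUrl}/api/proofs`, request);
      return undefined;
    }
    throw error;
  }
}
```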
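Reviewer note: in `generateHeliosTxns` the SpokePool message argument is the original `(target, data)` pair from the L1 `StoredCallData` event re-encoded as a single `bytes` value, and the third argument is the SP1Helios head at which the storage slot is (or will be) verified. A condensed sketch of that packing for `Universal_SpokePool.executeMessage(uint256, bytes, uint256)`; the helper itself is hypothetical.

```ts
import { BigNumber, ethers } from "ethers";

// Build the three executeMessage args for one Helios-relayed message.
function buildExecuteMessageArgs(
  nonce: BigNumber, // StoredCallData.nonce from the HubPoolStore
  target: string, // StoredCallData.target (the L2 SpokePool or address(0))
  data: string, // StoredCallData.data (bytes)
  verifiedHead: BigNumber // ProofOutputs.newHead or StorageSlotVerified.head
): [BigNumber, string, BigNumber] {
  // Re-encode the (target, data) pair exactly as the finalizer does before submission.
  const encodedMessage = ethers.utils.defaultAbiCoder.encode(["address", "bytes"], [target, data]);
  return [nonce, encodedMessage, verifiedHead];
}
```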