diff --git a/.githooks/pre-commit b/.githooks/pre-commit index fb552d5ced..6aae36aa89 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -6,7 +6,7 @@ RED='\033[0;31m' NC='\033[0m' # No Color -# Check that `rustfmt` rules are not violated. +# Check that Rust formatting rules are not violated. if ! cargo fmt -- --check; then echo -e "${RED}Commit error!${NC}" echo "Please format the code via 'cargo fmt', cannot commit unformatted code" diff --git a/.githooks/pre-push b/.githooks/pre-push new file mode 100755 index 0000000000..eb1acbb693 --- /dev/null +++ b/.githooks/pre-push @@ -0,0 +1,14 @@ +#!/bin/sh +# +# Pre-push hook verifying that inappropriate code will not be pushed. + +# Colors for the terminal output +RED='\033[0;31m' +NC='\033[0m' # No Color + +# Check that prettier formatting rules are not violated. +if ! zk fmt --check; then + echo -e "${RED}Push error!${NC}" + echo "Please format the code via 'zk fmt', cannot push unformatted code" + exit 1 +fi diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d5102494e1..f271d9c591 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,9 @@ on: jobs: ci-all: - runs-on: self-hosted + # We currently have two self-hosted runners, one of which is marked "DEV-CI" and the other is marked "MAIN". + # "MAIN" is the current CI runner, "DEV-CI" is currently used to experiment with CI optimization. + runs-on: [self-hosted, MAIN] steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/promote-stage.yml b/.github/workflows/promote-stage.yml index cbfc34cfa9..4ab2668591 100644 --- a/.github/workflows/promote-stage.yml +++ b/.github/workflows/promote-stage.yml @@ -6,7 +6,9 @@ on: jobs: build-images: - runs-on: self-hosted + # We currently have two self-hosted runners, one of which is marked "DEV-CI" and the other is marked "MAIN". + # "MAIN" is the current CI runner, "DEV-CI" is currently used to experiment with CI optimization. + runs-on: [self-hosted, MAIN] steps: - uses: actions/checkout@v2 diff --git a/CHANGELOG.md b/CHANGELOG.md index ac2beeed51..28c00ee8f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,15 +2,16 @@ ### Contracts v4 and protocol (24.11.2020) -- Timestamp is added to the block commitment. Rollup block timestamp validity is checked when block is committed onchain. -- Offchain ChangePubKey can be performed for smart contract wallets that can be deployed with CREATE2 when pubkey hash is encoded in the CREATE2 salt parameter. +- Timestamp is added to the block commitment. Rollup block timestamp validity is checked when block is committed + onchain. +- Offchain ChangePubKey can be performed for smart contract wallets that can be deployed with CREATE2 when pubkey hash + is encoded in the CREATE2 salt parameter. - Governance contract can pause token deposits. - ChangePubKey message signature is changed. - Onchain operation processing changed on the contract. - Recursive block verifier added. - Onchain rollup block commitment changed, multiple blocks can be committed, verified at once. - ### Contracts v3 and protocol (4.09.2020) - Change pubkey operation requires fee for processing.
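A note on the `.githooks` scripts introduced above: Git does not pick up hooks from a custom directory by default, and the wiring step is not part of this diff. A minimal sketch of the usual setup, assuming the repo's `zk` tooling does not already perform the equivalent during init:

    # Run once per clone (requires Git 2.9+): point Git at the repo-local hooks directory
    git config core.hooksPath .githooks
    # Hooks are executed only if the files are executable
    chmod +x .githooks/pre-commit .githooks/pre-push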
diff --git a/Cargo.lock b/Cargo.lock index 5466e1d801..60e295bfdc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5918,11 +5918,13 @@ dependencies = [ "actix-rt", "actix-web", "anyhow", + "async-trait", "chrono", "ctrlc", "env_logger 0.6.2", "ethabi 12.0.0", "futures 0.3.8", + "itertools 0.9.0", "log 0.4.11", "metrics", "num", @@ -5966,6 +5968,7 @@ name = "zksync_data_restore" version = "1.0.0" dependencies = [ "anyhow", + "async-trait", "chrono", "db_test_macro", "env_logger 0.6.2", @@ -6217,6 +6220,7 @@ dependencies = [ "zksync_contracts", "zksync_core", "zksync_crypto", + "zksync_data_restore", "zksync_eth_client", "zksync_eth_signer", "zksync_prover", @@ -6235,6 +6239,7 @@ dependencies = [ "criterion", "ethabi 12.0.0", "hex", + "lazy_static", "log 0.4.11", "num", "parity-crypto", @@ -6242,6 +6247,7 @@ dependencies = [ "serde", "serde_json", "tiny-keccak 1.5.0", + "web3 0.13.0", "zksync_basic_types", "zksync_crypto", "zksync_utils", @@ -6256,6 +6262,7 @@ dependencies = [ "futures 0.3.8", "num", "serde", + "serde_json", ] [[package]] diff --git a/contracts/contracts/ZkSync.sol b/contracts/contracts/ZkSync.sol index dcaf2ee41e..df4dc59d86 100644 --- a/contracts/contracts/ZkSync.sol +++ b/contracts/contracts/ZkSync.sol @@ -444,7 +444,10 @@ contract ZkSync is UpgradeableMaster, Storage, Config, Events, ReentrancyGuard { ++currentTotalBlocksProofed; uint256 mask = (~uint256(0)) >> 3; - require(_proof.commitments[_commitmentIdxs[i]] & mask == uint256(_committedBlocks[i].commitment) & mask, "pbl3"); // incorrect block commitment in proof + require( + _proof.commitments[_commitmentIdxs[i]] & mask == uint256(_committedBlocks[i].commitment) & mask, + "pbl3" + ); // incorrect block commitment in proof } bool success = diff --git a/contracts/hardhat.config.ts b/contracts/hardhat.config.ts index 13eb825702..a403c8a38c 100644 --- a/contracts/hardhat.config.ts +++ b/contracts/hardhat.config.ts @@ -2,26 +2,26 @@ import '@nomiclabs/hardhat-waffle'; import '@nomiclabs/hardhat-solpp'; import 'hardhat-typechain'; import 'hardhat-contract-sizer'; -import "@nomiclabs/hardhat-etherscan"; +import '@nomiclabs/hardhat-etherscan'; const prodConfig = { // UPGRADE_NOTICE_PERIOD: 0, MAX_AMOUNT_OF_REGISTERED_TOKENS: 127, // PRIORITY_EXPIRATION: 101, DUMMY_VERIFIER: false -} +}; const testnetConfig = { UPGRADE_NOTICE_PERIOD: 0, MAX_AMOUNT_OF_REGISTERED_TOKENS: 127, // PRIORITY_EXPIRATION: 101, DUMMY_VERIFIER: false -} +}; const testConfig = { UPGRADE_NOTICE_PERIOD: 0, MAX_AMOUNT_OF_REGISTERED_TOKENS: 5, PRIORITY_EXPIRATION: 101, DUMMY_VERIFIER: true -} +}; const localConfig = Object.assign({}, prodConfig); localConfig.DUMMY_VERIFIER = process.env.DUMMY_VERIFIER ? true : localConfig.DUMMY_VERIFIER; @@ -30,8 +30,8 @@ const contractDefs = { rinkeby: testnetConfig, ropsten: testnetConfig, mainnet: prodConfig, - test: testConfig, - localhost: localConfig, + test: testConfig, + localhost: localConfig }; export default { @@ -51,10 +51,10 @@ export default { sources: './contracts' }, solpp: { - defs: process.env.ETH_NETWORK ? contractDefs[process.env.ETH_NETWORK] : contractDefs["test"], + defs: process.env.ETH_NETWORK ? 
contractDefs[process.env.ETH_NETWORK] : contractDefs['test'] }, networks: { - env : { + env: { url: process.env.WEB3_URL } }, diff --git a/contracts/scripts/add-erc20-token.ts b/contracts/scripts/add-erc20-token.ts index c9d3db96ca..a2c032a43c 100644 --- a/contracts/scripts/add-erc20-token.ts +++ b/contracts/scripts/add-erc20-token.ts @@ -1,8 +1,12 @@ import { ArgumentParser } from 'argparse'; import { BigNumber, Wallet, ethers } from 'ethers'; import { Deployer } from '../src.ts/deploy'; +import * as fs from 'fs'; +import * as path from 'path'; const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); async function main() { const parser = new ArgumentParser({ @@ -17,9 +21,10 @@ async function main() { const args = parser.parseArgs(process.argv.slice(2)); const deployer = new Deployer({ deployWallet: ethers.Wallet.createRandom() }); + const governorWallet = args.deployerPrivateKey ? new Wallet(args.deployerPrivateKey, provider) - : Wallet.fromMnemonic(process.env.MNEMONIC, "m/44'/60'/0'/0/1").connect(provider); + : Wallet.fromMnemonic(ethTestConfig.MNEMONIC, "m/44'/60'/0'/0/1").connect(provider); console.log('Adding new ERC20 token to network: ', args.tokenAddress); diff --git a/contracts/scripts/deploy-eip1271.ts b/contracts/scripts/deploy-eip1271.ts index ea584de97d..6366eee7e1 100644 --- a/contracts/scripts/deploy-eip1271.ts +++ b/contracts/scripts/deploy-eip1271.ts @@ -8,6 +8,10 @@ import { deployContract } from 'ethereum-waffle'; import * as fs from 'fs'; import * as path from 'path'; +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const EIP1271TestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eip1271.json`, { encoding: 'utf-8' })); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + (async () => { try { if (!['test', 'localhost'].includes(process.env.ETH_NETWORK)) { @@ -15,19 +19,16 @@ import * as path from 'path'; process.exit(1); } - const testConfigPath = path.join(process.env.ZKSYNC_HOME, `etc/test_config/constant/eip1271.json`); - const testConfig = JSON.parse(fs.readFileSync(testConfigPath, { encoding: 'utf-8' })); - const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); provider.pollingInterval = 10; - const deployWallet = ethers.Wallet.fromMnemonic(process.env.TEST_MNEMONIC, "m/44'/60'/0'/0/0").connect( + const deployWallet = ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic, "m/44'/60'/0'/0/0").connect( provider ); const smartWallet = await deployContract( deployWallet, - readContractCode('AccountMock'), - [testConfig.owner_address], + readContractCode('dev-contracts/AccountMock'), + [EIP1271TestConfig.owner_address], { gasLimit: 5000000 } diff --git a/contracts/scripts/deploy-erc20.ts b/contracts/scripts/deploy-erc20.ts index 22555e3d4b..f1bb3cb561 100644 --- a/contracts/scripts/deploy-erc20.ts +++ b/contracts/scripts/deploy-erc20.ts @@ -3,9 +3,14 @@ import { deployContract } from 'ethereum-waffle'; import { ethers, Wallet } from 'ethers'; import { readContractCode } from '../src.ts/deploy'; import { parseEther } from 'ethers/lib/utils'; +import * as fs from 'fs'; +import * as path from 'path'; + +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = 
JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); -const wallet = Wallet.fromMnemonic(process.env.MNEMONIC, "m/44'/60'/0'/0/1").connect(provider); +const wallet = Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/1").connect(provider); type Token = { address: string | null; @@ -24,7 +29,9 @@ async function deployToken(token: Token): Promise<Token> { await erc20.mint(wallet.address, parseEther('3000000000')); for (let i = 0; i < 10; ++i) { - const testWallet = Wallet.fromMnemonic(process.env.TEST_MNEMONIC, "m/44'/60'/0'/0/" + i).connect(provider); + const testWallet = Wallet.fromMnemonic(ethTestConfig.test_mnemonic as string, "m/44'/60'/0'/0/" + i).connect( + provider + ); await erc20.mint(testWallet.address, parseEther('3000000000')); } token.address = erc20.address; diff --git a/contracts/scripts/deploy-testkit.ts b/contracts/scripts/deploy-testkit.ts index 3a70d5e163..8800bb5594 100644 --- a/contracts/scripts/deploy-testkit.ts +++ b/contracts/scripts/deploy-testkit.ts @@ -2,6 +2,11 @@ import { ethers, Wallet } from 'ethers'; import { Deployer, readContractCode, readTestContracts, readProductionContracts } from '../src.ts/deploy'; import { deployContract } from 'ethereum-waffle'; import { ArgumentParser } from 'argparse'; +import * as fs from 'fs'; +import * as path from 'path'; + +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); (async () => { const parser = new ArgumentParser({ @@ -26,7 +31,7 @@ import { ArgumentParser } from 'argparse'; const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); provider.pollingInterval = 10; - const deployWallet = ethers.Wallet.fromMnemonic(process.env.TEST_MNEMONIC, "m/44'/60'/0'/0/0").connect(provider); + const deployWallet = ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic, "m/44'/60'/0'/0/0").connect(provider); // todo: should be decided when building const contracts = readProductionContracts(); const deployer = new Deployer({ deployWallet, contracts, verbose: true }); @@ -48,7 +53,7 @@ import { ArgumentParser } from 'argparse'; } for (let i = 0; i < 10; ++i) { - const testWallet = Wallet.fromMnemonic(process.env.TEST_MNEMONIC, "m/44'/60'/0'/0/" + i).connect(provider); + const testWallet = Wallet.fromMnemonic(ethTestConfig.test_mnemonic, "m/44'/60'/0'/0/" + i).connect(provider); await (await erc20.mint(testWallet.address, '0x4B3B4CA85A86C47A098A224000000000')).wait(); } })(); diff --git a/contracts/scripts/deploy-testnet-token.ts b/contracts/scripts/deploy-testnet-token.ts index 987f2bf4c9..518c7522e3 100644 --- a/contracts/scripts/deploy-testnet-token.ts +++ b/contracts/scripts/deploy-testnet-token.ts @@ -3,10 +3,14 @@ import { ethers, Wallet } from 'ethers'; import { readContractCode } from '../src.ts/deploy'; import { encodeConstructorArgs, publishSourceCodeToEtherscan } from '../src.ts/publish-utils'; import * as fs from 'fs'; +import * as path from 'path'; import { ArgumentParser } from 'argparse'; const mainnetTokens = require(`${process.env.ZKSYNC_HOME}/etc/tokens/mainnet`); +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + (async () => { const parser = new ArgumentParser({ version: '0.1.0', addHelp: true, @@ -18,10
+22,14 @@ const mainnetTokens = require(`${process.env.ZKSYNC_HOME}/etc/tokens/mainnet`); action: 'storeTrue', help: 'Only publish code for deployed tokens' }); + parser.addArgument('--deployerPrivateKey', { required: false, help: 'Wallet used to deploy contracts' }); const args = parser.parseArgs(process.argv.slice(2)); const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); - const wallet = Wallet.fromMnemonic(process.env.MNEMONIC, "m/44'/60'/0'/0/1").connect(provider); + const wallet = args.deployerPrivateKey + ? new Wallet(args.deployerPrivateKey, provider) + : Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/1").connect(provider); + const contractCode = readContractCode('TestnetERC20Token'); if (process.env.ETH_NETWORK === 'mainnet') { diff --git a/contracts/scripts/deploy.ts b/contracts/scripts/deploy.ts index 45ff4b164b..9a8ba065ae 100644 --- a/contracts/scripts/deploy.ts +++ b/contracts/scripts/deploy.ts @@ -2,8 +2,12 @@ import { ArgumentParser } from 'argparse'; import { ethers, Wallet } from 'ethers'; import { Deployer } from '../src.ts/deploy'; import { formatUnits, parseUnits } from 'ethers/lib/utils'; +import * as fs from 'fs'; +import * as path from 'path'; const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); (async () => { const parser = new ArgumentParser({ @@ -25,7 +29,7 @@ const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); const wallet = args.deployerPrivateKey ? new Wallet(args.deployerPrivateKey, provider) - : Wallet.fromMnemonic(process.env.MNEMONIC, "m/44'/60'/0'/0/1").connect(provider); + : Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/1").connect(provider); const gasPrice = args.gasPrice ? 
parseUnits(args.gasPrice, 'gwei') : await provider.getGasPrice(); console.log(`Using gas price: ${formatUnits(gasPrice, 'gwei')} gwei`); @@ -55,8 +59,8 @@ const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); if (args.contract === 'Governance' || args.contract == null) { await deployer.deployGovernanceTarget({ gasPrice, nonce: args.nonce }); } - // - // if (args.contract === 'Proxies' || args.contract == null) { - // await deployer.deployProxiesAndGatekeeper({ gasPrice, nonce: args.nonce }); - // } + + if (args.contract === 'Proxies' || args.contract == null) { + await deployer.deployProxiesAndGatekeeper({ gasPrice, nonce: args.nonce }); + } })(); diff --git a/contracts/scripts/init-faucet-account.ts b/contracts/scripts/init-faucet-account.ts index f4e773b0fc..a629dea18a 100644 --- a/contracts/scripts/init-faucet-account.ts +++ b/contracts/scripts/init-faucet-account.ts @@ -1,14 +1,33 @@ -import { ethers } from 'ethers'; +import { ArgumentParser } from 'argparse'; +import * as fs from 'fs'; +import * as path from 'path'; import * as zksync from 'zksync'; +import { ethers } from 'ethers'; const DEPOSIT_AMOUNT = ethers.utils.parseEther('10000000000'); +const network = process.env.ETH_NETWORK; const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); -const deployerEthWallet = ethers.Wallet.fromMnemonic(process.env.MNEMONIC, "m/44'/60'/0'/0/1").connect(provider); -const faucetEthWallet = ethers.Wallet.fromMnemonic(process.env.MNEMONIC, "m/44'/60'/0'/0/2").connect(provider); +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); async function main() { - const syncProvider = await zksync.Provider.newHttpProvider(process.env.HTTP_RPC_API_ADDR); + const parser = new ArgumentParser({ + version: '0.1.0', + addHelp: true + }); + parser.addArgument('--deployerPrivateKey', { required: false, help: 'Wallet used to deploy contracts' }); + parser.addArgument('--faucetPrivateKey', { required: false, help: 'Wallet used as faucet' }); + const args = parser.parseArgs(process.argv.slice(2)); + + const deployerEthWallet = args.deployerPrivateKey + ? new ethers.Wallet(args.deployerPrivateKey, provider) + : ethers.Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/1").connect(provider); + const faucetEthWallet = args.faucetPrivateKey + ? 
new ethers.Wallet(args.faucetPrivateKey, provider) + : ethers.Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/2").connect(provider); + + const syncProvider = await zksync.getDefaultProvider(network as zksync.types.Network); const deployerWallet = await zksync.Wallet.fromEthSigner(deployerEthWallet, syncProvider); const faucetWallet = await zksync.Wallet.fromEthSigner(faucetEthWallet, syncProvider); diff --git a/contracts/scripts/publish.ts b/contracts/scripts/publish.ts index f4b0345f31..8983a5efb0 100644 --- a/contracts/scripts/publish.ts +++ b/contracts/scripts/publish.ts @@ -1,25 +1,25 @@ -import {deployedAddressesFromEnv} from "../src.ts/deploy"; +import { deployedAddressesFromEnv } from '../src.ts/deploy'; -const hre = require("hardhat"); +const hre = require('hardhat'); async function main() { if (process.env.ETH_NETWORK == 'localhost') { - console.log("Skip contract publish on localhost"); + console.log('Skip contract publish on localhost'); return; } const addresses = deployedAddressesFromEnv(); for (const address of [addresses.ZkSyncTarget, addresses.VerifierTarget, addresses.GovernanceTarget]) { try { - await hre.run('verify', {address}); + await hre.run('verify', { address }); } catch (e) { - console.error(e) + console.error(e); } } } main() .then(() => process.exit(0)) - .catch(error => { + .catch((error) => { console.error(error); process.exit(1); }); diff --git a/contracts/scripts/revert-reason.ts b/contracts/scripts/revert-reason.ts index 5928c26291..8a37748c44 100644 --- a/contracts/scripts/revert-reason.ts +++ b/contracts/scripts/revert-reason.ts @@ -33,7 +33,8 @@ async function reason() { try { const parsedTransaction = franklinInterface.parseTransaction({ data: tx.data }); if (parsedTransaction) { - console.log('parsed tx: ', parsedTransaction.name, JSON.stringify(parsedTransaction.args, null, 2)); + console.log('parsed tx: ', parsedTransaction.name, parsedTransaction); + console.log('tx args: ', parsedTransaction.name, JSON.stringify(parsedTransaction.args, null, 2)); } else { console.log('tx:', tx); } @@ -43,8 +44,8 @@ async function reason() { const transaction = await provider.getTransaction(hash); const receipt = await provider.getTransactionReceipt(hash); - // console.log('receipt:', receipt); - // console.log('\n \n '); + console.log('receipt:', receipt); + console.log('\n \n '); if (receipt.gasUsed) { const gasLimit = transaction.gasLimit; diff --git a/contracts/scripts/test-upgrade-franklin.ts b/contracts/scripts/test-upgrade-franklin.ts index 0904b4255d..0b0b253f76 100644 --- a/contracts/scripts/test-upgrade-franklin.ts +++ b/contracts/scripts/test-upgrade-franklin.ts @@ -1,12 +1,16 @@ import { ArgumentParser } from 'argparse'; import { deployContract } from 'ethereum-waffle'; import { constants, ethers } from 'ethers'; +import { readTestContracts } from '../src.ts/deploy'; +import * as fs from 'fs'; +import * as path from 'path'; import { readProductionContracts, readTestContracts } from '../src.ts/deploy'; const { expect } = require('chai'); export const FranklinTestUpgradeTargetContractCode = require(`../build/ZkSyncTestUpgradeTarget`); - +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); const testContracts = readProductionContracts(); async function main() { @@ -26,7 +30,7 @@ async function main() { const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); - const wallet = 
ethers.Wallet.fromMnemonic(process.env.TEST_MNEMONIC, "m/44'/60'/0'/0/0").connect(provider); + const wallet = ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic, "m/44'/60'/0'/0/0").connect(provider); const proxyContract = new ethers.Contract(args.contractAddress, testContracts.proxy.abi, wallet); diff --git a/contracts/scripts/upgrade-testnet.ts b/contracts/scripts/upgrade-testnet.ts index a8b62aa8fe..46f35a4aca 100644 --- a/contracts/scripts/upgrade-testnet.ts +++ b/contracts/scripts/upgrade-testnet.ts @@ -2,8 +2,12 @@ import { ArgumentParser } from 'argparse'; import { ethers, Wallet } from 'ethers'; import { Deployer } from '../src.ts/deploy'; import { formatUnits, parseUnits } from 'ethers/lib/utils'; +import * as fs from 'fs'; +import * as path from 'path'; const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); (async () => { const parser = new ArgumentParser({ @@ -36,7 +40,7 @@ const provider = new ethers.providers.JsonRpcProvider(process.env.WEB3_URL); const wallet = args.deployerPrivateKey ? new Wallet(args.deployerPrivateKey, provider) - : Wallet.fromMnemonic(process.env.MNEMONIC, "m/44'/60'/0'/0/1").connect(provider); + : Wallet.fromMnemonic(ethTestConfig.mnemonic, "m/44'/60'/0'/0/1").connect(provider); const gasPrice = args.gasPrice ? parseUnits(args.gasPrice, 'gwei') : await provider.getGasPrice(); console.info(`Using gas price: ${formatUnits(gasPrice, 'gwei')} gwei`); diff --git a/core/bin/data_restore/Cargo.toml b/core/bin/data_restore/Cargo.toml index f8a440d4a6..cc8c9cad0b 100644 --- a/core/bin/data_restore/Cargo.toml +++ b/core/bin/data_restore/Cargo.toml @@ -27,6 +27,7 @@ anyhow = "1.0" structopt = "0.3.20" chrono = { version = "0.4", features = ["serde", "rustc-serialize"] } tokio = { version = "0.2", features = ["full"] } +async-trait = "0.1" zksync_state = { path = "../../lib/state", version = "1.0" } zksync_types = { path = "../../lib/types", version = "1.0" } diff --git a/core/bin/data_restore/src/data_restore_driver.rs b/core/bin/data_restore/src/data_restore_driver.rs index df533145f3..e65b43db44 100644 --- a/core/bin/data_restore/src/data_restore_driver.rs +++ b/core/bin/data_restore/src/data_restore_driver.rs @@ -7,23 +7,24 @@ use web3::{ // Workspace deps use zksync_contracts::{governance_contract, zksync_contract}; use zksync_crypto::Fr; -use zksync_storage::StorageProcessor; + use zksync_types::{AccountMap, AccountUpdate}; // Local deps +use crate::storage_interactor::StorageInteractor; use crate::{ contract_functions::{get_genesis_account, get_total_verified_blocks}, eth_tx_helpers::get_ethereum_transaction, events_state::EventsState, rollup_ops::RollupOpsBlock, - storage_interactor, tree_state::TreeState, }; +use serde::export::PhantomData; /// Storage state update: /// - None - The state is updated completely last time - start from fetching the new events /// - Events - The events fetched and saved successfully - now get operations from them and update tree /// - Operations - There are operations that are not presented in the tree state - update tree state -#[derive(Debug)] +#[derive(Debug, Copy, Clone)] pub enum StorageUpdateState { None, Events, @@ -44,7 +45,7 @@ pub enum StorageUpdateState { /// - Operations /// - Tree /// - Storage -pub struct DataRestoreDriver<T: Transport> { +pub struct DataRestoreDriver<T: Transport, I: StorageInteractor> { /// Web3 provider endpoint pub
web3: Web3<T>, /// Provides Ethereum Governance contract interface @@ -68,14 +69,18 @@ pub struct DataRestoreDriver { /// Expected root hash to be observed after restoring process. Only /// available in finite mode, and intended for tests. pub final_hash: Option<Fr>, + phantom_data: PhantomData<I>, } -impl<T: Transport> DataRestoreDriver<T> { +impl<T, I> DataRestoreDriver<T, I> +where + T: Transport, + I: StorageInteractor, +{ /// Returns new data restore driver with empty events and tree states. /// /// # Arguments /// - /// * `connection_pool` - Database connection pool /// * `web3_transport` - Web3 provider transport /// * `governance_contract_eth_addr` - Governance contract address /// * `zksync_contract_eth_addr` - Rollup contract address @@ -126,6 +131,7 @@ impl DataRestoreDriver { available_block_chunk_sizes, finite_mode, final_hash, + phantom_data: Default::default(), } } @@ -137,11 +143,7 @@ impl DataRestoreDriver { /// /// * `governance_contract_genesis_tx_hash` - Governance contract creation tx hash /// - pub async fn set_genesis_state( - &mut self, - storage: &mut StorageProcessor<'_>, - genesis_tx_hash: H256, - ) { + pub async fn set_genesis_state(&mut self, interactor: &mut I, genesis_tx_hash: H256) { let genesis_transaction = get_ethereum_transaction(&self.web3, &genesis_tx_hash) .await .expect("Cant get zkSync genesis transaction"); @@ -153,7 +155,9 @@ impl DataRestoreDriver { .expect("Cant set genesis block number for events state"); log::info!("genesis_eth_block_number: {:?}", &genesis_eth_block_number); - storage_interactor::save_events_state(storage, &[], &[], genesis_eth_block_number).await; + interactor + .save_events_state(&[], &[], genesis_eth_block_number) + .await; let genesis_fee_account = get_genesis_account(&genesis_transaction).expect("Cant get genesis account address"); @@ -186,7 +190,7 @@ impl DataRestoreDriver { log::info!("Genesis tree root hash: {:?}", tree_state.root_hash()); log::debug!("Genesis accounts: {:?}", tree_state.get_accounts()); - storage_interactor::save_genesis_tree_state(storage, account_update).await; + interactor.save_genesis_tree_state(account_update).await; log::info!("Saved genesis tree state\n"); @@ -194,30 +198,30 @@ impl DataRestoreDriver { } /// Loads state from storage - pub async fn load_state_from_storage(&mut self, storage: &mut StorageProcessor<'_>) -> bool { + pub async fn load_state_from_storage(&mut self, interactor: &mut I) -> bool { log::info!("Loading state from storage"); - let state = storage_interactor::get_storage_state(storage).await; - self.events_state = storage_interactor::get_block_events_state_from_storage(storage).await; - let tree_state = storage_interactor::get_tree_state(storage).await; + let state = interactor.get_storage_state().await; + self.events_state = interactor.get_block_events_state_from_storage().await; + let tree_state = interactor.get_tree_state().await; self.tree_state = TreeState::load( - tree_state.0, // current block - tree_state.1, // account map - tree_state.2, // unprocessed priority op - tree_state.3, // fee account + tree_state.last_block_number, // current block + tree_state.account_map, // account map + tree_state.unprocessed_prior_ops, // unprocessed priority op + tree_state.fee_acc_id, // fee account self.available_block_chunk_sizes.clone(), ); match state { StorageUpdateState::Events => { // Update operations - let new_ops_blocks = self.update_operations_state(storage).await; + let new_ops_blocks = self.update_operations_state(interactor).await; // Update tree - self.update_tree_state(storage, new_ops_blocks).await;
+ self.update_tree_state(interactor, new_ops_blocks).await; } StorageUpdateState::Operations => { // Update operations - let new_ops_blocks = storage_interactor::get_ops_blocks_from_storage(storage).await; + let new_ops_blocks = interactor.get_ops_blocks_from_storage().await; // Update tree - self.update_tree_state(storage, new_ops_blocks).await; + self.update_tree_state(interactor, new_ops_blocks).await; } StorageUpdateState::None => {} } @@ -234,20 +238,20 @@ impl DataRestoreDriver { } /// Activates states updates - pub async fn run_state_update(&mut self, storage: &mut StorageProcessor<'_>) { - let mut last_wached_block: u64 = self.events_state.last_watched_eth_block_number; + pub async fn run_state_update(&mut self, interactor: &mut I) { + let mut last_watched_block: u64 = self.events_state.last_watched_eth_block_number; let mut final_hash_was_found = false; loop { - log::debug!("Last watched ethereum block: {:?}", last_wached_block); + log::debug!("Last watched ethereum block: {:?}", last_watched_block); // Update events - if self.update_events_state(storage).await { + if self.update_events_state(interactor).await { // Update operations - let new_ops_blocks = self.update_operations_state(storage).await; + let new_ops_blocks = self.update_operations_state(interactor).await; if !new_ops_blocks.is_empty() { // Update tree - self.update_tree_state(storage, new_ops_blocks).await; + self.update_tree_state(interactor, new_ops_blocks).await; let total_verified_blocks = get_total_verified_blocks(&self.zksync_contract).await; @@ -255,7 +259,7 @@ impl DataRestoreDriver { // We must update the Ethereum stats table to match the actual stored state // to keep the `state_keeper` consistent with the `eth_sender`. - storage_interactor::update_eth_stats(storage).await; + interactor.update_eth_state().await; log::info!( "State updated\nProcessed {:?} blocks of total {:?} verified on contract\nRoot hash: {:?}\n", @@ -292,17 +296,17 @@ impl DataRestoreDriver { } } - if last_wached_block == self.events_state.last_watched_eth_block_number { + if last_watched_block == self.events_state.last_watched_eth_block_number { std::thread::sleep(std::time::Duration::from_secs(5)); } else { - last_wached_block = self.events_state.last_watched_eth_block_number; + last_watched_block = self.events_state.last_watched_eth_block_number; } } } /// Updates events state, saves new blocks, tokens events and the last watched eth block number in storage /// Returns bool flag, true if there are new block events - async fn update_events_state(&mut self, storage: &mut StorageProcessor<'_>) -> bool { + async fn update_events_state(&mut self, interactor: &mut I) -> bool { let (block_events, token_events, last_watched_eth_block_number) = self .events_state .update_events_state( @@ -315,13 +319,13 @@ impl DataRestoreDriver { .await .expect("Updating events state: cant update events state"); - storage_interactor::save_events_state( - storage, - &block_events, - token_events.as_slice(), - last_watched_eth_block_number, - ) - .await; + interactor + .save_events_state( + &block_events, + token_events.as_slice(), + last_watched_eth_block_number, + ) + .await; log::debug!("Updated events storage"); @@ -334,11 +338,7 @@ impl DataRestoreDriver { /// /// * `new_ops_blocks` - the new Rollup operations blocks /// - async fn update_tree_state( - &mut self, - storage: &mut StorageProcessor<'_>, - new_ops_blocks: Vec<RollupOpsBlock>, - ) { + async fn update_tree_state(&mut self, interactor: &mut I, new_ops_blocks: Vec<RollupOpsBlock>) { let mut blocks = vec![]; let mut updates =
vec![]; let mut count = 0; @@ -352,7 +352,8 @@ impl DataRestoreDriver { count += 1; } for i in 0..count { - storage_interactor::update_tree_state(storage, blocks[i].clone(), updates[i].clone()) + interactor + .update_tree_state(blocks[i].clone(), updates[i].clone()) .await; } @@ -361,13 +362,10 @@ impl DataRestoreDriver { /// Gets new operations blocks from events, updates rollup operations stored state. /// Returns new rollup operations blocks - async fn update_operations_state( - &mut self, - storage: &mut StorageProcessor<'_>, - ) -> Vec<RollupOpsBlock> { + async fn update_operations_state(&mut self, interactor: &mut I) -> Vec<RollupOpsBlock> { let new_blocks = self.get_new_operation_blocks_from_events().await; - storage_interactor::save_rollup_ops(storage, &new_blocks).await; + interactor.save_rollup_ops(&new_blocks).await; log::debug!("Updated operations storage"); diff --git a/core/bin/data_restore/src/database_storage_interactor.rs b/core/bin/data_restore/src/database_storage_interactor.rs new file mode 100644 index 0000000000..8087cf6066 --- /dev/null +++ b/core/bin/data_restore/src/database_storage_interactor.rs @@ -0,0 +1,287 @@ +// Built-in deps +use std::str::FromStr; +// Workspace deps +use zksync_storage::{data_restore::records::NewBlockEvent, StorageProcessor}; +use zksync_types::{ + Action, Operation, Token, TokenGenesisListItem, TokenId, + {block::Block, AccountUpdate, AccountUpdates, ZkSyncOp}, +}; + +// Local deps +use crate::storage_interactor::StoredTreeState; +use crate::{ + data_restore_driver::StorageUpdateState, + events::BlockEvent, + events_state::{EventsState, NewTokenEvent}, + rollup_ops::RollupOpsBlock, + storage_interactor::{ + block_event_into_stored_block_event, stored_block_event_into_block_event, + stored_ops_block_into_ops_block, StorageInteractor, + }, +}; + +impl From<&NewTokenEvent> for zksync_storage::data_restore::records::NewTokenEvent { + fn from(event: &NewTokenEvent) -> Self { + Self { + address: event.address, + id: event.id, + } + } +} + +pub struct DatabaseStorageInteractor<'a> { + storage: StorageProcessor<'a>, } + +impl<'a> DatabaseStorageInteractor<'a> { + pub fn new(storage: StorageProcessor<'a>) -> Self { + Self { storage } + } + + pub fn storage(&mut self) -> &mut StorageProcessor<'a> { + &mut self.storage + } + + /// Returns last watched ethereum block number from storage + pub async fn get_last_watched_block_number_from_storage(&mut self) -> u64 { + let last_watched_block_number_string = self + .storage + .data_restore_schema() + .load_last_watched_block_number() + .await + .expect("Cant load last watched block number") + .block_number; + + u64::from_str(last_watched_block_number_string.as_str()) + .expect("Сant make u256 block_number in get_last_watched_block_number_from_storage") + } +} + +#[async_trait::async_trait] +impl StorageInteractor for DatabaseStorageInteractor<'_> { + async fn save_rollup_ops(&mut self, blocks: &[RollupOpsBlock]) { + let mut ops: Vec<(u32, &ZkSyncOp, u32)> = vec![]; + + for block in blocks { + for op in &block.ops { + ops.push((block.block_num, op, block.fee_account)); + } + } + + self.storage + .data_restore_schema() + .save_rollup_ops(ops.as_slice()) + .await + .expect("Cant update rollup operations"); + } + + async fn update_tree_state(&mut self, block: Block, accounts_updated: AccountUpdates) { + let mut transaction = self + .storage + .start_transaction() + .await + .expect("Failed initializing a DB transaction"); + + let commit_op = Operation { + action: Action::Commit, + block: block.clone(), + id: None, + }; + + let verify_op =
Operation { + action: Action::Verify { + proof: Box::new(Default::default()), + }, + block: block.clone(), + id: None, + }; + + transaction + .chain() + .state_schema() + .commit_state_update(block.block_number, &accounts_updated, 0) + .await + .expect("Cant execute verify operation"); + + transaction + .data_restore_schema() + .save_block_operations(commit_op, verify_op) + .await + .expect("Cant execute verify operation"); + + transaction + .commit() + .await + .expect("Unable to commit DB transaction"); + } + + async fn store_token(&mut self, token: TokenGenesisListItem, token_id: TokenId) { + self.storage + .tokens_schema() + .store_token(Token { + id: token_id, + symbol: token.symbol, + address: token.address[2..] + .parse() + .expect("failed to parse token address"), + decimals: token.decimals, + }) + .await + .expect("failed to store token"); + } + + async fn save_events_state( + &mut self, + block_events: &[BlockEvent], + tokens: &[NewTokenEvent], + last_watched_eth_block_number: u64, + ) { + let mut new_events: Vec<NewBlockEvent> = vec![]; + for event in block_events { + new_events.push(block_event_into_stored_block_event(event)); + } + + let block_number = last_watched_eth_block_number.to_string(); + + let tokens: Vec<_> = tokens.iter().map(From::from).collect(); + self.storage + .data_restore_schema() + .save_events_state(new_events.as_slice(), &tokens, &block_number) + .await + .expect("Cant update events state"); + } + + async fn save_genesis_tree_state(&mut self, genesis_acc_update: AccountUpdate) { + let (_last_committed, mut _accounts) = self + .storage + .chain() + .state_schema() + .load_committed_state(None) + .await + .expect("Cant load comitted state"); + assert!( + _last_committed == 0 && _accounts.is_empty(), + "db should be empty" + ); + self.storage + .data_restore_schema() + .save_genesis_state(genesis_acc_update) + .await + .expect("Cant update genesis state"); + } + + async fn get_block_events_state_from_storage(&mut self) -> EventsState { + let last_watched_eth_block_number = self.get_last_watched_block_number_from_storage().await; + + let committed = self + .storage + .data_restore_schema() + .load_committed_events_state() + .await + .expect("Cant load committed state"); + + let mut committed_events: Vec<BlockEvent> = vec![]; + for event in committed { + let block_event = stored_block_event_into_block_event(event.clone()); + committed_events.push(block_event); + } + + let verified = self + .storage + .data_restore_schema() + .load_verified_events_state() + .await + .expect("Cant load verified state"); + let mut verified_events: Vec<BlockEvent> = vec![]; + for event in verified { + let block_event = stored_block_event_into_block_event(event.clone()); + verified_events.push(block_event); + } + + EventsState { + committed_events, + verified_events, + last_watched_eth_block_number, + } + } + + async fn get_tree_state(&mut self) -> StoredTreeState { + let (last_block, account_map) = self + .storage + .chain() + .state_schema() + .load_verified_state() + .await + .expect("There are no last verified state in storage"); + + let block = self + .storage + .chain() + .block_schema() + .get_block(last_block) + .await + .expect("Cant get the last block from storage") + .expect("There are no last block in storage - restart driver"); + let (unprocessed_prior_ops, fee_acc_id) = + (block.processed_priority_ops.1, block.fee_account); + + StoredTreeState { + last_block_number: last_block, + account_map, + unprocessed_prior_ops, + fee_acc_id, + } + } + + async fn get_ops_blocks_from_storage(&mut self) -> Vec<RollupOpsBlock> {
self.storage + .data_restore_schema() + .load_rollup_ops_blocks() + .await + .expect("Cant load operation blocks") + .iter() + .map(|block| stored_ops_block_into_ops_block(&block)) + .collect() + } + + async fn update_eth_state(&mut self) { + let last_committed_block = self + .storage + .chain() + .block_schema() + .get_last_committed_block() + .await + .expect("Can't get the last committed block"); + + let last_verified_block = self + .storage + .chain() + .block_schema() + .get_last_verified_block() + .await + .expect("Can't get the last verified block"); + + self.storage + .data_restore_schema() + .initialize_eth_stats(last_committed_block, last_verified_block) + .await + .expect("Can't update the eth_stats table") + } + + async fn get_storage_state(&mut self) -> StorageUpdateState { + let storage_state_string = self + .storage + .data_restore_schema() + .load_storage_state() + .await + .expect("Cant load storage state") + .storage_state; + + match storage_state_string.as_ref() { + "Events" => StorageUpdateState::Events, + "Operations" => StorageUpdateState::Operations, + "None" => StorageUpdateState::None, + _ => panic!("Unknown storage state"), + } + } +} diff --git a/core/bin/data_restore/src/events_state.rs b/core/bin/data_restore/src/events_state.rs index 15c1955018..502f931569 100644 --- a/core/bin/data_restore/src/events_state.rs +++ b/core/bin/data_restore/src/events_state.rs @@ -269,6 +269,7 @@ impl EventsState { .logs(filter) .await .map_err(|e| format_err!("No new logs: {}", e))?; + Ok(result) } diff --git a/core/bin/data_restore/src/inmemory_storage_interactor.rs b/core/bin/data_restore/src/inmemory_storage_interactor.rs new file mode 100644 index 0000000000..0bbe049344 --- /dev/null +++ b/core/bin/data_restore/src/inmemory_storage_interactor.rs @@ -0,0 +1,243 @@ +use std::cmp::max; +use std::collections::HashMap; + +use web3::types::Address; + +use zksync_types::block::Block; +use zksync_types::{ + Account, AccountId, AccountMap, AccountUpdate, AccountUpdates, Action, Operation, Token, + TokenGenesisListItem, +}; + +use crate::{ + data_restore_driver::StorageUpdateState, + events::{BlockEvent, EventType}, + events_state::{EventsState, NewTokenEvent}, + rollup_ops::RollupOpsBlock, + storage_interactor::StorageInteractor, + storage_interactor::StoredTreeState, +}; + +pub struct InMemoryStorageInteractor { + rollups: Vec<RollupOpsBlock>, + storage_state: StorageUpdateState, + tokens: HashMap<u16, Token>, + events_state: Vec<BlockEvent>, + last_watched_block: u64, + last_committed_block: u32, + last_verified_block: u32, + accounts: AccountMap, +} + +impl Default for InMemoryStorageInteractor { + fn default() -> Self { + Self::new() + } +} + +#[async_trait::async_trait] +impl StorageInteractor for InMemoryStorageInteractor { + async fn save_rollup_ops(&mut self, blocks: &[RollupOpsBlock]) { + self.rollups = blocks.to_vec(); + self.storage_state = StorageUpdateState::Operations + } + + async fn update_tree_state(&mut self, block: Block, accounts_updated: AccountUpdates) { + let commit_op = Operation { + action: Action::Commit, + block: block.clone(), + id: None, + }; + + let verify_op = Operation { + action: Action::Verify { + proof: Box::new(Default::default()), + }, + block: block.clone(), + id: None, + }; + + self.last_committed_block = commit_op.block.block_number; + self.last_verified_block = verify_op.block.block_number; + + self.commit_state_update(block.block_number, accounts_updated); + self.storage_state = StorageUpdateState::None + // TODO save operations + } + + async fn store_token(&mut self, token:
TokenGenesisListItem, token_id: u16) { + let token = Token { + id: token_id, + symbol: token.symbol, + address: token.address[2..] + .parse() + .expect("failed to parse token address"), + decimals: token.decimals, + }; + self.tokens.insert(token_id, token); + } + + async fn save_events_state( + &mut self, + block_events: &[BlockEvent], + tokens: &[NewTokenEvent], + last_watched_eth_block_number: u64, + ) { + self.events_state = block_events.to_vec(); + + for &NewTokenEvent { id, address } in tokens { + self.tokens.insert( + id, + Token { + id, + address, + symbol: format!("ERC20-{}", id), + decimals: 18, + }, + ); + } + + self.last_watched_block = last_watched_eth_block_number; + self.storage_state = StorageUpdateState::Events; + } + + async fn save_genesis_tree_state(&mut self, genesis_acc_update: AccountUpdate) { + self.commit_state_update(0, vec![(0, genesis_acc_update)]); + } + + async fn get_block_events_state_from_storage(&mut self) -> EventsState { + let committed_events = self.load_committed_events_state(); + + let verified_events = self.load_verified_events_state(); + + EventsState { + committed_events, + verified_events, + last_watched_eth_block_number: self.last_watched_block, + } + } + + async fn get_tree_state(&mut self) -> StoredTreeState { + // TODO find a way to get unprocessed_prior_ops and fee_acc_id + StoredTreeState { + last_block_number: self.last_verified_block, + account_map: self.accounts.clone(), + unprocessed_prior_ops: 0, + fee_acc_id: 0, + } + } + + async fn get_ops_blocks_from_storage(&mut self) -> Vec<RollupOpsBlock> { + self.rollups.clone() + } + + async fn update_eth_state(&mut self) { + // Do nothing, this is only needed for the database + } + + async fn get_storage_state(&mut self) -> StorageUpdateState { + self.storage_state + } +} + +impl InMemoryStorageInteractor { + pub fn new() -> Self { + Self { + rollups: vec![], + storage_state: StorageUpdateState::None, + tokens: Default::default(), + events_state: vec![], + last_watched_block: 0, + last_committed_block: 0, + last_verified_block: 0, + accounts: Default::default(), + } + } + + pub fn insert_new_account(&mut self, id: AccountId, address: &Address) { + self.accounts + .insert(id, Account::default_with_address(address)); + } + + pub fn get_account_by_address(&self, address: &Address) -> Option<(AccountId, Account)> { + let accounts: Vec<(AccountId, Account)> = self + .accounts + .iter() + .filter(|(_, acc)| acc.address == *address) + .map(|(acc_id, acc)| (*acc_id, acc.clone())) + .collect(); + accounts.first().cloned() + } + + fn load_verified_events_state(&self) -> Vec<BlockEvent> { + self.events_state + .clone() + .into_iter() + .filter(|event| event.block_type == EventType::Verified) + .collect() + } + + pub(crate) fn load_committed_events_state(&self) -> Vec<BlockEvent> { + // TODO avoid clone + self.events_state + .clone() + .into_iter() + .filter(|event| event.block_type == EventType::Committed) + .collect() + } + + pub fn get_account(&self, id: &AccountId) -> Option<&Account> { + self.accounts.get(id) + } + + fn commit_state_update( + &mut self, + first_update_order_id: u32, + accounts_updated: AccountUpdates, + ) { + let update_order_ids = + first_update_order_id..first_update_order_id + accounts_updated.len() as u32; + + for (_, (id, upd)) in update_order_ids.zip(accounts_updated.iter()) { + match upd { + AccountUpdate::Create { ref address, nonce } => { + let (mut acc, _) = Account::create_account(*id, *address); + acc.nonce = *nonce; + self.accounts.insert(*id, acc); + } + AccountUpdate::Delete { + ref address, + nonce: _, + } => {
let (acc_id, _) = self.get_account_by_address(address).unwrap(); + self.accounts.remove(&acc_id); + } + AccountUpdate::UpdateBalance { + balance_update: (token, _, new_balance), + old_nonce: _, + new_nonce, + } => { + let account = self + .accounts + .get_mut(id) + .expect("In tests this account should be stored"); + account.set_balance(*token, new_balance.clone()); + account.nonce = max(account.nonce, *new_nonce); + } + AccountUpdate::ChangePubKeyHash { + old_pub_key_hash: _, + ref new_pub_key_hash, + old_nonce: _, + new_nonce, + } => { + let account = self + .accounts + .get_mut(id) + .expect("In tests this account should be stored"); + account.nonce = max(account.nonce, *new_nonce); + account.pub_key_hash = new_pub_key_hash.clone(); + } + } + } + } +} diff --git a/core/bin/data_restore/src/lib.rs b/core/bin/data_restore/src/lib.rs new file mode 100644 index 0000000000..90124280d7 --- /dev/null +++ b/core/bin/data_restore/src/lib.rs @@ -0,0 +1,33 @@ +pub mod contract_functions; +pub mod data_restore_driver; +pub mod database_storage_interactor; +pub mod eth_tx_helpers; +pub mod events; +pub mod events_state; +pub mod inmemory_storage_interactor; +pub mod rollup_ops; +pub mod storage_interactor; +pub mod tree_state; + +#[cfg(test)] +mod tests; + +use crate::storage_interactor::StorageInteractor; +use zksync_types::tokens::get_genesis_token_list; + +// How many blocks we will process at once. +pub const ETH_BLOCKS_STEP: u64 = 10_000; +pub const END_ETH_BLOCKS_OFFSET: u64 = 40; + +pub async fn add_tokens_to_storage<I: StorageInteractor>(interactor: &mut I, eth_network: &str) { + let genesis_tokens = + get_genesis_token_list(&eth_network).expect("Initial token list not found"); + for (id, token) in (1..).zip(genesis_tokens) { + let add_token_log = format!( + "Adding token: {}, id:{}, address: {}, decimals: {}", + &token.symbol, id, &token.address, &token.decimals + ); + interactor.store_token(token, id).await; + log::info!("{}", add_token_log); + } +} diff --git a/core/bin/data_restore/src/main.rs b/core/bin/data_restore/src/main.rs index dae9263d5a..4b6bf2c0bb 100644 --- a/core/bin/data_restore/src/main.rs +++ b/core/bin/data_restore/src/main.rs @@ -1,56 +1,15 @@ -pub mod contract_functions; -pub mod data_restore_driver; -pub mod eth_tx_helpers; -pub mod events; -pub mod events_state; -pub mod rollup_ops; -pub mod storage_interactor; - -#[cfg(test)] -mod tests; -pub mod tree_state; - -use crate::data_restore_driver::DataRestoreDriver; use serde::Deserialize; use structopt::StructOpt; use web3::transports::Http; use zksync_config::ConfigurationOptions; use zksync_crypto::convert::FeConvert; -use zksync_storage::{ConnectionPool, StorageProcessor}; -use zksync_types::{ - tokens::{get_genesis_token_list, Token}, - Address, TokenId, H256, -}; +use zksync_storage::ConnectionPool; +use zksync_types::{Address, H256}; -// How many blocks we will process at once. -const ETH_BLOCKS_STEP: u64 = 10_000; -const END_ETH_BLOCKS_OFFSET: u64 = 40; - -async fn add_tokens_to_db(storage: &mut StorageProcessor<'_>, eth_network: &str) { - let genesis_tokens = - get_genesis_token_list(&eth_network).expect("Initial token list not found"); - for (id, token) in (1..).zip(genesis_tokens) { - log::info!( - "Adding token: {}, id:{}, address: {}, decimals: {}", - token.symbol, - id, - token.address, - token.decimals - ); - storage - .tokens_schema() - .store_token(Token { - id: id as TokenId, - symbol: token.symbol, - address: token.address[2..]
- .parse() - .expect("failed to parse token address"), - decimals: token.decimals, - }) - .await - .expect("failed to store token"); - } -} +use zksync_data_restore::{ + add_tokens_to_storage, data_restore_driver::DataRestoreDriver, + database_storage_interactor::DatabaseStorageInteractor, END_ETH_BLOCKS_OFFSET, ETH_BLOCKS_STEP, +}; #[derive(StructOpt)] #[structopt( @@ -138,7 +97,7 @@ async fn main() { } else { None }; - let mut storage = connection_pool.access_storage().await.unwrap(); + let storage = connection_pool.access_storage().await.unwrap(); let mut driver = DataRestoreDriver::new( transport, @@ -151,20 +110,21 @@ async fn main() { final_hash, ); + let mut interactor = DatabaseStorageInteractor::new(storage); // If the genesis argument is present, the contract creation transactions are fetched to get the first eth block and the genesis account address if opt.genesis { // We have to load pre-defined tokens into the database before restoring state, // since these tokens do not have a corresponding Ethereum events. - add_tokens_to_db(&mut storage, &config.eth_network).await; + add_tokens_to_storage(&mut interactor, &config.eth_network).await; driver - .set_genesis_state(&mut storage, config.genesis_tx_hash) + .set_genesis_state(&mut interactor, config.genesis_tx_hash) .await; } - if opt.continue_mode && driver.load_state_from_storage(&mut storage).await { + if opt.continue_mode && driver.load_state_from_storage(&mut interactor).await { std::process::exit(0); } - driver.run_state_update(&mut storage).await; + driver.run_state_update(&mut interactor).await; } diff --git a/core/bin/data_restore/src/storage_interactor.rs b/core/bin/data_restore/src/storage_interactor.rs index 7f3bb7afb4..b63289f78e 100644 --- a/core/bin/data_restore/src/storage_interactor.rs +++ b/core/bin/data_restore/src/storage_interactor.rs @@ -1,17 +1,14 @@ -// Built-in deps -use std::{convert::TryFrom, str::FromStr}; -// External deps +use std::convert::TryFrom; + use web3::types::H256; -// Workspace deps -use zksync_storage::{ - data_restore::records::{NewBlockEvent, StoredBlockEvent, StoredRollupOpsBlock}, - StorageProcessor, + +use zksync_storage::data_restore::records::{ + NewBlockEvent, StoredBlockEvent, StoredRollupOpsBlock, }; use zksync_types::{ - Action, Operation, - {block::Block, AccountMap, AccountUpdate, AccountUpdates, ZkSyncOp}, + block::Block, AccountMap, AccountUpdate, AccountUpdates, TokenGenesisListItem, TokenId, }; -// Local deps + use crate::{ data_restore_driver::StorageUpdateState, events::{BlockEvent, EventType}, @@ -19,129 +16,104 @@ use crate::{ rollup_ops::RollupOpsBlock, }; -impl From<&NewTokenEvent> for zksync_storage::data_restore::records::NewTokenEvent { - fn from(event: &NewTokenEvent) -> Self { - Self { - address: event.address, - id: event.id, - } - } +pub struct StoredTreeState { + pub last_block_number: u32, + pub account_map: AccountMap, + pub unprocessed_prior_ops: u64, + pub fee_acc_id: u32, } -/// Saves genesis account state in storage -/// -/// # Arguments -/// -/// * `connection_pool` - Database connection pool -/// * `genesis_acc_update` - Genesis account update -/// -pub async fn save_genesis_tree_state( - storage: &mut StorageProcessor<'_>, - genesis_acc_update: AccountUpdate, -) { - let (_last_committed, mut _accounts) = storage - .chain() - .state_schema() - .load_committed_state(None) - .await - .expect("Cant load comitted state"); - assert!( - _last_committed == 0 && _accounts.is_empty(), - "db should be empty" +#[async_trait::async_trait] +pub trait StorageInteractor {
+ /// Saves Rollup operations blocks in storage + /// + /// # Arguments + /// + /// * `blocks` - Rollup operations blocks + /// + async fn save_rollup_ops(&mut self, blocks: &[RollupOpsBlock]); + + /// Updates stored tree state: saves block transactions in storage, stores blocks and account updates + /// + /// # Arguments + /// + /// * `block` - Rollup block + /// * `accounts_updated` - accounts updates + /// + async fn update_tree_state(&mut self, block: Block, accounts_updated: AccountUpdates); + + /// Stores token in the storage + /// # Arguments + /// + /// * `token` - Token that was added when deploying the contract + /// * `token_id` - Id for the token in our system + /// + async fn store_token(&mut self, token: TokenGenesisListItem, token_id: TokenId); + + /// Saves Rollup contract events in storage (includes block events, new tokens and last watched eth block number) + /// + /// # Arguments + /// + /// * `block_events` - Rollup contract block events descriptions + /// * `tokens` - Tokens that had been added to system + /// * `last_watched_eth_block_number` - Last watched ethereum block + /// + async fn save_events_state( + &mut self, + block_events: &[BlockEvent], + tokens: &[NewTokenEvent], + last_watched_eth_block_number: u64, ); - storage - .data_restore_schema() - .save_genesis_state(genesis_acc_update) - .await - .expect("Cant update genesis state"); -} -/// Updates stored tree state: saves block transactions in storage, stores blocks and account updates -/// -/// # Arguments -/// -/// * `connection_pool` - Database Connection Pool -/// * `block` - Rollup block -/// * `accounts_updated` - accounts updates -/// -pub async fn update_tree_state( - storage: &mut StorageProcessor<'_>, - block: Block, - accounts_updated: AccountUpdates, -) { - let mut transaction = storage - .start_transaction() - .await - .expect("Failed initializing a DB transaction"); + /// Saves genesis account state in storage + /// + /// # Arguments + /// + /// * `genesis_acc_update` - Genesis account update + /// + async fn save_genesis_tree_state(&mut self, genesis_acc_update: AccountUpdate); - let commit_op = Operation { - action: Action::Commit, - block: block.clone(), - id: None, - }; + /// Returns Rollup contract events state from storage + async fn get_block_events_state_from_storage(&mut self) -> EventsState; - let verify_op = Operation { - action: Action::Verify { - proof: Box::new(Default::default()), - }, - block: block.clone(), - id: None, - }; + /// Returns the current Rollup block, tree accounts map, unprocessed priority ops and the last fee acc from storage + async fn get_tree_state(&mut self) -> StoredTreeState; - transaction - .chain() - .state_schema() - .commit_state_update(block.block_number, &accounts_updated, 0) - .await - .expect("Cant execute verify operation"); + /// Returns Rollup operations blocks from storage + async fn get_ops_blocks_from_storage(&mut self) -> Vec<RollupOpsBlock>; - transaction - .data_restore_schema() - .save_block_operations(commit_op, verify_op) - .await - .expect("Cant execute verify operation"); + /// Updates the `eth_stats` table with the currently last available committed/verified blocks + /// data for `eth_sender` module to operate correctly.
+ async fn update_eth_state(&mut self); - transaction - .commit() - .await - .expect("Unable to commit DB transaction"); + /// Returns last recovery state update step from storage + async fn get_storage_state(&mut self) -> StorageUpdateState; } -/// Saves Rollup contract events in storage (includes block events, new tokens and last watched eth block number) +/// Returns Rollup contract event from its stored representation /// /// # Arguments /// -/// * `connection_pool` - Database Connection Pool -/// * `eveblock_eventsnts` - Rollup contract block events descriptions -/// * `tokens` - Tokens that had been added to system -/// * `last_watched_eth_block_number` - Last watched ethereum block +/// * `block` - Stored representation of ZkSync Contract event /// -pub async fn save_events_state( - storage: &mut StorageProcessor<'_>, - block_events: &[BlockEvent], - tokens: &[NewTokenEvent], - last_watched_eth_block_number: u64, -) { - let mut new_events: Vec<NewBlockEvent> = vec![]; - for event in block_events { - new_events.push(block_event_into_stored_block_event(event)); +pub fn stored_block_event_into_block_event(block: StoredBlockEvent) -> BlockEvent { + BlockEvent { + block_num: u32::try_from(block.block_num) + .expect("Wrong block number - cant convert into u32"), + transaction_hash: H256::from_slice(block.transaction_hash.as_slice()), + block_type: match &block.block_type { + c if c == "Committed" => EventType::Committed, + v if v == "Verified" => EventType::Verified, + _ => panic!("Wrong block type"), + }, } - - let block_number = last_watched_eth_block_number.to_string(); - - let tokens: Vec<_> = tokens.iter().map(From::from).collect(); - storage - .data_restore_schema() - .save_events_state(new_events.as_slice(), &tokens, &block_number) - .await - .expect("Cant update events state"); } /// Get new stored representation of the Rollup contract event from itself /// /// # Arguments /// -/// * `evnet` - Rollup contract event description +/// * `event` - Rollup contract event description /// pub fn block_event_into_stored_block_event(event: &BlockEvent) -> NewBlockEvent { NewBlockEvent { @@ -154,48 +126,6 @@ pub fn block_event_into_stored_block_event(event: &BlockEvent) -> NewBlockEvent } } -/// Saves Rollup operations blocks in storage -/// -/// # Arguments -/// -/// * `connection_pool` - Database Connection Pool -/// * `blocks` - Rollup operations blocks -/// -pub async fn save_rollup_ops(storage: &mut StorageProcessor<'_>, blocks: &[RollupOpsBlock]) { - let mut ops: Vec<(u32, &ZkSyncOp, u32)> = vec![]; - - for block in blocks { - for op in &block.ops { - ops.push((block.block_num, op, block.fee_account)); - } - } - - storage - .data_restore_schema() - .save_rollup_ops(ops.as_slice()) - .await - .expect("Cant update rollup operations"); -} - -/// Returns Rollup operations blocks from storage -/// -/// # Arguments -/// -/// * `connection_pool` - Database Connection Pool -/// -pub async fn get_ops_blocks_from_storage( - storage: &mut StorageProcessor<'_>, -) -> Vec<RollupOpsBlock> { - storage - .data_restore_schema() - .load_rollup_ops_blocks() - .await - .expect("Cant load operation blocks") - .iter() - .map(|block| stored_ops_block_into_ops_block(&block)) - .collect() -} - /// Returns Rollup operations block from its stored representation /// /// # Arguments @@ -209,154 +139,3 @@ pub fn stored_ops_block_into_ops_block(op_block: &StoredRollupOpsBlock) -> Rollu fee_account: op_block.fee_account, } } -/// Returns last recovery state update step from storage -/// -/// # Arguments -/// -/// * `connection_pool` - Database
Connection Pool -/// -pub async fn get_storage_state(storage: &mut StorageProcessor<'_>) -> StorageUpdateState { - let storage_state_string = storage - .data_restore_schema() - .load_storage_state() - .await - .expect("Cant load storage state") - .storage_state; - - match storage_state_string.as_ref() { - "Events" => StorageUpdateState::Events, - "Operations" => StorageUpdateState::Operations, - "None" => StorageUpdateState::None, - _ => panic!("Unknown storage state"), - } -} - -/// Returns last watched ethereum block number from storage -/// -/// # Arguments -/// -/// * `connection_pool` - Database Connection Pool -/// -pub async fn get_last_watched_block_number_from_storage(storage: &mut StorageProcessor<'_>) -> u64 { - let last_watched_block_number_string = storage - .data_restore_schema() - .load_last_watched_block_number() - .await - .expect("Cant load last watched block number") - .block_number; - - u64::from_str(last_watched_block_number_string.as_str()) - .expect("Сant make u256 block_number in get_last_watched_block_number_from_storage") -} - -/// Returns Rollup contract events state from storage -/// -/// # Arguments -/// -/// * `connection_pool` - Database Connection Pool -/// -pub async fn get_block_events_state_from_storage( - storage: &mut StorageProcessor<'_>, -) -> EventsState { - let last_watched_eth_block_number = get_last_watched_block_number_from_storage(storage).await; - - let committed = storage - .data_restore_schema() - .load_committed_events_state() - .await - .expect("Cant load committed state"); - - let mut committed_events: Vec = vec![]; - for event in committed { - let block_event = stored_block_event_into_block_event(event.clone()); - committed_events.push(block_event); - } - - let verified = storage - .data_restore_schema() - .load_verified_events_state() - .await - .expect("Cant load verified state"); - let mut verified_events: Vec = vec![]; - for event in verified { - let block_event = stored_block_event_into_block_event(event.clone()); - verified_events.push(block_event); - } - - EventsState { - committed_events, - verified_events, - last_watched_eth_block_number, - } -} - -/// Returns Rollup contract event from its stored representation -/// -/// # Arguments -/// -/// * `block` - Stored representation of ZkSync Contract event -/// -pub fn stored_block_event_into_block_event(block: StoredBlockEvent) -> BlockEvent { - BlockEvent { - block_num: u32::try_from(block.block_num) - .expect("Wrong block number - cant convert into u32"), - transaction_hash: H256::from_slice(block.transaction_hash.as_slice()), - block_type: match &block.block_type { - c if c == "Committed" => EventType::Committed, - v if v == "Verified" => EventType::Verified, - _ => panic!("Wrong block type"), - }, - } -} - -/// Returns the current Rollup block, tree accounts map, unprocessed priority ops and the last fee acc from storage -/// -/// # Arguments -/// -/// * `connection_pool` - Database Connection Pool -/// -/// connection_pool: &ConnectionPool, -pub async fn get_tree_state(storage: &mut StorageProcessor<'_>) -> (u32, AccountMap, u64, u32) { - let (last_block, account_map) = storage - .chain() - .state_schema() - .load_verified_state() - .await - .expect("There are no last verified state in storage"); - - let block = storage - .chain() - .block_schema() - .get_block(last_block) - .await - .expect("Cant get the last block from storage") - .expect("There are no last block in storage - restart driver"); - let (unprocessed_prior_ops, fee_acc_id) = (block.processed_priority_ops.1, 
block.fee_account); - - (last_block, account_map, unprocessed_prior_ops, fee_acc_id) -} - -/// Updates the `eth_stats` table with the currently last available committed/verified blocks -/// data for `eth_sender` module to operate correctly. -pub async fn update_eth_stats(storage: &mut StorageProcessor<'_>) { - let last_committed_block = storage - .chain() - .block_schema() - .get_last_committed_block() - .await - .expect("Can't get the last committed block"); - - let last_verified_block = storage - .chain() - .block_schema() - .get_last_verified_block() - .await - .expect("Can't get the last verified block"); - - storage - .data_restore_schema() - .initialize_eth_stats(last_committed_block, last_verified_block) - .await - .expect("Can't update the eth_stats table") -} diff --git a/core/bin/data_restore/src/tests/mod.rs b/core/bin/data_restore/src/tests/mod.rs index 635a6e22ae..7b037a5f5b 100644 --- a/core/bin/data_restore/src/tests/mod.rs +++ b/core/bin/data_restore/src/tests/mod.rs @@ -1,10 +1,14 @@ pub(crate) mod utils; +use std::cmp::max; +use std::{collections::HashMap, future::Future}; + use chrono::Utc; use futures::future; use jsonrpc_core::Params; +use num::BigUint; use serde_json::{json, Value}; -use std::{collections::HashMap, future::Future}; +use web3::types::Bytes; use web3::{contract::tokens::Tokenize, types::Transaction, RequestId, Transport}; use db_test_macro::test as db_test; @@ -18,12 +22,13 @@ use zksync_types::{ Log, PriorityOp, Withdraw, WithdrawOp, ZkSyncOp, H256, }; -use crate::data_restore_driver::DataRestoreDriver; -use crate::tests::utils::{create_log, u32_to_32bytes}; -use crate::{END_ETH_BLOCKS_OFFSET, ETH_BLOCKS_STEP}; -use num::BigUint; -use std::cmp::max; -use web3::types::Bytes; +use crate::{ + data_restore_driver::DataRestoreDriver, + database_storage_interactor::DatabaseStorageInteractor, + inmemory_storage_interactor::InMemoryStorageInteractor, + tests::utils::{create_log, u32_to_32bytes}, + END_ETH_BLOCKS_OFFSET, ETH_BLOCKS_STEP, +}; fn create_withdraw_operations( account_id: u32, @@ -229,6 +234,7 @@ impl Transport for Web3Transport { async fn test_run_state_update(mut storage: StorageProcessor<'_>) { let mut transport = Web3Transport::new(); + let mut interactor = DatabaseStorageInteractor::new(storage); let contract = zksync_contract(); let gov_contract = governance_contract(); @@ -336,10 +342,10 @@ async fn test_run_state_update(mut storage: StorageProcessor<'_>) { true, None, ); - driver.run_state_update(&mut storage).await; + driver.run_state_update(&mut interactor).await; // Check that it's stores some account, created by deposit - let (_, account) = AccountSchema(&mut storage) + let (_, account) = AccountSchema(interactor.storage()) .account_state_by_address(&Default::default()) .await .unwrap() @@ -349,7 +355,7 @@ async fn test_run_state_update(mut storage: StorageProcessor<'_>) { assert_eq!(BigUint::from(40u32), balance); assert_eq!(driver.events_state.committed_events.len(), 2); - let events = DataRestoreSchema(&mut storage) + let events = DataRestoreSchema(interactor.storage()) .load_committed_events_state() .await .unwrap(); @@ -368,7 +374,150 @@ async fn test_run_state_update(mut storage: StorageProcessor<'_>) { None, ); // Load state from db and check it - assert!(driver.load_state_from_storage(&mut storage).await); + assert!(driver.load_state_from_storage(&mut interactor).await); + assert_eq!(driver.events_state.committed_events.len(), events.len()); + assert_eq!(driver.tree_state.state.block_number, 2) +} + +#[tokio::test] +async fn 
test_with_inmemory_storage() { + let mut transport = Web3Transport::new(); + + let mut interactor = InMemoryStorageInteractor::new(); + let contract = zksync_contract(); + let gov_contract = governance_contract(); + + let block_verified_topic = contract + .event("BlockVerification") + .expect("Main contract abi error") + .signature(); + let block_verified_topic_string = format!("{:?}", block_verified_topic); + transport.insert_logs( + block_verified_topic_string, + vec![ + create_log( + block_verified_topic, + vec![u32_to_32bytes(1).into()], + Bytes(vec![]), + 1, + u32_to_32bytes(1).into(), + ), + create_log( + block_verified_topic, + vec![u32_to_32bytes(2).into()], + Bytes(vec![]), + 2, + u32_to_32bytes(2).into(), + ), + ], + ); + + let block_committed_topic = contract + .event("BlockCommit") + .expect("Main contract abi error") + .signature(); + let block_commit_topic_string = format!("{:?}", block_committed_topic); + transport.insert_logs( + block_commit_topic_string, + vec![ + create_log( + block_committed_topic, + vec![u32_to_32bytes(1).into()], + Bytes(vec![]), + 1, + u32_to_32bytes(1).into(), + ), + create_log( + block_committed_topic, + vec![u32_to_32bytes(2).into()], + Bytes(vec![]), + 2, + u32_to_32bytes(2).into(), + ), + ], + ); + + let reverted_topic = contract + .event("BlocksRevert") + .expect("Main contract abi error") + .signature(); + let _reverted_topic_string = format!("{:?}", reverted_topic); + + let new_token_topic = gov_contract + .event("NewToken") + .expect("Main contract abi error") + .signature(); + let new_token_topic_string = format!("{:?}", new_token_topic); + transport.insert_logs( + new_token_topic_string, + vec![create_log( + new_token_topic, + vec![[0; 32].into(), u32_to_32bytes(3).into()], + Bytes(vec![]), + 3, + u32_to_32bytes(1).into(), + )], + ); + + transport.push_transactions(vec![ + create_transaction( + 1, + create_block( + 1, + vec![create_deposit(Default::default(), Default::default(), 50)], + ), + ), + create_transaction( + 2, + create_block( + 2, + vec![create_withdraw_operations( + 0, + Default::default(), + Default::default(), + 10, + )], + ), + ), + ]); + + let mut driver = DataRestoreDriver::new( + transport.clone(), + [1u8; 20].into(), + [1u8; 20].into(), + ETH_BLOCKS_STEP, + END_ETH_BLOCKS_OFFSET, + vec![6, 30], + true, + None, + ); + driver.run_state_update(&mut interactor).await; + + // Check that it's stores some account, created by deposit + let (_, account) = interactor + .get_account_by_address(&Default::default()) + .unwrap(); + let balance = account.get_balance(0); + + assert_eq!(BigUint::from(40u32), balance); + assert_eq!(driver.events_state.committed_events.len(), 2); + let events = interactor.load_committed_events_state(); + + assert_eq!(driver.events_state.committed_events.len(), events.len()); + + // Nullify the state of driver + let mut driver = DataRestoreDriver::new( + transport.clone(), + [1u8; 20].into(), + [1u8; 20].into(), + ETH_BLOCKS_STEP, + END_ETH_BLOCKS_OFFSET, + vec![6, 30], + true, + None, + ); + // Load state from db and check it + assert!(driver.load_state_from_storage(&mut interactor).await); assert_eq!(driver.events_state.committed_events.len(), events.len()); assert_eq!(driver.tree_state.state.block_number, 2) } diff --git a/core/bin/server/src/main.rs b/core/bin/server/src/main.rs index 34b5a7f1fa..9bba984462 100644 --- a/core/bin/server/src/main.rs +++ b/core/bin/server/src/main.rs @@ -2,7 +2,7 @@ use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; use std::cell::RefCell; use 
structopt::StructOpt; use zksync_api::run_api; -use zksync_config::{ConfigurationOptions, ProverOptions}; +use zksync_config::{ConfigurationOptions, EthClientOptions, EthSenderOptions, ProverOptions}; use zksync_core::{genesis_init, run_core, wait_for_tasks}; use zksync_eth_sender::run_eth_sender; use zksync_prometheus_exporter::run_prometheus_exporter; @@ -46,6 +46,8 @@ async fn main() -> anyhow::Result<()> { let connection_pool = ConnectionPool::new(None); let config_options = ConfigurationOptions::from_env(); + let eth_client_options = EthClientOptions::from_env(); + let eth_sender_options = EthSenderOptions::from_env(); let prover_options = ProverOptions::from_env(); // Handle Ctrl+C @@ -77,16 +79,15 @@ async fn main() -> anyhow::Result<()> { // Run Ethereum sender actors. log::info!("Starting the Ethereum sender actors"); - let eth_sender_task_handle = run_eth_sender(connection_pool.clone(), config_options.clone()); + let eth_sender_task_handle = run_eth_sender( + connection_pool.clone(), + eth_client_options, + eth_sender_options, + ); // Run prover server & witness generator. log::info!("Starting the Prover server actors"); - run_prover_server( - connection_pool, - stop_signal_sender, - prover_options, - config_options, - ); + run_prover_server(connection_pool, stop_signal_sender, prover_options); tokio::select! { _ = async { wait_for_tasks(core_task_handles).await } => { diff --git a/core/bin/zksync_api/src/api_server/event_notify/event_fetcher.rs b/core/bin/zksync_api/src/api_server/event_notify/event_fetcher.rs index 61ccc86940..37e905d5aa 100644 --- a/core/bin/zksync_api/src/api_server/event_notify/event_fetcher.rs +++ b/core/bin/zksync_api/src/api_server/event_notify/event_fetcher.rs @@ -160,7 +160,7 @@ impl EventFetcher { } self.pending_block = Some(new); - metrics::histogram!("api", start.elapsed(), "event_fetcher" => "update_pending_block"); + metrics::histogram!("api.event_fetcher.update_pending_block", start.elapsed()); Some(executed_ops) } @@ -179,7 +179,7 @@ impl EventFetcher { .await .unwrap_or_default(); } - metrics::histogram!("api", start.elapsed(), "event_fetcher" => "send_operations"); + metrics::histogram!("api.event_fetcher.send_operations", start.elapsed()); } async fn load_pending_block(&mut self) -> anyhow::Result> { @@ -191,7 +191,7 @@ impl EventFetcher { .expect("Can't get access to the storage"); let pending_block = storage.chain().block_schema().load_pending_block().await?; - metrics::histogram!("api", start.elapsed(), "event_fetcher" => "load_pending_block"); + metrics::histogram!("api.event_fetcher.load_pending_block", start.elapsed()); Ok(pending_block) } @@ -209,7 +209,7 @@ impl EventFetcher { .get_last_committed_block() .await?; - metrics::histogram!("api", start.elapsed(), "event_fetcher" => "last_committed_block"); + metrics::histogram!("api.event_fetcher.last_committed_block", start.elapsed()); Ok(last_block) } @@ -227,7 +227,7 @@ impl EventFetcher { .get_last_verified_confirmed_block() .await?; - metrics::histogram!("api", start.elapsed(), "event_fetcher" => "last_verified_block"); + metrics::histogram!("api.event_fetcher.last_verified_block", start.elapsed()); Ok(last_block) } @@ -250,7 +250,7 @@ impl EventFetcher { .await .expect("Operation must exist"); - metrics::histogram!("api", start.elapsed(), "event_fetcher" => "load_operation"); + metrics::histogram!("api.event_fetcher.load_operation", start.elapsed()); op.into_op(&mut storage).await } } diff --git a/core/bin/zksync_api/src/api_server/event_notify/operation_notifier.rs 
b/core/bin/zksync_api/src/api_server/event_notify/operation_notifier.rs index 8675596fe1..fef0b5395a 100644 --- a/core/bin/zksync_api/src/api_server/event_notify/operation_notifier.rs +++ b/core/bin/zksync_api/src/api_server/event_notify/operation_notifier.rs @@ -104,7 +104,7 @@ impl OperationNotifier { } } - metrics::histogram!("api", start.elapsed(), "notifier" => "handle_new_block"); + metrics::histogram!("api.notifier.handle_new_block", start.elapsed()); Ok(()) } @@ -146,7 +146,7 @@ impl OperationNotifier { } } } - metrics::histogram!("api", start.elapsed(), "notifier" => "handle_executed_operations"); + metrics::histogram!("api.notifier.handle_executed_operations", start.elapsed()); Ok(()) } @@ -216,7 +216,7 @@ impl OperationNotifier { self.prior_op_subs .insert_new(sub_id, sub, serial_id, action)?; - metrics::histogram!("api", start.elapsed(), "notifier" => "add_priority_op_sub"); + metrics::histogram!("api.notifier.add_priority_op_sub", start.elapsed()); Ok(()) } @@ -258,7 +258,7 @@ impl OperationNotifier { } self.tx_subs.insert_new(sub_id, sub, hash, action)?; - metrics::histogram!("api", start.elapsed(), "notifier" => "add_transaction_sub"); + metrics::histogram!("api.notifier.add_transaction_sub", start.elapsed()); Ok(()) } @@ -276,7 +276,7 @@ impl OperationNotifier { self.account_subs .insert_new(sub_id, sub, account_id, action)?; - metrics::histogram!("api", start.elapsed(), "notifier" => "add_account_update_sub"); + metrics::histogram!("api.notifier.add_account_update_sub", start.elapsed()); Ok(()) } } diff --git a/core/bin/zksync_api/src/api_server/event_notify/state.rs b/core/bin/zksync_api/src/api_server/event_notify/state.rs index a68602b783..8696d0a277 100644 --- a/core/bin/zksync_api/src/api_server/event_notify/state.rs +++ b/core/bin/zksync_api/src/api_server/event_notify/state.rs @@ -1,6 +1,7 @@ use crate::api_server::rpc_server::types::{BlockInfo, ResponseAccountState}; use crate::utils::token_db_cache::TokenDBCache; use lru_cache::LruCache; +use std::time::Instant; use zksync_storage::chain::operations::records::StoredExecutedPriorityOperation; use zksync_storage::chain::operations_ext::records::TxReceiptResponse; use zksync_storage::ConnectionPool; @@ -35,6 +36,7 @@ impl NotifierState { &mut self, hash: &TxHash, ) -> Result, anyhow::Error> { + let start = Instant::now(); let res = if let Some(tx_receipt) = self .cache_of_transaction_receipts .get_mut(&hash.as_ref().to_vec()) @@ -57,6 +59,8 @@ impl NotifierState { tx_receipt }; + + metrics::histogram!("api.notifier.get_tx_receipt", start.elapsed()); Ok(res) } @@ -64,6 +68,7 @@ impl NotifierState { &mut self, block_number: u32, ) -> Result, anyhow::Error> { + let start = Instant::now(); let res = if let Some(block_info) = self.cache_of_blocks_info.get_mut(&block_number) { block_info.clone() } else { @@ -109,6 +114,8 @@ impl NotifierState { block_info }; + + metrics::histogram!("api.notifier.get_block_info", start.elapsed()); Ok(Some(res)) } @@ -116,6 +123,7 @@ impl NotifierState { &mut self, serial_id: u32, ) -> Result, anyhow::Error> { + let start = Instant::now(); let res = if let Some(executed_op) = self .cache_of_executed_priority_operations .get_mut(&serial_id) @@ -136,6 +144,11 @@ impl NotifierState { executed_op }; + + metrics::histogram!( + "api.notifier.get_executed_priority_operation", + start.elapsed() + ); Ok(res) } @@ -144,6 +157,7 @@ impl NotifierState { address: Address, action: ActionType, ) -> anyhow::Result<(AccountId, ResponseAccountState)> { + let start = Instant::now(); let mut storage = 
self.db_pool.access_storage().await?; let account_state = storage .chain() @@ -168,6 +182,7 @@ impl NotifierState { ResponseAccountState::default() }; + metrics::histogram!("api.notifier.get_account_info", start.elapsed()); Ok((account_id, account_state)) } @@ -176,6 +191,7 @@ impl NotifierState { id: AccountId, action: ActionType, ) -> anyhow::Result> { + let start = Instant::now(); let mut storage = self.db_pool.access_storage().await?; let stored_account = match action { @@ -202,7 +218,7 @@ impl NotifierState { } else { None }; - + metrics::histogram!("api.notifier.get_account_state", start.elapsed()); Ok(account) } } diff --git a/core/bin/zksync_api/src/api_server/mod.rs b/core/bin/zksync_api/src/api_server/mod.rs index d77ee1f8e7..ffd2262d29 100644 --- a/core/bin/zksync_api/src/api_server/mod.rs +++ b/core/bin/zksync_api/src/api_server/mod.rs @@ -10,7 +10,7 @@ pub use rest::v1; // External uses use futures::channel::mpsc; // Workspace uses -use zksync_config::{AdminServerOptions, ConfigurationOptions}; +use zksync_config::{AdminServerOptions, ApiServerOptions, ConfigurationOptions}; use zksync_storage::ConnectionPool; // Local uses use crate::fee_ticker::TickerRequest; @@ -33,6 +33,7 @@ pub fn start_api_server( panic_notify: mpsc::Sender, ticker_request_sender: mpsc::Sender, config_options: ConfigurationOptions, + api_server_opts: ApiServerOptions, admin_server_opts: AdminServerOptions, ) { let (sign_check_sender, sign_check_receiver) = mpsc::channel(8192); @@ -45,19 +46,22 @@ pub fn start_api_server( rest::start_server_thread_detached( connection_pool.clone(), - config_options.rest_api_server_address, + api_server_opts.rest_api_server_address, config_options.contract_eth_addr, panic_notify.clone(), ticker_request_sender.clone(), sign_check_sender.clone(), config_options.clone(), + api_server_opts.clone(), ); + rpc_subscriptions::start_ws_server( - &config_options, connection_pool.clone(), sign_check_sender.clone(), ticker_request_sender.clone(), panic_notify.clone(), + config_options.clone(), + api_server_opts.clone(), ); admin_server::start_admin_server( @@ -68,10 +72,11 @@ pub fn start_api_server( ); rpc_server::start_rpc_server( - config_options, connection_pool, sign_check_sender, ticker_request_sender, panic_notify, + config_options, + api_server_opts, ); } diff --git a/core/bin/zksync_api/src/api_server/rest/helpers.rs b/core/bin/zksync_api/src/api_server/rest/helpers.rs index 212a7e9707..4c746de8a3 100644 --- a/core/bin/zksync_api/src/api_server/rest/helpers.rs +++ b/core/bin/zksync_api/src/api_server/rest/helpers.rs @@ -8,7 +8,7 @@ use zksync_storage::chain::{ operations_ext::records::{TransactionsHistoryItem, TxByHashResponse}, }; use zksync_storage::StorageProcessor; -use zksync_types::{PriorityOp, Token, TokenId, ZkSyncPriorityOp, H256}; +use zksync_types::{tx::TxHash, PriorityOp, Token, TokenId, ZkSyncPriorityOp, H256}; pub fn remove_prefix(query: &str) -> &str { if let Some(query) = query.strip_prefix("0x") { @@ -25,14 +25,21 @@ pub fn remove_prefix(query: &str) -> &str { pub fn try_parse_hash(query: &str) -> Option { const HASH_SIZE: usize = 32; // 32 bytes - let query = remove_prefix(query); - let bytes = hex::decode(query).ok()?; + let mut slice = [0_u8; HASH_SIZE]; - if bytes.len() == HASH_SIZE { - Some(H256::from_slice(&bytes)) - } else { - None - } + let tx_hex = remove_prefix(query); + hex::decode_to_slice(&tx_hex, &mut slice).ok()?; + Some(H256::from_slice(&slice)) +} + +pub fn try_parse_tx_hash(query: &str) -> Option { + const HASH_SIZE: usize = 32; // 32 bytes 
+ + let mut slice = [0_u8; HASH_SIZE]; + + let tx_hex = remove_prefix(query); + hex::decode_to_slice(&tx_hex, &mut slice).ok()?; + TxHash::from_slice(&slice) } /// Checks if block is finalized, meaning that diff --git a/core/bin/zksync_api/src/api_server/rest/mod.rs b/core/bin/zksync_api/src/api_server/rest/mod.rs index af309bc295..31fc84979b 100644 --- a/core/bin/zksync_api/src/api_server/rest/mod.rs +++ b/core/bin/zksync_api/src/api_server/rest/mod.rs @@ -2,7 +2,7 @@ use actix_cors::Cors; use actix_web::{middleware, web, App, HttpResponse, HttpServer}; use futures::channel::mpsc; use std::net::SocketAddr; -use zksync_config::ConfigurationOptions; +use zksync_config::{ApiServerOptions, ConfigurationOptions}; use zksync_storage::ConnectionPool; use zksync_types::H160; @@ -30,14 +30,15 @@ async fn start_server( let api_v1_scope = { let env_options = api_v01.config_options.clone(); + let api_server_options = api_v01.api_server_options.clone(); let tx_sender = TxSender::new( api_v01.connection_pool.clone(), sign_verifier.clone(), fee_ticker.clone(), - &env_options, + &api_server_options, ); - v1::api_scope(tx_sender, env_options) + v1::api_scope(tx_sender, env_options, api_server_options) }; App::new() @@ -61,6 +62,7 @@ async fn start_server( } /// Start HTTP REST API +#[allow(clippy::too_many_arguments)] pub(super) fn start_server_thread_detached( connection_pool: ConnectionPool, listen_addr: SocketAddr, @@ -69,6 +71,7 @@ pub(super) fn start_server_thread_detached( fee_ticker: mpsc::Sender, sign_verifier: mpsc::Sender, config_options: ConfigurationOptions, + api_server_options: ApiServerOptions, ) { std::thread::Builder::new() .name("actix-rest-api".to_string()) @@ -76,7 +79,12 @@ pub(super) fn start_server_thread_detached( let _panic_sentinel = ThreadPanicNotify(panic_notify.clone()); actix_rt::System::new("api-server").block_on(async move { - let api_v01 = ApiV01::new(connection_pool, contract_address, config_options); + let api_v01 = ApiV01::new( + connection_pool, + contract_address, + config_options, + api_server_options, + ); api_v01.spawn_network_status_updater(panic_notify); start_server(api_v01, fee_ticker, sign_verifier, listen_addr).await; diff --git a/core/bin/zksync_api/src/api_server/rest/v01/api_decl.rs b/core/bin/zksync_api/src/api_server/rest/v01/api_decl.rs index 562f5ecaf0..924d9b9989 100644 --- a/core/bin/zksync_api/src/api_server/rest/v01/api_decl.rs +++ b/core/bin/zksync_api/src/api_server/rest/v01/api_decl.rs @@ -9,7 +9,7 @@ use crate::{ }; use actix_web::{web, HttpResponse, Result as ActixResult}; use futures::channel::mpsc; -use zksync_config::ConfigurationOptions; +use zksync_config::{ApiServerOptions, ConfigurationOptions}; use zksync_storage::{ chain::{ block::records::BlockDetails, @@ -32,6 +32,7 @@ pub struct ApiV01 { pub(crate) network_status: SharedNetworkStatus, pub(crate) contract_address: String, pub(crate) config_options: ConfigurationOptions, + pub(crate) api_server_options: ApiServerOptions, } impl ApiV01 { @@ -39,15 +40,17 @@ impl ApiV01 { connection_pool: ConnectionPool, contract_address: H160, config_options: ConfigurationOptions, + api_server_options: ApiServerOptions, ) -> Self { - let api_client = CoreApiClient::new(config_options.core_server_url.clone()); + let api_client = CoreApiClient::new(api_server_options.core_server_url.clone()); Self { - caches: Caches::new(config_options.api_requests_caches_size), + caches: Caches::new(api_server_options.api_requests_caches_size), connection_pool, api_client, network_status: 
SharedNetworkStatus::default(), contract_address: format!("{:?}", contract_address), config_options, + api_server_options, } } diff --git a/core/bin/zksync_api/src/api_server/rest/v01/api_impl.rs b/core/bin/zksync_api/src/api_server/rest/v01/api_impl.rs index 1cb2ad8910..0f86bfa5ef 100644 --- a/core/bin/zksync_api/src/api_server/rest/v01/api_impl.rs +++ b/core/bin/zksync_api/src/api_server/rest/v01/api_impl.rs @@ -27,14 +27,14 @@ impl ApiV01 { pub async fn testnet_config(self_: web::Data) -> ActixResult { let start = Instant::now(); let contract_address = self_.contract_address.clone(); - metrics::histogram!("api", start.elapsed(), "v01" => "testnet_config"); + metrics::histogram!("api.v01.testnet_config", start.elapsed()); ok_json!(TestnetConfigResponse { contract_address }) } pub async fn status(self_: web::Data) -> ActixResult { let start = Instant::now(); let result = ok_json!(self_.network_status.read().await); - metrics::histogram!("api", start.elapsed(), "v01" => "status"); + metrics::histogram!("api.v01.status", start.elapsed()); result } @@ -50,7 +50,7 @@ impl ApiV01 { let mut vec_tokens = tokens.values().cloned().collect::>(); vec_tokens.sort_by_key(|t| t.id); - metrics::histogram!("api", start.elapsed(), "v01" => "tokens"); + metrics::histogram!("api.v01.tokens", start.elapsed()); ok_json!(vec_tokens) } @@ -144,7 +144,7 @@ impl ApiV01 { // goes from oldest tx to the newest tx. transactions_history.append(&mut ongoing_transactions_history); - metrics::histogram!("api", start.elapsed(), "v01" => "tx_history"); + metrics::histogram!("api.v01.tx_history", start.elapsed()); ok_json!(transactions_history) } @@ -185,7 +185,7 @@ impl ApiV01 { transaction.commit().await.map_err(Self::db_error)?; - metrics::histogram!("api", start.elapsed(), "v01" => "tx_history_older_than"); + metrics::histogram!("api.v01.tx_history_older_than", start.elapsed()); ok_json!(transactions_history) } @@ -277,7 +277,7 @@ impl ApiV01 { transactions_history.append(&mut txs); } - metrics::histogram!("api", start.elapsed(), "v01" => "tx_history_newer_than"); + metrics::histogram!("api.v01.tx_history_newer_than", start.elapsed()); ok_json!(transactions_history) } @@ -294,7 +294,7 @@ impl ApiV01 { let tx_receipt = self_.get_tx_receipt(transaction_hash).await?; - metrics::histogram!("api", start.elapsed(), "v01" => "executed_tx_by_hash"); + metrics::histogram!("api.v01.executed_tx_by_hash", start.elapsed()); ok_json!(tx_receipt) } @@ -357,7 +357,7 @@ impl ApiV01 { res = deposit_op_to_tx_by_hash(&tokens, &priority_op, eth_block); } - metrics::histogram!("api", start.elapsed(), "v01" => "tx_by_hash"); + metrics::histogram!("api.v01.tx_by_hash", start.elapsed()); ok_json!(res) } @@ -367,7 +367,7 @@ impl ApiV01 { ) -> ActixResult { let start = Instant::now(); let receipt = self_.get_priority_op_receipt(pq_id).await?; - metrics::histogram!("api", start.elapsed(), "v01" => "priority_op"); + metrics::histogram!("api.v01.priority_op", start.elapsed()); ok_json!(receipt) } @@ -384,7 +384,7 @@ impl ApiV01 { Err(HttpResponse::NotFound().finish().into()) }; - metrics::histogram!("api", start.elapsed(), "v01" => "block_tx"); + metrics::histogram!("api.v01.block_tx", start.elapsed()); result } @@ -416,7 +416,7 @@ impl ApiV01 { HttpResponse::InternalServerError().finish() })?; - metrics::histogram!("api", start.elapsed(), "v01" => "blocks"); + metrics::histogram!("api.v01.blocks", start.elapsed()); ok_json!(resp) } @@ -431,7 +431,7 @@ impl ApiV01 { } else { Err(HttpResponse::NotFound().finish().into()) }; - 
metrics::histogram!("api", start.elapsed(), "v01" => "block_by_id"); + metrics::histogram!("api.v01.block_by_id", start.elapsed()); result } @@ -452,7 +452,7 @@ impl ApiV01 { HttpResponse::InternalServerError().finish() })?; - metrics::histogram!("api", start.elapsed(), "v01" => "block_transactions"); + metrics::histogram!("api.v01.block_transactions", start.elapsed()); ok_json!(txs) } @@ -469,7 +469,7 @@ impl ApiV01 { Err(HttpResponse::NotFound().finish().into()) }; - metrics::histogram!("api", start.elapsed(), "v01" => "explorer_search"); + metrics::histogram!("api.v01.explorer_search", start.elapsed()); result } @@ -485,7 +485,7 @@ impl ApiV01 { .as_secs(), }; - metrics::histogram!("api", start.elapsed(), "v01" => "withdrawal_processing_time"); + metrics::histogram!("api.v01.withdrawal_processing_time", start.elapsed()); ok_json!(processing_time) } } diff --git a/core/bin/zksync_api/src/api_server/rest/v1/blocks.rs b/core/bin/zksync_api/src/api_server/rest/v1/blocks.rs index c930747e35..8ed0a05a87 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/blocks.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/blocks.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; // Workspace uses -use zksync_config::ConfigurationOptions; +use zksync_config::ApiServerOptions; use zksync_crypto::{convert::FeConvert, serialization::FrSerde, Fr}; use zksync_storage::{chain::block::records, ConnectionPool, QueryResult}; use zksync_types::{tx::TxHash, BlockNumber}; @@ -22,7 +22,7 @@ use super::{ client::{self, Client}, Error as ApiError, JsonResult, Pagination, PaginationQuery, }; -use crate::{api_server::rest::helpers::remove_prefix, utils::shared_lru_cache::AsyncLruCache}; +use crate::{api_server::rest::helpers::try_parse_tx_hash, utils::shared_lru_cache::AsyncLruCache}; /// Shared data between `api/v1/blocks` endpoints. 
#[derive(Debug, Clone)] @@ -119,7 +119,7 @@ pub struct BlockInfo { #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct TransactionInfo { pub tx_hash: TxHash, - pub block_number: i64, + pub block_number: BlockNumber, pub op: Value, pub success: Option, pub fail_reason: Option, @@ -130,14 +130,28 @@ impl From for BlockInfo { fn from(inner: records::BlockDetails) -> Self { Self { block_number: inner.block_number as BlockNumber, - new_state_root: Fr::from_bytes(&inner.new_state_root) - .expect("Unable to decode `new_state_root` field"), + new_state_root: Fr::from_bytes(&inner.new_state_root).unwrap_or_else(|err| { + panic!( + "Database provided an incorrect new_state_root field: {:?}, an error occurred {}", + inner.new_state_root, err + ) + }), block_size: inner.block_size as u64, commit_tx_hash: inner.commit_tx_hash.map(|bytes| { - TxHash::from_slice(&bytes).expect("Unable to decode `commit_tx_hash` field") + TxHash::from_slice(&bytes).unwrap_or_else(|| { + panic!( + "Database provided an incorrect commit_tx_hash field: {:?}", + hex::encode(bytes) + ) + }) }), verify_tx_hash: inner.verify_tx_hash.map(|bytes| { - TxHash::from_slice(&bytes).expect("Unable to decode `verify_tx_hash` field") + TxHash::from_slice(&bytes).unwrap_or_else(|| { + panic!( + "Database provided an incorrect verify_tx_hash field: {:?}", + hex::encode(bytes) + ) + }) }), committed_at: inner.committed_at, verified_at: inner.verified_at, @@ -148,15 +162,13 @@ impl From for BlockInfo { impl From for TransactionInfo { fn from(inner: records::BlockTransactionItem) -> Self { Self { - tx_hash: { - let mut slice = [0_u8; 32]; - - let tx_hex = remove_prefix(&inner.tx_hash); - hex::decode_to_slice(&tx_hex, &mut slice) - .expect("Unable to decode `tx_hash` field"); - TxHash::from_slice(&slice).unwrap() - }, - block_number: inner.block_number, + tx_hash: try_parse_tx_hash(&inner.tx_hash).unwrap_or_else(|| { + panic!( + "Database provided an incorrect transaction hash: {:?}", + inner.tx_hash + ) + }), + block_number: inner.block_number as BlockNumber, op: inner.op, success: inner.success, fail_reason: inner.fail_reason, @@ -257,8 +269,8 @@ async fn blocks_range( Ok(Json(range)) } -pub fn api_scope(env_options: &ConfigurationOptions, pool: ConnectionPool) -> Scope { - let data = ApiBlocksData::new(pool, env_options.api_requests_caches_size); +pub fn api_scope(api_server_options: &ApiServerOptions, pool: ConnectionPool) -> Scope { + let data = ApiBlocksData::new(pool, api_server_options.api_requests_caches_size); web::scope("blocks") .data(data) @@ -277,7 +289,7 @@ mod tests { cfg.fill_database().await?; let (client, server) = - cfg.start_server(|cfg| api_scope(&cfg.env_options, cfg.pool.clone())); + cfg.start_server(|cfg| api_scope(&cfg.api_server_options, cfg.pool.clone())); // Block requests part let blocks: Vec = { diff --git a/core/bin/zksync_api/src/api_server/rest/v1/client.rs b/core/bin/zksync_api/src/api_server/rest/v1/client.rs index 9e3fe8ce3a..a998bbeb14 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/client.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/client.rs @@ -1,7 +1,12 @@ //! Built-in API client. 
// Public uses -pub use super::blocks::{BlockInfo, TransactionInfo}; +pub use super::{ + blocks::{BlockInfo, TransactionInfo}, + config::Contracts, + tokens::TokenPriceKind, + transactions::{SumbitErrorCode, TxReceipt}, +}; // Built-in uses diff --git a/core/bin/zksync_api/src/api_server/rest/v1/mod.rs b/core/bin/zksync_api/src/api_server/rest/v1/mod.rs index e3c312872e..9095e07230 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/mod.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/mod.rs @@ -13,7 +13,7 @@ use actix_web::{ use serde::{Deserialize, Serialize}; // Workspace uses -use zksync_config::ConfigurationOptions; +use zksync_config::{ApiServerOptions, ConfigurationOptions}; use zksync_types::BlockNumber; // Local uses @@ -33,10 +33,17 @@ pub const MAX_LIMIT: u32 = 100; type JsonResult = std::result::Result, Error>; -pub(crate) fn api_scope(tx_sender: TxSender, env_options: ConfigurationOptions) -> Scope { +pub(crate) fn api_scope( + tx_sender: TxSender, + env_options: ConfigurationOptions, + api_server_options: ApiServerOptions, +) -> Scope { web::scope("/api/v1") .service(config::api_scope(&env_options)) - .service(blocks::api_scope(&env_options, tx_sender.pool.clone())) + .service(blocks::api_scope( + &api_server_options, + tx_sender.pool.clone(), + )) .service(transactions::api_scope(tx_sender.clone())) .service(tokens::api_scope( tx_sender.tokens, diff --git a/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs b/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs index 6fd9d95bd2..2ae85f36cf 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs @@ -1,6 +1,7 @@ //! API testing helpers. // Built-in uses +use std::str::FromStr; // External uses use actix_web::{web, App, Scope}; @@ -8,7 +9,7 @@ use once_cell::sync::Lazy; use tokio::sync::Mutex; // Workspace uses -use zksync_config::ConfigurationOptions; +use zksync_config::{ApiServerOptions, ConfigurationOptions}; use zksync_crypto::rand::{SeedableRng, XorShiftRng}; use zksync_storage::test_data::{ dummy_ethereum_tx_hash, gen_acc_random_updates, gen_unique_operation, @@ -19,7 +20,7 @@ use zksync_test_account::ZkSyncAccount; use zksync_types::{ethereum::OperationType, helpers::apply_updates, AccountMap, Action}; use zksync_types::{ operations::{ChangePubKeyOp, TransferToNewOp}, - ExecutedOperations, ExecutedTx, ZkSyncOp, ZkSyncTx, + Address, ExecutedOperations, ExecutedTx, Token, ZkSyncOp, ZkSyncTx, }; // Local uses @@ -28,6 +29,7 @@ use super::client::Client; #[derive(Debug, Clone)] pub struct TestServerConfig { pub env_options: ConfigurationOptions, + pub api_server_options: ApiServerOptions, pub pool: ConnectionPool, } @@ -35,11 +37,18 @@ impl Default for TestServerConfig { fn default() -> Self { Self { env_options: ConfigurationOptions::from_env(), + api_server_options: ApiServerOptions::from_env(), pool: ConnectionPool::new(Some(1)), } } } +#[derive(Debug)] +pub struct TestTransactions { + pub acc: ZkSyncAccount, + pub txs: Vec<(ZkSyncTx, ExecutedOperations)>, +} + impl TestServerConfig { pub fn start_server(&self, scope_factory: F) -> (Client, actix_web::test::TestServer) where @@ -58,7 +67,7 @@ impl TestServerConfig { } /// Creates several transactions and the corresponding executed operations. 
- pub fn gen_zk_txs(fee: u64) -> Vec<(ZkSyncTx, ExecutedOperations)> { + pub fn gen_zk_txs(fee: u64) -> TestTransactions { let from = ZkSyncAccount::rand(); from.set_account_id(Some(0xdead)); @@ -119,11 +128,10 @@ impl TestServerConfig { )); } - txs + TestTransactions { acc: from, txs } } pub async fn fill_database(&self) -> anyhow::Result<()> { - todo!() // static INITED: Lazy> = Lazy::new(|| Mutex::new(false)); // // // Hold this guard until transaction will be committed to avoid double init. @@ -149,6 +157,17 @@ impl TestServerConfig { // // Required since we use `EthereumSchema` in this test. // storage.ethereum_schema().initialize_eth_data().await?; // + // // Insert PHNX token + // storage + // .tokens_schema() + // .store_token(Token::new( + // 1, + // Address::from_str("38A2fDc11f526Ddd5a607C1F251C065f40fBF2f7").unwrap(), + // "PHNX", + // 18, + // )) + // .await?; + // // let mut accounts = AccountMap::default(); // let n_committed = 5; // let n_verified = n_committed - 2; @@ -164,6 +183,7 @@ impl TestServerConfig { // // Add transactions to every odd block. // let txs = if block_number % 2 == 1 { // Self::gen_zk_txs(1_000) + // .txs // .into_iter() // .map(|(_tx, op)| op) // .collect() @@ -257,5 +277,6 @@ impl TestServerConfig { // drop(inited_guard); // // Ok(()) + todo!() } } diff --git a/core/bin/zksync_api/src/api_server/rest/v1/tokens.rs b/core/bin/zksync_api/src/api_server/rest/v1/tokens.rs index 48f3837a91..b98deab30b 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/tokens.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/tokens.rs @@ -44,7 +44,12 @@ impl ApiTokensData { let mut storage = self.tokens.db.access_storage().await?; let tokens = storage.tokens_schema().load_tokens().await?; - Ok(tokens.into_iter().map(|(_k, v)| v).collect()) + + // Provide tokens in a predictable order. + let mut tokens: Vec<_> = tokens.into_iter().map(|(_k, v)| v).collect(); + tokens.sort_unstable_by_key(|token| token.id); + + Ok(tokens) } async fn token(&self, token_like: TokenLike) -> QueryResult> { @@ -253,34 +258,44 @@ mod tests { let expected_tokens = { let mut storage = cfg.pool.access_storage().await?; - storage.tokens_schema().load_tokens().await? + let mut tokens: Vec<_> = storage + .tokens_schema() + .load_tokens() + .await? + .values() + .cloned() + .collect(); + tokens.sort_unstable_by(|lhs, rhs| lhs.id.cmp(&rhs.id)); + tokens }; + assert_eq!(client.tokens().await?, expected_tokens); + + let expected_token = &expected_tokens[0]; assert_eq!( - client.tokens().await?, - expected_tokens.values().cloned().collect::>() + &client.token_by_id(&TokenLike::Id(0)).await?.unwrap(), + expected_token ); - - let expected_token = expected_tokens.values().cloned().next(); - assert_eq!(client.token_by_id(&TokenLike::Id(0)).await?, expected_token); assert_eq!( - client + &client .token_by_id(&TokenLike::parse( "0x0000000000000000000000000000000000000000" )) - .await?, + .await? + .unwrap(), expected_token ); assert_eq!( - client + &client .token_by_id(&TokenLike::parse( "0000000000000000000000000000000000000000" )) - .await?, + .await? 
+ .unwrap(), expected_token ); assert_eq!( - client.token_by_id(&TokenLike::parse("ETH")).await?, + &client.token_by_id(&TokenLike::parse("ETH")).await?.unwrap(), expected_token ); assert_eq!(client.token_by_id(&TokenLike::parse("XM")).await?, None); diff --git a/core/bin/zksync_api/src/api_server/rest/v1/transactions.rs b/core/bin/zksync_api/src/api_server/rest/v1/transactions.rs index 0d1f7ce857..9f3a6e4ee9 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/transactions.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/transactions.rs @@ -10,10 +10,18 @@ use actix_web::{ use serde::{Deserialize, Serialize}; // Workspace uses -use zksync_types::{tx::TxEthSignature, tx::TxHash, ZkSyncTx}; +use zksync_storage::{ + chain::operations_ext::records::TxReceiptResponse, QueryResult, StorageProcessor, +}; +use zksync_types::{ + tx::{TxEthSignature, TxHash}, + BlockNumber, SignedZkSyncTx, ZkSyncTx, +}; // Local uses -use super::{client::Client, client::ClientError, Error as ApiError, JsonResult}; +use super::{ + client::Client, client::ClientError, Error as ApiError, JsonResult, Pagination, PaginationQuery, +}; use crate::api_server::rpc_server::types::TxWithSignature; use crate::api_server::tx_sender::{SubmitError, TxSender}; @@ -24,6 +32,7 @@ pub enum SumbitErrorCode { UnsupportedFastProcessing = 103, IncorrectTx = 104, TxAdd = 105, + InappropriateFeeToken = 106, Internal = 110, CommunicationCoreServer = 111, @@ -38,6 +47,7 @@ impl SumbitErrorCode { SubmitError::UnsupportedFastProcessing => Self::UnsupportedFastProcessing, SubmitError::IncorrectTx(_) => Self::IncorrectTx, SubmitError::TxAdd(_) => Self::TxAdd, + SubmitError::InappropriateFeeToken => Self::InappropriateFeeToken, SubmitError::CommunicationCoreServer(_) => Self::CommunicationCoreServer, SubmitError::Internal(_) => Self::Internal, SubmitError::Other(_) => Self::Other, @@ -72,13 +82,110 @@ impl ApiTransactionsData { fn new(tx_sender: TxSender) -> Self { Self { tx_sender } } + + async fn tx_receipt( + storage: &mut StorageProcessor<'_>, + tx_hash: TxHash, + ) -> QueryResult> { + storage + .chain() + .operations_ext_schema() + .tx_receipt(tx_hash.as_ref()) + .await + } + + async fn tx_status(&self, tx_hash: TxHash) -> QueryResult> { + let mut storage = self.tx_sender.pool.access_storage().await?; + + let tx_receipt = { + if let Some(tx_receipt) = Self::tx_receipt(&mut storage, tx_hash).await? { + tx_receipt + } else { + let tx_in_mempool = storage + .chain() + .mempool_schema() + .contains_tx(tx_hash) + .await?; + + let tx_receipt = if tx_in_mempool { + Some(TxReceipt::Pending) + } else { + None + }; + return Ok(tx_receipt); + } + }; + + let block_number = tx_receipt.block_number as BlockNumber; + // Check the cases where we don't need to get block details. + if !tx_receipt.success { + return Ok(Some(TxReceipt::Rejected { + reason: tx_receipt.fail_reason, + })); + } + + if tx_receipt.verified { + return Ok(Some(TxReceipt::Verified { + block: block_number, + })); + } + + // To distinguish committed and executed transaction we have to examine + // the transaction's block. + // + // TODO `load_block_range` possibly is too heavy operation and we should write + // specific request in the storage schema. (Task number ????) + let block = storage + .chain() + .block_schema() + .load_block_range(block_number, 1) + .await? 
+ .into_iter() + .next(); + + let is_committed = block + .filter(|block| block.commit_tx_hash.is_some()) + .is_some(); + + let tx_receipt = if is_committed { + TxReceipt::Committed { + block: block_number, + } + } else { + TxReceipt::Executed + }; + + Ok(Some(tx_receipt)) + } + + async fn tx_data(&self, tx_hash: TxHash) -> QueryResult> { + let mut storage = self.tx_sender.pool.access_storage().await?; + + let operation = storage + .chain() + .operations_schema() + .get_executed_operation(tx_hash.as_ref()) + .await?; + + if let Some(op) = operation { + let signed_tx = SignedZkSyncTx { + tx: serde_json::from_value(op.tx)?, + eth_sign_data: op.eth_sign_data.map(serde_json::from_value).transpose()?, + }; + + Ok(Some(signed_tx)) + } else { + // Check memory pool for pending transactions. + storage.chain().mempool_schema().get_tx(tx_hash).await + } + } } // Data transfer objects. #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] -pub struct FastProcessingQuery { - pub fast_processing: Option, +struct FastProcessingQuery { + fast_processing: Option, } /// This struct has the same layout as `SignedZkSyncTx`, expect that it used @@ -95,6 +202,22 @@ struct IncomingTxBatch { signature: Option, } +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "status", rename_all = "camelCase")] +pub enum TxReceipt { + /// The transaction is awaiting execution in the memorypool. + Pending, + /// The transaction has been executed, but the block containing this transaction has not + /// yet been committed. + Executed, + /// The block which contains this transaction has been committed. + Committed { block: BlockNumber }, + /// The block which contains this transaction has been verified. + Verified { block: BlockNumber }, + /// The transaction has been rejected for some reasons. + Rejected { reason: Option }, +} + // Client implementation /// Transactions API part. @@ -124,10 +247,106 @@ impl Client { .send() .await } + + /// Gets actual transaction receipt. + pub async fn tx_status(&self, tx_hash: TxHash) -> Result, ClientError> { + self.get(&format!("transactions/{}", tx_hash.to_string())) + .send() + .await + } + + /// Gets transaction content. + pub async fn tx_data(&self, tx_hash: TxHash) -> Result, ClientError> { + self.get(&format!("transactions/{}/data", tx_hash.to_string())) + .send() + .await + } + + /// Gets transaction receipt by ID. + pub async fn tx_receipt_by_id( + &self, + tx_hash: TxHash, + receipt_id: u32, + ) -> Result, ClientError> { + self.get(&format!( + "transactions/{}/receipts/{}", + tx_hash.to_string(), + receipt_id + )) + .send() + .await + } + + /// Gets transaction receipts. + pub async fn tx_receipts( + &self, + tx_hash: TxHash, + from: Pagination, + limit: BlockNumber, + ) -> Result, ClientError> { + self.get(&format!("transactions/{}/receipts", tx_hash.to_string())) + .query(&from.into_query(limit)) + .send() + .await + } } // Server implementation +async fn tx_status( + data: web::Data, + web::Path(tx_hash): web::Path, +) -> JsonResult> { + let tx_status = data.tx_status(tx_hash).await.map_err(ApiError::internal)?; + + Ok(Json(tx_status)) +} + +async fn tx_data( + data: web::Data, + web::Path(tx_hash): web::Path, +) -> JsonResult> { + let tx_data = data.tx_data(tx_hash).await.map_err(ApiError::internal)?; + + Ok(Json(tx_data)) +} + +async fn tx_receipt_by_id( + data: web::Data, + web::Path((tx_hash, receipt_id)): web::Path<(TxHash, u32)>, +) -> JsonResult> { + // At the moment we store only last receipt, so this endpoint is just only a stub. 
+ if receipt_id > 0 { + return Ok(Json(None)); + } + + let tx_status = data.tx_status(tx_hash).await.map_err(ApiError::internal)?; + + Ok(Json(tx_status)) +} + +async fn tx_receipts( + data: web::Data, + web::Path(tx_hash): web::Path, + web::Query(pagination): web::Query, +) -> JsonResult> { + let (pagination, _limit) = pagination.into_inner()?; + // At the moment we store only last receipt, so this endpoint is just only a stub. + let is_some = match pagination { + Pagination::Before(before) if before < 1 => false, + Pagination::After(_after) => false, + _ => true, + }; + + if is_some { + let tx_status = data.tx_status(tx_hash).await.map_err(ApiError::internal)?; + + Ok(Json(tx_status.into_iter().collect())) + } else { + Ok(Json(vec![])) + } +} + async fn submit_tx( data: web::Data, Json(body): Json, @@ -169,6 +388,13 @@ pub fn api_scope(tx_sender: TxSender) -> Scope { web::scope("transactions") .data(data) + .route("{tx_hash}", web::get().to(tx_status)) + .route("{tx_hash}/data", web::get().to(tx_data)) + .route( + "{tx_hash}/receipts/{receipt_id}", + web::get().to(tx_receipt_by_id), + ) + .route("{tx_hash}/receipts", web::get().to(tx_receipts)) .route("submit", web::post().to(submit_tx)) .route("submit/batch", web::post().to(submit_tx_batch)) } @@ -180,14 +406,19 @@ mod tests { use bigdecimal::BigDecimal; use futures::{channel::mpsc, prelude::*}; use num::BigUint; - use zksync_types::SignedZkSyncTx; + use zksync_storage::ConnectionPool; + use zksync_test_account::ZkSyncAccount; + use zksync_types::{tokens::TokenLike, tx::PackedEthSignature, SignedZkSyncTx}; - use super::{super::test_utils::TestServerConfig, *}; + use super::{ + super::test_utils::{TestServerConfig, TestTransactions}, + *, + }; use crate::{ + api_server::rest::helpers::try_parse_tx_hash, core_api_client::CoreApiClient, fee_ticker::{Fee, OutputFeeType::Withdraw, TickerRequest}, - signature_checker::VerifiedTx, - signature_checker::VerifyTxSignatureRequest, + signature_checker::{VerifiedTx, VerifyTxSignatureRequest}, }; fn submit_txs_loopback() -> (CoreApiClient, actix_web::test::TestServer) { @@ -235,6 +466,15 @@ mod tests { response.send(price).expect("Unable to send response"); } + TickerRequest::IsTokenAllowed { token, response } => { + // For test purposes, PHNX token is not allowed. 
+ let is_phnx = match token { + TokenLike::Id(id) => id == 1, + TokenLike::Symbol(sym) => sym == "PHNX", + TokenLike::Address(_) => unreachable!(), + }; + response.send(Ok(!is_phnx)).unwrap_or_default(); + } } } }); @@ -260,6 +500,7 @@ mod tests { struct TestServer { core_server: actix_web::test::TestServer, api_server: actix_web::test::TestServer, + pool: ConnectionPool, } impl TestServer { @@ -267,6 +508,7 @@ mod tests { let (core_client, core_server) = submit_txs_loopback(); let cfg = TestServerConfig::default(); + let pool = cfg.pool.clone(); cfg.fill_database().await?; let sign_verifier = dummy_sign_verifier(); @@ -278,7 +520,7 @@ mod tests { cfg.pool.clone(), sign_verifier.clone(), fee_ticker.clone(), - &cfg.env_options, + &cfg.api_server_options, )) }); @@ -287,6 +529,7 @@ mod tests { Self { core_server, api_server, + pool, }, )) } @@ -302,7 +545,7 @@ mod tests { let (core_client, core_server) = submit_txs_loopback(); let signed_tx = SignedZkSyncTx { - tx: TestServerConfig::gen_zk_txs(0)[0].0.clone(), + tx: TestServerConfig::gen_zk_txs(0).txs[0].0.clone(), eth_sign_data: None, }; @@ -317,13 +560,103 @@ mod tests { async fn test_transactions_scope() -> anyhow::Result<()> { let (client, server) = TestServer::new().await?; + let committed_tx_hash = { + let mut storage = server.pool.access_storage().await?; + + let transactions = storage + .chain() + .block_schema() + .get_block_transactions(1) + .await?; + + try_parse_tx_hash(&transactions[0].tx_hash).unwrap() + }; + + // Tx receipt by ID. + let unknown_tx_hash = TxHash::default(); + assert!(client + .tx_receipt_by_id(committed_tx_hash, 0) + .await? + .is_some()); + assert!(client + .tx_receipt_by_id(committed_tx_hash, 1) + .await? + .is_none()); + assert!(client.tx_receipt_by_id(unknown_tx_hash, 0).await?.is_none()); + + // Tx receipts. + let queries = vec![ + ( + (committed_tx_hash, Pagination::Before(1), 1), + vec![TxReceipt::Verified { block: 1 }], + ), + ( + (committed_tx_hash, Pagination::Last, 1), + vec![TxReceipt::Verified { block: 1 }], + ), + ( + (committed_tx_hash, Pagination::Before(2), 1), + vec![TxReceipt::Verified { block: 1 }], + ), + ((committed_tx_hash, Pagination::After(0), 1), vec![]), + ((unknown_tx_hash, Pagination::Last, 1), vec![]), + ]; + + for (query, expected_response) in queries { + let actual_response = client.tx_receipts(query.0, query.1, query.2).await?; + + assert_eq!( + actual_response, + expected_response, + "tx: {} from: {:?} limit: {:?}", + query.0.to_string(), + query.1, + query.2 + ); + } + + // Tx status and data for committed transaction. + assert_eq!( + client.tx_status(committed_tx_hash).await?, + Some(TxReceipt::Verified { block: 1 }) + ); + assert_eq!( + client.tx_data(committed_tx_hash).await?.unwrap().hash(), + committed_tx_hash + ); + + // Tx status and data for pending transaction. + let tx_hash = { + let mut storage = server.pool.access_storage().await?; + + let tx = TestServerConfig::gen_zk_txs(1_u64).txs[0].0.clone(); + let tx_hash = tx.hash(); + storage + .chain() + .mempool_schema() + .insert_tx(&SignedZkSyncTx { + tx, + eth_sign_data: None, + }) + .await?; + + tx_hash + }; + assert_eq!(client.tx_status(tx_hash).await?, Some(TxReceipt::Pending)); + assert_eq!(client.tx_data(tx_hash).await?.unwrap().hash(), tx_hash); + + // Tx status for unknown transaction. + let tx_hash = TestServerConfig::gen_zk_txs(1_u64).txs[1].0.hash(); + assert_eq!(client.tx_status(tx_hash).await?, None); + assert!(client.tx_data(tx_hash).await?.is_none()); + // Submit correct transaction. 
- let tx = TestServerConfig::gen_zk_txs(1_00)[0].0.clone(); + let tx = TestServerConfig::gen_zk_txs(1_00).txs[0].0.clone(); let expected_tx_hash = tx.hash(); assert_eq!(client.submit_tx(tx, None, None).await?, expected_tx_hash); // Submit transaction without fee. - let tx = TestServerConfig::gen_zk_txs(0)[0].0.clone(); + let tx = TestServerConfig::gen_zk_txs(0).txs[0].0.clone(); assert!(client .submit_tx(tx, None, None) .await @@ -332,7 +665,8 @@ mod tests { .contains("Transaction fee is too low")); // Submit correct transactions batch. - let (txs, tx_hashes): (Vec<_>, Vec<_>) = TestServerConfig::gen_zk_txs(1_00) + let TestTransactions { acc, txs } = TestServerConfig::gen_zk_txs(1_00); + let (txs, tx_hashes): (Vec<_>, Vec<_>) = txs .into_iter() .map(|(tx, _op)| { let tx_hash = tx.hash(); @@ -340,19 +674,103 @@ mod tests { }) .unzip(); - let signature: TxEthSignature = serde_json::from_value( - serde_json::json!({ - "type": "EthereumSignature", - "signature": "0x080d5db7ab0ef71a31c2919cbe48e5a8c0b28812f8fefffff9231ba8b6d7396773780b783e65d214db162d1471854916f8608c84eba6ea0fbcbe19f9a8b9a8311b", - }) - ).unwrap(); + let batch_message = crate::api_server::tx_sender::get_batch_sign_message(txs.iter()); + let signature = PackedEthSignature::sign(&acc.eth_private_key, &batch_message).unwrap(); assert_eq!( - client.submit_tx_batch(txs, Some(signature)).await?, + client + .submit_tx_batch(txs, Some(TxEthSignature::EthereumSignature(signature))) + .await?, tx_hashes ); server.stop().await; Ok(()) } + + /// This test checks the following criteria: + /// + /// - Attempt to pay fees in an inappropriate token fails for single txs. + /// - Attempt to pay fees in an inappropriate token fails for single batch. + /// - Batch with an inappropriate token still can be processed if the fee is covered with a common token. + #[actix_rt::test] + async fn test_bad_fee_token() -> anyhow::Result<()> { + let (client, server) = TestServer::new().await?; + + let from = ZkSyncAccount::rand(); + from.set_account_id(Some(0xdead)); + let to = ZkSyncAccount::rand(); + + // Submit transaction with a fee token that is not allowed. + let (tx, eth_sig) = from.sign_transfer( + 1, + "PHNX", + 100u64.into(), + 100u64.into(), + &to.address, + 0.into(), + false, + ); + let transfer_bad_token = ZkSyncTx::Transfer(Box::new(tx)); + assert!(client + .submit_tx( + transfer_bad_token.clone(), + Some(TxEthSignature::EthereumSignature(eth_sig)), + None + ) + .await + .unwrap_err() + .to_string() + .contains("Chosen token is not suitable for paying fees")); + + // Prepare batch and make the same mistake. + let bad_batch = vec![transfer_bad_token.clone(), transfer_bad_token]; + let batch_message = crate::api_server::tx_sender::get_batch_sign_message(bad_batch.iter()); + let eth_sig = PackedEthSignature::sign(&from.eth_private_key, &batch_message).unwrap(); + assert!(client + .submit_tx_batch(bad_batch, Some(TxEthSignature::EthereumSignature(eth_sig)),) + .await + .unwrap_err() + .to_string() + .contains("Chosen token is not suitable for paying fees")); + + // Finally, prepare the batch in which fee is covered by the supported token. + let (tx, _) = from.sign_transfer( + 1, + "PHNX", + 100u64.into(), + 0u64.into(), // Note that fee is zero, which is OK. + &to.address, + 0.into(), + false, + ); + let phnx_transfer = ZkSyncTx::Transfer(Box::new(tx)); + let phnx_transfer_hash = phnx_transfer.hash(); + let (tx, _) = from.sign_transfer( + 0, + "ETH", + 0u64.into(), + 200u64.into(), // Here we pay fees for both transfers in ETH. 
+ &to.address, + 0.into(), + false, + ); + let fee_tx = ZkSyncTx::Transfer(Box::new(tx)); + let fee_tx_hash = fee_tx.hash(); + + let good_batch = vec![phnx_transfer, fee_tx]; + let good_batch_hashes = vec![phnx_transfer_hash, fee_tx_hash]; + let batch_message = crate::api_server::tx_sender::get_batch_sign_message(good_batch.iter()); + let eth_sig = PackedEthSignature::sign(&from.eth_private_key, &batch_message).unwrap(); + + assert_eq!( + client + .submit_tx_batch(good_batch, Some(TxEthSignature::EthereumSignature(eth_sig))) + .await?, + good_batch_hashes + ); + + server.stop().await; + Ok(()) + } } diff --git a/core/bin/zksync_api/src/api_server/rpc_server/error.rs b/core/bin/zksync_api/src/api_server/rpc_server/error.rs index e18c7b1385..1ace3d1283 100644 --- a/core/bin/zksync_api/src/api_server/rpc_server/error.rs +++ b/core/bin/zksync_api/src/api_server/rpc_server/error.rs @@ -9,6 +9,7 @@ pub enum RpcErrorCodes { NonceMismatch = 101, IncorrectTx = 103, FeeTooLow = 104, + InappropriateFeeToken = 105, MissingEthSignature = 200, EIP1271SignatureVerificationFail = 201, @@ -73,6 +74,11 @@ impl From for jsonrpc_core::Error { message: inner.to_string(), data: None, }, + SubmitError::InappropriateFeeToken => Self { + code: RpcErrorCodes::InappropriateFeeToken.into(), + message: inner.to_string(), + data: None, + }, SubmitError::CommunicationCoreServer(reason) => Self { code: RpcErrorCodes::Other.into(), message: "Error communicating core server".to_string(), diff --git a/core/bin/zksync_api/src/api_server/rpc_server/mod.rs b/core/bin/zksync_api/src/api_server/rpc_server/mod.rs index 52e1f1273a..3fdb49e791 100644 --- a/core/bin/zksync_api/src/api_server/rpc_server/mod.rs +++ b/core/bin/zksync_api/src/api_server/rpc_server/mod.rs @@ -1,15 +1,14 @@ +// Built-in uses +use std::time::Instant; // External uses use futures::{ - channel::{ - mpsc, - oneshot::{self}, - }, + channel::{mpsc, oneshot}, SinkExt, }; use jsonrpc_core::{Error, IoHandler, MetaIoHandler, Metadata, Middleware, Result}; use jsonrpc_http_server::ServerBuilder; // Workspace uses -use zksync_config::ConfigurationOptions; +use zksync_config::{ApiServerOptions, ConfigurationOptions}; use zksync_storage::{ chain::{ block::records::BlockDetails, operations::records::StoredExecutedPriorityOperation, @@ -63,22 +62,23 @@ pub struct RpcApp { impl RpcApp { pub fn new( - config_options: &ConfigurationOptions, connection_pool: ConnectionPool, sign_verify_request_sender: mpsc::Sender, ticker_request_sender: mpsc::Sender, + config_options: &ConfigurationOptions, + api_server_options: &ApiServerOptions, ) -> Self { let runtime_handle = tokio::runtime::Handle::try_current() .expect("RpcApp must be created from the context of Tokio Runtime"); - let api_requests_caches_size = config_options.api_requests_caches_size; + let api_requests_caches_size = api_server_options.api_requests_caches_size; let confirmations_for_eth_event = config_options.confirmations_for_eth_event; let tx_sender = TxSender::new( connection_pool, sign_verify_request_sender, ticker_request_sender, - config_options, + api_server_options, ); RpcApp { @@ -111,6 +111,7 @@ impl RpcApp { /// Async version of `get_ongoing_deposits` which does not use old futures as a return type. 
async fn get_ongoing_deposits_impl(&self, address: Address) -> Result { + let start = Instant::now(); let confirmations_for_eth_event = self.confirmations_for_eth_event; let ongoing_ops = @@ -140,6 +141,7 @@ impl RpcApp { None }; + metrics::histogram!("api.rpc.get_ongoing_deposits", start.elapsed()); Ok(OngoingDepositsResp { address, deposits, @@ -153,6 +155,7 @@ impl RpcApp { &self, serial_id: u32, ) -> Result> { + let start = Instant::now(); let res = if let Some(executed_op) = self.cache_of_executed_priority_operations.get(&serial_id) { Some(executed_op) @@ -164,14 +167,7 @@ impl RpcApp { .get_executed_priority_operation(serial_id) .await .map_err(|err| { - log::warn!( - "[{}:{}:{}] Internal Server Error: '{}'; input: {}", - file!(), - line!(), - column!(), - err, - serial_id, - ); + vlog::warn!("Internal Server Error: '{}'; input: {}", err, serial_id); Error::internal_error() })?; @@ -182,10 +178,13 @@ impl RpcApp { executed_op }; + + metrics::histogram!("api.rpc.get_executed_priority_operation", start.elapsed()); Ok(res) } async fn get_block_info(&self, block_number: i64) -> Result> { + let start = Instant::now(); let res = if let Some(block) = self.cache_of_blocks_info.get(&block_number) { Some(block) } else { @@ -205,10 +204,13 @@ impl RpcApp { block }; + + metrics::histogram!("api.rpc.get_block_info", start.elapsed()); Ok(res) } async fn get_tx_receipt(&self, tx_hash: TxHash) -> Result> { + let start = Instant::now(); let res = if let Some(tx_receipt) = self .cache_of_transaction_receipts .get(&tx_hash.as_ref().to_vec()) @@ -222,13 +224,10 @@ impl RpcApp { .tx_receipt(tx_hash.as_ref()) .await .map_err(|err| { - log::warn!( - "[{}:{}:{}] Internal Server Error: '{}'; input: {}", - file!(), - line!(), - column!(), + vlog::warn!( + "Internal Server Error: '{}'; input: {}", err, - tx_hash.to_string(), + tx_hash.to_string() ); Error::internal_error() })?; @@ -242,9 +241,32 @@ impl RpcApp { tx_receipt }; + + metrics::histogram!("api.rpc.get_tx_receipt", start.elapsed()); Ok(res) } + async fn token_allowed_for_fees( + mut ticker_request_sender: mpsc::Sender, + token: TokenLike, + ) -> Result { + let (sender, receiver) = oneshot::channel(); + ticker_request_sender + .send(TickerRequest::IsTokenAllowed { + token: token.clone(), + response: sender, + }) + .await + .expect("ticker receiver dropped"); + receiver + .await + .expect("ticker answer sender dropped") + .map_err(|err| { + vlog::warn!("Internal Server Error: '{}'; input: {:?}", err, token); + Error::internal_error() + }) + } + async fn ticker_request( mut ticker_request_sender: mpsc::Sender, tx_type: TxFeeTypes, @@ -254,7 +276,7 @@ impl RpcApp { let req = oneshot::channel(); ticker_request_sender .send(TickerRequest::GetTxFee { - tx_type: tx_type.clone(), + tx_type, address, token: token.clone(), response: req.0, @@ -263,11 +285,8 @@ impl RpcApp { .expect("ticker receiver dropped"); let resp = req.1.await.expect("ticker answer sender dropped"); resp.map_err(|err| { - log::warn!( - "[{}:{}:{}] Internal Server Error: '{}'; input: {:?}, {:?}", - file!(), - line!(), - column!(), + vlog::warn!( + "Internal Server Error: '{}'; input: {:?}, {:?}", err, tx_type, token, @@ -292,19 +311,13 @@ impl RpcApp { .expect("ticker receiver dropped"); let resp = req.1.await.expect("ticker answer sender dropped"); resp.map_err(|err| { - log::warn!( - "[{}:{}:{}] Internal Server Error: '{}'; input: {:?}", - file!(), - line!(), - column!(), - err, - token, - ); + vlog::warn!("Internal Server Error: '{}'; input: {:?}", err, token); Error::internal_error() }) 
} async fn get_account_state(&self, address: &Address) -> Result { + let start = Instant::now(); let mut storage = self.access_storage().await?; let account_info = storage .chain() @@ -319,10 +332,10 @@ impl RpcApp { verified: Default::default(), }; - if let Some((account_id, commited_state)) = account_info.committed { + if let Some((account_id, committed_state)) = account_info.committed { result.account_id = Some(account_id); result.committed = - ResponseAccountState::try_restore(commited_state, &self.tx_sender.tokens).await?; + ResponseAccountState::try_restore(committed_state, &self.tx_sender.tokens).await?; }; if let Some((_, verified_state)) = account_info.verified { @@ -330,6 +343,7 @@ impl RpcApp { ResponseAccountState::try_restore(verified_state, &self.tx_sender.tokens).await?; }; + metrics::histogram!("api.rpc.get_account_state", start.elapsed()); Ok(result) } @@ -369,19 +383,21 @@ impl RpcApp { #[allow(clippy::too_many_arguments)] pub fn start_rpc_server( - config_options: ConfigurationOptions, connection_pool: ConnectionPool, sign_verify_request_sender: mpsc::Sender, ticker_request_sender: mpsc::Sender, panic_notify: mpsc::Sender, + config_options: ConfigurationOptions, + api_server_options: ApiServerOptions, ) { - let addr = config_options.json_rpc_http_server_address; + let addr = api_server_options.json_rpc_http_server_address; let rpc_app = RpcApp::new( - &config_options, connection_pool, sign_verify_request_sender, ticker_request_sender, + &config_options, + &api_server_options, ); std::thread::spawn(move || { let _panic_sentinel = ThreadPanicNotify(panic_notify); diff --git a/core/bin/zksync_api/src/api_server/rpc_server/rpc_impl.rs b/core/bin/zksync_api/src/api_server/rpc_server/rpc_impl.rs index c39f882788..bb8ef1d3df 100644 --- a/core/bin/zksync_api/src/api_server/rpc_server/rpc_impl.rs +++ b/core/bin/zksync_api/src/api_server/rpc_server/rpc_impl.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::time::Instant; // External uses use jsonrpc_core::{Error, Result}; use num::BigUint; @@ -10,16 +11,17 @@ use zksync_types::{ }; // Local uses -use crate::fee_ticker::{BatchFee, Fee, TokenPriceRequestType}; +use crate::{ + api_server::tx_sender::SubmitError, + fee_ticker::{BatchFee, Fee, TokenPriceRequestType}, +}; use bigdecimal::BigDecimal; use super::{error::*, types::*, RpcApp}; impl RpcApp { pub async fn _impl_account_info(self, address: Address) -> Result { - use std::time::Instant; - - let started = Instant::now(); + let start = Instant::now(); let account_state = self.get_account_state(&address).await?; @@ -31,9 +33,10 @@ impl RpcApp { log::trace!( "account_info: address {}, total request processing {}ms", &address, - started.elapsed().as_millis() + start.elapsed().as_millis() ); + metrics::histogram!("api.rpc.account_info", start.elapsed()); Ok(AccountInfoResp { address, id: account_state.account_id, @@ -44,8 +47,9 @@ impl RpcApp { } pub async fn _impl_ethop_info(self, serial_id: u32) -> Result { + let start = Instant::now(); let executed_op = self.get_executed_priority_operation(serial_id).await?; - Ok(if let Some(executed_op) = executed_op { + let result = if let Some(executed_op) = executed_op { let block = self.get_block_info(executed_op.block_number).await?; ETHOpInfoResp { executed: true, @@ -60,7 +64,10 @@ impl RpcApp { executed: false, block: None, } - }) + }; + + metrics::histogram!("api.rpc.ethop_info", start.elapsed()); + Ok(result) } pub async fn _impl_get_confirmations_for_eth_op_amount(self) -> Result { @@ -68,7 +75,9 @@ impl RpcApp { } pub async 
fn _impl_tx_info(self, tx_hash: TxHash) -> Result { + let start = Instant::now(); let stored_receipt = self.get_tx_receipt(tx_hash).await?; + metrics::histogram!("api.rpc.tx_info", start.elapsed()); Ok(if let Some(stored_receipt) = stored_receipt { TransactionInfoResp { executed: true, @@ -96,10 +105,14 @@ impl RpcApp { signature: Box>, fast_processing: Option, ) -> Result { - self.tx_sender + let start = Instant::now(); + let result = self + .tx_sender .submit_tx(*tx, *signature, fast_processing) .await - .map_err(Error::from) + .map_err(Error::from); + metrics::histogram!("api.rpc.tx_submit", start.elapsed()); + result } pub async fn _impl_submit_txs_batch( @@ -107,13 +120,18 @@ impl RpcApp { txs: Vec, eth_signature: Option, ) -> Result> { - self.tx_sender + let start = Instant::now(); + let result = self + .tx_sender .submit_txs_batch(txs, eth_signature) .await - .map_err(Error::from) + .map_err(Error::from); + metrics::histogram!("api.rpc.submit_txs_batch", start.elapsed()); + result } pub async fn _impl_contract_address(self) -> Result { + let start = Instant::now(); let mut storage = self.access_storage().await?; let config = storage.config_schema().load_config().await.map_err(|err| { log::warn!( @@ -134,6 +152,8 @@ impl RpcApp { let gov_contract = config .gov_contract_addr .expect("Server config doesn't contain the gov contract address"); + + metrics::histogram!("api.rpc.contract_address", start.elapsed()); Ok(ContractAddressResp { main_contract, gov_contract, @@ -141,18 +161,13 @@ impl RpcApp { } pub async fn _impl_tokens(self) -> Result> { + let start = Instant::now(); let mut storage = self.access_storage().await?; let mut tokens = storage.tokens_schema().load_tokens().await.map_err(|err| { - log::warn!( - "[{}:{}:{}] Internal Server Error: '{}'; input: N/A", - file!(), - line!(), - column!(), - err - ); + log::warn!("Internal Server Error: '{}'; input: N/A", err); Error::internal_error() })?; - Ok(tokens + let result = tokens .drain() .map(|(id, token)| { if id == 0 { @@ -161,7 +176,9 @@ impl RpcApp { (token.symbol.clone(), token) } }) - .collect()) + .collect(); + metrics::histogram!("api.rpc.tokens", start.elapsed()); + Ok(result) } pub async fn _impl_get_tx_fee( @@ -170,13 +187,16 @@ impl RpcApp { address: Address, token: TokenLike, ) -> Result { - Self::ticker_request( - self.tx_sender.ticker_requests.clone(), - tx_type, - address, - token, - ) - .await + let start = Instant::now(); + let ticker = self.tx_sender.ticker_requests.clone(); + let token_allowed = Self::token_allowed_for_fees(ticker.clone(), token.clone()).await?; + if !token_allowed { + return Err(SubmitError::InappropriateFeeToken.into()); + } + + let result = Self::ticker_request(ticker.clone(), tx_type, address, token).await; + metrics::histogram!("api.rpc.get_tx_fee", start.elapsed()); + result } pub async fn _impl_get_txs_batch_fee_in_wei( @@ -185,6 +205,7 @@ impl RpcApp { addresses: Vec
, token: TokenLike, ) -> Result { + let start = Instant::now(); if tx_types.len() != addresses.len() { return Err(Error { code: RpcErrorCodes::IncorrectTx.into(), @@ -193,39 +214,45 @@ impl RpcApp { }); } - let ticker_request_sender = self.tx_sender.ticker_requests.clone(); + let ticker = self.tx_sender.ticker_requests.clone(); + let token_allowed = Self::token_allowed_for_fees(ticker.clone(), token.clone()).await?; + if !token_allowed { + return Err(SubmitError::InappropriateFeeToken.into()); + } let mut total_fee = BigUint::from(0u32); for (tx_type, address) in tx_types.iter().zip(addresses.iter()) { - total_fee += Self::ticker_request( - ticker_request_sender.clone(), - tx_type.clone(), - *address, - token.clone(), - ) - .await? - .total_fee; + let ticker = ticker.clone(); + let fee = Self::ticker_request(ticker, *tx_type, *address, token.clone()).await?; + total_fee += fee.total_fee; } // Sum of transactions can be unpackable total_fee = closest_packable_fee_amount(&total_fee); + metrics::histogram!("api.rpc.get_txs_batch_fee_in_wei", start.elapsed()); Ok(BatchFee { total_fee }) } pub async fn _impl_get_token_price(self, token: TokenLike) -> Result { - Self::ticker_price_request( + let start = Instant::now(); + let result = Self::ticker_price_request( self.tx_sender.ticker_requests.clone(), token, TokenPriceRequestType::USDForOneToken, ) - .await + .await; + metrics::histogram!("api.rpc.get_token_price", start.elapsed()); + result } pub async fn _impl_get_eth_tx_for_withdrawal( self, withdrawal_hash: TxHash, ) -> Result> { - self.eth_tx_for_withdrawal(withdrawal_hash).await + let start = Instant::now(); + let result = self.eth_tx_for_withdrawal(withdrawal_hash).await; + metrics::histogram!("api.rpc.get_eth_tx_for_withdrawal", start.elapsed()); + result } } diff --git a/core/bin/zksync_api/src/api_server/rpc_server/rpc_trait.rs b/core/bin/zksync_api/src/api_server/rpc_server/rpc_trait.rs index 7f5d4acec6..4c8cb2b128 100644 --- a/core/bin/zksync_api/src/api_server/rpc_server/rpc_trait.rs +++ b/core/bin/zksync_api/src/api_server/rpc_server/rpc_trait.rs @@ -1,5 +1,4 @@ use std::collections::HashMap; -use std::time::Instant; // External uses use futures::{FutureExt, TryFutureExt}; use jsonrpc_core::Error; @@ -79,16 +78,13 @@ pub trait Rpc { impl Rpc for RpcApp { fn account_info(&self, addr: Address) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { handle.spawn(self_._impl_account_info(addr)).await.unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "account_info"); Box::new(resp.boxed().compat()) } fn ethop_info(&self, serial_id: u32) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -97,16 +93,13 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "ethop_info"); Box::new(resp.boxed().compat()) } fn tx_info(&self, hash: TxHash) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { handle.spawn(self_._impl_tx_info(hash)).await.unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "tx_info"); Box::new(resp.boxed().compat()) } @@ -116,7 +109,6 @@ impl Rpc for RpcApp { signature: Box>, fast_processing: Option, ) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -125,7 
+117,6 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "tx_submit"); Box::new(resp.boxed().compat()) } @@ -134,7 +125,6 @@ impl Rpc for RpcApp { txs: Vec, eth_signature: Option, ) -> FutureResp> { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -143,25 +133,20 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "submit_txs_batch"); Box::new(resp.boxed().compat()) } fn contract_address(&self) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { handle.spawn(self_._impl_contract_address()).await.unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "contract_address"); Box::new(resp.boxed().compat()) } fn tokens(&self) -> FutureResp> { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { handle.spawn(self_._impl_tokens()).await.unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "tokens"); Box::new(resp.boxed().compat()) } @@ -171,7 +156,6 @@ impl Rpc for RpcApp { address: Address, token_like: TokenLike, ) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -180,7 +164,6 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "get_tx_fee"); Box::new(resp.boxed().compat()) } @@ -190,7 +173,6 @@ impl Rpc for RpcApp { addresses: Vec
, token_like: TokenLike, ) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -199,12 +181,10 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "get_txs_batch_fee_in_wei"); Box::new(resp.boxed().compat()) } fn get_token_price(&self, token_like: TokenLike) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -213,12 +193,10 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "get_token_price"); Box::new(resp.boxed().compat()) } fn get_confirmations_for_eth_op_amount(&self) -> FutureResp { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -227,12 +205,10 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "get_confirmations_for_eth_op_amount"); Box::new(resp.boxed().compat()) } fn get_eth_tx_for_withdrawal(&self, withdrawal_hash: TxHash) -> FutureResp> { - let start = Instant::now(); let handle = self.runtime_handle.clone(); let self_ = self.clone(); let resp = async move { @@ -241,7 +217,6 @@ impl Rpc for RpcApp { .await .unwrap() }; - metrics::histogram!("api", start.elapsed(), "rpc" => "get_eth_tx_for_withdrawal"); Box::new(resp.boxed().compat()) } } diff --git a/core/bin/zksync_api/src/api_server/rpc_subscriptions.rs b/core/bin/zksync_api/src/api_server/rpc_subscriptions.rs index ec572ced3a..8354e1ebd1 100644 --- a/core/bin/zksync_api/src/api_server/rpc_subscriptions.rs +++ b/core/bin/zksync_api/src/api_server/rpc_subscriptions.rs @@ -9,7 +9,7 @@ use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, PubSubHandler, Session, SubscriptionId}; use jsonrpc_ws_server::RequestContext; // Workspace uses -use zksync_config::ConfigurationOptions; +use zksync_config::{ApiServerOptions, ConfigurationOptions}; use zksync_storage::ConnectionPool; use zksync_types::{tx::TxHash, ActionType, Address}; // Local uses @@ -175,16 +175,15 @@ struct RpcSubApp { #[allow(clippy::too_many_arguments)] pub fn start_ws_server( - config_options: &ConfigurationOptions, db_pool: ConnectionPool, sign_verify_request_sender: mpsc::Sender, ticker_request_sender: mpsc::Sender, panic_notify: mpsc::Sender, + config_options: ConfigurationOptions, + api_server_options: ApiServerOptions, ) { - let config_options = config_options.clone(); - let api_caches_size = config_options.api_requests_caches_size; - - let addr = config_options.json_rpc_ws_server_address; + let api_caches_size = api_server_options.api_requests_caches_size; + let addr = api_server_options.json_rpc_ws_server_address; let (event_sub_sender, event_sub_receiver) = mpsc::channel(2048); @@ -198,10 +197,11 @@ pub fn start_ws_server( ); let req_rpc_app = super::rpc_server::RpcApp::new( - &config_options, db_pool, sign_verify_request_sender, ticker_request_sender, + &config_options, + &api_server_options, ); std::thread::spawn(move || { diff --git a/core/bin/zksync_api/src/api_server/tx_sender.rs b/core/bin/zksync_api/src/api_server/tx_sender.rs index 5d83b682c4..280049a71f 100644 --- a/core/bin/zksync_api/src/api_server/tx_sender.rs +++ b/core/bin/zksync_api/src/api_server/tx_sender.rs @@ -1,7 +1,7 @@ //! Helper module to submit transactions into the zkSync Network. 
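+//!
+//! In short: `submit_tx` first asks the fee ticker whether the chosen fee token
+//! is allowed for paying fees at all, and only then compares the user-provided
+//! fee (scaled up via `scale_user_fee_up`) against the fee required by the ticker.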
 // Built-in uses
-use std::fmt::Display;
+use std::{fmt::Display, str::FromStr};
 
 // External uses
 use bigdecimal::BigDecimal;
@@ -10,11 +10,11 @@ use futures::{
     channel::{mpsc, oneshot},
     prelude::*,
 };
-use num::{bigint::ToBigInt, BigUint};
+use num::bigint::ToBigInt;
 use thiserror::Error;
 
 // Workspace uses
-use zksync_config::ConfigurationOptions;
+use zksync_config::ApiServerOptions;
 use zksync_storage::ConnectionPool;
 use zksync_types::{
     tx::EthSignData,
@@ -58,6 +58,8 @@ pub enum SubmitError {
     IncorrectTx(String),
     #[error("Transaction adding error: {0}.")]
     TxAdd(TxAddError),
+    #[error("Chosen token is not suitable for paying fees.")]
+    InappropriateFeeToken,
 
     #[error("Communication error with the core server: {0}.")]
     CommunicationCoreServer(String),
@@ -101,16 +103,16 @@ impl TxSender {
         connection_pool: ConnectionPool,
         sign_verify_request_sender: mpsc::Sender<VerifyTxSignatureRequest>,
         ticker_request_sender: mpsc::Sender<TickerRequest>,
-        config_options: &ConfigurationOptions,
+        api_server_options: &ApiServerOptions,
     ) -> Self {
-        let core_api_client = CoreApiClient::new(config_options.core_server_url.clone());
+        let core_api_client = CoreApiClient::new(api_server_options.core_server_url.clone());
 
         Self::with_client(
             core_api_client,
             connection_pool,
             sign_verify_request_sender,
             ticker_request_sender,
-            config_options,
+            api_server_options,
         )
     }
 
@@ -119,11 +121,11 @@ impl TxSender {
         connection_pool: ConnectionPool,
         sign_verify_request_sender: mpsc::Sender<VerifyTxSignatureRequest>,
         ticker_request_sender: mpsc::Sender<TickerRequest>,
-        config_options: &ConfigurationOptions,
+        api_server_options: &ApiServerOptions,
     ) -> Self {
-        let enforce_pubkey_change_fee = config_options.enforce_pubkey_change_fee;
+        let enforce_pubkey_change_fee = api_server_options.enforce_pubkey_change_fee;
         let forced_exit_minimum_account_age =
-            chrono::Duration::from_std(config_options.forced_exit_minimum_account_age)
+            chrono::Duration::from_std(api_server_options.forced_exit_minimum_account_age)
                 .expect("Unable to convert std::Duration to chrono::Duration");
 
         Self {
@@ -183,16 +185,29 @@ impl TxSender {
         let should_enforce_fee = !matches!(tx_type, TxFeeTypes::ChangePubKey{..}) || self.enforce_pubkey_change_fee;
 
+        let fee_allowed =
+            Self::token_allowed_for_fees(ticker_request_sender.clone(), token.clone()).await?;
+
+        if !fee_allowed {
+            return Err(SubmitError::InappropriateFeeToken);
+        }
+
         let required_fee =
             Self::ticker_request(ticker_request_sender, tx_type, address, token.clone())
                 .await?;
-        // We allow fee to be 5% off the required fee
-        let scaled_provided_fee =
-            provided_fee.clone() * BigUint::from(105u32) / BigUint::from(100u32);
-        if required_fee.total_fee >= scaled_provided_fee && should_enforce_fee {
-            vlog::warn!(
-                "User provided fee is too low, required: {:?}, provided: {} (scaled: {}), token: {:?}",
-                required_fee, provided_fee, scaled_provided_fee, token
+        // Converting `BigUint` to `BigInt` is safe.
+        let required_fee: BigDecimal = required_fee.total_fee.to_bigint().unwrap().into();
+        let provided_fee: BigDecimal = provided_fee.to_bigint().unwrap().into();
+        // Scale the provided fee up, since the price may change between signing the transaction and sending it to the server.
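+        // The rule implemented by `scale_user_fee_up` below is
+        // max(provided_fee * 1.05, provided_fee + $0.01).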
+        let scaled_provided_fee = scale_user_fee_up(provided_fee.clone());
+        if required_fee >= scaled_provided_fee && should_enforce_fee {
+            log::error!(
+                "User provided fee is too low, required: {}, provided: {} (scaled: {}); difference {}, token: {:?}",
+                required_fee.to_string(),
+                provided_fee.to_string(),
+                scaled_provided_fee.to_string(),
+                (required_fee - scaled_provided_fee).to_string(),
+                token
             );
 
             return Err(SubmitError::TxAdd(TxAddError::TxFeeTooLow));
@@ -240,16 +255,35 @@ impl TxSender {
             let tx_fee_info = tx.tx.get_fee_info();
             if let Some((tx_type, token, address, provided_fee)) = tx_fee_info {
+                let fee_allowed =
+                    Self::token_allowed_for_fees(self.ticker_requests.clone(), token.clone())
+                        .await?;
+
+                // In batches, transactions with a non-popular token are allowed to be included, but should not
+                // be used to pay fees. Fees must be covered by a more common token.
+                if !fee_allowed && provided_fee != 0u64.into() {
+                    return Err(SubmitError::InappropriateFeeToken);
+                }
+
+                let check_token = if fee_allowed {
+                    // For allowed tokens, we perform the check in the transaction token (as expected).
+                    token.clone()
+                } else {
+                    // For non-popular tokens we've already checked that the provided fee is 0,
+                    // and the USD price will be checked in ETH.
+                    TokenLike::Id(0)
+                };
+
                 let required_fee = Self::ticker_request(
                     self.ticker_requests.clone(),
                     tx_type,
                     address,
-                    token.clone(),
+                    check_token.clone(),
                 )
                 .await?;
                 let token_price_in_usd = Self::ticker_price_request(
                     self.ticker_requests.clone(),
-                    token.clone(),
+                    check_token.clone(),
                     TokenPriceRequestType::USDForOneWei,
                 )
                 .await?;
@@ -262,10 +296,16 @@ impl TxSender {
                     * &token_price_in_usd;
             }
         }
-        // We allow fee to be 5% off the required fee
-        let scaled_provided_fee_in_usd =
-            provided_total_usd_fee.clone() * BigDecimal::from(105u32) / BigDecimal::from(100u32);
+        // Scale the provided fee up, since the price may change between signing the transaction and sending it to the server.
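+        // Note that for batches the comparison happens in USD terms: each transaction's
+        // fee is converted through the ticker's USD price, so a batch may mix fee tokens
+        // as long as the total in USD covers the requirement.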
+        let scaled_provided_fee_in_usd = scale_user_fee_up(provided_total_usd_fee.clone());
         if required_total_usd_fee >= scaled_provided_fee_in_usd {
+            log::error!(
+                "User provided batch fee is too low, required: {}, provided: {} (scaled: {}); difference {}",
+                required_total_usd_fee.to_string(),
+                provided_total_usd_fee.to_string(),
+                scaled_provided_fee_in_usd.to_string(),
+                (required_total_usd_fee - scaled_provided_fee_in_usd).to_string(),
+            );
             return Err(SubmitError::TxAdd(TxAddError::TxBatchFeeTooLow));
         }
 
@@ -411,7 +451,7 @@ impl TxSender {
         let req = oneshot::channel();
         ticker_request_sender
             .send(TickerRequest::GetTxFee {
-                tx_type: tx_type.clone(),
+                tx_type,
                 address,
                 token: token.clone(),
                 response: req.0,
@@ -423,6 +463,24 @@ impl TxSender {
         resp.map_err(|err| internal_error!(err))
     }
 
+    async fn token_allowed_for_fees(
+        mut ticker_request_sender: mpsc::Sender<TickerRequest>,
+        token: TokenLike,
+    ) -> Result<bool, SubmitError> {
+        let (sender, receiver) = oneshot::channel();
+        ticker_request_sender
+            .send(TickerRequest::IsTokenAllowed {
+                token: token.clone(),
+                response: sender,
+            })
+            .await
+            .expect("ticker receiver dropped");
+        receiver
+            .await
+            .expect("ticker answer sender dropped")
+            .map_err(SubmitError::internal)
+    }
+
     async fn ticker_price_request(
         mut ticker_request_sender: mpsc::Sender<TickerRequest>,
         token: TokenLike,
@@ -492,6 +550,15 @@ async fn verify_tx_info_message_signature(
     send_verify_request_and_recv(request, req_channel, receiever).await
 }
 
+pub(crate) fn get_batch_sign_message<'a, I: Iterator<Item = &'a ZkSyncTx>>(txs: I) -> Vec<u8> {
+    tiny_keccak::keccak256(
+        txs.flat_map(|tx| tx.get_bytes())
+            .collect::<Vec<u8>>()
+            .as_slice(),
+    )
+    .to_vec()
+}
+
 /// Send a request for Ethereum signature verification and wait for the response.
 /// Unlike in case of `verify_tx_info_message_signature`, we do not require
 /// every transaction from the batch to be signed. The signature must be obtained
@@ -517,21 +584,36 @@ async fn verify_txs_batch_signature(
         });
     }
     // User is expected to sign hash of the data of all transactions in the batch.
-    let message = tiny_keccak::keccak256(
-        txs.iter()
-            .flat_map(|tx| tx.tx.get_bytes())
-            .collect::<Vec<u8>>()
-            .as_slice(),
-    )
-    .to_vec();
+    let message = get_batch_sign_message(txs.iter().map(|tx| &tx.tx));
 
     let eth_sign_data = EthSignData { signature, message };
 
-    let (sender, receiever) = oneshot::channel();
+    let (sender, receiver) = oneshot::channel();
 
     let request = VerifyTxSignatureRequest {
         tx: TxVariant::Batch(txs, BatchSignData(eth_sign_data)),
         response: sender,
     };
 
-    send_verify_request_and_recv(request, req_channel, receiever).await
+    send_verify_request_and_recv(request, req_channel, receiver).await
+}
+
+/// Scales the fee provided by the user up, to check whether the provided fee is enough to cover our expenses for
+/// maintaining the protocol.
+///
+/// We calculate both `provided_fee * 1.05` and `provided_fee + 1 cent` and choose the maximum.
+/// This is required since the price may change between signing the transaction and sending it to the server.
+fn scale_user_fee_up(provided_total_usd_fee: BigDecimal) -> BigDecimal {
+    // Scale by 5%.
+    let scaled_percent_provided_fee_in_usd =
+        provided_total_usd_fee.clone() * BigDecimal::from(105u32) / BigDecimal::from(100u32);
+
+    // Add 1 cent.
+    let scaled_one_cent_provided_fee_in_usd =
+        provided_total_usd_fee + BigDecimal::from_str("0.01").unwrap();
+
+    // Choose the maximum of these two values.
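+    // E.g., with illustrative numbers: a provided fee of $10.00 scales to
+    // max($10.50, $10.01) = $10.50, while a provided fee of $0.10 scales to
+    // max($0.105, $0.11) = $0.11, i.e. the one-cent bump dominates for small fees.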
+ std::cmp::max( + scaled_percent_provided_fee_in_usd, + scaled_one_cent_provided_fee_in_usd, + ) } diff --git a/core/bin/zksync_api/src/fee_ticker/constants.rs b/core/bin/zksync_api/src/fee_ticker/constants.rs new file mode 100644 index 0000000000..e1629e6a6c --- /dev/null +++ b/core/bin/zksync_api/src/fee_ticker/constants.rs @@ -0,0 +1,33 @@ +use zksync_types::{ + config::MAX_WITHDRAWALS_TO_COMPLETE_IN_A_CALL, + gas_counter::{CommitCost, GasCounter, VerifyCost}, + ChangePubKeyOp, TransferOp, TransferToNewOp, WithdrawOp, +}; + +// Base operation costs estimated via `gas_price` test. +// +// Factor of 1000 * CHUNKS accounts for constant overhead of the commit and verify for block of 680 chunks +// (140k + 530k) / 680. Should be removed after recursion is introduced to mainnet. +pub(crate) const BASE_TRANSFER_COST: u64 = + VerifyCost::TRANSFER_COST + CommitCost::TRANSFER_COST + 1000 * (TransferOp::CHUNKS as u64); +pub(crate) const BASE_TRANSFER_TO_NEW_COST: u64 = VerifyCost::TRANSFER_TO_NEW_COST + + CommitCost::TRANSFER_TO_NEW_COST + + 1000 * (TransferToNewOp::CHUNKS as u64); +pub(crate) const BASE_WITHDRAW_COST: u64 = VerifyCost::WITHDRAW_COST + + CommitCost::WITHDRAW_COST + + GasCounter::COMPLETE_WITHDRAWALS_COST + + 1000 * (WithdrawOp::CHUNKS as u64) + + (GasCounter::COMPLETE_WITHDRAWALS_BASE_COST / MAX_WITHDRAWALS_TO_COMPLETE_IN_A_CALL); +pub(crate) const BASE_CHANGE_PUBKEY_OFFCHAIN_COST: u64 = CommitCost::CHANGE_PUBKEY_COST_OFFCHAIN + + VerifyCost::CHANGE_PUBKEY_COST + + 1000 * (ChangePubKeyOp::CHUNKS as u64); +pub(crate) const BASE_CHANGE_PUBKEY_ONCHAIN_COST: u64 = CommitCost::CHANGE_PUBKEY_COST_ONCHAIN + + zksync_types::gas_counter::VerifyCost::CHANGE_PUBKEY_COST + + 1000 * (ChangePubKeyOp::CHUNKS as u64); + +// The Subsidized cost of operations. +// Represent the cost of performing operations after recursion is introduced to mainnet. +pub(crate) const SUBSIDY_TRANSFER_COST: u64 = 550; +pub(crate) const SUBSIDY_TRANSFER_TO_NEW_COST: u64 = 550 * 3; +pub(crate) const SUBSIDY_WITHDRAW_COST: u64 = 45000; +pub(crate) const SUBSIDY_CHANGE_PUBKEY_OFFCHAIN_COST: u64 = 10000; diff --git a/core/bin/zksync_api/src/fee_ticker/fee.rs b/core/bin/zksync_api/src/fee_ticker/fee.rs new file mode 100644 index 0000000000..2fda393402 --- /dev/null +++ b/core/bin/zksync_api/src/fee_ticker/fee.rs @@ -0,0 +1,75 @@ +// Built-in deps +// External deps +use num::{rational::Ratio, BigUint}; +use serde::{Deserialize, Serialize}; +// Workspace deps +use zksync_types::helpers::{pack_fee_amount, unpack_fee_amount}; +use zksync_utils::{round_precision, BigUintSerdeAsRadix10Str}; +// Local deps + +/// Type of the fee calculation pattern. +/// Unlike the `TxFeeTypes`, this enum represents the fee +/// from the point of zkSync view, rather than from the users +/// point of view. +/// Users do not divide transfers into `Transfer` and +/// `TransferToNew`, while in zkSync it's two different operations. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
+pub enum OutputFeeType {
+    Transfer,
+    TransferToNew,
+    Withdraw,
+    FastWithdraw,
+    ChangePubKey {
+        #[serde(rename = "onchainPubkeyAuth")]
+        onchain_pubkey_auth: bool,
+    },
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct Fee {
+    pub fee_type: OutputFeeType,
+    #[serde(with = "BigUintSerdeAsRadix10Str")]
+    pub gas_tx_amount: BigUint,
+    #[serde(with = "BigUintSerdeAsRadix10Str")]
+    pub gas_price_wei: BigUint,
+    #[serde(with = "BigUintSerdeAsRadix10Str")]
+    pub gas_fee: BigUint,
+    #[serde(with = "BigUintSerdeAsRadix10Str")]
+    pub zkp_fee: BigUint,
+    #[serde(with = "BigUintSerdeAsRadix10Str")]
+    pub total_fee: BigUint,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct BatchFee {
+    #[serde(with = "BigUintSerdeAsRadix10Str")]
+    pub total_fee: BigUint,
+}
+
+impl Fee {
+    pub fn new(
+        fee_type: OutputFeeType,
+        zkp_fee: Ratio<BigUint>,
+        gas_fee: Ratio<BigUint>,
+        gas_tx_amount: BigUint,
+        gas_price_wei: BigUint,
+    ) -> Self {
+        let zkp_fee = round_precision(&zkp_fee, 18).ceil().to_integer();
+        let gas_fee = round_precision(&gas_fee, 18).ceil().to_integer();
+
+        let total_fee = zkp_fee.clone() + gas_fee.clone();
+        let total_fee = unpack_fee_amount(&pack_fee_amount(&total_fee))
+            .expect("Failed to round gas fee amount.");
+
+        Self {
+            fee_type,
+            gas_tx_amount,
+            gas_price_wei,
+            gas_fee,
+            zkp_fee,
+            total_fee,
+        }
+    }
+}
diff --git a/core/bin/zksync_api/src/fee_ticker/fee_token_validator.rs b/core/bin/zksync_api/src/fee_ticker/fee_token_validator.rs
new file mode 100644
index 0000000000..2aed3e5af6
--- /dev/null
+++ b/core/bin/zksync_api/src/fee_ticker/fee_token_validator.rs
@@ -0,0 +1,121 @@
+//! This module contains the definition of the fee token validator,
+//! an entity which decides whether a certain ERC20 token is suitable for paying fees.
+
+// Built-in uses
+use std::collections::{HashMap, HashSet};
+// Workspace uses
+use zksync_types::{
+    tokens::{Token, TokenLike},
+    Address,
+};
+// Local uses
+use crate::utils::token_db_cache::TokenDBCache;
+
+/// The fee token validator decides whether a certain ERC20 token is suitable for paying fees.
+#[derive(Debug, Clone)]
+pub(crate) struct FeeTokenValidator {
+    tokens_cache: TokenCacheWrapper,
+    /// List of tokens that are not accepted for paying fees.
+    disabled_tokens: HashSet<Address>,
+}
+
+impl FeeTokenValidator {
+    pub(crate) fn new(
+        cache: impl Into<TokenCacheWrapper>,
+        disabled_tokens: HashSet<Address>,
+    ) -> Self {
+        Self {
+            tokens_cache: cache.into(),
+            disabled_tokens,
+        }
+    }
+
+    /// Returns `true` if the token can be used to pay fees.
+    pub(crate) async fn token_allowed(&self, token: TokenLike) -> anyhow::Result<bool> {
+        let token = self.resolve_token(token).await?;
+
+        self.check_token(token).await
+    }
+
+    async fn resolve_token(&self, token: TokenLike) -> anyhow::Result<Option<Token>> {
+        self.tokens_cache.get_token(token).await
+    }
+
+    async fn check_token(&self, token: Option<Token>) -> anyhow::Result<bool> {
+        // Currently we add tokens to zkSync manually, thus we can decide beforehand whether a token is acceptable.
+        // Later we'll check Uniswap trading volume for tokens. That's why this function is already `async` even
+        // though it's not really `async` at this moment.
+
+        if let Some(token) = token {
+            let not_acceptable = self.disabled_tokens.contains(&token.address);
+            Ok(!not_acceptable)
+        } else {
+            // Unknown tokens aren't suitable for our needs, obviously.
+            Ok(false)
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) enum TokenCacheWrapper {
+    DB(TokenDBCache),
+    Memory(HashMap<TokenLike, Token>),
+}
+
+impl From<TokenDBCache> for TokenCacheWrapper {
+    fn from(cache: TokenDBCache) -> Self {
+        Self::DB(cache)
+    }
+}
+
+impl From<HashMap<TokenLike, Token>> for TokenCacheWrapper {
+    fn from(cache: HashMap<TokenLike, Token>) -> Self {
+        Self::Memory(cache)
+    }
+}
+
+impl TokenCacheWrapper {
+    pub async fn get_token(&self, token_like: TokenLike) -> anyhow::Result<Option<Token>> {
+        match self {
+            Self::DB(cache) => cache.get_token(token_like).await,
+            Self::Memory(cache) => Ok(cache.get(&token_like).cloned()),
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::collections::HashSet;
+    use std::str::FromStr;
+
+    #[tokio::test]
+    async fn check_tokens() {
+        let dai_token_address =
+            Address::from_str("6b175474e89094c44da98b954eedeac495271d0f").unwrap();
+        let dai_token = Token::new(1, dai_token_address, "DAI", 18);
+        let phnx_token_address =
+            Address::from_str("38A2fDc11f526Ddd5a607C1F251C065f40fBF2f7").unwrap();
+        let phnx_token = Token::new(2, phnx_token_address, "PHNX", 18);
+
+        let mut tokens = HashMap::new();
+        tokens.insert(TokenLike::Address(dai_token_address), dai_token);
+        tokens.insert(TokenLike::Address(phnx_token_address), phnx_token);
+
+        let mut disabled_tokens = HashSet::new();
+        disabled_tokens.insert(phnx_token_address);
+
+        let validator = FeeTokenValidator::new(tokens, disabled_tokens);
+
+        let dai_allowed = validator
+            .token_allowed(TokenLike::Address(dai_token_address))
+            .await
+            .unwrap();
+        let phnx_allowed = validator
+            .token_allowed(TokenLike::Address(phnx_token_address))
+            .await
+            .unwrap();
+        assert_eq!(dai_allowed, true);
+        assert_eq!(phnx_allowed, false);
+    }
+}
diff --git a/core/bin/zksync_api/src/fee_ticker/mod.rs b/core/bin/zksync_api/src/fee_ticker/mod.rs
index 143c0e73f6..e978165ca2 100644
--- a/core/bin/zksync_api/src/fee_ticker/mod.rs
+++ b/core/bin/zksync_api/src/fee_ticker/mod.rs
@@ -4,7 +4,7 @@
 //!
`( zkp cost of chunk * number of chunks + gas price of transaction) * token risk factor / cost of token is usd` // Built-in deps -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; // External deps use bigdecimal::BigDecimal; use futures::{ @@ -19,111 +19,120 @@ use num::{ use serde::{Deserialize, Serialize}; use tokio::task::JoinHandle; // Workspace deps -use zksync_config::TokenPriceSource; +use zksync_config::{FeeTickerOptions, TokenPriceSource}; use zksync_storage::ConnectionPool; use zksync_types::{ - gas_counter::{CommitCost, GasCounter, VerifyCost}, - helpers::{pack_fee_amount, unpack_fee_amount}, - Address, ChangePubKeyOp, TokenId, TokenLike, TransferOp, TransferToNewOp, TxFeeTypes, + Address, ChangePubKeyOp, Token, TokenId, TokenLike, TransferOp, TransferToNewOp, TxFeeTypes, WithdrawOp, }; -use zksync_utils::{ratio_to_big_decimal, round_precision, BigUintSerdeAsRadix10Str}; +use zksync_utils::ratio_to_big_decimal; // Local deps -use crate::fee_ticker::ticker_api::coingecko::CoinGeckoAPI; -use crate::fee_ticker::ticker_api::coinmarkercap::CoinMarketCapAPI; use crate::fee_ticker::{ - ticker_api::{FeeTickerAPI, TickerApi, CONNECTION_TIMEOUT}, + fee_token_validator::FeeTokenValidator, + ticker_api::{ + coingecko::CoinGeckoAPI, coinmarkercap::CoinMarketCapAPI, FeeTickerAPI, TickerApi, + CONNECTION_TIMEOUT, + }, ticker_info::{FeeTickerInfo, TickerInfo}, }; -use zksync_types::config::MAX_WITHDRAWALS_TO_COMPLETE_IN_A_CALL; +use crate::utils::token_db_cache::TokenDBCache; + +pub use self::fee::*; +mod constants; +mod fee; +mod fee_token_validator; mod ticker_api; mod ticker_info; -// Base operation costs estimated via `gas_price` test. -// -// Factor of 1000 * CHUNKS accounts for constant overhead of the commit and verify for block of 680 chunks -// (140k + 530k) / 680. Should be removed after recursion is introduced to mainnet. -const BASE_TRANSFER_COST: u64 = - VerifyCost::TRANSFER_COST + CommitCost::TRANSFER_COST + 1000 * (TransferOp::CHUNKS as u64); -const BASE_TRANSFER_TO_NEW_COST: u64 = VerifyCost::TRANSFER_TO_NEW_COST - + CommitCost::TRANSFER_TO_NEW_COST - + 1000 * (TransferToNewOp::CHUNKS as u64); -const BASE_WITHDRAW_COST: u64 = VerifyCost::WITHDRAW_COST - + CommitCost::WITHDRAW_COST - + GasCounter::COMPLETE_WITHDRAWALS_COST - + 1000 * (WithdrawOp::CHUNKS as u64) - + (GasCounter::COMPLETE_WITHDRAWALS_BASE_COST / MAX_WITHDRAWALS_TO_COMPLETE_IN_A_CALL); -const BASE_CHANGE_PUBKEY_OFFCHAIN_COST: u64 = CommitCost::CHANGE_PUBKEY_COST_OFFCHAIN - + VerifyCost::CHANGE_PUBKEY_COST - + 1000 * (ChangePubKeyOp::CHUNKS as u64); -const BASE_CHANGE_PUBKEY_ONCHAIN_COST: u64 = CommitCost::CHANGE_PUBKEY_COST_ONCHAIN - + zksync_types::gas_counter::VerifyCost::CHANGE_PUBKEY_COST - + 1000 * (ChangePubKeyOp::CHUNKS as u64); - -/// Type of the fee calculation pattern. -/// Unlike the `TxFeeTypes`, this enum represents the fee -/// from the point of zkSync view, rather than from the users -/// point of view. -/// Users do not divide transfers into `Transfer` and -/// `TransferToNew`, while in zkSync it's two different operations. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum OutputFeeType { - Transfer, - TransferToNew, - Withdraw, - FastWithdraw, - ChangePubKey { - #[serde(rename = "onchainPubkeyAuth")] - onchain_pubkey_auth: bool, - }, -} +#[cfg(test)] +mod tests; +/// Contains cost of zkSync operations in Wei. 
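+///
+/// `standard_cost` holds the current base costs of each operation, while
+/// `subsidize_cost` holds the reduced costs expected once recursion reaches
+/// mainnet; the ticker picks one of the two maps per fee token (see
+/// `is_token_subsidized` below).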
#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(rename_all = "camelCase")] -pub struct Fee { - pub fee_type: OutputFeeType, - #[serde(with = "BigUintSerdeAsRadix10Str")] - pub gas_tx_amount: BigUint, - #[serde(with = "BigUintSerdeAsRadix10Str")] - pub gas_price_wei: BigUint, - #[serde(with = "BigUintSerdeAsRadix10Str")] - pub gas_fee: BigUint, - #[serde(with = "BigUintSerdeAsRadix10Str")] - pub zkp_fee: BigUint, - #[serde(with = "BigUintSerdeAsRadix10Str")] - pub total_fee: BigUint, +pub struct GasOperationsCost { + standard_cost: HashMap, + subsidize_cost: HashMap, } -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(rename_all = "camelCase")] -pub struct BatchFee { - #[serde(with = "BigUintSerdeAsRadix10Str")] - pub total_fee: BigUint, -} +impl GasOperationsCost { + pub fn from_constants(fast_processing_coeff: f64) -> Self { + // We increase gas price for fast withdrawals, since it will induce generating a smaller block + // size, resulting in us paying more gas than for bigger block. + let standard_fast_withdrawal_cost = + (constants::BASE_WITHDRAW_COST as f64 * fast_processing_coeff) as u32; + let subsidy_fast_withdrawal_cost = + (constants::SUBSIDY_WITHDRAW_COST as f64 * fast_processing_coeff) as u32; -impl Fee { - pub fn new( - fee_type: OutputFeeType, - zkp_fee: Ratio, - gas_fee: Ratio, - gas_tx_amount: BigUint, - gas_price_wei: BigUint, - ) -> Self { - let zkp_fee = round_precision(&zkp_fee, 18).ceil().to_integer(); - let gas_fee = round_precision(&gas_fee, 18).ceil().to_integer(); + let standard_cost = vec![ + ( + OutputFeeType::Transfer, + constants::BASE_TRANSFER_COST.into(), + ), + ( + OutputFeeType::TransferToNew, + constants::BASE_TRANSFER_TO_NEW_COST.into(), + ), + ( + OutputFeeType::Withdraw, + constants::BASE_WITHDRAW_COST.into(), + ), + ( + OutputFeeType::FastWithdraw, + standard_fast_withdrawal_cost.into(), + ), + ( + OutputFeeType::ChangePubKey { + onchain_pubkey_auth: false, + }, + constants::BASE_CHANGE_PUBKEY_OFFCHAIN_COST.into(), + ), + ( + OutputFeeType::ChangePubKey { + onchain_pubkey_auth: true, + }, + constants::BASE_CHANGE_PUBKEY_ONCHAIN_COST.into(), + ), + ] + .into_iter() + .collect::>(); - let total_fee = zkp_fee.clone() + gas_fee.clone(); - let total_fee = unpack_fee_amount(&pack_fee_amount(&total_fee)) - .expect("Failed to round gas fee amount."); + let subsidize_cost = vec![ + ( + OutputFeeType::Transfer, + constants::SUBSIDY_TRANSFER_COST.into(), + ), + ( + OutputFeeType::TransferToNew, + constants::SUBSIDY_TRANSFER_TO_NEW_COST.into(), + ), + ( + OutputFeeType::Withdraw, + constants::SUBSIDY_WITHDRAW_COST.into(), + ), + ( + OutputFeeType::FastWithdraw, + subsidy_fast_withdrawal_cost.into(), + ), + ( + OutputFeeType::ChangePubKey { + onchain_pubkey_auth: false, + }, + constants::SUBSIDY_CHANGE_PUBKEY_OFFCHAIN_COST.into(), + ), + ( + OutputFeeType::ChangePubKey { + onchain_pubkey_auth: true, + }, + constants::BASE_CHANGE_PUBKEY_ONCHAIN_COST.into(), + ), + ] + .into_iter() + .collect::>(); Self { - fee_type, - gas_tx_amount, - gas_price_wei, - gas_fee, - zkp_fee, - total_fee, + standard_cost, + subsidize_cost, } } } @@ -131,8 +140,9 @@ impl Fee { #[derive(Debug, Serialize, Deserialize, Clone)] pub struct TickerConfig { zkp_cost_chunk_usd: Ratio, - gas_cost_tx: HashMap, //wei + gas_cost_tx: GasOperationsCost, tokens_risk_factors: HashMap>, + not_subsidized_tokens: HashSet
, } #[derive(Debug, PartialEq, Eq)] @@ -141,6 +151,7 @@ pub enum TokenPriceRequestType { USDForOneToken, } +#[derive(Debug)] pub enum TickerRequest { GetTxFee { tx_type: TxFeeTypes, @@ -153,6 +164,10 @@ pub enum TickerRequest { response: oneshot::Sender>, req_type: TokenPriceRequestType, }, + IsTokenAllowed { + token: TokenLike, + response: oneshot::Sender>, + }, } struct FeeTicker { @@ -160,60 +175,44 @@ struct FeeTicker { info: INFO, requests: Receiver, config: TickerConfig, + validator: FeeTokenValidator, } #[must_use] pub fn run_ticker_task( - token_price_source: TokenPriceSource, - fast_processing_coeff: f64, db_pool: ConnectionPool, tricker_requests: Receiver, ) -> JoinHandle<()> { - // We increase gas price for fast withdrawals, since it will induce generating a smaller block - // size, resulting in us paying more gas than for bigger block. - let fast_withdrawal_cost = (BASE_WITHDRAW_COST as f64 * fast_processing_coeff) as u32; + let config = FeeTickerOptions::from_env(); let ticker_config = TickerConfig { zkp_cost_chunk_usd: Ratio::from_integer(BigUint::from(10u32).pow(3u32)).inv(), - gas_cost_tx: vec![ - (OutputFeeType::Transfer, BASE_TRANSFER_COST.into()), - ( - OutputFeeType::TransferToNew, - BASE_TRANSFER_TO_NEW_COST.into(), - ), - (OutputFeeType::Withdraw, BASE_WITHDRAW_COST.into()), - (OutputFeeType::FastWithdraw, fast_withdrawal_cost.into()), - ( - OutputFeeType::ChangePubKey { - onchain_pubkey_auth: false, - }, - BASE_CHANGE_PUBKEY_OFFCHAIN_COST.into(), - ), - ( - OutputFeeType::ChangePubKey { - onchain_pubkey_auth: true, - }, - BASE_CHANGE_PUBKEY_ONCHAIN_COST.into(), - ), - ] - .into_iter() - .collect(), + gas_cost_tx: GasOperationsCost::from_constants(config.fast_processing_coeff), tokens_risk_factors: HashMap::new(), + not_subsidized_tokens: config.not_subsidized_tokens, }; + let cache = TokenDBCache::new(db_pool.clone()); + let validator = FeeTokenValidator::new(cache, config.disabled_tokens); + let client = reqwest::ClientBuilder::new() .timeout(CONNECTION_TIMEOUT) .connect_timeout(CONNECTION_TIMEOUT) .build() .expect("Failed to build reqwest::Client"); - match token_price_source { + match config.token_price_source { TokenPriceSource::CoinMarketCap { base_url } => { let token_price_api = CoinMarketCapAPI::new(client, base_url); let ticker_api = TickerApi::new(db_pool.clone(), token_price_api); let ticker_info = TickerInfo::new(db_pool); - let fee_ticker = - FeeTicker::new(ticker_api, ticker_info, tricker_requests, ticker_config); + let fee_ticker = FeeTicker::new( + ticker_api, + ticker_info, + tricker_requests, + ticker_config, + validator, + ); tokio::spawn(fee_ticker.run()) } @@ -223,8 +222,13 @@ pub fn run_ticker_task( let ticker_api = TickerApi::new(db_pool.clone(), token_price_api); let ticker_info = TickerInfo::new(db_pool); - let fee_ticker = - FeeTicker::new(ticker_api, ticker_info, tricker_requests, ticker_config); + let fee_ticker = FeeTicker::new( + ticker_api, + ticker_info, + tricker_requests, + ticker_config, + validator, + ); tokio::spawn(fee_ticker.run()) } @@ -232,12 +236,19 @@ pub fn run_ticker_task( } impl FeeTicker { - fn new(api: API, info: INFO, requests: Receiver, config: TickerConfig) -> Self { + fn new( + api: API, + info: INFO, + requests: Receiver, + config: TickerConfig, + validator: FeeTokenValidator, + ) -> Self { Self { api, info, requests, config, + validator, } } @@ -263,6 +274,10 @@ impl FeeTicker { let price = self.get_token_price(token, req_type).await; response.send(price).unwrap_or_default(); } + TickerRequest::IsTokenAllowed { 
token, response } => { + let allowed = self.validator.token_allowed(token).await; + response.send(allowed).unwrap_or_default(); + } } } } @@ -270,9 +285,9 @@ impl FeeTicker { async fn get_token_price( &self, token: TokenLike, - req_rype: TokenPriceRequestType, + request_type: TokenPriceRequestType, ) -> Result { - let factor = match req_rype { + let factor = match request_type { TokenPriceRequestType::USDForOneWei => { let token_decimals = self.api.get_token(token.clone()).await?.decimals; BigUint::from(10u32).pow(u32::from(token_decimals)) @@ -291,6 +306,11 @@ impl FeeTicker { self.info.is_account_new(address).await } + /// Returns `true` if the token is subsidized. + async fn is_token_subsidized(&mut self, token: Token) -> bool { + !self.config.not_subsidized_tokens.contains(&token.address) + } + async fn get_fee_from_ticker_in_wei( &mut self, tx_type: TxFeeTypes, @@ -327,7 +347,24 @@ impl FeeTicker { }; // Convert chunks amount to `BigUint`. let op_chunks = BigUint::from(op_chunks); - let gas_tx_amount = self.config.gas_cost_tx.get(&fee_type).cloned().unwrap(); + let gas_tx_amount = { + let is_token_subsidized = self.is_token_subsidized(token.clone()).await; + if is_token_subsidized { + self.config + .gas_cost_tx + .subsidize_cost + .get(&fee_type) + .cloned() + .unwrap() + } else { + self.config + .gas_cost_tx + .standard_cost + .get(&fee_type) + .cloned() + .unwrap() + } + }; let gas_price_wei = self.api.get_gas_price_wei().await?; let wei_price_usd = self.api.get_last_quote(TokenLike::Id(0)).await?.usd_price / BigUint::from(10u32).pow(18u32); @@ -354,241 +391,3 @@ impl FeeTicker { )) } } - -#[cfg(test)] -mod test { - use super::*; - use async_trait::async_trait; - use bigdecimal::BigDecimal; - use chrono::Utc; - use futures::channel::mpsc; - use futures::executor::block_on; - use std::str::FromStr; - use zksync_types::{Address, Token, TokenId, TokenPrice}; - use zksync_utils::{ratio_to_big_decimal, UnsignedRatioSerializeAsDecimal}; - - const TEST_FAST_WITHDRAW_COEFF: u64 = 10; - - #[derive(Debug, Clone)] - struct TestToken { - id: TokenId, - price_usd: Ratio, - risk_factor: Option>, - precision: u8, - } - - impl TestToken { - fn new(id: TokenId, price_usd: f64, risk_factor: Option, precision: u8) -> Self { - Self { - id, - price_usd: UnsignedRatioSerializeAsDecimal::deserialize_from_str_with_dot( - &price_usd.to_string(), - ) - .unwrap(), - risk_factor: risk_factor.map(|risk_factor| { - UnsignedRatioSerializeAsDecimal::deserialize_from_str_with_dot( - &risk_factor.to_string(), - ) - .unwrap() - }), - precision, - } - } - - fn risk_factor(&self) -> Ratio { - self.risk_factor - .clone() - .unwrap_or_else(|| Ratio::from_integer(1u32.into())) - } - - fn eth() -> Self { - Self::new(0, 182.0, None, 18) - } - - fn cheap() -> Self { - Self::new(1, 1.0, Some(2.5), 6) - } - fn expensive() -> Self { - Self::new(2, 173_134.192_3, Some(0.9), 18) - } - - fn all_tokens() -> Vec { - vec![Self::eth(), Self::cheap(), Self::expensive()] - } - } - - fn get_test_ticker_config() -> TickerConfig { - TickerConfig { - zkp_cost_chunk_usd: UnsignedRatioSerializeAsDecimal::deserialize_from_str_with_dot( - "0.001", - ) - .unwrap(), - gas_cost_tx: vec![ - (OutputFeeType::Transfer, BigUint::from(BASE_TRANSFER_COST)), - ( - OutputFeeType::TransferToNew, - BigUint::from(BASE_TRANSFER_TO_NEW_COST), - ), - (OutputFeeType::Withdraw, BigUint::from(BASE_WITHDRAW_COST)), - ( - OutputFeeType::FastWithdraw, - BigUint::from(BASE_WITHDRAW_COST * TEST_FAST_WITHDRAW_COEFF), - ), - ( - OutputFeeType::ChangePubKey { - 
onchain_pubkey_auth: false, - }, - BASE_CHANGE_PUBKEY_OFFCHAIN_COST.into(), - ), - ( - OutputFeeType::ChangePubKey { - onchain_pubkey_auth: true, - }, - BASE_CHANGE_PUBKEY_ONCHAIN_COST.into(), - ), - ] - .into_iter() - .collect(), - tokens_risk_factors: TestToken::all_tokens() - .into_iter() - .filter_map(|t| { - let id = t.id; - t.risk_factor.map(|risk| (id, risk)) - }) - .collect(), - } - } - - struct MockApiProvider; - #[async_trait] - impl FeeTickerAPI for MockApiProvider { - async fn get_last_quote(&self, token: TokenLike) -> Result { - for test_token in TestToken::all_tokens() { - if TokenLike::Id(test_token.id) == token { - let token_price = TokenPrice { - usd_price: test_token.price_usd, - last_updated: Utc::now(), - }; - return Ok(token_price); - } - } - unreachable!("incorrect token input") - } - - /// Get current gas price in ETH - async fn get_gas_price_wei(&self) -> Result { - Ok(BigUint::from(10u32).pow(7u32)) // 10 GWei - } - - async fn get_token(&self, token: TokenLike) -> Result { - for test_token in TestToken::all_tokens() { - if TokenLike::Id(test_token.id) == token { - return Ok(Token::new( - test_token.id, - Address::default(), - "", - test_token.precision, - )); - } - } - unreachable!("incorrect token input") - } - } - - struct MockTickerInfo; - #[async_trait] - impl FeeTickerInfo for MockTickerInfo { - async fn is_account_new(&mut self, _address: Address) -> bool { - // Always false for simplicity. - false - } - } - - #[test] - fn test_ticker_formula() { - let config = get_test_ticker_config(); - let mut ticker = - FeeTicker::new(MockApiProvider, MockTickerInfo, mpsc::channel(1).1, config); - - let mut get_token_fee_in_usd = - |tx_type: TxFeeTypes, token: TokenLike, address: Address| -> Ratio { - let fee_in_token = - block_on(ticker.get_fee_from_ticker_in_wei(tx_type, token.clone(), address)) - .expect("failed to get fee in token"); - let token_precision = block_on(MockApiProvider.get_token(token.clone())) - .unwrap() - .decimals; - - // Fee in usd - (block_on(MockApiProvider.get_last_quote(token)) - .expect("failed to get fee in usd") - .usd_price - / BigUint::from(10u32).pow(u32::from(token_precision))) - * fee_in_token.total_fee - }; - - let get_relative_diff = |a: &Ratio, b: &Ratio| -> BigDecimal { - let max = std::cmp::max(a.clone(), b.clone()); - let min = std::cmp::min(a.clone(), b.clone()); - ratio_to_big_decimal(&((&max - &min) / min), 6) - }; - - { - let expected_price_of_eth_token_transfer_usd = - get_token_fee_in_usd(TxFeeTypes::Transfer, 0.into(), Address::default()); - let expected_price_of_eth_token_withdraw_usd = - get_token_fee_in_usd(TxFeeTypes::Withdraw, 0.into(), Address::default()); - let expected_price_of_eth_token_fast_withdraw_usd = - get_token_fee_in_usd(TxFeeTypes::FastWithdraw, 0.into(), Address::default()); - - // Cost of the transfer and withdraw in USD should be the same for all tokens up to +/- 3 digits (mantissa len == 11) - let threshold = BigDecimal::from_str("0.01").unwrap(); - for token in TestToken::all_tokens() { - let transfer_fee = - get_token_fee_in_usd(TxFeeTypes::Transfer, token.id.into(), Address::default()); - let expected_fee = - expected_price_of_eth_token_transfer_usd.clone() * token.risk_factor(); - let transfer_diff = get_relative_diff(&transfer_fee, &expected_fee); - assert!( - transfer_diff <= threshold.clone(), - "token transfer fee is above eth fee threshold: <{:?}: {}, ETH: {}, diff: {}, threshold: {}>", token.id, - UnsignedRatioSerializeAsDecimal::serialize_to_str_with_dot(&transfer_fee,6), - 
UnsignedRatioSerializeAsDecimal::serialize_to_str_with_dot(&expected_fee,6), - transfer_diff, &threshold); - - let withdraw_fee = - get_token_fee_in_usd(TxFeeTypes::Withdraw, token.id.into(), Address::default()); - let expected_fee = - expected_price_of_eth_token_withdraw_usd.clone() * token.risk_factor(); - let withdraw_diff = get_relative_diff(&withdraw_fee, &expected_fee); - assert!( - withdraw_diff <= threshold.clone(), - "token withdraw fee is above eth fee threshold: <{:?}: {}, ETH: {}, diff: {}, threshold: {}>", token.id, - UnsignedRatioSerializeAsDecimal::serialize_to_str_with_dot(&withdraw_fee,6), - UnsignedRatioSerializeAsDecimal::serialize_to_str_with_dot(&expected_fee,6), - withdraw_diff, &threshold - ); - - let fast_withdraw_fee = get_token_fee_in_usd( - TxFeeTypes::FastWithdraw, - token.id.into(), - Address::default(), - ); - let expected_fee = - expected_price_of_eth_token_fast_withdraw_usd.clone() * token.risk_factor(); - let fast_withdraw_diff = get_relative_diff(&fast_withdraw_fee, &expected_fee); - assert!( - fast_withdraw_diff <= threshold.clone(), - "token fast withdraw fee is above eth fee threshold: <{:?}: {}, ETH: {}, diff: {}, threshold: {}>", token.id, - UnsignedRatioSerializeAsDecimal::serialize_to_str_with_dot(&fast_withdraw_fee,6), - UnsignedRatioSerializeAsDecimal::serialize_to_str_with_dot(&expected_fee,6), - fast_withdraw_diff, &threshold - ); - assert!( - fast_withdraw_fee > withdraw_fee, - "Fast withdraw fee must be greater than usual withdraw fee" - ); - } - } - } -} diff --git a/core/bin/zksync_api/src/fee_ticker/tests.rs b/core/bin/zksync_api/src/fee_ticker/tests.rs new file mode 100644 index 0000000000..776765ad0f --- /dev/null +++ b/core/bin/zksync_api/src/fee_ticker/tests.rs @@ -0,0 +1,315 @@ +use super::*; +use async_trait::async_trait; +use bigdecimal::BigDecimal; +use chrono::Utc; +use futures::channel::mpsc; +use futures::executor::block_on; +use std::str::FromStr; +use zksync_types::{Address, Token, TokenId, TokenPrice}; +use zksync_utils::{ratio_to_big_decimal, UnsignedRatioSerializeAsDecimal}; + +const TEST_FAST_WITHDRAW_COEFF: f64 = 10.0; + +#[derive(Debug, Clone)] +struct TestToken { + id: TokenId, + price_usd: Ratio, + risk_factor: Option>, + precision: u8, +} + +impl TestToken { + fn new(id: TokenId, price_usd: f64, risk_factor: Option, precision: u8) -> Self { + Self { + id, + price_usd: UnsignedRatioSerializeAsDecimal::deserialize_from_str_with_dot( + &price_usd.to_string(), + ) + .unwrap(), + risk_factor: risk_factor.map(|risk_factor| { + UnsignedRatioSerializeAsDecimal::deserialize_from_str_with_dot( + &risk_factor.to_string(), + ) + .unwrap() + }), + precision, + } + } + + fn risk_factor(&self) -> Ratio { + self.risk_factor + .clone() + .unwrap_or_else(|| Ratio::from_integer(1u32.into())) + } + + fn eth() -> Self { + Self::new(0, 182.0, None, 18) + } + + fn hex() -> Self { + Self::new(1, 1.0, Some(2.5), 6) + } + + fn cheap() -> Self { + Self::new(2, 1.0, Some(2.5), 6) + } + + fn expensive() -> Self { + Self::new(3, 173_134.192_3, Some(0.9), 18) + } + + fn subsidized_tokens() -> Vec { + vec![Self::eth(), Self::cheap(), Self::expensive()] + } + + fn unsubsidized_tokens() -> Vec { + vec![Self::hex()] + } + + fn all_tokens() -> Vec { + let mut all_tokens = Vec::new(); + all_tokens.extend_from_slice(&Self::subsidized_tokens()); + all_tokens.extend_from_slice(&Self::unsubsidized_tokens()); + + all_tokens + } +} + +fn get_test_ticker_config() -> TickerConfig { + TickerConfig { + zkp_cost_chunk_usd: 
UnsignedRatioSerializeAsDecimal::deserialize_from_str_with_dot("0.001") + .unwrap(), + gas_cost_tx: GasOperationsCost::from_constants(TEST_FAST_WITHDRAW_COEFF), + tokens_risk_factors: TestToken::all_tokens() + .into_iter() + .filter_map(|t| { + let id = t.id; + t.risk_factor.map(|risk| (id, risk)) + }) + .collect(), + not_subsidized_tokens: vec![ + Address::from_str("34083bbd70d394110487feaa087da875a54624ec").unwrap(), + ] + .into_iter() + .collect(), + } +} + +struct MockApiProvider; +#[async_trait] +impl FeeTickerAPI for MockApiProvider { + async fn get_last_quote(&self, token: TokenLike) -> Result { + for test_token in TestToken::all_tokens() { + if TokenLike::Id(test_token.id) == token { + let token_price = TokenPrice { + usd_price: test_token.price_usd, + last_updated: Utc::now(), + }; + return Ok(token_price); + } + } + unreachable!("incorrect token input") + } + + /// Get current gas price in ETH + async fn get_gas_price_wei(&self) -> Result { + Ok(BigUint::from(10u32).pow(7u32)) // 10 GWei + } + + async fn get_token(&self, token: TokenLike) -> Result { + for test_token in TestToken::subsidized_tokens() { + if TokenLike::Id(test_token.id) == token { + return Ok(Token::new( + test_token.id, + Address::default(), + "", + test_token.precision, + )); + } + } + for test_token in TestToken::unsubsidized_tokens() { + if TokenLike::Id(test_token.id) == token { + return Ok(Token::new( + test_token.id, + Address::from_str("34083bbd70d394110487feaa087da875a54624ec").unwrap(), + "", + test_token.precision, + )); + } + } + unreachable!("incorrect token input") + } +} + +struct MockTickerInfo; + +#[async_trait] +impl FeeTickerInfo for MockTickerInfo { + async fn is_account_new(&mut self, _address: Address) -> bool { + // Always false for simplicity. + false + } +} + +fn format_with_dot(num: &Ratio, precision: usize) -> String { + UnsignedRatioSerializeAsDecimal::serialize_to_str_with_dot(num, precision) +} + +#[test] +fn test_ticker_formula() { + let validator = FeeTokenValidator::new(HashMap::new(), Default::default()); + + let config = get_test_ticker_config(); + let mut ticker = FeeTicker::new( + MockApiProvider, + MockTickerInfo, + mpsc::channel(1).1, + config, + validator, + ); + + let mut get_token_fee_in_usd = + |tx_type: TxFeeTypes, token: TokenLike, address: Address| -> Ratio { + let fee_in_token = + block_on(ticker.get_fee_from_ticker_in_wei(tx_type, token.clone(), address)) + .expect("failed to get fee in token"); + let token_precision = block_on(MockApiProvider.get_token(token.clone())) + .unwrap() + .decimals; + + // Fee in usd + (block_on(MockApiProvider.get_last_quote(token)) + .expect("failed to get fee in usd") + .usd_price + / BigUint::from(10u32).pow(u32::from(token_precision))) + * fee_in_token.total_fee + }; + + let get_relative_diff = |a: &Ratio, b: &Ratio| -> BigDecimal { + let max = std::cmp::max(a.clone(), b.clone()); + let min = std::cmp::min(a.clone(), b.clone()); + ratio_to_big_decimal(&((&max - &min) / min), 6) + }; + + let expected_price_of_eth_token_transfer_usd = + get_token_fee_in_usd(TxFeeTypes::Transfer, 0.into(), Address::default()); + let expected_price_of_eth_token_withdraw_usd = + get_token_fee_in_usd(TxFeeTypes::Withdraw, 0.into(), Address::default()); + let expected_price_of_eth_token_fast_withdraw_usd = + get_token_fee_in_usd(TxFeeTypes::FastWithdraw, 0.into(), Address::default()); + + // Cost of the transfer and withdraw in USD should be the same for all tokens up to +/- 3 digits + // (mantissa len == 11) + let threshold = 
BigDecimal::from_str("0.01").unwrap(); + for token in TestToken::subsidized_tokens() { + let transfer_fee = + get_token_fee_in_usd(TxFeeTypes::Transfer, token.id.into(), Address::default()); + let expected_fee = expected_price_of_eth_token_transfer_usd.clone() * token.risk_factor(); + let transfer_diff = get_relative_diff(&transfer_fee, &expected_fee); + assert!( + transfer_diff <= threshold.clone(), + "token transfer fee is above eth fee threshold: <{:?}: {}, ETH: {}, diff: {}, threshold: {}>", + token.id, + format_with_dot(&transfer_fee, 6), + format_with_dot(&expected_fee, 6), + transfer_diff, &threshold + ); + + let withdraw_fee = + get_token_fee_in_usd(TxFeeTypes::Withdraw, token.id.into(), Address::default()); + let expected_fee = expected_price_of_eth_token_withdraw_usd.clone() * token.risk_factor(); + let withdraw_diff = get_relative_diff(&withdraw_fee, &expected_fee); + assert!( + withdraw_diff <= threshold.clone(), + "token withdraw fee is above eth fee threshold: <{:?}: {}, ETH: {}, diff: {}, threshold: {}>", + token.id, + format_with_dot(&withdraw_fee, 6), + format_with_dot(&expected_fee, 6), + withdraw_diff, &threshold + ); + + let fast_withdraw_fee = get_token_fee_in_usd( + TxFeeTypes::FastWithdraw, + token.id.into(), + Address::default(), + ); + let expected_fee = + expected_price_of_eth_token_fast_withdraw_usd.clone() * token.risk_factor(); + let fast_withdraw_diff = get_relative_diff(&fast_withdraw_fee, &expected_fee); + assert!( + fast_withdraw_diff <= threshold.clone(), + "token fast withdraw fee is above eth fee threshold: <{:?}: {}, ETH: {}, diff: {}, threshold: {}>", + token.id, + format_with_dot(&fast_withdraw_fee, 6), + format_with_dot(&expected_fee, 6), + fast_withdraw_diff, &threshold + ); + assert!( + fast_withdraw_fee > withdraw_fee, + "Fast withdraw fee must be greater than usual withdraw fee" + ); + } +} + +#[test] +fn test_fee_for_unsubsidized_tokens() { + let validator = FeeTokenValidator::new(HashMap::new(), Default::default()); + + let config = get_test_ticker_config(); + let mut ticker = FeeTicker::new( + MockApiProvider, + MockTickerInfo, + mpsc::channel(1).1, + config, + validator, + ); + + let mut get_gas_amount = + |tx_type: TxFeeTypes, token: TokenLike, address: Address| -> num::BigUint { + block_on(ticker.get_fee_from_ticker_in_wei(tx_type, token, address)) + .expect("failed to get fee in token") + .gas_tx_amount + }; + + for subsidized_tokens in TestToken::subsidized_tokens() { + for unsubsidized_tokens in TestToken::unsubsidized_tokens() { + assert!( + get_gas_amount( + TxFeeTypes::Transfer, + subsidized_tokens.id.into(), + Address::default() + ) < get_gas_amount( + TxFeeTypes::Transfer, + unsubsidized_tokens.id.into(), + Address::default() + ) + ); + assert!( + get_gas_amount( + TxFeeTypes::Withdraw, + subsidized_tokens.id.into(), + Address::default() + ) < get_gas_amount( + TxFeeTypes::Withdraw, + unsubsidized_tokens.id.into(), + Address::default() + ) + ); + assert!( + get_gas_amount( + TxFeeTypes::ChangePubKey { + onchain_pubkey_auth: false + }, + subsidized_tokens.id.into(), + Address::default() + ) < get_gas_amount( + TxFeeTypes::ChangePubKey { + onchain_pubkey_auth: false + }, + unsubsidized_tokens.id.into(), + Address::default() + ) + ); + } + } +} diff --git a/core/bin/zksync_api/src/lib.rs b/core/bin/zksync_api/src/lib.rs index 04b46176b5..fbdd5cffbb 100644 --- a/core/bin/zksync_api/src/lib.rs +++ b/core/bin/zksync_api/src/lib.rs @@ -2,7 +2,7 @@ use crate::{api_server::start_api_server, fee_ticker::run_ticker_task}; use 
futures::channel::mpsc; -use zksync_config::{AdminServerOptions, ConfigurationOptions}; +use zksync_config::{AdminServerOptions, ApiServerOptions, ConfigurationOptions}; use zksync_storage::ConnectionPool; pub mod api_server; @@ -22,20 +22,17 @@ pub fn run_api( let (ticker_request_sender, ticker_request_receiver) = mpsc::channel(channel_size); let config_options = ConfigurationOptions::from_env(); + let api_server_options = ApiServerOptions::from_env(); let admin_server_options = AdminServerOptions::from_env(); - let ticker_task = run_ticker_task( - config_options.token_price_source.clone(), - config_options.ticker_fast_processing_coeff, - connection_pool.clone(), - ticker_request_receiver, - ); + let ticker_task = run_ticker_task(connection_pool.clone(), ticker_request_receiver); start_api_server( connection_pool, panic_notify, ticker_request_sender, config_options, + api_server_options, admin_server_options, ); diff --git a/core/bin/zksync_core/Cargo.toml b/core/bin/zksync_core/Cargo.toml index 155f6b3a24..bf8977dfb8 100644 --- a/core/bin/zksync_core/Cargo.toml +++ b/core/bin/zksync_core/Cargo.toml @@ -27,6 +27,7 @@ serde_json = "1.0.0" log = "0.4" env_logger = "0.6" metrics = "0.13.0-alpha.8" +itertools = "0.9.0" tokio = { version = "0.2", features = ["full"] } futures = "0.3" @@ -37,6 +38,7 @@ ctrlc = { version = "3.1", features = ["termination"] } anyhow = "1.0" thiserror = "1.0" tiny-keccak = "1.4.2" +async-trait = "0.1" [dev-dependencies] num = { version = "0.2", features = ["serde"] } diff --git a/core/bin/zksync_core/src/bin/eth_watcher.rs b/core/bin/zksync_core/src/bin/eth_watcher.rs index db0e48b9c1..05e09826c3 100644 --- a/core/bin/zksync_core/src/bin/eth_watcher.rs +++ b/core/bin/zksync_core/src/bin/eth_watcher.rs @@ -1,7 +1,7 @@ use futures::{channel::mpsc, SinkExt}; use std::time::Duration; use tokio::{runtime::Runtime, time}; -use zksync_core::eth_watch::{EthWatch, EthWatchRequest}; +use zksync_core::eth_watch::{DBStorage, EthHttpClient, EthWatch, EthWatchRequest}; use zksync_storage::ConnectionPool; fn main() { @@ -20,10 +20,13 @@ fn main() { let (eth_req_sender, eth_req_receiver) = mpsc::channel(256); let db_pool = ConnectionPool::new(None); + let eth_client = EthHttpClient::new(web3, contract_address); - let watcher = EthWatch::new(web3, contract_address, 0, eth_req_receiver, db_pool); + let storage = DBStorage::new(db_pool); - main_runtime.spawn(watcher.run()); + let watcher = EthWatch::new(eth_client, storage, 0); + + main_runtime.spawn(watcher.run(eth_req_receiver)); main_runtime.block_on(async move { let mut timer = time::interval(Duration::from_secs(1)); diff --git a/core/bin/zksync_core/src/committer.rs b/core/bin/zksync_core/src/committer.rs index 55784a2085..5a1c1bcdaf 100644 --- a/core/bin/zksync_core/src/committer.rs +++ b/core/bin/zksync_core/src/committer.rs @@ -286,37 +286,51 @@ async fn create_aggregated_operations(storage: &mut StorageProcessor<'_>) -> any } if last_committed_block > last_aggregate_create_proof_block { - let mut block_numbers = Vec::new(); - let mut blocks = Vec::new(); - let mut block_idxs_in_proof = Vec::new(); - - let mut idx = 0; + let mut proofs_exits = true; for block_number in last_aggregate_create_proof_block + 1..=last_committed_block { - let block = storage - .chain() - .block_schema() - .get_block(block_number) - .await? 
- .expect("Failed to get last committed block from db"); - block_numbers.push(block.block_number); - blocks.push(block); - block_idxs_in_proof.push(idx); - idx += 1; + proofs_exits = proofs_exits + && storage + .prover_schema() + .load_proof(block_number) + .await? + .is_some(); + if !proofs_exits { + break; + } } + if proofs_exits { + let mut block_numbers = Vec::new(); + let mut blocks = Vec::new(); + let mut block_idxs_in_proof = Vec::new(); + + let mut idx = 0; + for block_number in last_aggregate_create_proof_block + 1..=last_committed_block { + let block = storage + .chain() + .block_schema() + .get_block(block_number) + .await? + .expect("Failed to get last committed block from db"); + block_numbers.push(block.block_number); + blocks.push(block); + block_idxs_in_proof.push(idx); + idx += 1; + } - let aggregated_op_create = AggregatedOperation::CreateProofBlocks(block_numbers); + let aggregated_op_create = AggregatedOperation::CreateProofBlocks(block_numbers); - storage - .chain() - .operations_schema() - .store_aggregated_action(aggregated_op_create) - .await?; + storage + .chain() + .operations_schema() + .store_aggregated_action(aggregated_op_create) + .await?; - log::info!( - "Created aggregated create proof op: {} - {}", - last_aggregate_create_proof_block + 1, - last_committed_block - ); + log::info!( + "Created aggregated create proof op: {} - {}", + last_aggregate_create_proof_block + 1, + last_committed_block + ); + } } if last_aggregate_create_proof_block > last_aggregate_publish_proof_block { diff --git a/core/bin/zksync_core/src/eth_watch/client.rs b/core/bin/zksync_core/src/eth_watch/client.rs new file mode 100644 index 0000000000..0f1e87a72b --- /dev/null +++ b/core/bin/zksync_core/src/eth_watch/client.rs @@ -0,0 +1,184 @@ +use std::{convert::TryFrom, time::Instant}; + +use anyhow::format_err; +use ethabi::Hash; +use serde::export::fmt::Debug; +use web3::{ + contract::{Contract, Options}, + transports::Http, + types::{BlockNumber, FilterBuilder, Log}, + Web3, +}; + +use zksync_contracts::zksync_contract; +use zksync_types::{ethereum::CompleteWithdrawalsTx, Address, Nonce, PriorityOp, H160}; + +struct ContractTopics { + new_priority_request: Hash, + complete_withdrawals_event: Hash, +} + +impl ContractTopics { + fn new(zksync_contract: ðabi::Contract) -> Self { + Self { + new_priority_request: zksync_contract + .event("NewPriorityRequest") + .expect("main contract abi error") + .signature(), + + complete_withdrawals_event: zksync_contract + .event("PendingWithdrawalsComplete") + .expect("main contract abi error") + .signature(), + } + } +} + +#[async_trait::async_trait] +pub trait EthClient { + async fn get_priority_op_events( + &self, + from: BlockNumber, + to: BlockNumber, + ) -> anyhow::Result>; + async fn get_complete_withdrawals_event( + &self, + from: BlockNumber, + to: BlockNumber, + ) -> anyhow::Result>; + async fn block_number(&self) -> anyhow::Result; + async fn get_auth_fact(&self, address: Address, nonce: Nonce) -> anyhow::Result>; + async fn get_first_pending_withdrawal_index(&self) -> anyhow::Result; + async fn get_number_of_pending_withdrawals(&self) -> anyhow::Result; +} + +pub struct EthHttpClient { + web3: Web3, + zksync_contract: Contract, + topics: ContractTopics, +} + +impl EthHttpClient { + pub fn new(web3: Web3, zksync_contract_addr: H160) -> Self { + let zksync_contract = Contract::new(web3.eth(), zksync_contract_addr, zksync_contract()); + + let topics = ContractTopics::new(zksync_contract.abi()); + Self { + zksync_contract, + web3, + topics, + } 
+ } + + async fn get_events( + &self, + from: BlockNumber, + to: BlockNumber, + topics: Vec, + ) -> anyhow::Result> + where + T: TryFrom, + T::Error: Debug, + { + let filter = FilterBuilder::default() + .address(vec![self.zksync_contract.address()]) + .from_block(from) + .to_block(to) + .topics(Some(topics), None, None, None) + .build(); + + self.web3 + .eth() + .logs(filter) + .await? + .into_iter() + .map(|event| { + T::try_from(event) + .map_err(|e| format_err!("Failed to parse event log from ETH: {:?}", e)) + }) + .collect() + } +} + +#[async_trait::async_trait] +impl EthClient for EthHttpClient { + async fn get_priority_op_events( + &self, + from: BlockNumber, + to: BlockNumber, + ) -> anyhow::Result> { + let start = Instant::now(); + + let result = self + .get_events(from, to, vec![self.topics.new_priority_request]) + .await; + metrics::histogram!("eth_watcher.get_priority_op_events", start.elapsed()); + result + } + + async fn get_complete_withdrawals_event( + &self, + from: BlockNumber, + to: BlockNumber, + ) -> anyhow::Result> { + let start = Instant::now(); + + let result = self + .get_events(from, to, vec![self.topics.complete_withdrawals_event]) + .await; + + metrics::histogram!( + "eth_watcher.get_complete_withdrawals_event", + start.elapsed() + ); + result + } + + async fn block_number(&self) -> anyhow::Result { + Ok(self.web3.eth().block_number().await?.as_u64()) + } + + async fn get_auth_fact(&self, address: Address, nonce: u32) -> anyhow::Result> { + self.zksync_contract + .query( + "authFacts", + (address, u64::from(nonce)), + None, + Options::default(), + None, + ) + .await + .map_err(|e| format_err!("Failed to query contract authFacts: {}", e)) + } + + async fn get_first_pending_withdrawal_index(&self) -> anyhow::Result { + self.zksync_contract + .query( + "firstPendingWithdrawalIndex", + (), + None, + Options::default(), + None, + ) + .await + .map_err(|e| { + format_err!( + "Failed to query contract firstPendingWithdrawalIndex: {}", + e + ) + }) + } + + async fn get_number_of_pending_withdrawals(&self) -> anyhow::Result { + self.zksync_contract + .query( + "numberOfPendingWithdrawals", + (), + None, + Options::default(), + None, + ) + .await + .map_err(|e| format_err!("Failed to query contract numberOfPendingWithdrawals: {}", e)) + } +} diff --git a/core/bin/zksync_core/src/eth_watch/eth_state.rs b/core/bin/zksync_core/src/eth_watch/eth_state.rs index 3912c00b08..879cda39a9 100644 --- a/core/bin/zksync_core/src/eth_watch/eth_state.rs +++ b/core/bin/zksync_core/src/eth_watch/eth_state.rs @@ -2,9 +2,9 @@ use std::collections::HashMap; // External uses // Workspace deps -use zksync_types::PriorityOp; +use zksync_types::{PriorityOp, SerialId}; // Local deps -use super::{received_ops::ReceivedPriorityOp, EthBlockId}; +use super::received_ops::ReceivedPriorityOp; /// Gathered state of the Ethereum network. /// Contains information about the known token types and incoming @@ -24,8 +24,8 @@ pub struct ETHState { /// Note that since these operations do not have enough confirmations, /// they may be not executed in the future, so this list is approximate. /// + unconfirmed_queue: Vec, /// Keys in this HashMap are numbers of blocks with `PriorityOp`. - unconfirmed_queue: Vec<(EthBlockId, PriorityOp)>, /// Queue of priority operations that passed the confirmation /// threshold and are waiting to be executed. 
priority_queue: HashMap, @@ -34,8 +34,8 @@ pub struct ETHState { impl ETHState { pub fn new( last_ethereum_block: u64, - unconfirmed_queue: Vec<(EthBlockId, PriorityOp)>, - priority_queue: HashMap, + unconfirmed_queue: Vec, + priority_queue: HashMap, ) -> Self { Self { last_ethereum_block, @@ -52,7 +52,7 @@ impl ETHState { &self.priority_queue } - pub fn unconfirmed_queue(&self) -> &[(EthBlockId, PriorityOp)] { + pub fn unconfirmed_queue(&self) -> &[PriorityOp] { &self.unconfirmed_queue } } diff --git a/core/bin/zksync_core/src/eth_watch/mod.rs b/core/bin/zksync_core/src/eth_watch/mod.rs index def8c76abc..a886c8f277 100644 --- a/core/bin/zksync_core/src/eth_watch/mod.rs +++ b/core/bin/zksync_core/src/eth_watch/mod.rs @@ -8,42 +8,47 @@ // Built-in deps use std::{ collections::HashMap, - convert::TryFrom, time::{Duration, Instant}, }; + // External uses -use anyhow::format_err; use futures::{ channel::{mpsc, oneshot}, SinkExt, StreamExt, }; + use tokio::{task::JoinHandle, time}; -use web3::{ - contract::{Contract, Options}, - types::{Address, BlockNumber, Filter, FilterBuilder, H160}, - Transport, Web3, -}; +use web3::types::{Address, BlockNumber}; + // Workspace deps use zksync_config::ConfigurationOptions; -use zksync_contracts::zksync_contract; use zksync_crypto::params::PRIORITY_EXPIRATION; use zksync_storage::ConnectionPool; -use zksync_types::{ - ethereum::CompleteWithdrawalsTx, - {Nonce, PriorityOp, PubKeyHash, ZkSyncPriorityOp}, -}; +use zksync_types::{Nonce, PriorityOp, PubKeyHash, ZkSyncPriorityOp}; + // Local deps -use self::{eth_state::ETHState, received_ops::sift_outdated_ops}; +use self::{ + client::EthClient, + eth_state::ETHState, + received_ops::{sift_outdated_ops, ReceivedPriorityOp}, + storage::Storage, +}; + +pub use client::EthHttpClient; +pub use storage::DBStorage; +mod client; mod eth_state; mod received_ops; +mod storage; + +#[cfg(test)] +mod tests; /// As `infura` may limit the requests, upon error we need to wait for a while /// before repeating the request. const RATE_LIMIT_DELAY: Duration = Duration::from_secs(30); -pub type EthBlockId = u64; - /// Ethereum Watcher operating mode. /// /// Normally Ethereum watcher will always poll the Ethereum node upon request, @@ -75,53 +80,31 @@ pub enum EthWatchRequest { }, GetUnconfirmedDeposits { address: Address, - resp: oneshot::Sender>, + resp: oneshot::Sender>, }, GetUnconfirmedOpByHash { eth_hash: Vec, - resp: oneshot::Sender>, + resp: oneshot::Sender>, }, } -pub struct EthWatch { - zksync_contract: (ethabi::Contract, Contract), +pub struct EthWatch { + client: W, + storage: S, eth_state: ETHState, - web3: Web3, /// All ethereum events are accepted after sufficient confirmations to eliminate risk of block reorg. 
number_of_confirmations_for_event: u64, - mode: WatcherMode, - - eth_watch_req: mpsc::Receiver, - - db_pool: ConnectionPool, } -impl EthWatch { - pub fn new( - web3: Web3, - zksync_contract_addr: H160, - number_of_confirmations_for_event: u64, - eth_watch_req: mpsc::Receiver, - db_pool: ConnectionPool, - ) -> Self { - let zksync_contract = { - ( - zksync_contract(), - Contract::new(web3.eth(), zksync_contract_addr, zksync_contract()), - ) - }; - +impl EthWatch { + pub fn new(client: W, storage: S, number_of_confirmations_for_event: u64) -> Self { Self { - zksync_contract, + client, + storage, eth_state: ETHState::default(), - web3, - eth_watch_req, - mode: WatcherMode::Working, number_of_confirmations_for_event, - - db_pool, } } @@ -130,131 +113,10 @@ impl EthWatch { self.eth_state = new_state; } - fn get_priority_op_event_filter(&self, from: BlockNumber, to: BlockNumber) -> Filter { - let priority_op_event_topic = self - .zksync_contract - .0 - .event("NewPriorityRequest") - .expect("main contract abi error") - .signature(); - FilterBuilder::default() - .address(vec![self.zksync_contract.1.address()]) - .from_block(from) - .to_block(to) - .topics(Some(vec![priority_op_event_topic]), None, None, None) - .build() - } - - fn get_complete_withdrawals_event_filter(&self, from: BlockNumber, to: BlockNumber) -> Filter { - let complete_withdrawals_event_topic = self - .zksync_contract - .0 - .event("PendingWithdrawalsComplete") - .expect("main contract abi error") - .signature(); - FilterBuilder::default() - .address(vec![self.zksync_contract.1.address()]) - .from_block(from) - .to_block(to) - .topics( - Some(vec![complete_withdrawals_event_topic]), - None, - None, - None, - ) - .build() - } - - /// Filters and parses the priority operation events from the Ethereum - /// within the provided range of blocks. - /// Returns the list of priority operations together with the block - /// numbers. - async fn get_priority_op_events_with_blocks( - &self, - from: BlockNumber, - to: BlockNumber, - ) -> Result, anyhow::Error> { - let start = Instant::now(); - let filter = self.get_priority_op_event_filter(from, to); - let result = self - .web3 - .eth() - .logs(filter) - .await? - .into_iter() - .map(|event| { - let block_number: u64 = event - .block_number - .ok_or_else(|| { - anyhow::format_err!("No block number set in the queue event log") - })? - .as_u64(); - - let priority_op = PriorityOp::try_from(event).map_err(|e| { - format_err!("Failed to parse priority queue event log from ETH: {:?}", e) - })?; - - Ok((block_number, priority_op)) - }) - .collect(); - - metrics::histogram!( - "eth_watcher.get_priority_op_events_with_blocks", - start.elapsed() - ); - result - } - - async fn get_priority_op_events( - &self, - from: BlockNumber, - to: BlockNumber, - ) -> Result, anyhow::Error> { - let start = Instant::now(); - let filter = self.get_priority_op_event_filter(from, to); - let result = self - .web3 - .eth() - .logs(filter) - .await? - .into_iter() - .map(|event| { - PriorityOp::try_from(event).map_err(|e| { - format_err!("Failed to parse priority queue event log from ETH: {:?}", e) - }) - }) - .collect(); - metrics::histogram!("eth_watcher.get_priority_op_events", start.elapsed()); - result - } - - async fn get_complete_withdrawals_event( - &self, - from: BlockNumber, - to: BlockNumber, - ) -> Result, anyhow::Error> { - let start = Instant::now(); - let filter = self.get_complete_withdrawals_event_filter(from, to); - let result = self - .web3 - .eth() - .logs(filter) - .await? 
- .into_iter() - .map(CompleteWithdrawalsTx::try_from) - .collect(); - - metrics::histogram!( - "eth_watcher.get_complete_withdrawals_event", - start.elapsed() - ); - result - } - async fn get_unconfirmed_ops( &mut self, current_ethereum_block: u64, - ) -> Result, anyhow::Error> { + ) -> anyhow::Result> { // We want to scan the interval of blocks from the latest one up to the oldest one which may // have unconfirmed priority ops. // `+ 1` is added because if we subtract number of confirmations, we'll obtain the last block @@ -265,143 +127,101 @@ impl EthWatch { let block_from = BlockNumber::Number(block_from_number.into()); let block_to = BlockNumber::Latest; - let pending_events = self - .get_priority_op_events_with_blocks(block_from, block_to) - .await?; - - // Collect the unconfirmed operations. - let mut unconfirmed_ops = Vec::new(); - - for (block_number, priority_op) in pending_events.into_iter() { - unconfirmed_ops.push((block_number, priority_op)); - } - - Ok(unconfirmed_ops) - } - - async fn store_complete_withdrawals( - &mut self, - complete_withdrawals_txs: Vec, - ) -> Result<(), anyhow::Error> { - let mut storage = self - .db_pool - .access_storage() + self.client + .get_priority_op_events(block_from, block_to) .await - .map_err(|e| format_err!("Can't access storage: {}", e))?; - let mut transaction = storage.start_transaction().await?; - for tx in complete_withdrawals_txs { - transaction - .chain() - .operations_schema() - .add_complete_withdrawals_transaction(tx) - .await?; - } - transaction.commit().await?; - - Ok(()) } - async fn restore_state_from_eth( + async fn update_withdrawals( &mut self, - last_ethereum_block: u64, - ) -> Result<(), anyhow::Error> { - let current_ethereum_block = - last_ethereum_block.saturating_sub(self.number_of_confirmations_for_event); - - let new_block_with_accepted_events = - current_ethereum_block.saturating_sub(self.number_of_confirmations_for_event); - let previous_block_with_accepted_events = - new_block_with_accepted_events.saturating_sub(PRIORITY_EXPIRATION); - - // restore pending queue - let unconfirmed_queue = self.get_unconfirmed_ops(current_ethereum_block).await?; - - // restore complete withdrawals events + previous_block_with_accepted_events: u64, + new_block_with_accepted_events: u64, + ) -> anyhow::Result<()> { + // Get new complete withdrawals events let complete_withdrawals_txs = self + .client .get_complete_withdrawals_event( BlockNumber::Number(previous_block_with_accepted_events.into()), BlockNumber::Number(new_block_with_accepted_events.into()), ) .await?; - self.store_complete_withdrawals(complete_withdrawals_txs) + + self.storage + .store_complete_withdrawals(complete_withdrawals_txs) .await?; + Ok(()) + } - // restore priority queue - let prior_queue_events = self - .get_priority_op_events( - BlockNumber::Number(previous_block_with_accepted_events.into()), - BlockNumber::Number(new_block_with_accepted_events.into()), - ) + async fn process_new_blocks(&mut self, last_ethereum_block: u64) -> anyhow::Result<()> { + debug_assert!(self.eth_state.last_ethereum_block() < last_ethereum_block); + + let (unconfirmed_queue, received_priority_queue) = self + .update_eth_state(last_ethereum_block, self.number_of_confirmations_for_event) .await?; - let mut priority_queue = HashMap::new(); - for priority_op in prior_queue_events.into_iter() { - priority_queue.insert(priority_op.serial_id, priority_op.into()); + + // Extend the existing priority operations with the new ones. 
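+        // First drop stale entries from the old in-memory queue; the freshly fetched events are then merged on top by serial id.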
+ let mut priority_queue = sift_outdated_ops(self.eth_state.priority_queue()); + for (serial_id, op) in received_priority_queue { + priority_queue.insert(serial_id, op); } let new_state = ETHState::new(last_ethereum_block, unconfirmed_queue, priority_queue); - self.set_new_state(new_state); + Ok(()) + } - log::trace!("ETH state: {:#?}", self.eth_state); + async fn restore_state_from_eth(&mut self, last_ethereum_block: u64) -> anyhow::Result<()> { + let (unconfirmed_queue, priority_queue) = self + .update_eth_state(last_ethereum_block, PRIORITY_EXPIRATION) + .await?; + + let new_state = ETHState::new(last_ethereum_block, unconfirmed_queue, priority_queue); + self.set_new_state(new_state); + log::trace!("ETH state: {:#?}", self.eth_state); Ok(()) } - async fn process_new_blocks(&mut self, last_ethereum_block: u64) -> Result<(), anyhow::Error> { - debug_assert!(self.eth_state.last_ethereum_block() < last_ethereum_block); - - let previous_block_with_accepted_events = (self.eth_state.last_ethereum_block() + 1) - .saturating_sub(self.number_of_confirmations_for_event); + async fn update_eth_state( + &mut self, + current_ethereum_block: u64, + depth_of_last_approved_block: u64, + ) -> anyhow::Result<(Vec, HashMap)> { let new_block_with_accepted_events = - last_ethereum_block.saturating_sub(self.number_of_confirmations_for_event); + current_ethereum_block.saturating_sub(self.number_of_confirmations_for_event); + let previous_block_with_accepted_events = + new_block_with_accepted_events.saturating_sub(depth_of_last_approved_block); - // Get new complete withdrawals events - let complete_withdrawals_txs = self - .get_complete_withdrawals_event( - BlockNumber::Number(previous_block_with_accepted_events.into()), - BlockNumber::Number(new_block_with_accepted_events.into()), - ) - .await?; - self.store_complete_withdrawals(complete_withdrawals_txs) - .await?; + self.update_withdrawals( + previous_block_with_accepted_events, + new_block_with_accepted_events, + ) + .await?; - // Get new priority ops - let priority_op_events = self + let unconfirmed_queue = self.get_unconfirmed_ops(current_ethereum_block).await?; + let priority_queue = self + .client .get_priority_op_events( BlockNumber::Number(previous_block_with_accepted_events.into()), BlockNumber::Number(new_block_with_accepted_events.into()), ) - .await?; - - // Extend the existing priority operations with the new ones. - let mut priority_queue = sift_outdated_ops(self.eth_state.priority_queue()); - for priority_op in priority_op_events.into_iter() { - log::debug!("New priority op: {:?}", priority_op); - priority_queue.insert(priority_op.serial_id, priority_op.into()); - } - - // Get new pending ops - let unconfirmed_queue = self.get_unconfirmed_ops(last_ethereum_block).await?; - - // Now, after we've received all the data from the Ethereum, we can safely - // update the state. This is done atomically to avoid the situation when - // due to error occurred mid-update the overall `ETHWatcher` state become - // messed up. - let new_state = ETHState::new(last_ethereum_block, unconfirmed_queue, priority_queue); - self.set_new_state(new_state); + .await? 
+ .into_iter() + .map(|priority_op| (priority_op.serial_id, priority_op.into())) + .collect(); - Ok(()) + Ok((unconfirmed_queue, priority_queue)) } fn get_priority_requests(&self, first_serial_id: u64, max_chunks: usize) -> Vec { - let mut res = Vec::new(); + let mut result = Vec::new(); let mut used_chunks = 0; let mut current_priority_op = first_serial_id; while let Some(op) = self.eth_state.priority_queue().get(¤t_priority_op) { if used_chunks + op.as_ref().data.chunks() <= max_chunks { - res.push(op.as_ref().clone()); + result.push(op.as_ref().clone()); used_chunks += op.as_ref().data.chunks(); current_priority_op += 1; } else { @@ -409,7 +229,7 @@ impl EthWatch { } } - res + result } async fn is_new_pubkey_hash_authorized( @@ -417,35 +237,24 @@ impl EthWatch { address: Address, nonce: Nonce, pub_key_hash: &PubKeyHash, - ) -> Result { - let auth_fact: Vec = self - .zksync_contract - .1 - .query( - "authFacts", - (address, u64::from(nonce)), - None, - Options::default(), - None, - ) - .await - .map_err(|e| format_err!("Failed to query contract authFacts: {}", e))?; + ) -> anyhow::Result { + let auth_fact = self.client.get_auth_fact(address, nonce).await?; Ok(auth_fact.as_slice() == tiny_keccak::keccak256(&pub_key_hash.data[..])) } - fn find_ongoing_op_by_hash(&self, eth_hash: &[u8]) -> Option<(EthBlockId, PriorityOp)> { + fn find_ongoing_op_by_hash(&self, eth_hash: &[u8]) -> Option { self.eth_state .unconfirmed_queue() .iter() - .find(|(_block, op)| op.eth_hash.as_slice() == eth_hash) + .find(|op| op.eth_hash.as_slice() == eth_hash) .cloned() } - fn get_ongoing_deposits_for(&self, address: Address) -> Vec<(EthBlockId, PriorityOp)> { + fn get_ongoing_deposits_for(&self, address: Address) -> Vec { self.eth_state .unconfirmed_queue() .iter() - .filter(|(_block, op)| match &op.data { + .filter(|op| match &op.data { ZkSyncPriorityOp::Deposit(deposit) => { // Address may be set to either sender or recipient. deposit.from == address || deposit.to == address @@ -456,9 +265,9 @@ impl EthWatch { .collect() } - async fn poll_eth_node(&mut self) -> Result<(), anyhow::Error> { + async fn poll_eth_node(&mut self) -> anyhow::Result<()> { let start = Instant::now(); - let last_block_number = self.web3.eth().block_number().await?.as_u64(); + let last_block_number = self.client.block_number().await?; if last_block_number > self.eth_state.last_ethereum_block() { self.process_new_blocks(last_block_number).await?; @@ -468,6 +277,7 @@ impl EthWatch { Ok(()) } + // TODO try to move it to eth client fn is_backoff_requested(&self, error: &anyhow::Error) -> bool { error.to_string().contains("429 Too Many Requests") } @@ -493,16 +303,16 @@ impl EthWatch { } } - pub async fn run(mut self) { + pub async fn run(mut self, mut eth_watch_req: mpsc::Receiver) { // As infura may be not responsive, we want to retry the query until we've actually got the // block number. // Normally, however, this loop is not expected to last more than one iteration. 
let block = loop { - let block = self.web3.eth().block_number().await; + let block = self.client.block_number().await; match block { Ok(block) => { - break block.as_u64(); + break block; } Err(error) => { log::warn!( @@ -524,7 +334,7 @@ impl EthWatch { .await .expect("Unable to restore ETHWatcher state"); - while let Some(request) = self.eth_watch_req.next().await { + while let Some(request) = eth_watch_req.next().await { match request { EthWatchRequest::PollETHNode => { if !self.polling_allowed() { @@ -590,15 +400,17 @@ pub fn start_eth_watch( ) -> JoinHandle<()> { let transport = web3::transports::Http::new(&config_options.web3_url).unwrap(); let web3 = web3::Web3::new(transport); + let eth_client = EthHttpClient::new(web3, config_options.contract_eth_addr); + + let storage = DBStorage::new(db_pool); let eth_watch = EthWatch::new( - web3, - config_options.contract_eth_addr, + eth_client, + storage, config_options.confirmations_for_eth_event, - eth_req_receiver, - db_pool, ); - tokio::spawn(eth_watch.run()); + + tokio::spawn(eth_watch.run(eth_req_receiver)); tokio::spawn(async move { let mut timer = time::interval(config_options.eth_watch_poll_interval); diff --git a/core/bin/zksync_core/src/eth_watch/storage.rs b/core/bin/zksync_core/src/eth_watch/storage.rs new file mode 100644 index 0000000000..49fbcaf91d --- /dev/null +++ b/core/bin/zksync_core/src/eth_watch/storage.rs @@ -0,0 +1,47 @@ +use anyhow::format_err; + +use zksync_storage::ConnectionPool; +use zksync_types::ethereum::CompleteWithdrawalsTx; + +#[async_trait::async_trait] +pub trait Storage { + async fn store_complete_withdrawals( + &mut self, + complete_withdrawals_txs: Vec, + ) -> anyhow::Result<()>; +} + +pub struct DBStorage { + db_pool: ConnectionPool, +} + +impl DBStorage { + pub fn new(db_pool: ConnectionPool) -> Self { + Self { db_pool } + } +} + +#[async_trait::async_trait] +impl Storage for DBStorage { + async fn store_complete_withdrawals( + &mut self, + complete_withdrawals_txs: Vec, + ) -> anyhow::Result<()> { + let mut storage = self + .db_pool + .access_storage() + .await + .map_err(|e| format_err!("Can't access storage: {}", e))?; + let mut transaction = storage.start_transaction().await?; + for tx in complete_withdrawals_txs { + transaction + .chain() + .operations_schema() + .add_complete_withdrawals_transaction(tx) + .await?; + } + transaction.commit().await?; + + Ok(()) + } +} diff --git a/core/bin/zksync_core/src/eth_watch/tests.rs b/core/bin/zksync_core/src/eth_watch/tests.rs new file mode 100644 index 0000000000..8a2f47de95 --- /dev/null +++ b/core/bin/zksync_core/src/eth_watch/tests.rs @@ -0,0 +1,265 @@ +use std::cmp::max; +use std::collections::HashMap; + +use web3::types::{Address, BlockNumber}; + +use zksync_types::{ethereum::CompleteWithdrawalsTx, Deposit, PriorityOp, ZkSyncPriorityOp}; + +use crate::eth_watch::{client::EthClient, storage::Storage, EthWatch}; +use std::sync::Arc; +use tokio::sync::RwLock; + +struct FakeStorage { + withdrawal_txs: Vec, +} + +impl FakeStorage { + fn new() -> Self { + Self { + withdrawal_txs: vec![], + } + } +} + +#[async_trait::async_trait] +impl Storage for FakeStorage { + async fn store_complete_withdrawals( + &mut self, + complete_withdrawals_txs: Vec, + ) -> anyhow::Result<()> { + self.withdrawal_txs.extend(complete_withdrawals_txs); + Ok(()) + } +} + +struct FakeEthClientData { + priority_ops: HashMap>, + withdrawals: HashMap>, + last_block_number: u64, +} + +impl FakeEthClientData { + fn new() -> Self { + Self { + priority_ops: Default::default(), + withdrawals: 
Default::default(), + last_block_number: 0, + } + } + + fn add_operations(&mut self, ops: &[PriorityOp]) { + for op in ops { + self.last_block_number = max(op.eth_block, self.last_block_number); + self.priority_ops + .entry(op.eth_block) + .or_insert(vec![]) + .push(op.clone()); + } + } +} + +#[derive(Clone)] +struct FakeEthClient { + inner: Arc>, +} + +impl FakeEthClient { + fn new() -> Self { + Self { + inner: Arc::new(RwLock::new(FakeEthClientData::new())), + } + } + + async fn add_operations(&mut self, ops: &[PriorityOp]) { + self.inner.write().await.add_operations(ops); + } + + async fn block_to_number(&self, block: &BlockNumber) -> u64 { + match block { + BlockNumber::Latest => self.inner.read().await.last_block_number, + BlockNumber::Earliest => 0, + BlockNumber::Pending => unreachable!(), + BlockNumber::Number(number) => number.as_u64(), + } + } +} + +#[async_trait::async_trait] +impl EthClient for FakeEthClient { + async fn get_priority_op_events( + &self, + from: BlockNumber, + to: BlockNumber, + ) -> Result, anyhow::Error> { + let from = self.block_to_number(&from).await; + let to = self.block_to_number(&to).await; + let mut operations = vec![]; + for number in from..=to { + if let Some(ops) = self.inner.read().await.priority_ops.get(&number) { + operations.extend_from_slice(ops); + } + } + Ok(operations) + } + + async fn get_complete_withdrawals_event( + &self, + from: BlockNumber, + to: BlockNumber, + ) -> Result, anyhow::Error> { + let from = self.block_to_number(&from).await; + let to = self.block_to_number(&to).await; + let mut withdrawals = vec![]; + for number in from..=to { + if let Some(ops) = self.inner.read().await.withdrawals.get(&number) { + withdrawals.extend_from_slice(ops); + } + } + Ok(withdrawals) + } + + async fn block_number(&self) -> Result { + Ok(self.inner.read().await.last_block_number) + } + + async fn get_auth_fact( + &self, + _address: Address, + _nonce: u32, + ) -> Result, anyhow::Error> { + unreachable!() + } + + async fn get_first_pending_withdrawal_index(&self) -> Result { + unreachable!() + } + + async fn get_number_of_pending_withdrawals(&self) -> Result { + unreachable!() + } +} + +fn create_watcher(client: T) -> EthWatch { + let storage = FakeStorage::new(); + EthWatch::new(client, storage, 1) +} + +#[tokio::test] +async fn test_operation_queues() { + let mut client = FakeEthClient::new(); + client + .add_operations(&vec![ + PriorityOp { + serial_id: 0, + data: ZkSyncPriorityOp::Deposit(Deposit { + from: Default::default(), + token: 0, + amount: Default::default(), + to: [2u8; 20].into(), + }), + deadline_block: 0, + eth_hash: [2; 32].to_vec(), + eth_block: 4, + }, + PriorityOp { + serial_id: 1, + data: ZkSyncPriorityOp::Deposit(Deposit { + from: Default::default(), + token: 0, + amount: Default::default(), + to: Default::default(), + }), + deadline_block: 0, + eth_hash: [3u8; 32].to_vec(), + eth_block: 3, + }, + ]) + .await; + let mut watcher = create_watcher(client); + watcher.poll_eth_node().await.unwrap(); + assert_eq!(watcher.eth_state.last_ethereum_block(), 4); + let priority_queues = watcher.eth_state.priority_queue(); + let unconfirmed_queue = watcher.eth_state.unconfirmed_queue(); + assert_eq!(priority_queues.len(), 1); + assert_eq!(unconfirmed_queue.len(), 1); + assert_eq!(unconfirmed_queue[0].serial_id, 0); + priority_queues.get(&1).unwrap(); + watcher.find_ongoing_op_by_hash(&[2u8; 32]).unwrap(); + let deposits = watcher.get_ongoing_deposits_for([2u8; 20].into()); + assert_eq!(deposits.len(), 1); +} + +#[tokio::test] +async fn 
test_restore_and_poll() { + let mut client = FakeEthClient::new(); + client + .add_operations(&vec![ + PriorityOp { + serial_id: 0, + data: ZkSyncPriorityOp::Deposit(Deposit { + from: Default::default(), + token: 0, + amount: Default::default(), + to: [2u8; 20].into(), + }), + deadline_block: 0, + eth_hash: [2; 32].to_vec(), + eth_block: 4, + }, + PriorityOp { + serial_id: 1, + data: ZkSyncPriorityOp::Deposit(Deposit { + from: Default::default(), + token: 0, + amount: Default::default(), + to: Default::default(), + }), + deadline_block: 0, + eth_hash: [3u8; 32].to_vec(), + eth_block: 3, + }, + ]) + .await; + + let mut watcher = create_watcher(client.clone()); + watcher.restore_state_from_eth(4).await.unwrap(); + client + .add_operations(&vec![ + PriorityOp { + serial_id: 3, + data: ZkSyncPriorityOp::Deposit(Deposit { + from: Default::default(), + token: 0, + amount: Default::default(), + to: [2u8; 20].into(), + }), + deadline_block: 0, + eth_hash: [2; 32].to_vec(), + eth_block: 5, + }, + PriorityOp { + serial_id: 4, + data: ZkSyncPriorityOp::Deposit(Deposit { + from: Default::default(), + token: 0, + amount: Default::default(), + to: Default::default(), + }), + deadline_block: 0, + eth_hash: [3u8; 32].to_vec(), + eth_block: 5, + }, + ]) + .await; + watcher.poll_eth_node().await.unwrap(); + assert_eq!(watcher.eth_state.last_ethereum_block(), 5); + let priority_queues = watcher.eth_state.priority_queue(); + let unconfirmed_queue = watcher.eth_state.unconfirmed_queue(); + assert_eq!(priority_queues.len(), 2); + assert_eq!(unconfirmed_queue.len(), 2); + assert_eq!(unconfirmed_queue[0].serial_id, 3); + priority_queues.get(&1).unwrap(); + watcher.find_ongoing_op_by_hash(&[2u8; 32]).unwrap(); + let deposits = watcher.get_ongoing_deposits_for([2u8; 20].into()); + assert_eq!(deposits.len(), 1); +} diff --git a/core/bin/zksync_core/src/lib.rs b/core/bin/zksync_core/src/lib.rs index 832524b44d..5815583903 100644 --- a/core/bin/zksync_core/src/lib.rs +++ b/core/bin/zksync_core/src/lib.rs @@ -15,7 +15,7 @@ use futures::{ future, SinkExt, }; use tokio::task::JoinHandle; -use zksync_config::ConfigurationOptions; +use zksync_config::{ApiServerOptions, ConfigurationOptions}; use zksync_storage::ConnectionPool; const DEFAULT_CHANNEL_CAPACITY: usize = 32_768; @@ -95,6 +95,7 @@ pub async fn run_core( panic_notify: mpsc::Sender, ) -> anyhow::Result>> { let config_opts = ConfigurationOptions::from_env(); + let api_server_options = ApiServerOptions::from_env(); let (proposed_blocks_sender, proposed_blocks_receiver) = mpsc::channel(DEFAULT_CHANNEL_CAPACITY); @@ -157,10 +158,10 @@ pub async fn run_core( // Start private API. 
start_private_core_api(
-        config_opts,
         panic_notify.clone(),
         mempool_request_sender,
         eth_watch_req_sender,
+        api_server_options,
     );

     let task_futures = vec![
diff --git a/core/bin/zksync_core/src/private_api.rs b/core/bin/zksync_core/src/private_api.rs
index 5344436a59..2694125be6 100644
--- a/core/bin/zksync_core/src/private_api.rs
+++ b/core/bin/zksync_core/src/private_api.rs
@@ -14,7 +14,7 @@ use futures::{
     sink::SinkExt,
 };
 use std::thread;
-use zksync_config::ConfigurationOptions;
+use zksync_config::ApiServerOptions;
 use zksync_types::{tx::TxEthSignature, Address, SignedZkSyncTx, H256};
 use zksync_utils::panic_notify::ThreadPanicNotify;
@@ -120,10 +120,10 @@ async fn unconfirmed_op(
 #[allow(clippy::too_many_arguments)]
 pub fn start_private_core_api(
-    config_options: ConfigurationOptions,
     panic_notify: mpsc::Sender<bool>,
     mempool_tx_sender: mpsc::Sender<MempoolRequest>,
     eth_watch_req_sender: mpsc::Sender<EthWatchRequest>,
+    api_server_options: ApiServerOptions,
 ) {
     thread::Builder::new()
         .name("core-private-api".to_string())
@@ -149,7 +149,7 @@ pub fn start_private_core_api(
             .service(unconfirmed_op)
             .service(unconfirmed_deposits)
     })
-    .bind(&config_options.core_server_address)
+    .bind(&api_server_options.core_server_address)
     .expect("failed to bind")
     .run()
     .await
diff --git a/core/bin/zksync_core/src/state_keeper/mod.rs b/core/bin/zksync_core/src/state_keeper/mod.rs
index dddddb6772..076733074a 100644
--- a/core/bin/zksync_core/src/state_keeper/mod.rs
+++ b/core/bin/zksync_core/src/state_keeper/mod.rs
@@ -6,6 +6,7 @@ use futures::{
     stream::StreamExt,
     SinkExt,
 };
+use itertools::Itertools;
 use tokio::task::JoinHandle;
// Workspace uses
use zksync_crypto::ff;
@@ -109,6 +110,13 @@ pub struct ZkSyncStateKeeper {
     max_miniblock_iterations: usize,
     fast_miniblock_iterations: usize,
     max_number_of_withdrawals_per_block: usize,
+
+    // The two fields below are an optimization: we don't want to overwrite all the block contents over and over.
+    // With them we only need to save the diff between two pending block states.
+    /// Number of successful transactions in the pending block as of the last pending block synchronization step.
+    success_txs_pending_len: usize,
+    /// Number of failed transactions in the pending block as of the last pending block synchronization step.
+    failed_txs_pending_len: usize,
 }

 pub struct ZkSyncStateInitParams {
@@ -352,11 +360,10 @@ impl ZkSyncStateKeeper {
     ) -> Self {
         assert!(!available_block_chunk_sizes.is_empty());

-        let is_sorted = {
-            let mut sorted = available_block_chunk_sizes.clone();
-            sorted.sort_unstable();
-            sorted == available_block_chunk_sizes
-        };
+        let is_sorted = available_block_chunk_sizes
+            .iter()
+            .tuple_windows()
+            .all(|(a, b)| a < b);
         assert!(is_sorted);

         let state = ZkSyncState::new(
@@ -396,6 +403,9 @@ impl ZkSyncStateKeeper {
             max_miniblock_iterations,
             fast_miniblock_iterations,
             max_number_of_withdrawals_per_block,
+
+            success_txs_pending_len: 0,
+            failed_txs_pending_len: 0,
         };

         let root = keeper.state.root_hash();
@@ -534,6 +544,9 @@ impl ZkSyncStateKeeper {
         let start = Instant::now();
         let mut executed_ops = Vec::new();

+        // We want to store this variable before moving anything from the pending block.
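+        // (An empty proposed block carries neither transactions nor priority operations.)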
+        let empty_proposed_block = proposed_block.is_empty();
+
         let mut priority_op_queue = proposed_block
             .priority_ops
             .into_iter()
@@ -600,7 +613,12 @@
         if self.pending_block.pending_block_iteration > max_miniblock_iterations {
             self.seal_pending_block().await;
         } else {
-            self.store_pending_block().await;
+            // We've already incremented the pending block iteration, so this iteration will count towards
+            // reaching the block commitment timeout.
+            // However, we don't want to pointlessly save the same block again and again.
+            if !empty_proposed_block {
+                self.store_pending_block().await;
+            }
         }

         metrics::histogram!("state_keeper.execute_proposed_block", start.elapsed());
@@ -877,6 +895,10 @@
             ),
         );

+        // Once the block is sealed, we reset the counters for the next block.
+        self.success_txs_pending_len = 0;
+        self.failed_txs_pending_len = 0;
+
         // Apply fees of pending block
         let fee_updates = self
             .state
@@ -948,6 +970,20 @@
     /// so the executed transactions are persisted and won't be lost.
     async fn store_pending_block(&mut self) {
         let start = Instant::now();
+
+        // We want to include only the newly appeared transactions, since the older ones are already persisted in the
+        // database.
+        // This is a required optimization: otherwise the time to process the pending block could grow without
+        // limit if we are spammed with incorrect transactions (we don't limit the number of rejected
+        // transactions in a block).
+        let new_success_operations =
+            self.pending_block.success_operations[self.success_txs_pending_len..].to_vec();
+        let new_failed_operations =
+            self.pending_block.failed_txs[self.failed_txs_pending_len..].to_vec();
+
+        self.success_txs_pending_len = self.pending_block.success_operations.len();
+        self.failed_txs_pending_len = self.pending_block.failed_txs.len();
+
         // Create a pending block object to send.
         // Note that failed operations are not included, as per any operation failure
         // the full block is created immediately.
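The two cursors above are the whole mechanism: they only grow while a pending block accumulates operations, and they are reset when the block is sealed. A minimal standalone sketch of this "send only the new tail" bookkeeping (the `PendingBuf` type and its method names are illustrative only; the real state keeper stores the cursors directly on `ZkSyncStateKeeper`):

struct PendingBuf {
    all: Vec<u32>,   // stands in for the executed operations of the pending block
    sent_len: usize, // how many entries have already been persisted
}

impl PendingBuf {
    /// Returns only the entries appended since the previous call.
    fn take_new(&mut self) -> Vec<u32> {
        let new = self.all[self.sent_len..].to_vec();
        self.sent_len = self.all.len();
        new
    }

    /// Sealing a block clears the buffer and resets the cursor,
    /// mirroring the counter reset in `seal_pending_block` above.
    fn seal(&mut self) {
        self.all.clear();
        self.sent_len = 0;
    }
}

Each `store_pending_block` call then transmits work proportional to the new operations only, rather than to everything accumulated so far; the `pending_block_diff` test below pins down exactly this behavior.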
@@ -956,8 +992,8 @@ impl ZkSyncStateKeeper { chunks_left: self.pending_block.chunks_left, unprocessed_priority_op_before: self.pending_block.unprocessed_priority_op_before, pending_block_iteration: self.pending_block.pending_block_iteration, - success_operations: self.pending_block.success_operations.clone(), - failed_txs: self.pending_block.failed_txs.clone(), + success_operations: new_success_operations, + failed_txs: new_failed_operations, previous_block_root_hash: self.pending_block.previous_block_root_hash, timestamp: self.pending_block.timestamp, }; @@ -983,7 +1019,6 @@ impl ZkSyncStateKeeper { .send(commit_request) .await .expect("committer receiver dropped"); - metrics::histogram!("state_keeper.store_pending_block", start.elapsed()); } diff --git a/core/bin/zksync_core/src/state_keeper/tests.rs b/core/bin/zksync_core/src/state_keeper/tests.rs index 36ed74a7c1..02abec8447 100644 --- a/core/bin/zksync_core/src/state_keeper/tests.rs +++ b/core/bin/zksync_core/src/state_keeper/tests.rs @@ -159,6 +159,37 @@ pub fn create_deposit(token: TokenId, amount: impl Into) -> PriorityOp } } +/// Checks that StateKeeper will panic with incorrect initialization data +#[test] +#[should_panic] +fn test_create_incorrect_state_keeper() { + const CHANNEL_SIZE: usize = 32768; + const MAX_ITERATIONS: usize = 100; + const FAST_ITERATIONS: usize = 100; + const NUMBER_OF_WITHDRAWALS: usize = 100; + + let (_request_tx, request_rx) = mpsc::channel(CHANNEL_SIZE); + let (response_tx, _response_rx) = mpsc::channel(CHANNEL_SIZE); + + let mut fee_collector = Account::default(); + fee_collector.address = H160::random(); + + let mut init_params = ZkSyncStateInitParams::default(); + init_params.insert_account(0, fee_collector.clone()); + + // should panic + ZkSyncStateKeeper::new( + init_params, + fee_collector.address, + request_rx, + response_tx, + vec![1, 2, 2], // `available_block_chunk_sizes` must be strictly increasing. + MAX_ITERATIONS, + FAST_ITERATIONS, + NUMBER_OF_WITHDRAWALS, + ); +} + mod apply_priority_op { use super::*; @@ -390,29 +421,6 @@ async fn store_pending_block() { mod execute_proposed_block { use super::*; - /// Checks if executing an empty proposed_block is done correctly - #[tokio::test] - async fn empty() { - let mut tester = StateKeeperTester::new(1, 1, 1, 1); - let proposed_block = ProposedBlock { - txs: vec![], - priority_ops: vec![], - }; - let pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; - tester - .state_keeper - .execute_proposed_block(proposed_block) - .await; - if let Some(CommitRequest::PendingBlock(_)) = tester.response_rx.next().await { - assert_eq!( - pending_block_iteration, - tester.state_keeper.pending_block.pending_block_iteration - ); - } else { - panic!("Empty block not stored"); - } - } - /// Checks if executing a small proposed_block is done correctly #[tokio::test] async fn small() { @@ -552,4 +560,197 @@ mod execute_proposed_block { Some(CommitRequest::Block(_)) )); } + + /// Checks the following things: + /// 1. if proposed block is empty, no pending block is yielded from the state keeper. + /// 2. if there were no successful operations in the block, pending block iteration is not incremented after empty or rejected-only updates. + /// 3. if there were successful operations in the block, pending block iteration is incremented after each `execute_proposed_block` call. + #[tokio::test] + async fn pending_block_updates() { + let mut tester = StateKeeperTester::new(20, 5, 5, 4); + + // --- Phase 1: Empty pending block, empty update. 
--- + + // Check that empty update with empty pending block doesn't increment the iteration. + let proposed_block = ProposedBlock { + txs: vec![], + priority_ops: vec![], + }; + + tester + .state_keeper + .execute_proposed_block(proposed_block) + .await; + + // There should be no pending block yielded. + let next_block = tester.response_rx.try_next(); + assert!(next_block.is_err(), "Empty pending block was yielded"); + + // No successful operations in the pending block => no increment. + let pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + assert_eq!(pending_block_iteration, 0); + + // --- Phase 2: Empty pending block, only failed tx in update. --- + + // Then send the block with the bad transaction only + let bad_withdraw = create_account_and_withdrawal(&mut tester, 2, 2, 100u32, 145u32); + let proposed_block = ProposedBlock { + txs: vec![SignedTxVariant::Tx(bad_withdraw)], + priority_ops: vec![], + }; + + tester + .state_keeper + .execute_proposed_block(proposed_block) + .await; + + // Pending block should be created. + let next_block = tester.response_rx.next().await; + assert!(next_block.is_some(), "No pending block was yielded"); + + // Iteration should still not be incremented. + let pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + assert_eq!(pending_block_iteration, 0); + + // --- Phase 3: Empty pending block, successful tx in update. --- + + // First, create some block with successfull operation. + let good_withdraw = create_account_and_withdrawal(&mut tester, 2, 2, 200u32, 145u32); + let proposed_block = ProposedBlock { + txs: vec![SignedTxVariant::Tx(good_withdraw)], + priority_ops: vec![], + }; + + let pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + tester + .state_keeper + .execute_proposed_block(proposed_block) + .await; + + // Pending block should be created. + let next_block = tester.response_rx.next().await; + assert!(next_block.is_some(), "No pending block was yielded"); + + // Iteration should be incremented. + let new_pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + assert_eq!(new_pending_block_iteration, pending_block_iteration + 1); + + // --- Phase 4: Successful tx in pending block, failed tx in update. --- + + // Then send the block with the bad transaction only + let bad_withdraw = create_account_and_withdrawal(&mut tester, 2, 2, 100u32, 145u32); + let proposed_block = ProposedBlock { + txs: vec![SignedTxVariant::Tx(bad_withdraw)], + priority_ops: vec![], + }; + + let pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + tester + .state_keeper + .execute_proposed_block(proposed_block) + .await; + + // Pending block should be created. + let next_block = tester.response_rx.next().await; + assert!(next_block.is_some(), "No pending block was yielded"); + + // Iteration should still be incremented. + let new_pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + assert_eq!(new_pending_block_iteration, pending_block_iteration + 1); + + // --- Phase 5: Successful tx in pending block, empty update. --- + + // Finally, execute an empty block. + let proposed_block = ProposedBlock { + txs: vec![], + priority_ops: vec![], + }; + + let pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + tester + .state_keeper + .execute_proposed_block(proposed_block) + .await; + + // There should be no pending block yielded. 
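+        // (`try_next` on a `futures::channel::mpsc` receiver returns `Err` when no message is currently buffered, so `is_err()` means nothing was sent.)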
+ let next_block = tester.response_rx.try_next(); + assert!(next_block.is_err(), "Empty pending block was yielded"); + + // Iteration should still be incremented even after an empty block: there was a successful operation earlier. + let new_pending_block_iteration = tester.state_keeper.pending_block.pending_block_iteration; + assert_eq!(new_pending_block_iteration, pending_block_iteration + 1); + } + + /// Checks that only the difference between two states of a pending block is transmitted + /// to the committer. + #[tokio::test] + async fn pending_block_diff() { + let mut tester = StateKeeperTester::new(20, 5, 5, 4); + + let good_withdraw_1 = create_account_and_withdrawal(&mut tester, 0, 1, 200u32, 145u32); + let bad_withdraw_1 = create_account_and_withdrawal(&mut tester, 2, 2, 100u32, 145u32); + let proposed_block_1 = ProposedBlock { + txs: vec![ + SignedTxVariant::Tx(good_withdraw_1.clone()), + SignedTxVariant::Tx(bad_withdraw_1.clone()), + ], + priority_ops: vec![], + }; + + let good_withdraw_2 = create_account_and_withdrawal(&mut tester, 0, 3, 200u32, 145u32); + let bad_withdraw_2 = create_account_and_withdrawal(&mut tester, 2, 4, 100u32, 145u32); + let proposed_block_2 = ProposedBlock { + txs: vec![ + SignedTxVariant::Tx(good_withdraw_2.clone()), + SignedTxVariant::Tx(bad_withdraw_2.clone()), + ], + priority_ops: vec![], + }; + + tester + .state_keeper + .execute_proposed_block(proposed_block_1) + .await; + if let Some(CommitRequest::PendingBlock((block, _))) = tester.response_rx.next().await { + assert_eq!(block.number, 1); // It's the first block. + assert_eq!(block.success_operations.len(), 1); + assert_eq!( + block.success_operations[0] + .get_executed_tx() + .unwrap() + .signed_tx + .hash(), + good_withdraw_1.hash() + ); + + assert_eq!(block.failed_txs.len(), 1); + assert_eq!(block.failed_txs[0].signed_tx.hash(), bad_withdraw_1.hash()); + } else { + panic!("Block #1 not stored"); + } + + // Now we execute the next proposed block and expect that only the diff between `pending_block_2` and + // `pending_block_1` will be sent. + tester + .state_keeper + .execute_proposed_block(proposed_block_2) + .await; + if let Some(CommitRequest::PendingBlock((block, _))) = tester.response_rx.next().await { + assert_eq!(block.number, 1); // It still should be the first block. 
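+            // Only the operations executed since the previous update should be present.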
+ assert_eq!(block.success_operations.len(), 1); + assert_eq!( + block.success_operations[0] + .get_executed_tx() + .unwrap() + .signed_tx + .hash(), + good_withdraw_2.hash() + ); + + assert_eq!(block.failed_txs.len(), 1); + assert_eq!(block.failed_txs[0].signed_tx.hash(), bad_withdraw_2.hash()); + } else { + panic!("Block #2 not stored"); + } + } } diff --git a/core/bin/zksync_eth_sender/src/ethereum_interface.rs b/core/bin/zksync_eth_sender/src/ethereum_interface.rs index 68b857e8c0..dc6d69e5f7 100644 --- a/core/bin/zksync_eth_sender/src/ethereum_interface.rs +++ b/core/bin/zksync_eth_sender/src/ethereum_interface.rs @@ -10,7 +10,7 @@ use zksync_eth_signer::PrivateKeySigner; // Workspace uses use super::ExecutedTxStatus; use std::time::Duration; -use zksync_config::ConfigurationOptions; +use zksync_config::EthClientOptions; use zksync_contracts::zksync_contract; use zksync_eth_client::{ETHClient, SignedCallResult}; @@ -78,7 +78,7 @@ pub struct EthereumHttpClient { } impl EthereumHttpClient { - pub fn new(options: &ConfigurationOptions) -> anyhow::Result { + pub fn new(options: &EthClientOptions) -> anyhow::Result { let transport = Http::new(&options.web3_url)?; let ethereum_signer = PrivateKeySigner::new( options diff --git a/core/bin/zksync_eth_sender/src/lib.rs b/core/bin/zksync_eth_sender/src/lib.rs index a3e78311e3..f1b502bd33 100644 --- a/core/bin/zksync_eth_sender/src/lib.rs +++ b/core/bin/zksync_eth_sender/src/lib.rs @@ -13,7 +13,7 @@ use web3::{ types::{TransactionReceipt, H256, U256}, }; // Workspace uses -use zksync_config::{ConfigurationOptions, EthSenderOptions}; +use zksync_config::{EthClientOptions, EthSenderOptions}; use zksync_eth_client::SignedCallResult; use zksync_storage::ConnectionPool; use zksync_types::{ @@ -568,16 +568,19 @@ impl ETHSender { TxCheckOutcome::Pending } } - // Non-successful execution. + // Non-successful execution, report the failure with details. Some(status) => { - // Transaction failed, report the failure with details. + // Check if transaction has enough confirmations. + if status.confirmations >= self.options.wait_confirmations { + assert!( + status.receipt.is_some(), + "Receipt should exist for a failed transaction" + ); - // TODO: check confirmations for fail (#1110). - assert!( - status.receipt.is_some(), - "Receipt should exist for a failed transaction" - ); - TxCheckOutcome::Failed(Box::new(status.receipt.unwrap())) + TxCheckOutcome::Failed(Box::new(status.receipt.unwrap())) + } else { + TxCheckOutcome::Pending + } } // Stuck transaction. 
None if op.is_stuck(current_block) => TxCheckOutcome::Stuck, @@ -742,15 +745,14 @@ impl ETHSender { #[must_use] pub fn run_eth_sender( pool: ConnectionPool, - config_options: ConfigurationOptions, + eth_client_options: EthClientOptions, + eth_sender_options: EthSenderOptions, ) -> JoinHandle<()> { let ethereum = - EthereumHttpClient::new(&config_options).expect("Ethereum client creation failed"); + EthereumHttpClient::new(ð_client_options).expect("Ethereum client creation failed"); let db = Database::new(pool); - let eth_sender_options = EthSenderOptions::from_env(); - tokio::spawn(async move { let eth_sender = ETHSender::new(eth_sender_options, db, ethereum).await; diff --git a/core/bin/zksync_eth_sender/src/main.rs b/core/bin/zksync_eth_sender/src/main.rs index 9d357d5dae..3acbf4e9c3 100644 --- a/core/bin/zksync_eth_sender/src/main.rs +++ b/core/bin/zksync_eth_sender/src/main.rs @@ -1,6 +1,6 @@ use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; use std::cell::RefCell; -use zksync_config::ConfigurationOptions; +use zksync_config::{EthClientOptions, EthSenderOptions}; use zksync_eth_sender::run_eth_sender; use zksync_storage::ConnectionPool; @@ -23,9 +23,10 @@ async fn main() -> anyhow::Result<()> { } let pool = ConnectionPool::new(Some(ETH_SENDER_CONNECTION_POOL_SIZE)); - let config_options = ConfigurationOptions::from_env(); + let eth_client_options = EthClientOptions::from_env(); + let eth_sender_options = EthSenderOptions::from_env(); - let task_handle = run_eth_sender(pool, config_options); + let task_handle = run_eth_sender(pool, eth_client_options, eth_sender_options); tokio::select! { _ = async { task_handle.await } => { diff --git a/core/bin/zksync_eth_sender/src/tests/mod.rs b/core/bin/zksync_eth_sender/src/tests/mod.rs index 69bee64f32..986c2e6e05 100644 --- a/core/bin/zksync_eth_sender/src/tests/mod.rs +++ b/core/bin/zksync_eth_sender/src/tests/mod.rs @@ -9,7 +9,7 @@ use super::{ }; const EXPECTED_WAIT_TIME_BLOCKS: u64 = 30; -const WAIT_CONFIRMATIONS: u64 = 1; +const WAIT_CONFIRMATIONS: u64 = 3; pub mod mock; mod test_data; @@ -48,8 +48,9 @@ async fn transaction_state() { test_data::commit_operation(0), // Will be committed. test_data::commit_operation(1), // Will be pending because of not enough confirmations. test_data::commit_operation(2), // Will be failed. - test_data::commit_operation(3), // Will be stuck. - test_data::commit_operation(4), // Will be pending due no response. + test_data::commit_operation(3), // Will be failed and pending (not enough confirmations). + test_data::commit_operation(4), // Will be stuck. + test_data::commit_operation(5), // Will be pending due no response. ]; let mut eth_operations = Vec::with_capacity(operations.len()); @@ -99,6 +100,20 @@ async fn transaction_state() { .add_execution(ð_operations[2].used_tx_hashes[0], &failed_response) .await; + // Pending failed operation. + let pending_failed_response = ExecutedTxStatus { + confirmations: WAIT_CONFIRMATIONS - 1, + success: false, + receipt: Some(Default::default()), + }; + eth_sender + .ethereum + .add_execution( + ð_operations[3].used_tx_hashes[0], + &pending_failed_response, + ) + .await; + // Committed operation. assert_eq!( eth_sender @@ -141,13 +156,27 @@ async fn transaction_state() { TxCheckOutcome::Failed(Default::default()) ); - // Stuck operation. + // Pending failed operation should be considered as pending. 
    assert_eq!(
        eth_sender
            .check_transaction_state(
                TxCheckMode::Latest,
                &eth_operations[3],
                &eth_operations[3].used_tx_hashes[0],
+                current_block + pending_failed_response.confirmations,
+            )
+            .await
+            .unwrap(),
+        TxCheckOutcome::Pending
+    );
+
+    // Stuck operation.
+    assert_eq!(
+        eth_sender
+            .check_transaction_state(
+                TxCheckMode::Latest,
+                &eth_operations[4],
+                &eth_operations[4].used_tx_hashes[0],
                 current_block + EXPECTED_WAIT_TIME_BLOCKS,
             )
             .await
@@ -160,8 +189,8 @@
         eth_sender
             .check_transaction_state(
                 TxCheckMode::Latest,
-                &eth_operations[4],
-                &eth_operations[4].used_tx_hashes[0],
+                &eth_operations[5],
+                &eth_operations[5].used_tx_hashes[0],
                 current_block + EXPECTED_WAIT_TIME_BLOCKS - 1,
             )
             .await
@@ -174,8 +203,8 @@
         eth_sender
             .check_transaction_state(
                 TxCheckMode::Old,
-                &eth_operations[4],
-                &eth_operations[4].used_tx_hashes[0],
+                &eth_operations[5],
+                &eth_operations[5].used_tx_hashes[0],
                 current_block + EXPECTED_WAIT_TIME_BLOCKS - 1,
             )
             .await
@@ -486,7 +515,7 @@ async fn restore_state() {
     let deadline_block = eth_sender.get_deadline_block(1);
     let commit_op_tx = create_signed_tx(0, &eth_sender, &commit_op, deadline_block, 0).await;
 
-    let deadline_block = eth_sender.get_deadline_block(2);
+    let deadline_block = eth_sender.get_deadline_block(1 + WAIT_CONFIRMATIONS);
     let verify_op_tx = create_signed_tx(1, &eth_sender, &verify_op, deadline_block, 1).await;
 
     let operations = vec![commit_op, verify_op];
@@ -669,7 +698,7 @@ async fn concurrent_operations_order() {
     // If we'll send all the operations together, the order will be "commit-verify-commit-verify-withdraw",
     // since withdraw is only sent after verify operation is confirmed.
     let (commit_op, verify_op) = operations_iter.next().unwrap();
-    println!("BEGIN");
+
    eth_sender
        .db
        .send_operation(commit_op.clone())
@@ -680,7 +709,7 @@ async fn concurrent_operations_order() {
        .send_operation(verify_op.clone())
        .await
        .unwrap();
-    println!("END");
+
    eth_sender.load_new_operations().await;

    // Call `proceed_next_operations`. Several txs should be sent.
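Reviewer note: the `check_transaction_state` change above makes a failed execution final only once it has gathered `wait_confirmations` confirmations; until then it is reported as pending, which is exactly what the updated test expectations encode. A minimal, self-contained sketch of that decision flow (simplified types: the real `ExecutedTxStatus` also carries a receipt, and the stuck/deadline handling is omitted here):

    #[derive(Debug, PartialEq)]
    enum Outcome {
        Committed,
        Pending,
        Failed,
    }

    struct Status {
        confirmations: u64,
        success: bool,
    }

    // Both success and failure become final only once the transaction has
    // enough confirmations; before that the sender keeps polling it.
    fn outcome(status: &Status, wait_confirmations: u64) -> Outcome {
        if status.confirmations < wait_confirmations {
            Outcome::Pending
        } else if status.success {
            Outcome::Committed
        } else {
            Outcome::Failed
        }
    }

    fn main() {
        // With WAIT_CONFIRMATIONS = 3, a failed tx at 2 confirmations is still pending...
        assert_eq!(outcome(&Status { confirmations: 2, success: false }, 3), Outcome::Pending);
        // ...and becomes a reportable failure once fully confirmed.
        assert_eq!(outcome(&Status { confirmations: 3, success: false }, 3), Outcome::Failed);
    }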
diff --git a/core/bin/zksync_witness_generator/src/lib.rs b/core/bin/zksync_witness_generator/src/lib.rs index cad75b99f3..724fec4d72 100644 --- a/core/bin/zksync_witness_generator/src/lib.rs +++ b/core/bin/zksync_witness_generator/src/lib.rs @@ -7,7 +7,7 @@ use actix_web::{web, App, HttpResponse, HttpServer}; use futures::channel::mpsc; use serde::{Deserialize, Serialize}; // Workspace deps -use zksync_config::{ConfigurationOptions, ProverOptions}; +use zksync_config::ProverOptions; use zksync_storage::{ConnectionPool, StorageProcessor}; use zksync_types::BlockNumber; // Local deps @@ -72,33 +72,33 @@ async fn get_job( return Err(actix_web::error::ErrorBadRequest("empty name")); } let mut storage = data.access_storage().await?; - // let ret = storage - // .prover_schema() - // .prover_run_for_next_commit(&r.name, data.prover_timeout, r.block_size) - // .await - // .map_err(|e| { - // vlog::warn!("could not get next unverified commit operation: {}", e); - // actix_web::error::ErrorInternalServerError("storage layer error") - // })?; - // let ret = ; - // if let Some(prover_run) = ret { - // log::info!( - // "satisfied request block {} to prove from worker: {}", - // prover_run.block_number, - // r.name - // ); - // Ok(todo!()) - // // Ok(HttpResponse::Ok().json(ProverInputResponse { - // // job_id: prover_run.id, - // // data: Some(ret.data), - // // })) - // } else { - // Ok(HttpResponse::Ok().json(ProverInputResponse { - // job_id: 0, - // data: todo!(), - // })) - // } - todo!("implement prover get data from storage") + let ret = storage + .prover_schema() + .get_idle_prover_job_from_job_queue() + .await + .map_err(|e| { + vlog::warn!("could not get next unverified commit operation: {}", e); + actix_web::error::ErrorInternalServerError("storage layer error") + })?; + if let Some(prover_job) = ret { + log::info!("satisfied request to prove from worker"); + Ok(HttpResponse::Ok().json(ProverInputResponse { + job_id: prover_job.job_id, + first_block: prover_job.first_block, + last_block: prover_job.last_block, + data: Some( + serde_json::from_value(prover_job.job_data) + .expect("Failed to parse prover job from db"), + ), + })) + } else { + Ok(HttpResponse::Ok().json(ProverInputResponse { + job_id: 0, + first_block: 0, + last_block: 0, + data: None, + })) + } } async fn prover_data( @@ -335,7 +335,6 @@ pub fn run_prover_server( connection_pool: zksync_storage::ConnectionPool, panic_notify: mpsc::Sender, prover_options: ProverOptions, - config_options: ConfigurationOptions, ) { thread::Builder::new() .name("prover_server".to_string()) @@ -362,9 +361,9 @@ pub fn run_prover_server( }; // Start pool maintainer threads. - for offset in 0..config_options.witness_generators { + for offset in 0..prover_options.witness_generators { let start_block = (last_verified_block + offset + 1) as u32; - let block_step = config_options.witness_generators as u32; + let block_step = prover_options.witness_generators as u32; log::info!( "Starting witness generator ({},{})", start_block, @@ -378,15 +377,12 @@ pub fn run_prover_server( ); pool_maintainer.start(panic_notify.clone()); } - // Start HTTP server. 
- let idle_provers = config_options.idle_provers; + let gone_timeout = prover_options.gone_timeout; + let idle_provers = prover_options.idle_provers; HttpServer::new(move || { - let app_state = AppState::new( - connection_pool.clone(), - prover_options.gone_timeout, - idle_provers, - ); + let app_state = + AppState::new(connection_pool.clone(), gone_timeout, idle_provers); // By calling `register_data` instead of `data` we're avoiding double // `Arc` wrapping of the object. @@ -403,7 +399,7 @@ pub fn run_prover_server( web::post().to(required_replicas), ) }) - .bind(&config_options.prover_server_address) + .bind(&prover_options.prover_server_address) .expect("failed to bind") .run() .await diff --git a/core/bin/zksync_witness_generator/src/main.rs b/core/bin/zksync_witness_generator/src/main.rs index 82a560ecff..c36eff53ee 100644 --- a/core/bin/zksync_witness_generator/src/main.rs +++ b/core/bin/zksync_witness_generator/src/main.rs @@ -1,6 +1,6 @@ use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; use std::cell::RefCell; -use zksync_config::{ConfigurationOptions, ProverOptions}; +use zksync_config::ProverOptions; use zksync_storage::ConnectionPool; use zksync_witness_generator::run_prover_server; @@ -23,15 +23,9 @@ async fn main() -> anyhow::Result<()> { } let connection_pool = ConnectionPool::new(Some(WITNESS_GENERATOR_CONNECTION_POOL_SIZE)); - let config_options = ConfigurationOptions::from_env(); let prover_options = ProverOptions::from_env(); - run_prover_server( - connection_pool, - stop_signal_sender, - prover_options, - config_options, - ); + run_prover_server(connection_pool, stop_signal_sender, prover_options); stop_signal_receiver.next().await; diff --git a/core/bin/zksync_witness_generator/tests/prover_server.rs b/core/bin/zksync_witness_generator/tests/prover_server.rs index 1982baf9ea..3b9d4eab2b 100644 --- a/core/bin/zksync_witness_generator/tests/prover_server.rs +++ b/core/bin/zksync_witness_generator/tests/prover_server.rs @@ -23,18 +23,17 @@ async fn connect_to_db() -> zksync_storage::ConnectionPool { async fn spawn_server(prover_timeout: time::Duration, rounds_interval: time::Duration) -> String { // TODO: make single server spawn for all tests (#1108). let bind_to = "127.0.0.1:8088"; - let mut config_opt = ConfigurationOptions::from_env(); - config_opt.prover_server_address = net::SocketAddr::from_str(bind_to).unwrap(); let mut prover_options = ProverOptions::from_env(); prover_options.prepare_data_interval = rounds_interval; prover_options.gone_timeout = prover_timeout; + prover_options.prover_server_address = net::SocketAddr::from_str(bind_to).unwrap(); let conn_pool = connect_to_db().await; let (tx, _rx) = mpsc::channel(1); thread::spawn(move || { - run_prover_server(conn_pool, tx, prover_options, config_opt); + run_prover_server(conn_pool, tx, prover_options); }); bind_to.to_string() } diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index c879fb24a6..723024cc29 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,14 +1,21 @@ // Built-in deps -use std::{env, net::SocketAddr, str::FromStr, time::Duration}; +use std::{collections::HashSet, env, net::SocketAddr, str::FromStr, time::Duration}; // External uses use url::Url; // Workspace uses -use zksync_basic_types::{H160, H256}; +use zksync_basic_types::{Address, H256}; use zksync_utils::{get_env, parse_env, parse_env_if_exists, parse_env_with}; // Local uses pub mod test_config; +/// Makes address for bind from port. 
+fn addr_from_port(port: u16) -> SocketAddr {
+    format!("0.0.0.0:{}", port)
+        .parse::<SocketAddr>()
+        .expect("Can't get address from port")
+}
+
 /// Configuration options for `eth_sender`.
 #[derive(Debug, Clone)]
 pub struct EthSenderOptions {
@@ -35,29 +42,53 @@ impl EthSenderOptions {
     }
 }
 
+/// Configuration options for `eth_client`.
+#[derive(Debug, Clone)]
+pub struct EthClientOptions {
+    pub chain_id: u8,
+    pub gas_price_factor: f64,
+    pub operator_commit_eth_addr: Address,
+    pub operator_private_key: Option<H256>,
+    pub web3_url: String,
+    pub contract_eth_addr: Address,
+}
+
+impl EthClientOptions {
+    pub fn from_env() -> Self {
+        Self {
+            operator_commit_eth_addr: parse_env_with("OPERATOR_COMMIT_ETH_ADDRESS", |s| &s[2..]),
+            operator_private_key: parse_env_if_exists("OPERATOR_PRIVATE_KEY"),
+            chain_id: parse_env("CHAIN_ID"),
+            gas_price_factor: parse_env("GAS_PRICE_FACTOR"),
+            web3_url: get_env("WEB3_URL"),
+            contract_eth_addr: parse_env_with("CONTRACT_ADDR", |s| &s[2..]),
+        }
+    }
+}
+
 #[derive(Debug, Clone)]
 pub struct ProverOptions {
     pub prepare_data_interval: Duration,
     pub heartbeat_interval: Duration,
     pub cycle_wait: Duration,
     pub gone_timeout: Duration,
+    pub prover_server_address: SocketAddr,
+    pub idle_provers: u32,
+    pub witness_generators: usize,
 }
 
 impl ProverOptions {
     /// Parses the configuration options values from the environment variables.
     /// Panics if any of options is missing or has inappropriate value.
     pub fn from_env() -> Self {
-        let prepare_data_interval =
-            Duration::from_millis(parse_env("PROVER_PREPARE_DATA_INTERVAL"));
-        let heartbeat_interval = Duration::from_millis(parse_env("PROVER_HEARTBEAT_INTERVAL"));
-        let cycle_wait = Duration::from_millis(parse_env("PROVER_CYCLE_WAIT"));
-        let gone_timeout = Duration::from_millis(parse_env("PROVER_GONE_TIMEOUT"));
-
         Self {
-            prepare_data_interval,
-            heartbeat_interval,
-            cycle_wait,
-            gone_timeout,
+            prepare_data_interval: Duration::from_millis(parse_env("PROVER_PREPARE_DATA_INTERVAL")),
+            heartbeat_interval: Duration::from_millis(parse_env("PROVER_HEARTBEAT_INTERVAL")),
+            cycle_wait: Duration::from_millis(parse_env("PROVER_CYCLE_WAIT")),
+            gone_timeout: Duration::from_millis(parse_env("PROVER_GONE_TIMEOUT")),
+            prover_server_address: addr_from_port(parse_env("PROVER_SERVER_PORT")),
+            witness_generators: parse_env("WITNESS_GENERATORS"),
+            idle_provers: parse_env("IDLE_PROVERS"),
         }
     }
 }
@@ -76,7 +107,7 @@ impl AdminServerOptions {
     pub fn from_env() -> Self {
         Self {
             admin_http_server_url: parse_env("ADMIN_SERVER_API_URL"),
-            admin_http_server_address: parse_env("ADMIN_SERVER_API_BIND"),
+            admin_http_server_address: addr_from_port(parse_env("ADMIN_SERVER_API_PORT")),
             secret_auth: parse_env("SECRET_AUTH"),
         }
     }
@@ -133,38 +164,87 @@ impl MiniblockTimings {
     }
 }
 
+/// Configuration options related to fee ticker.
+#[derive(Debug)]
+pub struct FeeTickerOptions {
+    /// Source to fetch token prices from (e.g. CoinGecko or coinmarketcap).
+    pub token_price_source: TokenPriceSource,
+    /// Fee increase coefficient for fast processing of withdrawal.
+    pub fast_processing_coeff: f64,
+    /// List of the tokens that aren't acceptable for paying fee in.
+    pub disabled_tokens: HashSet<Address>,
+    /// Tokens for which subsidies are disabled.
+    pub not_subsidized_tokens: HashSet<Address>,
+}
+
+impl FeeTickerOptions {
+    fn comma_separated_addresses(name: &str) -> HashSet<Address> {
+        get_env(name)
+            .split(',')
+            .map(|p| p.parse().unwrap())
+            .collect()
+    }
+
+    pub fn from_env() -> Self {
+        Self {
+            token_price_source: TokenPriceSource::from_env(),
+            fast_processing_coeff: parse_env("TICKER_FAST_PROCESSING_COEFF"),
+            disabled_tokens: Self::comma_separated_addresses("TICKER_DISABLED_TOKENS"),
+            not_subsidized_tokens: Self::comma_separated_addresses("NOT_SUBSIDIZED_TOKENS"),
+        }
+    }
+}
+
 #[derive(Debug, Clone)]
-pub struct ConfigurationOptions {
+pub struct ApiServerOptions {
     pub rest_api_server_address: SocketAddr,
     pub json_rpc_http_server_address: SocketAddr,
     pub json_rpc_ws_server_address: SocketAddr,
     pub core_server_address: SocketAddr,
     pub core_server_url: String,
+    pub api_requests_caches_size: usize,
+    /// Minimum age of the account for `ForcedExit` operations to be allowed.
+    pub forced_exit_minimum_account_age: Duration,
+    pub enforce_pubkey_change_fee: bool,
+}
+
+impl ApiServerOptions {
+    pub fn from_env() -> Self {
+        let forced_exit_minimum_account_age =
+            Duration::from_secs(parse_env::<u64>("FORCED_EXIT_MINIMUM_ACCOUNT_AGE_SECS"));
+
+        if forced_exit_minimum_account_age.as_secs() == 0 {
+            log::error!("Forced exit minimum account age is set to 0, this is an incorrect value for production");
+        }
+
+        Self {
+            rest_api_server_address: addr_from_port(parse_env("REST_API_PORT")),
+            json_rpc_http_server_address: addr_from_port(parse_env("HTTP_RPC_API_PORT")),
+            json_rpc_ws_server_address: addr_from_port(parse_env("WS_API_PORT")),
+            core_server_address: addr_from_port(parse_env("PRIVATE_CORE_SERVER_PORT")),
+            core_server_url: parse_env("PRIVATE_CORE_SERVER_URL"),
+            api_requests_caches_size: parse_env("API_REQUESTS_CACHES_SIZE"),
+            forced_exit_minimum_account_age,
+            enforce_pubkey_change_fee: parse_env_if_exists("ENFORCE_PUBKEY_CHANGE_FEE")
+                .unwrap_or(true),
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct ConfigurationOptions {
     pub web3_url: String,
     pub genesis_tx_hash: H256,
-    pub contract_eth_addr: H160,
-    pub governance_eth_addr: H160,
-    pub operator_fee_eth_addr: H160,
-    pub operator_commit_eth_addr: H160,
-    pub operator_private_key: Option<H256>,
-    pub chain_id: u8,
-    pub gas_price_factor: f64,
-    pub prover_server_address: SocketAddr,
+    pub contract_eth_addr: Address,
+    pub governance_eth_addr: Address,
+    pub operator_fee_eth_addr: Address,
     pub confirmations_for_eth_event: u64,
-    pub api_requests_caches_size: usize,
     pub available_block_chunk_sizes: Vec<usize>,
     pub max_number_of_withdrawals_per_block: usize,
     pub eth_watch_poll_interval: Duration,
     pub eth_network: String,
-    pub idle_provers: u32,
     pub miniblock_timings: MiniblockTimings,
     pub prometheus_export_port: u16,
-    pub token_price_source: TokenPriceSource,
-    pub witness_generators: usize,
-    /// Fee increase coefficient for fast processing of withdrawal.
-    pub ticker_fast_processing_coeff: f64,
-    pub forced_exit_minimum_account_age: Duration,
-    pub enforce_pubkey_change_fee: bool,
 }
 
 impl ConfigurationOptions {
@@ -179,46 +259,21 @@ impl ConfigurationOptions {
 
         available_block_chunk_sizes.sort_unstable();
 
-        let forced_exit_minimum_account_age =
-            Duration::from_secs(parse_env::<u64>("FORCED_EXIT_MINIMUM_ACCOUNT_AGE_SECS"));
-
-        if forced_exit_minimum_account_age.as_secs() == 0 {
-            log::error!("Forced exit minimum account age is set to 0, this is an incorrect value for production");
-        }
-
         Self {
-            rest_api_server_address: parse_env("REST_API_BIND"),
-            json_rpc_http_server_address: parse_env("HTTP_RPC_API_BIND"),
-            json_rpc_ws_server_address: parse_env("WS_API_BIND"),
-            core_server_address: parse_env("PRIVATE_CORE_SERVER_BIND"),
-            core_server_url: parse_env("PRIVATE_CORE_SERVER_URL"),
             web3_url: get_env("WEB3_URL"),
             genesis_tx_hash: parse_env_with("GENESIS_TX_HASH", |s| &s[2..]),
             contract_eth_addr: parse_env_with("CONTRACT_ADDR", |s| &s[2..]),
             governance_eth_addr: parse_env_with("GOVERNANCE_ADDR", |s| &s[2..]),
-            operator_commit_eth_addr: parse_env_with("OPERATOR_COMMIT_ETH_ADDRESS", |s| &s[2..]),
             operator_fee_eth_addr: parse_env_with("OPERATOR_FEE_ETH_ADDRESS", |s| &s[2..]),
-            operator_private_key: parse_env_if_exists("OPERATOR_PRIVATE_KEY"),
-            chain_id: parse_env("CHAIN_ID"),
-            gas_price_factor: parse_env("GAS_PRICE_FACTOR"),
-            prover_server_address: parse_env("PROVER_SERVER_BIND"),
             confirmations_for_eth_event: parse_env("CONFIRMATIONS_FOR_ETH_EVENT"),
-            api_requests_caches_size: parse_env("API_REQUESTS_CACHES_SIZE"),
             available_block_chunk_sizes,
             max_number_of_withdrawals_per_block: parse_env("MAX_NUMBER_OF_WITHDRAWALS_PER_BLOCK"),
             eth_watch_poll_interval: Duration::from_millis(parse_env::<u64>(
                 "ETH_WATCH_POLL_INTERVAL",
             )),
             eth_network: parse_env("ETH_NETWORK"),
-            idle_provers: parse_env("IDLE_PROVERS"),
             miniblock_timings: MiniblockTimings::from_env(),
             prometheus_export_port: parse_env("PROMETHEUS_EXPORT_PORT"),
-            token_price_source: TokenPriceSource::from_env(),
-            witness_generators: parse_env("WITNESS_GENERATORS"),
-            ticker_fast_processing_coeff: parse_env("TICKER_FAST_PROCESSING_COEFF"),
-            forced_exit_minimum_account_age,
-            enforce_pubkey_change_fee: parse_env_if_exists("ENFORCE_PUBKEY_CHANGE_FEE")
-                .unwrap_or(true),
         }
     }
 }
diff --git a/core/lib/config/src/test_config.rs b/core/lib/config/src/test_config.rs
index d812fc5772..1a84ed8ccf 100644
--- a/core/lib/config/src/test_config.rs
+++ b/core/lib/config/src/test_config.rs
@@ -56,19 +56,37 @@ impl EIP1271Config {
 pub struct EthConfig {
     /// Address of the local Ethereum node.
     pub web3_url: String,
+    /// Set of 12 words for connecting to an Ethereum wallet.
+    pub test_mnemonic: String,
 }
 
-impl EthConfig {
-    pub fn load() -> Self {
-        let object = load_json(&config_path("constant/eth.json"));
-        serde_json::from_value(object).expect("Cannot deserialize Ethereum test config")
-    }
+/// Common API addresses.
+#[derive(Debug, Deserialize)]
+pub struct ApiConfig {
+    /// Address of the REST API.
+    pub rest_api_url: String,
 }
 
+macro_rules!
impl_config { + ($name_config:ident, $file:tt) => { + impl $name_config { + pub fn load() -> Self { + let object = load_json(&config_path(&format!("{}.json", $file))); + serde_json::from_value(object) + .expect(&format!("Cannot deserialize config from '{}'", $file)) + } + } + }; +} + +impl_config!(ApiConfig, "constant/api"); +impl_config!(EthConfig, "constant/eth"); + #[derive(Debug)] pub struct TestConfig { pub eip1271: EIP1271Config, pub eth: EthConfig, + pub api: ApiConfig, } impl TestConfig { @@ -76,6 +94,7 @@ impl TestConfig { Self { eip1271: EIP1271Config::load(), eth: EthConfig::load(), + api: ApiConfig::load(), } } } diff --git a/core/lib/crypto/README.md b/core/lib/crypto/README.md index 82fd52f468..2a4f3d4b9c 100644 --- a/core/lib/crypto/README.md +++ b/core/lib/crypto/README.md @@ -4,7 +4,7 @@ ## License -`zksync_models` is a part of zkSync stack, which is distributed under the terms of both the MIT license and the Apache +`zksync_crypto` is a part of zkSync stack, which is distributed under the terms of both the MIT license and the Apache License (Version 2.0). See [LICENSE-APACHE](../../LICENSE-APACHE), [LICENSE-MIT](../../LICENSE-MIT) for details. diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs index 253d9c41fa..2d888b57fe 100644 --- a/core/lib/eth_signer/src/pk_signer.rs +++ b/core/lib/eth_signer/src/pk_signer.rs @@ -54,7 +54,7 @@ mod test { use crate::EthereumSigner; use zksync_types::{H160, H256, U256}; - #[actix_rt::test] + #[tokio::test] async fn test_generating_signature() { let private_key = H256::from([5; 32]); let signer = PrivateKeySigner::new(private_key); diff --git a/core/lib/state/src/handler/deposit.rs b/core/lib/state/src/handler/deposit.rs index 04a60fcc80..e49bcee1d8 100644 --- a/core/lib/state/src/handler/deposit.rs +++ b/core/lib/state/src/handler/deposit.rs @@ -21,6 +21,7 @@ impl TxHandler for ZkSyncState { } else { self.get_free_account_id() }; + let op = DepositOp { priority_op, account_id, diff --git a/core/lib/state/src/tests/operations/forced_exit.rs b/core/lib/state/src/tests/operations/forced_exit.rs new file mode 100644 index 0000000000..dadce0a6fd --- /dev/null +++ b/core/lib/state/src/tests/operations/forced_exit.rs @@ -0,0 +1,169 @@ +use crate::tests::{AccountState::*, PlasmaTestBuilder}; +use num::{BigUint, Zero}; +use zksync_types::{account::AccountUpdate, tx::ForcedExit}; + +/// Check ForcedExit operation +#[test] +fn success() { + let token_id = 0; + let amount = BigUint::from(100u32); + let fee = BigUint::from(10u32); + + let mut tb = PlasmaTestBuilder::new(); + + let (initiator_account_id, initiator_account, initiator_sk) = tb.add_account(Unlocked); + let (target_account_id, target_account, _) = tb.add_account(Locked); + + tb.set_balance(initiator_account_id, token_id, fee.clone()); + tb.set_balance(target_account_id, token_id, amount.clone()); + + let forced_exit = ForcedExit::new_signed( + initiator_account_id, + target_account.address, + token_id, + fee.clone(), + initiator_account.nonce, + &initiator_sk, + ) + .unwrap(); + + tb.test_tx_success( + forced_exit.into(), + &[ + ( + initiator_account_id, + AccountUpdate::UpdateBalance { + old_nonce: initiator_account.nonce, + new_nonce: initiator_account.nonce + 1, + balance_update: (token_id, fee, BigUint::zero()), + }, + ), + ( + target_account_id, + AccountUpdate::UpdateBalance { + old_nonce: target_account.nonce, + new_nonce: target_account.nonce, + balance_update: (token_id, amount, BigUint::zero()), + }, + ), + ], + ) +} + +/// Check ForcedExit 
failure if target wallet is unlocked
+#[test]
+fn unlocked_target() {
+    let token_id = 0;
+    let amount = BigUint::from(100u32);
+    let fee = BigUint::from(10u32);
+
+    let mut tb = PlasmaTestBuilder::new();
+
+    let (initiator_account_id, initiator_account, initiator_sk) = tb.add_account(Unlocked);
+    let (target_account_id, target_account, _) = tb.add_account(Unlocked);
+
+    tb.set_balance(initiator_account_id, token_id, fee.clone());
+    tb.set_balance(target_account_id, token_id, amount);
+
+    let forced_exit = ForcedExit::new_signed(
+        initiator_account_id,
+        target_account.address,
+        token_id,
+        fee,
+        initiator_account.nonce,
+        &initiator_sk,
+    )
+    .unwrap();
+
+    tb.test_tx_fail(
+        forced_exit.into(),
+        "Target account is not locked; forced exit is forbidden",
+    );
+}
+
+/// Check ForcedExit failure if not enough funds
+#[test]
+fn insufficient_funds() {
+    let token_id = 0;
+    let amount = BigUint::from(100u32);
+    let fee = BigUint::from(10u32);
+
+    let mut tb = PlasmaTestBuilder::new();
+
+    let (initiator_account_id, initiator_account, initiator_sk) = tb.add_account(Unlocked);
+    let (target_account_id, target_account, _) = tb.add_account(Locked);
+
+    tb.set_balance(target_account_id, token_id, amount);
+
+    let forced_exit = ForcedExit::new_signed(
+        initiator_account_id,
+        target_account.address,
+        token_id,
+        fee,
+        initiator_account.nonce,
+        &initiator_sk,
+    )
+    .unwrap();
+
+    tb.test_tx_fail(
+        forced_exit.into(),
+        "Initiator account: Not enough balance to cover fees",
+    );
+}
+
+/// Check ForcedExit failure if nonce is incorrect
+#[test]
+fn nonce_mismatch() {
+    let token_id = 0;
+    let amount = BigUint::from(100u32);
+    let fee = BigUint::from(10u32);
+
+    let mut tb = PlasmaTestBuilder::new();
+
+    let (initiator_account_id, initiator_account, initiator_sk) = tb.add_account(Unlocked);
+    let (target_account_id, target_account, _) = tb.add_account(Locked);
+
+    tb.set_balance(initiator_account_id, token_id, fee.clone());
+    tb.set_balance(target_account_id, token_id, amount);
+
+    let forced_exit = ForcedExit::new_signed(
+        initiator_account_id,
+        target_account.address,
+        token_id,
+        fee,
+        initiator_account.nonce + 42,
+        &initiator_sk,
+    )
+    .unwrap();
+
+    tb.test_tx_fail(forced_exit.into(), "Nonce mismatch")
+}
+
+/// Check ForcedExit failure if account address
+/// does not correspond to account_id
+#[test]
+fn invalid_account_id() {
+    let token_id = 0;
+    let amount = BigUint::from(100u32);
+    let fee = BigUint::from(10u32);
+
+    let mut tb = PlasmaTestBuilder::new();
+
+    let (initiator_account_id, initiator_account, initiator_sk) = tb.add_account(Unlocked);
+    let (target_account_id, target_account, _) = tb.add_account(Locked);
+
+    tb.set_balance(initiator_account_id, token_id, fee.clone());
+    tb.set_balance(target_account_id, token_id, amount);
+
+    let forced_exit = ForcedExit::new_signed(
+        initiator_account_id + 42,
+        target_account.address,
+        token_id,
+        fee,
+        initiator_account.nonce,
+        &initiator_sk,
+    )
+    .unwrap();
+
+    tb.test_tx_fail(forced_exit.into(), "Initiator account does not exist")
+}
diff --git a/core/lib/state/src/tests/operations/mod.rs b/core/lib/state/src/tests/operations/mod.rs
index c8785592ff..4f87dba71d 100644
--- a/core/lib/state/src/tests/operations/mod.rs
+++ b/core/lib/state/src/tests/operations/mod.rs
@@ -1,5 +1,6 @@
 mod change_pub_key;
 mod close;
+mod forced_exit;
 mod priority_ops;
 mod transfer;
 mod withdraw;
diff --git a/core/lib/storage/sqlx-data.json b/core/lib/storage/sqlx-data.json
index 6ba100b930..f4b1955be8 100644
--- a/core/lib/storage/sqlx-data.json
+++ b/core/lib/storage/sqlx-data.json @@ -1590,6 +1590,26 @@ ] } }, + "60cf573e253358218a6319233221e8c2ff0561fd7ffbf8339a11a4509d955442": { + "query": "SELECT count(*) from mempool_txs\n WHERE tx_hash = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + null + ] + } + }, "62304acbc93efab5117766689c6413d152dc0104c49c6f305e26b245b6ff7cde": { "query": "SELECT * FROM executed_priority_operations WHERE eth_hash = $1", "describe": { @@ -3283,6 +3303,56 @@ ] } }, + "debbe23f0c730c331482c798387d1739911923edcafc2bd80463464ff98f3b71": { + "query": "SELECT * from mempool_txs\n WHERE tx_hash = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "tx_hash", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "tx", + "type_info": "Jsonb" + }, + { + "ordinal": 3, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 4, + "name": "eth_sign_data", + "type_info": "Jsonb" + }, + { + "ordinal": 5, + "name": "batch_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false + ] + } + }, "e42d1180b05adcce696d87de411553e385d36018fe60e0963a348adc00ad874b": { "query": "UPDATE eth_parameters\n SET nonce = $1\n WHERE id = true", "describe": { diff --git a/core/lib/storage/src/chain/account/mod.rs b/core/lib/storage/src/chain/account/mod.rs index c6c9b68f4c..0f668c69b9 100644 --- a/core/lib/storage/src/chain/account/mod.rs +++ b/core/lib/storage/src/chain/account/mod.rs @@ -68,7 +68,10 @@ impl<'a, 'c> AccountSchema<'a, 'c> { .await? .map(|a| (account_id, a)); - metrics::histogram!("sql.chain", start.elapsed(), "account" => "account_state_by_address"); + metrics::histogram!( + "sql.chain.account.account_state_by_address", + start.elapsed() + ); Ok(StoredAccountState { committed, verified, @@ -161,7 +164,10 @@ impl<'a, 'c> AccountSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "account" => "last_committed_state_for_account"); + metrics::histogram!( + "sql.chain.account.last_committed_state_for_account", + start.elapsed() + ); Ok(account_state) } @@ -173,7 +179,9 @@ impl<'a, 'c> AccountSchema<'a, 'c> { ) -> QueryResult> { let start = Instant::now(); let (_, account) = self.get_account_and_last_block(account_id).await?; - metrics::histogram!("sql.chain", start.elapsed(), "account" => "last_verified_state_for_account" + metrics::histogram!( + "sql.chain.account.last_verified_state_for_account", + start.elapsed() ); Ok(account) } @@ -229,7 +237,10 @@ impl<'a, 'c> AccountSchema<'a, 'c> { }; transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "account" => "get_account_and_last_block"); + metrics::histogram!( + "sql.chain.account.get_account_and_last_block", + start.elapsed() + ); result } } diff --git a/core/lib/storage/src/chain/block/mod.rs b/core/lib/storage/src/chain/block/mod.rs index d43ab2131d..50f0238013 100644 --- a/core/lib/storage/src/chain/block/mod.rs +++ b/core/lib/storage/src/chain/block/mod.rs @@ -62,7 +62,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { let result = stored.into_op(&mut transaction).await; transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "execute_operation"); + metrics::histogram!("sql.chain.block.execute_operation", start.elapsed()); result } @@ -92,7 +92,7 @@ impl<'a, 
'c> BlockSchema<'a, 'c> { } } } - metrics::histogram!("sql.chain", start.elapsed(), "block" => "save_block_transactions"); + metrics::histogram!("sql.chain.block.save_block_transactions", start.elapsed()); Ok(()) } @@ -106,7 +106,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_storage_block"); + metrics::histogram!("sql.chain.block.get_storage_block", start.elapsed()); Ok(block) } @@ -147,7 +147,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { stored_block.timestamp.unwrap_or_default() as u64, )); - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_block"); + metrics::histogram!("sql.chain.block.get_block", start.elapsed()); Ok(result) } @@ -164,7 +164,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { ExecutedOperations::PriorityOp(priorop) => Some(priorop.op), }) .collect(); - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_block_operations"); + metrics::histogram!("sql.chain.block.get_block_operations", start.elapsed()); Ok(result) } @@ -216,7 +216,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { .fetch_all(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_block_transactions"); + metrics::histogram!("sql.chain.block.get_block_transactions", start.elapsed()); Ok(block_txs) } @@ -279,7 +279,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { } }); - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_block_executed_ops"); + metrics::histogram!("sql.chain.block.get_block_executed_ops", start.elapsed()); Ok(executed_operations) } @@ -335,7 +335,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { ).fetch_all(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "load_block_range"); + metrics::histogram!("sql.chain.block.load_block_range", start.elapsed()); Ok(details) } @@ -444,9 +444,8 @@ impl<'a, 'c> BlockSchema<'a, 'c> { .flatten(); metrics::histogram!( - "sql.chain", - start.elapsed(), - "block" => "find_block_by_height_or_hash" + "sql.chain.block.find_block_by_height_or_hash", + start.elapsed() ); result } @@ -461,7 +460,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { } else { None }; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "load_commit_op"); + metrics::histogram!("sql.chain.block.load_commit_op", start.elapsed()); result } @@ -475,7 +474,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { let result = OperationsSchema(self.0) .get_last_block_by_action(ActionType::COMMIT, None) .await; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_last_committed_block"); + metrics::histogram!("sql.chain.block.get_last_committed_block", start.elapsed()); result } @@ -489,7 +488,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { let result = OperationsSchema(self.0) .get_last_block_by_action(ActionType::VERIFY, None) .await; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_last_verified_block"); + metrics::histogram!("sql.chain.block.get_last_verified_block", start.elapsed()); result } @@ -500,7 +499,10 @@ impl<'a, 'c> BlockSchema<'a, 'c> { let result = OperationsSchema(self.0) .get_last_block_by_action(ActionType::VERIFY, Some(true)) .await; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "get_last_verified_confirmed_block"); + metrics::histogram!( + "sql.chain.block.get_last_verified_confirmed_block", + start.elapsed() + ); result } @@ -514,7 +516,10 @@ impl<'a, 'c> BlockSchema<'a, 'c> { ) .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" 
=> "load_storage_pending_block"); + metrics::histogram!( + "sql.chain.block.load_storage_pending_block", + start.elapsed() + ); Ok(maybe_block) } @@ -560,7 +565,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "load_pending_block"); + metrics::histogram!("sql.chain.block.load_pending_block", start.elapsed()); Ok(Some(result)) } @@ -613,7 +618,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { .await?; transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "load_pending_block"); + metrics::histogram!("sql.chain.block.load_pending_block", start.elapsed()); Ok(()) } @@ -690,7 +695,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "save_block"); + metrics::histogram!("sql.chain.block.save_block", start.elapsed()); Ok(()) } @@ -718,7 +723,7 @@ impl<'a, 'c> BlockSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "block" => "store_account_tree_cache"); + metrics::histogram!("sql.chain.block.store_account_tree_cache", start.elapsed()); Ok(()) } diff --git a/core/lib/storage/src/chain/mempool/mod.rs b/core/lib/storage/src/chain/mempool/mod.rs index 9509ca4c0a..a570203146 100644 --- a/core/lib/storage/src/chain/mempool/mod.rs +++ b/core/lib/storage/src/chain/mempool/mod.rs @@ -1,5 +1,5 @@ // Built-in deps -use std::{collections::VecDeque, time::Instant}; +use std::{collections::VecDeque, convert::TryFrom, time::Instant}; // External imports use itertools::Itertools; // Workspace imports @@ -122,7 +122,7 @@ impl<'a, 'c> MempoolSchema<'a, 'c> { .nonce(), }); - metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "load_txs"); + metrics::histogram!("sql.chain.mempool.load_txs", start.elapsed()); Ok(txs.into()) } @@ -210,7 +210,7 @@ impl<'a, 'c> MempoolSchema<'a, 'c> { .await?; } - metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "insert_batch"); + metrics::histogram!("sql.chain.mempool.insert_batch", start.elapsed()); Ok(batch_id) } @@ -238,7 +238,7 @@ impl<'a, 'c> MempoolSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "insert_tx"); + metrics::histogram!("sql.chain.mempool.insert_tx", start.elapsed()); Ok(()) } @@ -254,7 +254,7 @@ impl<'a, 'c> MempoolSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "remove_tx"); + metrics::histogram!("sql.chain.mempool.remove_tx", start.elapsed()); Ok(()) } @@ -270,10 +270,53 @@ impl<'a, 'c> MempoolSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "remove_txs"); + metrics::histogram!("sql.chain.mempool.remove_txs", start.elapsed()); Ok(()) } + /// Checks if the memory pool contains transaction with the given hash. + pub async fn contains_tx(&mut self, tx_hash: TxHash) -> QueryResult { + let start = Instant::now(); + + let tx_hash = hex::encode(tx_hash.as_ref()); + + let row = sqlx::query!( + "SELECT count(*) from mempool_txs + WHERE tx_hash = $1", + &tx_hash + ) + .fetch_one(self.0.conn()) + .await? + .count; + + let contains = row.filter(|&counter| counter > 0).is_some(); + + metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "contains_tx"); + Ok(contains) + } + + /// Returns zkSync transaction with thr given hash. 
+ pub async fn get_tx(&mut self, tx_hash: TxHash) -> QueryResult> { + let start = Instant::now(); + + let tx_hash = hex::encode(tx_hash.as_ref()); + + let mempool_tx = sqlx::query_as!( + MempoolTx, + "SELECT * from mempool_txs + WHERE tx_hash = $1", + &tx_hash + ) + .fetch_optional(self.0.conn()) + .await?; + + metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "get_tx"); + mempool_tx + .map(SignedZkSyncTx::try_from) + .transpose() + .map_err(anyhow::Error::from) + } + /// Removes transactions that are already committed. /// Though it's unlikely that mempool schema will ever contain a committed /// transaction, it's better to ensure that we won't process the same transaction @@ -320,7 +363,7 @@ impl<'a, 'c> MempoolSchema<'a, 'c> { self.remove_txs(&tx_hashes_to_remove).await?; - metrics::histogram!("sql.chain", start.elapsed(), "mempool" => "collect_garbage"); + metrics::histogram!("sql.chain.mempool.collect_garbage", start.elapsed()); Ok(()) } } diff --git a/core/lib/storage/src/chain/mempool/records.rs b/core/lib/storage/src/chain/mempool/records.rs index 99a3fd109e..29106dcc77 100644 --- a/core/lib/storage/src/chain/mempool/records.rs +++ b/core/lib/storage/src/chain/mempool/records.rs @@ -1,7 +1,13 @@ +// Built-in deps +use std::convert::TryFrom; + // External imports use chrono::{DateTime, Utc}; use sqlx::FromRow; + // Workspace imports +use zksync_types::SignedZkSyncTx; + // Local imports #[derive(Debug, FromRow)] @@ -13,3 +19,17 @@ pub struct MempoolTx { pub eth_sign_data: Option, pub batch_id: i64, } + +impl TryFrom for SignedZkSyncTx { + type Error = serde_json::Error; + + fn try_from(value: MempoolTx) -> Result { + Ok(Self { + tx: serde_json::from_value(value.tx)?, + eth_sign_data: value + .eth_sign_data + .map(serde_json::from_value) + .transpose()?, + }) + } +} diff --git a/core/lib/storage/src/chain/operations/mod.rs b/core/lib/storage/src/chain/operations/mod.rs index 992d10a900..9048abff23 100644 --- a/core/lib/storage/src/chain/operations/mod.rs +++ b/core/lib/storage/src/chain/operations/mod.rs @@ -39,7 +39,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { .max .unwrap_or(0); - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "get_last_block_by_action"); + metrics::histogram!( + "sql.chain.operations.get_last_block_by_action", + start.elapsed() + ); Ok(max_block as BlockNumber) } @@ -60,7 +63,7 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { .ok() .flatten(); - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "get_operation"); + metrics::histogram!("sql.chain.operations.get_operation", start.elapsed()); result } @@ -77,7 +80,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "get_executed_operation"); + metrics::histogram!( + "sql.chain.operations.get_executed_operation", + start.elapsed() + ); Ok(op) } @@ -94,7 +100,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "get_executed_priority_operation"); + metrics::histogram!( + "sql.chain.operations.get_executed_priority_operation", + start.elapsed() + ); Ok(op) } @@ -111,7 +120,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "get_executed_priority_operation_by_hash"); + metrics::histogram!( + "sql.chain.operations.get_executed_priority_operation_by_hash", + start.elapsed() + ); 
Ok(op) } @@ -129,7 +141,7 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { ) .fetch_one(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "store_operation"); + metrics::histogram!("sql.chain.operations.store_operation", start.elapsed()); Ok(op) } @@ -149,7 +161,7 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { ) .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "confirm_operation"); + metrics::histogram!("sql.chain.operations.confirm_operation", start.elapsed()); Ok(()) } @@ -222,7 +234,7 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { }; transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "store_executed_tx"); + metrics::histogram!("sql.chain.operations.store_executed_tx", start.elapsed()); Ok(()) } @@ -249,7 +261,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { ) .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "store_executed_priority_op"); + metrics::histogram!( + "sql.chain.operations.store_executed_priority_op", + start.elapsed() + ); Ok(()) } @@ -284,7 +299,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { ) .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "add_pending_withdrawal"); + metrics::histogram!( + "sql.chain.operations.add_pending_withdrawal", + start.elapsed() + ); Ok(()) } @@ -305,7 +323,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { ) .execute(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "add_complete_withdrawals_transaction"); + metrics::histogram!( + "sql.chain.operations.add_complete_withdrawals_transaction", + start.elapsed() + ); Ok(()) } @@ -355,7 +376,10 @@ impl<'a, 'c> OperationsSchema<'a, 'c> { None => None, }; - metrics::histogram!("sql.chain", start.elapsed(), "operations" => "eth_tx_for_withdrawal"); + metrics::histogram!( + "sql.chain.operations.eth_tx_for_withdrawal", + start.elapsed() + ); Ok(res) } diff --git a/core/lib/storage/src/chain/operations_ext/mod.rs b/core/lib/storage/src/chain/operations_ext/mod.rs index ae2282c335..278916c0f3 100644 --- a/core/lib/storage/src/chain/operations_ext/mod.rs +++ b/core/lib/storage/src/chain/operations_ext/mod.rs @@ -63,7 +63,7 @@ impl<'a, 'c> OperationsExtSchema<'a, 'c> { Ok(None) }; - metrics::histogram!("sql.chain", start.elapsed(), "operations_ext" => "tx_receipt"); + metrics::histogram!("sql.chain.operations_ext.tx_receipt", start.elapsed()); result } @@ -98,7 +98,10 @@ impl<'a, 'c> OperationsExtSchema<'a, 'c> { }), }; - metrics::histogram!("sql.chain", start.elapsed(), "operations_ext" => "get_priority_op_receipt"); + metrics::histogram!( + "sql.chain.operations_ext.get_priority_op_receipt", + start.elapsed() + ); result } @@ -210,7 +213,7 @@ impl<'a, 'c> OperationsExtSchema<'a, 'c> { None }; - metrics::histogram!("sql.chain", start.elapsed(), "operations_ext" => "find_tx_by_hash"); + metrics::histogram!("sql.chain.operations_ext.find_tx_by_hash", start.elapsed()); Ok(result) } @@ -290,7 +293,10 @@ impl<'a, 'c> OperationsExtSchema<'a, 'c> { None }; - metrics::histogram!("sql.chain", start.elapsed(), "operations_ext" => "find_priority_op_by_hash"); + metrics::histogram!( + "sql.chain.operations_ext.find_priority_op_by_hash", + start.elapsed() + ); Ok(result) } @@ -339,7 +345,10 @@ impl<'a, 'c> OperationsExtSchema<'a, 'c> { .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql.chain", start.elapsed(), "operations_ext" => 
"account_created_on"); + metrics::histogram!( + "sql.chain.operations_ext.account_created_on", + start.elapsed() + ); Ok(first_history_entry.map(|entry| entry.created_at)) } @@ -479,7 +488,10 @@ impl<'a, 'c> OperationsExtSchema<'a, 'c> { } } - metrics::histogram!("sql.chain", start.elapsed(), "operations_ext" => "get_account_transactions_history"); + metrics::histogram!( + "sql.chain.operations_ext.get_account_transactions_history", + start.elapsed() + ); Ok(tx_history) } @@ -648,7 +660,10 @@ impl<'a, 'c> OperationsExtSchema<'a, 'c> { } } - metrics::histogram!("sql.chain", start.elapsed(), "operations_ext" => "get_account_transactions_history_from"); + metrics::histogram!( + "sql.chain.operations_ext.get_account_transactions_history_from", + start.elapsed() + ); Ok(tx_history) } } diff --git a/core/lib/storage/src/chain/state/mod.rs b/core/lib/storage/src/chain/state/mod.rs index aec04b2e58..adabf65fa1 100644 --- a/core/lib/storage/src/chain/state/mod.rs +++ b/core/lib/storage/src/chain/state/mod.rs @@ -161,7 +161,7 @@ impl<'a, 'c> StateSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "state" => "commit_state_update"); + metrics::histogram!("sql.chain.state.commit_state_update", start.elapsed()); Ok(()) } @@ -312,7 +312,7 @@ impl<'a, 'c> StateSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "state" => "apply_state_update"); + metrics::histogram!("sql.chain.state.apply_state_update", start.elapsed()); Ok(()) } @@ -350,7 +350,7 @@ impl<'a, 'c> StateSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "state" => "load_committed_state"); + metrics::histogram!("sql.chain.state.load_committed_state", start.elapsed()); result } @@ -400,7 +400,7 @@ impl<'a, 'c> StateSchema<'a, 'c> { } transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "state" => "load_verified_state"); + metrics::histogram!("sql.chain.state.load_verified_state", start.elapsed()); Ok((last_block, account_map)) } @@ -531,7 +531,7 @@ impl<'a, 'c> StateSchema<'a, 'c> { }; transaction.commit().await?; - metrics::histogram!("sql.chain", start.elapsed(), "state" => "load_state_diff"); + metrics::histogram!("sql.chain.state.load_state_diff", start.elapsed()); // We don't want to return an empty list to avoid the confusion, so return // `None` if there are no changes. 
@@ -553,7 +553,7 @@ impl<'a, 'c> StateSchema<'a, 'c> { .await .map(|diff| diff.unwrap_or_default().1); - metrics::histogram!("sql.chain", start.elapsed(), "state" => "load_state_diff"); + metrics::histogram!("sql.chain.state.load_state_diff", start.elapsed()); result } } diff --git a/core/lib/storage/src/chain/stats.rs b/core/lib/storage/src/chain/stats.rs index 3defc4f92a..652cc99ae8 100644 --- a/core/lib/storage/src/chain/stats.rs +++ b/core/lib/storage/src/chain/stats.rs @@ -24,7 +24,7 @@ impl<'a, 'c> StatsSchema<'a, 'c> { .count .unwrap_or(0); - metrics::histogram!("sql.chain", start.elapsed(), "stats" => "count_outstanding_proofs"); + metrics::histogram!("sql.chain.stats.count_outstanding_proofs", start.elapsed()); Ok(count as u32) } @@ -44,7 +44,7 @@ impl<'a, 'c> StatsSchema<'a, 'c> { .count .unwrap_or(0); - metrics::histogram!("sql.chain", start.elapsed(), "stats" => "count_total_transactions"); + metrics::histogram!("sql.chain.stats.count_total_transactions", start.elapsed()); Ok((count_tx + prior_ops) as u32) } } diff --git a/core/lib/storage/src/data_restore/mod.rs b/core/lib/storage/src/data_restore/mod.rs index 71a22b5259..18564199af 100644 --- a/core/lib/storage/src/data_restore/mod.rs +++ b/core/lib/storage/src/data_restore/mod.rs @@ -122,7 +122,7 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { } }) .collect(); - metrics::histogram!("sql", start.elapsed(), "data_restore" => "load_rollup_ops_blocks"); + metrics::histogram!("sql.data_restore.load_rollup_ops_blocks", start.elapsed()); Ok(ops_blocks) } @@ -145,7 +145,10 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .await?; transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "update_last_watched_block_number"); + metrics::histogram!( + "sql.data_restore.update_last_watched_block_number", + start.elapsed() + ); Ok(()) } @@ -161,7 +164,10 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .fetch_one(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "load_last_watched_block_number"); + metrics::histogram!( + "sql.data_restore.load_last_watched_block_number", + start.elapsed() + ); Ok(stored) } @@ -227,7 +233,7 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .update_storage_state(new_state) .await?; transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "save_rollup_ops"); + metrics::histogram!("sql.data_restore.save_rollup_ops", start.elapsed()); Ok(()) } @@ -253,7 +259,7 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "initialize_eth_stats"); + metrics::histogram!("sql.data_restore.initialize_eth_stats", start.elapsed()); Ok(()) } @@ -269,7 +275,7 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .fetch_all(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "load_events_state"); + metrics::histogram!("sql.data_restore.load_events_state", start.elapsed()); Ok(events) } @@ -291,7 +297,7 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .fetch_one(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "load_storage_state"); + metrics::histogram!("sql.data_restore.load_storage_state", start.elapsed()); Ok(state) } @@ -310,7 +316,7 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .await?; transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "update_storage_state"); + metrics::histogram!("sql.data_restore.update_storage_state", start.elapsed()); Ok(()) } @@ 
-333,7 +339,7 @@ impl<'a, 'c> DataRestoreSchema<'a, 'c> { .await?; } transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "data_restore" => "update_block_events"); + metrics::histogram!("sql.data_restore.update_block_events", start.elapsed()); Ok(()) } } diff --git a/core/lib/storage/src/ethereum/mod.rs b/core/lib/storage/src/ethereum/mod.rs index 9b7476100e..8be8bca81a 100644 --- a/core/lib/storage/src/ethereum/mod.rs +++ b/core/lib/storage/src/ethereum/mod.rs @@ -117,7 +117,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "load_unconfirmed_operations"); + metrics::histogram!("sql.ethereum.load_unconfirmed_operations", start.elapsed()); Ok(ops) } @@ -156,7 +156,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "load_unprocessed_operations"); + metrics::histogram!("sql.ethereum.load_unprocessed_operations", start.elapsed()); Ok(operations) } @@ -230,7 +230,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "save_new_eth_tx"); + metrics::histogram!("sql.ethereum.save_new_eth_tx", start.elapsed()); Ok(response) } @@ -245,7 +245,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { .fetch_one(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "get_eth_op_id"); + metrics::histogram!("sql.ethereum.get_eth_op_id", start.elapsed()); Ok(hash_entry.eth_op_id) } @@ -260,7 +260,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { ) .execute(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "add_hash_entry"); + metrics::histogram!("sql.ethereum.add_hash_entry", start.elapsed()); Ok(()) } @@ -286,7 +286,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "update_eth_tx"); + metrics::histogram!("sql.ethereum.update_eth_tx", start.elapsed()); Ok(()) } @@ -322,7 +322,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "report_created_operation"); + metrics::histogram!("sql.ethereum.report_created_operation", start.elapsed()); Ok(()) } @@ -353,7 +353,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "update_gas_price"); + metrics::histogram!("sql.ethereum.update_gas_price", start.elapsed()); Ok(()) } @@ -426,7 +426,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "confirm_eth_tx"); + metrics::histogram!("sql.ethereum.confirm_eth_tx", start.elapsed()); Ok(()) } @@ -457,7 +457,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { transaction.commit().await?; - metrics::histogram!("sql", start.elapsed(), "ethereum" => "get_next_nonce"); + metrics::histogram!("sql.ethereum.get_next_nonce", start.elapsed()); Ok(old_nonce_value) } @@ -499,7 +499,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> { .await?; } - metrics::histogram!("sql", start.elapsed(), "ethereum" => "initialize_eth_data"); + metrics::histogram!("sql.ethereum.initialize_eth_data", start.elapsed()); Ok(()) } diff --git a/core/lib/storage/src/test_data.rs b/core/lib/storage/src/test_data.rs index 40f2e4b887..215db8f22a 100644 --- a/core/lib/storage/src/test_data.rs +++ b/core/lib/storage/src/test_data.rs @@ -103,7 +103,7 @@ pub fn 
gen_operation_with_txs( } /// Generates EthSignData for testing (not a valid signature) -pub fn gen_eth_sing_data(message: String) -> EthSignData { +pub fn gen_eth_sign_data(message: String) -> EthSignData { let keypair = Random.generate(); let private_key = keypair.secret(); diff --git a/core/lib/storage/src/tests/chain/block.rs b/core/lib/storage/src/tests/chain/block.rs index 582880e4a9..7a80fbf5b7 100644 --- a/core/lib/storage/src/tests/chain/block.rs +++ b/core/lib/storage/src/tests/chain/block.rs @@ -4,13 +4,14 @@ use zksync_crypto::{convert::FeConvert, rand::XorShiftRng}; use zksync_types::{ ethereum::OperationType, helpers::apply_updates, AccountMap, AccountUpdate, AccountUpdates, - Action, BlockNumber, + Action, ActionType, BlockNumber, }; // Local imports use super::utils::{get_operation, get_operation_with_txs}; use crate::{ chain::{ block::{records::BlockDetails, BlockSchema}, + operations::records::NewOperation, state::StateSchema, }, ethereum::EthereumSchema, @@ -847,79 +848,110 @@ async fn test_unproven_block_query(mut storage: StorageProcessor<'_>) -> QueryRe todo!() } -// TODO: Restore this test (#1125). -// /// Here we create blocks and publish proofs for them in different order -// #[db_test] -// async fn test_operations_counter(mut storage: StorageProcessor<'_>) -> QueryResult<()> { -// let _ = env_logger::try_init(); - -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::COMMIT, false).await?, -// 0 -// ); -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::VERIFY, false).await?, -// 0 -// ); -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::COMMIT, true).await?, -// 0 -// ); -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::VERIFY, true).await?, -// 0 -// ); - -// for (block_number, action) in &[ -// (1, ActionType::COMMIT), -// (2, ActionType::COMMIT), -// (3, ActionType::COMMIT), -// (4, ActionType::COMMIT), -// (1, ActionType::VERIFY), -// (2, ActionType::VERIFY), -// ] { -// diesel::insert_into(operations::table) -// .values(NewOperation { -// block_number: *block_number, -// action_type: action.to_string(), -// }) -// .execute(conn.conn()) -// .expect("operation creation failed"); -// } - -// for (block, action) in &[ -// (1, ActionType::COMMIT), -// (2, ActionType::COMMIT), -// (3, ActionType::COMMIT), -// (1, ActionType::VERIFY), -// (2, ActionType::VERIFY), -// ] { -// diesel::update( -// operations::table -// .filter(operations::block_number.eq(block)) -// .filter(operations::action_type.eq(action.to_string())), -// ) -// .set(operations::confirmed.eq(true)) -// .execute(conn.conn()) -// .expect("operation update failed"); -// } - -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::COMMIT, false).await?, -// 1 -// ); -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::VERIFY, false).await?, -// 0 -// ); -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::COMMIT, true).await?, -// 3 -// ); -// assert_eq!( -// BlockSchema(&mut storage).count_operations(ActionType::VERIFY, true).await?, -// 2 -// ); - -// Ok(()) -// } +/// Check that operations are counted correctly. +#[db_test] +async fn test_operations_counter(mut storage: StorageProcessor<'_>) -> QueryResult<()> { + // Expect no operations stored. 
+ assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::COMMIT, false) + .await?, + 0 + ); + assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::VERIFY, false) + .await?, + 0 + ); + assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::COMMIT, true) + .await?, + 0 + ); + assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::VERIFY, true) + .await?, + 0 + ); + + // Store new operations. + for (block_number, action) in &[ + (1, ActionType::COMMIT), + (2, ActionType::COMMIT), + (3, ActionType::COMMIT), + (4, ActionType::COMMIT), + (1, ActionType::VERIFY), + (2, ActionType::VERIFY), + ] { + storage + .chain() + .operations_schema() + .store_operation(NewOperation { + block_number: *block_number, + action_type: action.to_string(), + }) + .await?; + } + + // Set all of them confirmed except one. + for (block_number, action) in &[ + (1, ActionType::COMMIT), + (2, ActionType::COMMIT), + (3, ActionType::COMMIT), + (1, ActionType::VERIFY), + (2, ActionType::VERIFY), + ] { + storage + .chain() + .operations_schema() + .confirm_operation(*block_number, *action) + .await?; + } + + // We have one unconfirmed COMMIT operation, the rest is confirmed. + assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::COMMIT, false) + .await?, + 1 + ); + assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::VERIFY, false) + .await?, + 0 + ); + assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::COMMIT, true) + .await?, + 3 + ); + assert_eq!( + storage + .chain() + .block_schema() + .count_operations(ActionType::VERIFY, true) + .await?, + 2 + ); + + Ok(()) +} diff --git a/core/lib/storage/src/tests/chain/mempool.rs b/core/lib/storage/src/tests/chain/mempool.rs index 5767588f16..76475169f8 100644 --- a/core/lib/storage/src/tests/chain/mempool.rs +++ b/core/lib/storage/src/tests/chain/mempool.rs @@ -287,3 +287,53 @@ async fn collect_garbage(mut storage: StorageProcessor<'_>) -> QueryResult<()> { Ok(()) } + +/// Checks that memory pool contains previously inserted transaction. +#[db_test] +async fn contains_and_get_tx(mut storage: StorageProcessor<'_>) -> QueryResult<()> { + let txs = gen_transfers(5); + + // Make sure that the mempool responds that these transactions are missing. + for tx in &txs { + let tx_hash = tx.hash(); + + assert_eq!( + MempoolSchema(&mut storage).contains_tx(tx_hash).await?, + false + ); + assert!(MempoolSchema(&mut storage).get_tx(tx_hash).await?.is_none()); + } + + // Submit transactions. + { + let single_tx = &txs[0]; + + let batch = &txs[1..]; + let batch_signature = Some(get_eth_sign_data("test message".to_owned()).signature); + + let mut mempool = MempoolSchema(&mut storage); + mempool.insert_tx(single_tx).await?; + mempool.insert_batch(batch, batch_signature).await?; + } + + // Make sure that the memory pool now responds that these transactions exist. + for tx in &txs { + let tx_hash = tx.hash(); + + assert_eq!( + MempoolSchema(&mut storage).contains_tx(tx_hash).await?, + true + ); + assert_eq!( + MempoolSchema(&mut storage) + .get_tx(tx_hash) + .await? 
+ .as_ref() + .unwrap() + .hash(), + tx_hash, + ); + } + + Ok(()) +} diff --git a/core/lib/storage/src/tests/chain/utils.rs b/core/lib/storage/src/tests/chain/utils.rs index 4295b30a54..940c629a31 100644 --- a/core/lib/storage/src/tests/chain/utils.rs +++ b/core/lib/storage/src/tests/chain/utils.rs @@ -1,5 +1,5 @@ // Reexports for compatibility with the existing code. pub use crate::test_data::{ - gen_acc_random_updates as acc_create_random_updates, gen_eth_sing_data as get_eth_sign_data, + gen_acc_random_updates as acc_create_random_updates, gen_eth_sign_data as get_eth_sign_data, gen_operation as get_operation, gen_operation_with_txs as get_operation_with_txs, }; diff --git a/core/lib/storage/src/tokens/mod.rs b/core/lib/storage/src/tokens/mod.rs index a2272289ab..6ea0a6fb38 100644 --- a/core/lib/storage/src/tokens/mod.rs +++ b/core/lib/storage/src/tokens/mod.rs @@ -41,7 +41,7 @@ impl<'a, 'c> TokensSchema<'a, 'c> { .execute(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "token" => "store_token"); + metrics::histogram!("sql.token.store_token", start.elapsed()); Ok(()) } @@ -68,7 +68,7 @@ impl<'a, 'c> TokensSchema<'a, 'c> { }) .collect()); - metrics::histogram!("sql", start.elapsed(), "token" => "load_tokens"); + metrics::histogram!("sql.token.load_tokens", start.elapsed()); result } @@ -84,7 +84,7 @@ impl<'a, 'c> TokensSchema<'a, 'c> { .await? .count; - metrics::histogram!("sql", start.elapsed(), "token" => "get_count"); + metrics::histogram!("sql.token.get_count", start.elapsed()); Ok(tokens_count) } @@ -133,7 +133,7 @@ impl<'a, 'c> TokensSchema<'a, 'c> { } }; - metrics::histogram!("sql", start.elapsed(), "token" => "get_token"); + metrics::histogram!("sql.token.get_token", start.elapsed()); Ok(db_token.map(|t| t.into())) } @@ -154,7 +154,7 @@ impl<'a, 'c> TokensSchema<'a, 'c> { .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "token" => "get_historical_ticker_price"); + metrics::histogram!("sql.token.get_historical_ticker_price", start.elapsed()); Ok(db_price.map(|p| p.into())) } @@ -180,7 +180,7 @@ impl<'a, 'c> TokensSchema<'a, 'c> { .fetch_optional(self.0.conn()) .await?; - metrics::histogram!("sql", start.elapsed(), "token" => "update_historical_ticker_price"); + metrics::histogram!("sql.token.update_historical_ticker_price", start.elapsed()); Ok(()) } } diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index f64a2ad241..1484fda98f 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -30,7 +30,9 @@ parity-crypto = {version = "0.6.2", features = ["publickey"] } ethabi = "12.0.0" [dev-dependencies] +lazy_static = "1.4.0" criterion = "0.3.0" +web3 = "0.13.0" secp256k1 = {version = "0.17.2", features = ["std", "recovery"] } [[bench]] diff --git a/core/lib/types/README.md b/core/lib/types/README.md index 4eb262cde6..6e1ab80d25 100644 --- a/core/lib/types/README.md +++ b/core/lib/types/README.md @@ -17,7 +17,7 @@ zkSync operations are split into the following categories: ## License -`zksync_models` is a part of zkSync stack, which is distributed under the terms of both the MIT license and the Apache +`zksync_types` is a part of zkSync stack, which is distributed under the terms of both the MIT license and the Apache License (Version 2.0). See [LICENSE-APACHE](../../LICENSE-APACHE), [LICENSE-MIT](../../LICENSE-MIT) for details. 
diff --git a/core/lib/types/src/account/account_update.rs b/core/lib/types/src/account/account_update.rs
index 99d3badd08..b888c8f15c 100644
--- a/core/lib/types/src/account/account_update.rs
+++ b/core/lib/types/src/account/account_update.rs
@@ -13,6 +13,7 @@ pub enum AccountUpdate {
     Create { address: Address, nonce: Nonce },
     /// Delete an existing account.
     /// Note: Currently this kind of update is not used directly in the network.
+    /// However, it is used to revert previously made operations (e.g. to restore the state back in time from the last verified block).
     Delete { address: Address, nonce: Nonce },
     /// Change the account balance.
     UpdateBalance {
diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs
index 6afe1d518e..6c4da7fe53 100644
--- a/core/lib/types/src/lib.rs
+++ b/core/lib/types/src/lib.rs
@@ -72,6 +72,7 @@ pub use zksync_basic_types::*;
 pub type AccountMap = zksync_crypto::fnv::FnvHashMap<u32, Account>;
 pub type AccountUpdates = Vec<(u32, AccountUpdate)>;
 pub type AccountTree = SparseMerkleTree<Account, Fr, RescueHasher<Engine>>;
+pub type SerialId = u64;
 
 use crate::block::Block;
 pub use zksync_crypto::{
diff --git a/core/lib/types/src/priority_ops/mod.rs b/core/lib/types/src/priority_ops/mod.rs
index b5d0812909..68226da56d 100644
--- a/core/lib/types/src/priority_ops/mod.rs
+++ b/core/lib/types/src/priority_ops/mod.rs
@@ -15,7 +15,10 @@ use zksync_crypto::params::{
 use zksync_crypto::primitives::FromBytes;
 use zksync_utils::BigUintSerdeAsRadix10Str;
 
-use super::operations::{DepositOp, FullExitOp};
+use super::{
+    operations::{DepositOp, FullExitOp},
+    SerialId,
+};
 
 /// Deposit priority operation transfers funds from the L1 account to the desired L2 account.
 /// If the target L2 account didn't exist at the moment of the operation execution, a new
@@ -160,12 +163,12 @@ impl ZkSyncPriorityOp {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct PriorityOp {
     /// Unique ID of the priority operation.
-    pub serial_id: u64,
+    pub serial_id: SerialId,
     /// Priority operation.
     pub data: ZkSyncPriorityOp,
     /// Ethereum deadline block until which operation must be processed.
     pub deadline_block: u64,
-    /// Hash of the corresponding Ethereum transaction.
+    /// Hash of the corresponding Ethereum transaction. Size should be 32 bytes.
     pub eth_hash: Vec<u8>,
     /// Block in which Ethereum transaction was included.
     pub eth_block: u64,
diff --git a/core/lib/types/src/tests/hardcoded.rs b/core/lib/types/src/tests/hardcoded.rs
new file mode 100644
index 0000000000..b2a18e9354
--- /dev/null
+++ b/core/lib/types/src/tests/hardcoded.rs
@@ -0,0 +1,403 @@
+//! Tests strictly validating the output for hardcoded input data.
+//!
+//! These tests only cover code that is not expected to change; if it does change, that may be a critical (breaking) change.
+//!
+//! If these tests stop passing after some code change, do not just update the expected
+//! answers for the tests -- be sure to notify the team about the changes introduced!
+
+use num::BigUint;
+use std::convert::TryFrom;
+use std::str::FromStr;
+use web3::types::Bytes;
+
+use crate::{
+    account::PubKeyHash,
+    operations::{
+        ChangePubKeyOp, DepositOp, ForcedExitOp, FullExitOp, NoopOp, TransferOp, TransferToNewOp,
+        WithdrawOp,
+    },
+    priority_ops::{Deposit, FullExit},
+    tx::{ChangePubKey, ForcedExit, PackedEthSignature, Transfer, Withdraw},
+    Log, PriorityOp,
+};
+use lazy_static::lazy_static;
+use zksync_basic_types::{Address, H256};
+
+#[cfg(test)]
+pub mod operations_test {
+    use super::*;
+    // Public data parameters; using them, we can restore a `ZkSyncOp`.
+ const NOOP_PUBLIC_DATA: &str = "000000000000000000"; + const DEPOSIT_PUBLIC_DATA: &str = "010000002a002a0000000000000000000000000000002a21abaed8712072e918632259780e587698ef58da0000000000000000000000"; + const TRANSFER_TO_NEW_PUBLIC_DATA: &str = "0200000001002a000000054021abaed8712072e918632259780e587698ef58da00000002054000000000000000000000000000000000"; + const WITHDRAW_PUBLIC_DATA: &str = "030000002a002a0000000000000000000000000000002a054021abaed8712072e918632259780e587698ef58da000000000000000000"; + const TRANSFER_PUBLIC_DATA: &str = "0500000001002a0000000200000005400540"; + const FULL_EXIT_PUBLIC_DATA: &str = "060000002a2a0a81e257a2f5d6ed4f07b81dbda09f107bd026002a000000000000000000000000000000000000000000000000000000"; + const CHANGE_PUBKEY_PUBLIC_DATA: &str = "070000002a3cfb9a39096d9e02b24187355f628f9a6331511b2a0a81e257a2f5d6ed4f07b81dbda09f107bd0260000002a002a054000"; + const FORCED_EXIT_PUBLIC_DATA: &str = "080000002a0000002a002a0000000000000000000000000000000005402a0a81e257a2f5d6ed4f07b81dbda09f107bd0260000000000"; + + #[test] + fn test_public_data_conversions_noop() { + let expected_op = NoopOp {}; + + assert_eq!(hex::encode(expected_op.get_public_data()), NOOP_PUBLIC_DATA); + } + + #[test] + fn test_public_data_conversions_deposit() { + let expected_op = { + let priority_op = Deposit { + from: Address::from_str("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026").unwrap(), + to: Address::from_str("21abaed8712072e918632259780e587698ef58da").unwrap(), + token: 42, + amount: BigUint::from(42u32), + }; + let account_id = 42u32; + + DepositOp { + priority_op, + account_id, + } + }; + + assert_eq!( + hex::encode(expected_op.get_public_data()), + DEPOSIT_PUBLIC_DATA + ); + } + + #[test] + fn test_public_data_conversions_transfer() { + let (expected_transfer, expected_transfer_to_new) = { + let tx = Transfer::new( + 42, + Address::from_str("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026").unwrap(), + Address::from_str("21abaed8712072e918632259780e587698ef58da").unwrap(), + 42, + BigUint::from(42u32), + BigUint::from(42u32), + 42, + None, + ); + let (from, to) = (1u32, 2u32); + + ( + TransferOp { + tx: tx.clone(), + from, + to, + }, + TransferToNewOp { tx, from, to }, + ) + }; + + assert_eq!( + hex::encode(expected_transfer.get_public_data()), + TRANSFER_PUBLIC_DATA + ); + assert_eq!( + hex::encode(expected_transfer_to_new.get_public_data()), + TRANSFER_TO_NEW_PUBLIC_DATA + ); + } + + #[test] + fn test_public_data_conversions_withdraw() { + let expected_op = { + let tx = Withdraw::new( + 42, + Address::from_str("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026").unwrap(), + Address::from_str("21abaed8712072e918632259780e587698ef58da").unwrap(), + 42, + BigUint::from(42u32), + BigUint::from(42u32), + 42, + None, + ); + let account_id = 42u32; + + WithdrawOp { tx, account_id } + }; + + assert_eq!( + hex::encode(expected_op.get_public_data()), + WITHDRAW_PUBLIC_DATA + ); + } + + #[test] + fn test_public_data_conversions_full_exit() { + let expected_op = { + let priority_op = FullExit { + eth_address: Address::from_str("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026").unwrap(), + account_id: 42, + token: 42, + }; + + FullExitOp { + priority_op, + withdraw_amount: None, + } + }; + + assert_eq!( + hex::encode(expected_op.get_public_data()), + FULL_EXIT_PUBLIC_DATA + ); + } + + #[test] + fn test_public_data_conversions_change_pubkey() { + let expected_op = { + let tx = ChangePubKey::new( + 42, + Address::from_str("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026").unwrap(), + 
PubKeyHash::from_hex("sync:3cfb9a39096d9e02b24187355f628f9a6331511b").unwrap(), + 42, + BigUint::from(42u32), + 42, + None, + Some(PackedEthSignature::deserialize_packed( + &hex::decode("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026dbda09f107bd026f5d6ed4f02a0a81e257a2f5d6ed4f07b81dbda09f107bd026dbda09f107bd026f5d6ed4f0d4").unwrap(), + ).unwrap()), + ); + let account_id = 42u32; + + ChangePubKeyOp { tx, account_id } + }; + + assert_eq!( + hex::encode(expected_op.get_public_data()), + CHANGE_PUBKEY_PUBLIC_DATA + ); + } + + #[test] + fn test_public_data_conversions_forced_exit() { + let expected_op = { + let tx = ForcedExit::new( + 42, + Address::from_str("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026").unwrap(), + 42, + BigUint::from(42u32), + 42, + None, + ); + let target_account_id = 42u32; + + ForcedExitOp { + tx, + target_account_id, + withdraw_amount: None, + } + }; + + assert_eq!( + hex::encode(expected_op.get_public_data()), + FORCED_EXIT_PUBLIC_DATA + ); + } + + #[test] + fn test_withdrawal_data() { + let (withdraw, forced_exit, full_exit) = ( + WithdrawOp::from_public_data(&hex::decode(WITHDRAW_PUBLIC_DATA).unwrap()).unwrap(), + ForcedExitOp::from_public_data(&hex::decode(FORCED_EXIT_PUBLIC_DATA).unwrap()).unwrap(), + FullExitOp::from_public_data(&hex::decode(FULL_EXIT_PUBLIC_DATA).unwrap()).unwrap(), + ); + + assert_eq!( + hex::encode(withdraw.get_withdrawal_data()), + "0121abaed8712072e918632259780e587698ef58da002a0000000000000000000000000000002a" + ); + assert_eq!( + hex::encode(forced_exit.get_withdrawal_data()), + "012a0a81e257a2f5d6ed4f07b81dbda09f107bd026002a00000000000000000000000000000000" + ); + assert_eq!( + hex::encode(full_exit.get_withdrawal_data()), + "002a0a81e257a2f5d6ed4f07b81dbda09f107bd026002a00000000000000000000000000000000" + ); + } + + #[test] + fn test_eth_witness() { + // TODO: Change pre-defined input / output after merging breaking to dev (#1188). + + let mut change_pubkey = + ChangePubKeyOp::from_public_data(&hex::decode(CHANGE_PUBKEY_PUBLIC_DATA).unwrap()) + .unwrap(); + + change_pubkey.tx.eth_signature = PackedEthSignature::deserialize_packed( + &hex::decode("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026dbda09f107bd026f5d6ed4f02a0a81e257a2f5d6ed4f07b81dbda09f107bd026dbda09f107bd026f5d6ed4f0d4").unwrap(), + ).ok(); + + assert_eq!( + hex::encode(change_pubkey.get_eth_witness()), + "2a0a81e257a2f5d6ed4f07b81dbda09f107bd026dbda09f107bd026f5d6ed4f02a0a81e257a2f5d6ed4f07b81dbda09f107bd026dbda09f107bd026f5d6ed4f0d4" + ); + } +} + +#[cfg(test)] +pub mod tx_conversion_test { + use super::*; + + // General configuration parameters for all types of operations + const ACCOUNT_ID: u32 = 100; + const TOKEN_ID: u16 = 5; + const NONCE: u32 = 20; + lazy_static! 
{ + static ref ALICE: Address = + Address::from_str("2a0a81e257a2f5d6ed4f07b81dbda09f107bd026").unwrap(); + static ref BOB: Address = + Address::from_str("21abaed8712072e918632259780e587698ef58da").unwrap(); + static ref PK_HASH: PubKeyHash = + PubKeyHash::from_hex("sync:3cfb9a39096d9e02b24187355f628f9a6331511b").unwrap(); + static ref AMOUNT: BigUint = BigUint::from(12345678u64); + static ref FEE: BigUint = BigUint::from(1000000u32); + } + + #[test] + fn test_convert_to_bytes_change_pubkey() { + let change_pubkey = ChangePubKey::new( + ACCOUNT_ID, + *ALICE, + (*PK_HASH).clone(), + TOKEN_ID, + (*FEE).clone(), + NONCE, + None, + None, + ); + + let bytes = change_pubkey.get_bytes(); + assert_eq!(hex::encode(bytes), "07000000642a0a81e257a2f5d6ed4f07b81dbda09f107bd0263cfb9a39096d9e02b24187355f628f9a6331511b00057d0300000014"); + } + + #[test] + fn test_convert_to_bytes_transfer() { + let transfer = Transfer::new( + ACCOUNT_ID, + *ALICE, + *BOB, + TOKEN_ID, + (*AMOUNT).clone(), + (*FEE).clone(), + NONCE, + None, + ); + + let bytes = transfer.get_bytes(); + assert_eq!(hex::encode(bytes), "05000000642a0a81e257a2f5d6ed4f07b81dbda09f107bd02621abaed8712072e918632259780e587698ef58da000500178c29c07d0300000014"); + } + + #[test] + fn test_convert_to_bytes_forced_exit() { + let forced_exit = + ForcedExit::new(ACCOUNT_ID, *ALICE, TOKEN_ID, (*FEE).clone(), NONCE, None); + + let bytes = forced_exit.get_bytes(); + assert_eq!( + hex::encode(bytes), + "08000000642a0a81e257a2f5d6ed4f07b81dbda09f107bd02600057d0300000014" + ); + } + + #[test] + fn test_convert_to_bytes_withdraw() { + let withdraw = Withdraw::new( + ACCOUNT_ID, + *ALICE, + *BOB, + TOKEN_ID, + (*AMOUNT).clone(), + (*FEE).clone(), + NONCE, + None, + ); + + let bytes = withdraw.get_bytes(); + assert_eq!(hex::encode(bytes), "03000000642a0a81e257a2f5d6ed4f07b81dbda09f107bd02621abaed8712072e918632259780e587698ef58da000500000000000000000000000000bc614e7d0300000014"); + } +} + +#[test] +fn test_priority_op_from_valid_logs() { + let valid_logs = [ + Log { + address: Address::from_str("bd2ea2073d4efa1a82269800a362f889545983c2").unwrap(), + topics: vec![H256::from_str( + "d0943372c08b438a88d4b39d77216901079eda9ca59d45349841c099083b6830", + ) + .unwrap()], + data: Bytes(vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 97, 92, 243, 73, 215, 246, 52, 72, 145, + 177, 231, 202, 124, 114, 136, 63, 93, 192, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 18, 133, 59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 108, + 107, 147, 91, 139, 189, 64, 0, 0, 111, 183, 165, 210, 134, 53, 93, 80, 193, 119, + 133, 131, 237, 37, 53, 35, 227, 136, 205, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]), + block_hash: Some( + H256::from_str("1de24c5271c2a3ecdc8b56449b479e388a2390be481faad72d48799c93668c42") + .unwrap(), + ), + block_number: Some(1196475.into()), + transaction_hash: Some( + H256::from_str("5319d65d7a60a1544e4b17d2272f00b5d17a68dea4a0a92e40d046f98a8ed6c5") + .unwrap(), + ), + transaction_index: Some(0.into()), + log_index: Some(0.into()), + transaction_log_index: None, + log_type: None, + removed: Some(false), 
+ }, + Log { + address: Address::from_str("bd2ea2073d4efa1a82269800a362f889545983c2").unwrap(), + topics: vec![H256::from_str( + "d0943372c08b438a88d4b39d77216901079eda9ca59d45349841c099083b6830", + ) + .unwrap()], + data: Bytes(vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 54, 97, 92, 243, 73, 215, 246, 52, 72, 145, + 177, 231, 202, 124, 114, 136, 63, 93, 192, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 18, 133, 223, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 37, + 184, 252, 127, 148, 119, 128, 0, 59, 187, 156, 57, 129, 3, 106, 206, 113, 189, 130, + 135, 229, 227, 157, 236, 165, 121, 1, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]), + block_hash: Some( + H256::from_str("6054a4d29cda776d0493805cb8898e0659711532430b1af9844a48e67f5c794f") + .unwrap(), + ), + block_number: Some(1196639.into()), + transaction_hash: Some( + H256::from_str("4ab4002673c2b28853eebb00de588a2f8507d20078f29caef192d8b815acd379") + .unwrap(), + ), + transaction_index: Some(2.into()), + log_index: Some(6.into()), + transaction_log_index: None, + log_type: None, + removed: Some(false), + }, + ]; + + for event in valid_logs.iter() { + let op = PriorityOp::try_from((*event).clone()); + + assert!(op.is_ok()); + } +} diff --git a/core/lib/types/src/tests/mod.rs b/core/lib/types/src/tests/mod.rs index ce3e9fed0b..fdb6d74fac 100644 --- a/core/lib/types/src/tests/mod.rs +++ b/core/lib/types/src/tests/mod.rs @@ -1,2 +1,3 @@ mod block; +mod hardcoded; pub mod utils; diff --git a/core/lib/types/src/tokens.rs b/core/lib/types/src/tokens.rs index d43b3811f4..1c176e2882 100644 --- a/core/lib/types/src/tokens.rs +++ b/core/lib/types/src/tokens.rs @@ -122,7 +122,7 @@ pub struct TokenPrice { } /// Type of transaction fees that exist in the zkSync network. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Hash, Eq)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Hash, Eq)] pub enum TxFeeTypes { /// Fee for the `Withdraw` or `ForcedExit` transaction. 
Withdraw, diff --git a/core/lib/types/src/tx/tests.rs b/core/lib/types/src/tx/tests.rs index cddf8b662e..5ff0b2f222 100644 --- a/core/lib/types/src/tx/tests.rs +++ b/core/lib/types/src/tx/tests.rs @@ -1,6 +1,3 @@ -use num::{BigUint, ToPrimitive}; -use serde::{Deserialize, Serialize}; - use zksync_basic_types::Address; use zksync_crypto::franklin_crypto::{ eddsa::{PrivateKey, PublicKey}, @@ -10,6 +7,9 @@ use zksync_crypto::params::{max_account_id, max_token_id, JUBJUB_PARAMS}; use zksync_crypto::public_key_from_private; use zksync_crypto::rand::{Rng, SeedableRng, XorShiftRng}; +use num::{BigUint, ToPrimitive}; +use serde::{Deserialize, Serialize}; + use super::*; use crate::{ helpers::{pack_fee_amount, pack_token_amount}, @@ -283,3 +283,14 @@ fn eth_sign_data_compatibility() { assert_eq!(deserialized.signature, eth_sign_data.signature); assert_eq!(deserialized.message, eth_sign_data.message); } + +#[test] +fn test_check_signature() { + let (pk, msg) = gen_pk_and_msg(); + let signature = TxSignature::sign_musig(&pk, &msg[1]) + .signature + .serialize_packed() + .unwrap(); + + assert_eq!(hex::encode(signature), "4e3298ac8cc13868dbbc94ad6fb41085ffe05b3c2eee22f88b05e69b7a5126aea723d7a3e7282ef5a32d9479c9c8dde52b3e3c462dd445dcd8158ebb6edb6000"); +} diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index a2c104c615..1c67664fb1 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -12,6 +12,9 @@ categories = ["cryptography"] [dependencies] num = { version = "0.2", features = ["serde"] } bigdecimal = { version = "0.1", features = ["serde"]} -serde = "1.0" +serde = { version = "1.0", features = ["derive"] } anyhow = "1.0" futures = "0.3" + +[dev-dependencies] +serde_json = "1.0.0" diff --git a/core/lib/utils/src/convert.rs b/core/lib/utils/src/convert.rs index dc13616bb6..89f2685964 100644 --- a/core/lib/utils/src/convert.rs +++ b/core/lib/utils/src/convert.rs @@ -31,3 +31,57 @@ pub fn round_precision(num: &Ratio, precision: usize) -> Ratio let numerator = (num * &ten_pow).trunc().to_integer(); Ratio::new(numerator, ten_pow) } + +#[cfg(test)] +mod test { + use super::*; + use std::str::FromStr; + + #[test] + fn test_ratio_to_big_decimal() { + let ratio = Ratio::from_integer(BigUint::from(0u32)); + let dec = ratio_to_big_decimal(&ratio, 1); + assert_eq!(dec.to_string(), "0.0"); + let ratio = Ratio::from_integer(BigUint::from(1234u32)); + let dec = ratio_to_big_decimal(&ratio, 7); + assert_eq!(dec.to_string(), "1234.0000000"); + // 4 divided by 9 is 0.(4). + let ratio = Ratio::new(BigUint::from(4u32), BigUint::from(9u32)); + let dec = ratio_to_big_decimal(&ratio, 12); + assert_eq!(dec.to_string(), "0.444444444444"); + // First 7 decimal digits of pi. + let ratio = Ratio::new(BigUint::from(52163u32), BigUint::from(16604u32)); + let dec = ratio_to_big_decimal(&ratio, 6); + assert_eq!(dec.to_string(), "3.141592"); + } + + #[test] + fn test_big_decimal_to_ratio() { + // Expect unsigned number. 
+        let dec = BigDecimal::from(-1);
+        assert!(big_decimal_to_ratio(&dec).is_err());
+        let expected = Ratio::from_integer(BigUint::from(0u32));
+        let dec = BigDecimal::from(0);
+        let ratio = big_decimal_to_ratio(&dec).unwrap();
+        assert_eq!(ratio, expected);
+        let expected = Ratio::new(BigUint::from(1234567u32), BigUint::from(10000u32));
+        let dec = BigDecimal::from_str("123.4567").unwrap();
+        let ratio = big_decimal_to_ratio(&dec).unwrap();
+        assert_eq!(ratio, expected);
+    }
+
+    #[test]
+    fn test_round_precision() {
+        let ratio = Ratio::new(BigUint::from(4u32), BigUint::from(9u32));
+        let rounded = round_precision(&ratio, 6);
+        assert_eq!(ratio_to_big_decimal(&rounded, 6).to_string(), "0.444444");
+        let ratio = Ratio::new(BigUint::from(355u32), BigUint::from(113u32));
+        let rounded = round_precision(&ratio, 6);
+        assert_eq!(ratio_to_big_decimal(&rounded, 6).to_string(), "3.141592");
+        // 9.87648 with precision of 2 digits is 987 / 100.
+        let ratio = Ratio::new(BigUint::from(123456u32), BigUint::from(12500u32));
+        let rounded = round_precision(&ratio, 2);
+        let expected = Ratio::new(BigUint::from(987u32), BigUint::from(100u32));
+        assert_eq!(rounded, expected);
+    }
+}
diff --git a/core/lib/utils/src/env_tools.rs b/core/lib/utils/src/env_tools.rs
index 073fca5c47..f2ef5a8116 100644
--- a/core/lib/utils/src/env_tools.rs
+++ b/core/lib/utils/src/env_tools.rs
@@ -47,3 +47,25 @@ where
     })
     .ok()
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn test_env_tools() {
+        const KEY: &str = "KEY";
+        // Our test environment variable.
+        env::set_var(KEY, "123");
+        assert_eq!(get_env(KEY), "123");
+        assert_eq!(parse_env::<i32>(KEY), 123);
+        assert_eq!(parse_env_if_exists::<i32>(KEY), Some(123));
+
+        env::remove_var(KEY);
+        assert_eq!(parse_env_if_exists::<i32>(KEY), None);
+
+        env::set_var(KEY, "ABC123");
+        let parsed: i32 = parse_env_with(KEY, |key| &key[3..]);
+        assert_eq!(parsed, 123);
+    }
+}
diff --git a/core/lib/utils/src/serde_wrappers.rs b/core/lib/utils/src/serde_wrappers.rs
index 981b0683ca..851e2ae3f9 100644
--- a/core/lib/utils/src/serde_wrappers.rs
+++ b/core/lib/utils/src/serde_wrappers.rs
@@ -76,3 +76,39 @@ impl From<BigUint> for BigUintSerdeWrapper {
         BigUintSerdeWrapper(uint)
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    /// Tests that the `Ratio` serializer works correctly.
+    #[test]
+    fn test_ratio_serialize_as_decimal() {
+        #[derive(Clone, Serialize, Deserialize)]
+        struct RatioSerdeWrapper(
+            #[serde(with = "UnsignedRatioSerializeAsDecimal")] pub Ratio<BigUint>,
+        );
+        // It's essential that this number is a finite decimal, otherwise the precision will be lost
+        // and the assertion will fail.
+        let expected = RatioSerdeWrapper(Ratio::new(
+            BigUint::from(120315391195132u64),
+            BigUint::from(1250000000u64),
+        ));
+        let value =
+            serde_json::to_value(expected.clone()).expect("cannot serialize Ratio as Decimal");
+        let ratio: RatioSerdeWrapper =
+            serde_json::from_value(value).expect("cannot deserialize Ratio from Decimal");
+        assert_eq!(expected.0, ratio.0);
+    }
+
+    /// Tests that the `BigUint` serializer works correctly.
+    #[test]
+    fn test_serde_big_uint_wrapper() {
+        let expected = BigUint::from(u64::MAX);
+        let wrapper = BigUintSerdeWrapper::from(expected.clone());
+        let value = serde_json::to_value(wrapper).expect("cannot serialize BigUintSerdeWrapper");
+        let uint: BigUintSerdeWrapper =
+            serde_json::from_value(value).expect("cannot deserialize BigUintSerdeWrapper");
+        assert_eq!(uint.0, expected);
+    }
+}
diff --git a/core/tests/loadtest/src/api/rest_api_tests.rs b/core/tests/loadtest/src/api/rest_api_tests.rs
index 1e118e7447..26faf92caf 100644
--- a/core/tests/loadtest/src/api/rest_api_tests.rs
+++ b/core/tests/loadtest/src/api/rest_api_tests.rs
@@ -4,8 +4,8 @@
 // Built-in uses
 // External uses
 // Workspace uses
+use zksync_config::test_config::TestConfig;
 use zksync_types::{tx::TxHash, Address};
-use zksync_utils::parse_env;
 // Local uses
 use super::{ApiDataPool, ApiTestsBuilder};
 use crate::monitor::Monitor;
@@ -217,8 +217,7 @@ macro_rules! declare_tests {
 }
 
 pub fn wire_tests<'a>(builder: ApiTestsBuilder<'a>, monitor: &'a Monitor) -> ApiTestsBuilder<'a> {
-    // TODO: add this field to the ConfigurationOptions (#1116).
-    let rest_api_url = parse_env::<String>("REST_API_ADDR");
+    let rest_api_url = TestConfig::load().api.rest_api_url;
 
     let client = RestApiClient::new(rest_api_url, monitor.api_data_pool.clone());
 
 declare_tests!(
diff --git a/core/tests/testkit/Cargo.toml b/core/tests/testkit/Cargo.toml
index 79b7231344..5c0d7a83c2 100644
--- a/core/tests/testkit/Cargo.toml
+++ b/core/tests/testkit/Cargo.toml
@@ -23,6 +23,7 @@ zksync_contracts = { path = "../../lib/contracts", version = "1.0" }
 zksync_eth_client = { path = "../../lib/eth_client", version = "1.0" }
 zksync_eth_signer = { path = "../../lib/eth_signer", version = "1.0" }
 zksync_test_account = { path = "../test_account", version = "1.0" }
+zksync_data_restore = { path = "../../bin/data_restore", version = "1.0" }
 
 tokio = { version = "0.2", features = ["full"] }
 log = "0.4"
diff --git a/core/tests/testkit/src/bin/revert_blocks_test.rs b/core/tests/testkit/src/bin/revert_blocks_test.rs
index 1af4ef6c3f..993fd85397 100644
--- a/core/tests/testkit/src/bin/revert_blocks_test.rs
+++ b/core/tests/testkit/src/bin/revert_blocks_test.rs
@@ -1,10 +1,15 @@
+use web3::transports::Http;
+
+use zksync_testkit::*;
+use zksync_testkit::{
+    data_restore::verify_restore,
+    scenarios::{perform_basic_operations, BlockProcessing},
+};
+
 use crate::eth_account::{parse_ether, EthereumAccount};
 use crate::external_commands::{deploy_contracts, get_test_accounts, Contracts};
 use crate::zksync_account::ZkSyncAccount;
-use web3::transports::Http;
 use zksync_crypto::Fr;
-use zksync_testkit::scenarios::{perform_basic_operations, BlockProcessing};
-use zksync_testkit::*;
 
 /// Executes blocks with some basic operations with new state keeper
 /// if block_processing is equal to BlockProcessing::NoVerify this should revert all not verified blocks
 async fn execute_blocks_with_new_state_keeper(
@@ -19,6 +24,7 @@ async fn execute_blocks_with_new_state_keeper(
     let (sk_thread_handle, stop_state_keeper_sender, sk_channels) =
         spawn_state_keeper(&fee_account.address);
 
+    let fee_account_address = fee_account.address;
     let transport = Http::new(&testkit_config.web3_url).expect("http transport start");
     let (test_accounts_info, commit_account_info) = get_test_accounts();
     let commit_account = EthereumAccount::new(
@@ -75,6 +81,7 @@ async fn execute_blocks_with_new_state_keeper(
     let deposit_amount = parse_ether("1.0").unwrap();
 
     let mut executed_blocks = Vec::new();
+    let mut tokens = vec![];
     for token in 0..=1 {
         let blocks = perform_basic_operations(
             token,
             &mut test_setup,
             deposit_amount.clone(),
             block_processing,
         )
         .await;
+        tokens.push(token);
         executed_blocks.extend(blocks.into_iter());
     }
 
@@ -102,6 +110,20 @@ async fn execute_blocks_with_new_state_keeper(
         .revert_blocks(&executed_blocks_reverse_order)
         .await
         .expect("revert_blocks call fails");
+    } else {
+        // Do not run the restore check in the reverting case, because there are no valid blocks in the blockchain.
+        println!("Start restoring");
+
+        verify_restore(
+            &testkit_config.web3_url,
+            testkit_config.available_block_chunk_sizes.clone(),
+            &contracts,
+            fee_account_address,
+            test_setup.get_accounts_state().await,
+            tokens,
+            test_setup.last_committed_block.new_root_hash,
+        )
+        .await;
     }
 
     stop_state_keeper_sender.send(()).expect("sk stop send");
diff --git a/core/tests/testkit/src/data_restore.rs b/core/tests/testkit/src/data_restore.rs
new file mode 100644
index 0000000000..b71ca8946c
--- /dev/null
+++ b/core/tests/testkit/src/data_restore.rs
@@ -0,0 +1,52 @@
+use web3::{transports::Http, types::Address};
+
+use zksync_crypto::Fr;
+use zksync_data_restore::{
+    data_restore_driver::DataRestoreDriver, inmemory_storage_interactor::InMemoryStorageInteractor,
+    ETH_BLOCKS_STEP,
+};
+use zksync_types::AccountMap;
+
+use crate::external_commands::Contracts;
+
+pub async fn verify_restore(
+    web3_url: &str,
+    available_block_chunk_sizes: Vec<usize>,
+    contracts: &Contracts,
+    fee_account_address: Address,
+    acc_state_from_test_setup: AccountMap,
+    tokens: Vec<u16>,
+    root_hash: Fr,
+) {
+    let transport = Http::new(web3_url).expect("http transport start");
+
+    let mut interactor = InMemoryStorageInteractor::new();
+    let mut driver = DataRestoreDriver::new(
+        transport,
+        contracts.governance,
+        contracts.contract,
+        ETH_BLOCKS_STEP,
+        0,
+        available_block_chunk_sizes,
+        true,
+        Default::default(),
+    );
+
+    interactor.insert_new_account(0, &fee_account_address);
+    driver.load_state_from_storage(&mut interactor).await;
+    driver.run_state_update(&mut interactor).await;
+
+    assert_eq!(driver.tree_state.root_hash(), root_hash);
+
+    for (id, account) in acc_state_from_test_setup {
+        let driver_acc = driver.tree_state.get_account(id).expect("Should exist");
+        let inter_acc = interactor.get_account(&id).expect("Should exist");
+        for id in &tokens {
+            assert_eq!(driver_acc.address, inter_acc.address);
+            assert_eq!(account.address, inter_acc.address);
+            assert_eq!(driver_acc.get_balance(*id), account.get_balance(*id));
+            assert_eq!(inter_acc.get_balance(*id), account.get_balance(*id));
+        }
+    }
+    println!("Data restore test is ok")
+}
diff --git a/core/tests/testkit/src/lib.rs b/core/tests/testkit/src/lib.rs
index 84da84ae0f..c57760566c 100644
--- a/core/tests/testkit/src/lib.rs
+++ b/core/tests/testkit/src/lib.rs
@@ -5,9 +5,11 @@ pub use self::{
     account_set::AccountSet, state_keeper_utils::spawn_state_keeper, test_setup::TestSetup,
     types::*,
 };
+
 pub use zksync_test_account as zksync_account;
 
 pub mod account_set;
+pub mod data_restore;
 pub mod eth_account;
 pub mod external_commands;
 pub mod scenarios;
diff --git a/core/tests/testkit/src/scenarios.rs b/core/tests/testkit/src/scenarios.rs
index d6be73d695..4f542a298c 100644
--- a/core/tests/testkit/src/scenarios.rs
+++ b/core/tests/testkit/src/scenarios.rs
@@ -1,14 +1,17 @@
 //! Common scenarios used by testkit derivatives.
 
-use crate::eth_account::{parse_ether, EthereumAccount};
-use crate::external_commands::{deploy_contracts, get_test_accounts};
-use crate::state_keeper_utils::spawn_state_keeper;
-use crate::zksync_account::ZkSyncAccount;
-
 use num::BigUint;
 use std::time::Instant;
 use web3::transports::Http;
 
+use crate::{
+    data_restore::verify_restore,
+    eth_account::{parse_ether, EthereumAccount},
+    external_commands::{deploy_contracts, get_test_accounts},
+    state_keeper_utils::spawn_state_keeper,
+    zksync_account::ZkSyncAccount,
+};
+
 use super::*;
 use zksync_types::block::Block;
 
@@ -21,8 +24,9 @@ pub async fn perform_basic_tests() {
     let testkit_config = TestkitConfig::from_env();
 
     let fee_account = ZkSyncAccount::rand();
+    let fee_account_address = fee_account.address;
     let (sk_thread_handle, stop_state_keeper_sender, sk_channels) =
-        spawn_state_keeper(&fee_account.address);
+        spawn_state_keeper(&fee_account_address);
 
     let initial_root = genesis_state(&fee_account.address).tree.root_hash();
 
@@ -36,6 +40,7 @@ pub async fn perform_basic_tests() {
     );
 
     let transport = Http::new(&testkit_config.web3_url).expect("http transport start");
+
     let (test_accounts_info, commit_account_info) = get_test_accounts();
     let commit_account = EthereumAccount::new(
         commit_account_info.private_key,
@@ -90,6 +95,7 @@ pub async fn perform_basic_tests() {
 
     let deposit_amount = parse_ether("1.0").unwrap();
 
+    let mut tokens = vec![];
     for token in 0..=1 {
         perform_basic_operations(
             token,
@@ -98,8 +104,20 @@ pub async fn perform_basic_tests() {
             BlockProcessing::CommitAndVerify,
         )
         .await;
+        tokens.push(token);
     }
 
+    verify_restore(
+        &testkit_config.web3_url,
+        testkit_config.available_block_chunk_sizes.clone(),
+        &contracts,
+        fee_account_address,
+        test_setup.get_accounts_state().await,
+        tokens,
+        test_setup.last_committed_block.new_root_hash,
+    )
+    .await;
+
     stop_state_keeper_sender.send(()).expect("sk stop send");
     sk_thread_handle.join().expect("sk thread join");
 }
diff --git a/core/tests/testkit/src/test_setup.rs b/core/tests/testkit/src/test_setup.rs
index dde7cd59f4..86417f793d 100644
--- a/core/tests/testkit/src/test_setup.rs
+++ b/core/tests/testkit/src/test_setup.rs
@@ -43,7 +43,6 @@ pub struct TestSetup {
     pub expected_changes_for_current_block: ExpectedAccountState,
 
     pub commit_account: EthereumAccount,
-
     pub last_committed_block: Block,
 }
diff --git a/core/tests/testkit/src/types.rs b/core/tests/testkit/src/types.rs
index b4ae1de288..d00866d46d 100644
--- a/core/tests/testkit/src/types.rs
+++ b/core/tests/testkit/src/types.rs
@@ -2,7 +2,7 @@
 use num::BigUint;
 use std::collections::HashMap;
 use web3::types::TransactionReceipt;
 
-use zksync_config::ConfigurationOptions;
+use zksync_config::{ConfigurationOptions, EthClientOptions};
 use zksync_types::block::Block;
 use zksync_types::TokenId;
 
@@ -11,15 +11,19 @@
 pub struct TestkitConfig {
     pub chain_id: u8,
     pub gas_price_factor: f64,
     pub web3_url: String,
+    pub available_block_chunk_sizes: Vec<usize>,
 }
 
 impl TestkitConfig {
     pub fn from_env() -> Self {
         let env_config = ConfigurationOptions::from_env();
+        let eth_client_options = EthClientOptions::from_env();
+
         TestkitConfig {
-            chain_id: env_config.chain_id,
-            gas_price_factor: env_config.gas_price_factor,
+            chain_id: eth_client_options.chain_id,
+            gas_price_factor: eth_client_options.gas_price_factor,
             web3_url: env_config.web3_url,
+            available_block_chunk_sizes: env_config.available_block_chunk_sizes,
         }
     }
 }
diff --git a/core/tests/ts-tests/tests/api.ts b/core/tests/ts-tests/tests/api.ts
index c9d0cccd21..15eee26941 100644
---
a/core/tests/ts-tests/tests/api.ts
+++ b/core/tests/ts-tests/tests/api.ts
@@ -1,4 +1,5 @@
 import fs from 'fs';
+import * as path from 'path';
 import fetch from 'node-fetch';
 import { expect } from 'chai';
 
@@ -13,6 +14,8 @@ import { Interface as TransactionInterface } from '../api-types/transaction';
 const apiTypesFolder = './api-types';
 const ADDRESS_REGEX = /^0x([0-9a-fA-F]){40}$/;
 const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/;
+const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`);
+const apiTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/api.json`, { encoding: 'utf-8' }));
 
 // Checks that json string has the expected js type.
 // Usage: pass a path to .ts file that exports a type named `Interface` and a json string.
@@ -49,14 +52,14 @@ async function validateResponseFromUrl(typeFilePath: string, url: string): Promi
 }
 
 export async function checkStatusResponseType(): Promise<StatusInterface> {
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/status`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/status`;
     const typeFilePath = `${apiTypesFolder}/status.ts`;
     const data: StatusInterface = await validateResponseFromUrl(typeFilePath, url);
     return data;
 }
 
 export async function checkWithdrawalProcessingTimeResponseType(): Promise<StatusInterface> {
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/withdrawal_processing_time`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/withdrawal_processing_time`;
     const typeFilePath = `${apiTypesFolder}/withdrawal-processing.ts`;
     const data: StatusInterface = await validateResponseFromUrl(typeFilePath, url);
     return data;
@@ -65,7 +68,7 @@ export async function checkWithdrawalProcessingTimeResponseType(): Promise<StatusInterface> {
     const offset = 0;
     const limit = 20;
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/account/${address}/history/${offset}/${limit}`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/account/${address}/history/${offset}/${limit}`;
     const typeFilePath = `${apiTypesFolder}/tx-history.ts`;
     const data: TxHistoryInterface = await validateResponseFromUrl(typeFilePath, url);
 
@@ -77,7 +80,7 @@ export async function checkTxHistoryResponseType(address: string): Promise<TxHistoryInterface> {
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/blocks/${blockNumber}`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/blocks/${blockNumber}`;
     const typeFilePath = `${apiTypesFolder}/block.ts`;
     const data: BlockInterface = await validateResponseFromUrl(typeFilePath, url);
     expect(data.committed_at, 'Wrong date format').to.match(DATE_REGEX);
@@ -85,14 +88,14 @@ export async function checkBlockResponseType(blockNumber: number): Promise<BlockInterface> {
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/blocks`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/blocks`;
     const typeFilePath = `${apiTypesFolder}/blocks.ts`;
     const data: BlocksInterface = await validateResponseFromUrl(typeFilePath, url);
     return data;
 }
 
 export async function checkBlockTransactionsResponseType(blockNumber: number): Promise<BlockTransactionsInterface> {
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/blocks/${blockNumber}/transactions`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/blocks/${blockNumber}/transactions`;
     const typeFilePath = `${apiTypesFolder}/block-transactions.ts`;
     const data: BlockTransactionsInterface = await validateResponseFromUrl(typeFilePath, url);
 
@@ -104,7 +107,7 @@ export async function checkBlockTransactionsResponseType(blockNumber: number): P
 }
 
 export async function checkTestnetConfigResponseType(): Promise<TestnetConfigInterface> {
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/testnet_config`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/testnet_config`;
     const typeFilePath = `${apiTypesFolder}/config.ts`;
     const data: TestnetConfigInterface = await validateResponseFromUrl(typeFilePath, url);
     expect(data.contractAddress, 'Wrong address format').to.match(ADDRESS_REGEX);
@@ -112,7 +115,7 @@ export async function checkTransactionsResponseType(txHash: string): Promise<TransactionInterface> {
-    const url = `${process.env.REST_API_ADDR}/api/v0.1/transactions_all/${txHash}`;
+    const url = `${apiTestConfig.rest_api_url}/api/v0.1/transactions_all/${txHash}`;
     const typeFilePath = `${apiTypesFolder}/transaction.ts`;
     const data: TransactionInterface = await validateResponseFromUrl(typeFilePath, url);
     expect(data.created_at, 'Wrong date format').to.match(DATE_REGEX);
diff --git a/core/tests/ts-tests/tests/main.test.ts b/core/tests/ts-tests/tests/main.test.ts
index e62eb0048e..56c470f5fa 100644
--- a/core/tests/ts-tests/tests/main.test.ts
+++ b/core/tests/ts-tests/tests/main.test.ts
@@ -21,12 +21,14 @@ describe(`ZkSync integration tests (token: ${token}, transport: ${transport})`,
     let tester: Tester;
     let alice: Wallet;
     let bob: Wallet;
+    let chuck: Wallet;
     let operatorBalance: BigNumber;
 
     before('create tester and test wallets', async () => {
         tester = await Tester.init('localhost', transport);
         alice = await tester.fundedWallet('5.0');
         bob = await tester.emptyWallet();
+        chuck = await tester.emptyWallet();
         operatorBalance = await tester.operatorBalance(token);
     });
 
@@ -77,25 +79,26 @@ describe(`ZkSync integration tests (token: ${token}, transport: ${transport})`,
     });
 
     step('should execute a transfer to new account', async () => {
-        await tester.testTransfer(alice, bob, token, TX_AMOUNT);
+        await tester.testTransfer(alice, chuck, token, TX_AMOUNT);
     });
 
     step('should execute a transfer to existing account', async () => {
-        await tester.testTransfer(alice, bob, token, TX_AMOUNT);
+        await tester.testTransfer(alice, chuck, token, TX_AMOUNT);
     });
 
     it('should execute a transfer to self', async () => {
         await tester.testTransfer(alice, alice, token, TX_AMOUNT);
     });
 
-    step('should change pubkey offchain for alice', async () => {
-        await tester.testChangePubKey(alice, token, false);
+    step('should change pubkey offchain', async () => {
+        await tester.testChangePubKey(chuck, token, false);
     });
 
     step('should test multi-transfers', async () => {
         await tester.testBatch(alice, bob, token, TX_AMOUNT);
         await tester.testIgnoredBatch(alice, bob, token, TX_AMOUNT);
-        await tester.testFailedBatch(alice, bob, token, TX_AMOUNT);
+        // TODO: With subsidized costs, this test fails on CI due to low gas prices and high allowance.
(ZKS-138) + // await tester.testFailedBatch(alice, bob, token, TX_AMOUNT); }); step('should execute a withdrawal', async () => { diff --git a/core/tests/ts-tests/tests/tester.ts b/core/tests/ts-tests/tests/tester.ts index c64bb14ffe..762e22096d 100644 --- a/core/tests/ts-tests/tests/tester.ts +++ b/core/tests/ts-tests/tests/tester.ts @@ -1,10 +1,15 @@ import * as ethers from 'ethers'; import * as zksync from 'zksync'; +import * as fs from 'fs'; +import * as path from 'path'; const franklin_abi = require('../../../../contracts/artifacts/cache/solpp-generated-contracts/ZkSync.sol/ZkSync.json') .abi; type Network = 'localhost' | 'rinkeby' | 'ropsten'; +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + export class Tester { public contract: ethers.Contract; public runningFee: ethers.BigNumber; @@ -29,7 +34,7 @@ export class Tester { } const syncProvider = await zksync.getDefaultProvider(network, transport); const ethWallet = ethers.Wallet.fromMnemonic( - process.env.TEST_MNEMONIC as string, + ethTestConfig.test_mnemonic as string, "m/44'/60'/0'/0/0" ).connect(ethProvider); const syncWallet = await zksync.Wallet.fromEthSigner(ethWallet, syncProvider); diff --git a/core/tests/ts-tests/tests/transfer.ts b/core/tests/ts-tests/tests/transfer.ts index 75cdc1741c..5875c4602b 100644 --- a/core/tests/ts-tests/tests/transfer.ts +++ b/core/tests/ts-tests/tests/transfer.ts @@ -111,28 +111,29 @@ Tester.prototype.testIgnoredBatch = async function ( expect(receiverAfter.eq(receiverBefore), 'Wrong batch was not ignored').to.be.true; }; -Tester.prototype.testFailedBatch = async function ( - sender: Wallet, - receiver: Wallet, - token: types.TokenLike, - amount: BigNumber -) { - const tx = { - to: receiver.address(), - token, - amount, - fee: BigNumber.from('0') - }; - - let thrown = true; - try { - const handles = await sender.syncMultiTransfer([{ ...tx }, { ...tx }]); - for (const handle of handles) { - await handle.awaitVerifyReceipt(); - } - thrown = false; // this line should be unreachable - } catch (e) { - expect(e.jrpcError.message).to.equal('Transactions batch summary fee is too low'); - } - expect(thrown, 'Batch should have failed').to.be.true; -}; +// TODO: With subsidized costs, this test fails on CI due to low gas prices and high allowance. (ZKS-138) +// Tester.prototype.testFailedBatch = async function ( +// sender: Wallet, +// receiver: Wallet, +// token: types.TokenLike, +// amount: BigNumber +// ) { +// const tx = { +// to: receiver.address(), +// token, +// amount, +// fee: BigNumber.from('0'), +// }; + +// let thrown = true; +// try { +// const handles = await sender.syncMultiTransfer([{ ...tx }, { ...tx }]); +// for (const handle of handles) { +// await handle.awaitVerifyReceipt(); +// } +// thrown = false; // this line should be unreachable +// } catch (e) { +// expect(e.jrpcError.message).to.equal('Transactions batch summary fee is too low'); +// } +// expect(thrown, 'Batch should have failed').to.be.true; +// }; diff --git a/docker/fee-seller/Dockerfile b/docker/fee-seller/Dockerfile index 678ecd2f1d..680997dd0d 100644 --- a/docker/fee-seller/Dockerfile +++ b/docker/fee-seller/Dockerfile @@ -9,7 +9,7 @@ COPY infrastructure/fee-seller/ . 
 # required env
 # ENV FEE_ACCOUNT_PRIVATE_KEY
 # ENV MAX_LIQUIDATION_FEE_PERCENT
-# ENV FEE_ACCUMULATOR_ADDRESS
+# ENV OPERATOR_FEE_ETH_ADDRESS
 # ENV ETH_NETWORK
 # ENV WEB3_URL
 # ENV NOTIFICATION_WEBHOOK_URL
diff --git a/docs/development.md b/docs/development.md
index dd2ab7f3cd..bc5da0d6ed 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -53,12 +53,13 @@ zk down # Shut down `geth` and `postgres` containers
 
 ## Committing changes
 
-`zksync` uses pre-commit git hooks for basic code integrity checks. Hooks are set up automatically within the workspace
-initialization process. These hooks will not allow to commit the code which does not pass several checks.
+`zksync` uses pre-commit and pre-push git hooks for basic code integrity checks. Hooks are set up automatically within
+the workspace initialization process. These hooks will not allow you to commit code that does not pass several checks.
 
 Currently the following criteria are checked:
 
-- Code should always be formatted via `cargo fmt`.
+- Rust code should always be formatted via `cargo fmt`.
+- Other code should always be formatted via `zk fmt`.
 - Dummy Prover should not be staged for commit (see below for the explanation).
 
 ## Using Dummy Prover
diff --git a/docs/setup-dev.md b/docs/setup-dev.md
index 9c39771d8d..bccf82f3a0 100644
--- a/docs/setup-dev.md
+++ b/docs/setup-dev.md
@@ -91,7 +91,7 @@ Install `psql` CLI tool to interact with postgres. On debian-based linux:
 
 ```sh
-sudo apt-get install postgresql
+sudo apt-get install postgresql-client
 ```
 
 ## `Diesel` CLI
@@ -115,7 +115,7 @@ sudo apt-get install libpq-dev
 Also, we need [`sqlx`](https://github.com/launchbadge/sqlx) CLI (it is used to generate database wrappers):
 
 ```sh
-cargo install --version=0.1.0-beta.1 sqlx-cli
+cargo install --version=0.2.0 sqlx-cli
 ```
 
 ## `solc`
diff --git a/etc/env/dev.env.example b/etc/env/dev.env.example
index dae6c2d5ae..b7b812f484 100755
--- a/etc/env/dev.env.example
+++ b/etc/env/dev.env.example
@@ -1,9 +1,4 @@
-# Mnemonic can be generated here: https://bitcoinqrcodegenerator.win/bip39/
-
-MNEMONIC="fine music test violin matrix prize squirrel panther purchase material script deal"
-TEST_MNEMONIC="stuff slice staff easily soup parent arm payment cotton trade scatter struggle"
-
-# Set in env file only for local development, for production, staging and testnet it is configured on k8s only.
+# Set in env file for development, production, staging and testnet.
 OPERATOR_PRIVATE_KEY=27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be
 
 # Address to be used for zkSync account managing the interaction with a contract on Ethereum.
@@ -42,7 +37,7 @@
 CHAIN_ID=9
 GAS_PRICE_FACTOR=1
 ETH_NETWORK=localhost
 
-# Set in env file only for local development, for production, staging and testnet it is configured on k8s only.
+# Set in env file for development, production, staging and testnet.
DATABASE_URL=postgres://postgres@localhost/plasma DB_POOL_SIZE=10 @@ -86,33 +81,23 @@ DOCKER_DUMMY_PROVER=false # Serving addresses configutarions # Admin server configuration -ADMIN_SERVER_API_BIND=127.0.0.1:8080 +ADMIN_SERVER_API_PORT=8080 ADMIN_SERVER_API_URL=http://127.0.0.1:8080 # Shared secret for authorization to admin server using JSON Web Token SECRET_AUTH=123 -REST_API_BIND=0.0.0.0:3001 -REST_API_ADDR=http://127.0.0.1:3001 - -HTTP_RPC_API_BIND=0.0.0.0:3030 -HTTP_RPC_API_ADDR=http://127.0.0.1:3030 +REST_API_PORT=3001 +HTTP_RPC_API_PORT=3030 +WS_API_PORT=3031 -WS_API_BIND=0.0.0.0:3031 -WS_API_ADDR=ws://127.0.0.1:3031 - -PROVER_SERVER_BIND=0.0.0.0:8088 +PROVER_SERVER_PORT=8088 PROVER_SERVER_URL=http://127.0.0.1:8088 -PRIVATE_CORE_SERVER_BIND=0.0.0.0:8090 +PRIVATE_CORE_SERVER_PORT=8090 PRIVATE_CORE_SERVER_URL=http://127.0.0.1:8090 RUST_BACKTRACE=1 -# DigitalOcean - -# Prover -BELLMAN_VERBOSE=1 - # key dir ending with latest version of circuit commit hash KEY_DIR=keys/plonk-8c6e12e4c # actual supported block chunks sizes by verifier contract (determined by circuit size on setup boundaries) @@ -134,11 +119,6 @@ BALANCE_TREE_DEPTH=11 # Number of idle provers running (to scale up faster) IDLE_PROVERS=1 -SERVER_API_HOST=localhost -SERVER_API_HOST_CERT="" -EXPLORER_HOST=localhost -EXPLORER_HOST_CERT="" - REQ_SERVER_TIMEOUT=10 API_REQUESTS_CACHES_SIZE=10000 @@ -175,5 +155,11 @@ FORCED_EXIT_MINIMUM_ACCOUNT_AGE_SECS=0 # FEE LIQUIDATION CONSTANTS MAX_LIQUIDATION_FEE_PERCENT=5 -FEE_ACCUMULATOR_ADDRESS=0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7 FEE_ACCOUNT_PRIVATE_KEY=unset + +# Set of token addresses for which no subsidies are provided. +NOT_SUBSIDIZED_TOKENS=2b591e99afe9f32eaa6214f7b7629768c40eeb39,34083bbd70d394110487feaa087da875a54624ec + +# Set of token addresses which are not acceptable in the ticker for paying fees in. +# Should be a comma-separated list. +TICKER_DISABLED_TOKENS=38A2fDc11f526Ddd5a607C1F251C065f40fBF2f7 diff --git a/etc/test_config/README.md b/etc/test_config/README.md index 466d7b07a0..ac7ecffd4e 100644 --- a/etc/test_config/README.md +++ b/etc/test_config/README.md @@ -2,9 +2,10 @@ This folder contains the data required for various zkSync tests. -Directory contains two subfolders: +Directory contains three subfolders: - `constant`: Data that remains the same between various runs, filled manually and committed to the repository. For example, private / public keys of test accounts. - `volatile`: Data that may change, filled by scripts and is **not** committed to the repository. For example, deployed contracts addresses. +- `sdk`: Data used to test SDK implementations. 
diff --git a/etc/test_config/constant/api.json b/etc/test_config/constant/api.json new file mode 100644 index 0000000000..050daba0e6 --- /dev/null +++ b/etc/test_config/constant/api.json @@ -0,0 +1,3 @@ +{ + "rest_api_url": "http://127.0.0.1:3001" +} diff --git a/etc/test_config/constant/eth.json b/etc/test_config/constant/eth.json index d58b2461f8..624e605e3c 100644 --- a/etc/test_config/constant/eth.json +++ b/etc/test_config/constant/eth.json @@ -1,3 +1,5 @@ { - "web3_url": "http://127.0.0.1:8545" + "web3_url": "http://127.0.0.1:8545", + "test_mnemonic": "stuff slice staff easily soup parent arm payment cotton trade scatter struggle", + "mnemonic": "fine music test violin matrix prize squirrel panther purchase material script deal" } diff --git a/etc/test_config/sdk/test-vectors.json b/etc/test_config/sdk/test-vectors.json new file mode 100644 index 0000000000..ee7e3f2857 --- /dev/null +++ b/etc/test_config/sdk/test-vectors.json @@ -0,0 +1,316 @@ +{ + "cryptoPrimitivesTest": { + "description": "Contains the seed for private key and the message for signing", + "items": [ + { + "inputs": { + "seed": "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + "message": "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f" + }, + "outputs": { + "privateKey": "0x0552a69519d1f3043611126c13489ff4a2a867a1c667b1d9d9031cd27fdcff5a", + "pubKeyHash": "17f3708f5e2b2c39c640def0cf0010fd9dd9219650e389114ea9da47f5874184", + "signature": "5462c3083d92b832d540c9068eed0a0450520f6dd2e4ab169de1a46585b394a4292896a2ebca3c0378378963a6bc1710b64c573598e73de3a33d6cec2f5d7403" + } + } + ] + }, + "txTest": { + "description": "Contains various zkSync transactions as inputs and zkSync and Ethereum signature data as outputs", + "items": [ + { + "inputs": { + "type": "Transfer", + "ethPrivateKey": "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + "data": { + "accountId": 44, + "from": "0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045", + "to": "0x19aa2ed8712072e918632259780e587698ef58df", + "tokenId": 0, + "amount": "1000000000000", + "fee": "1000000", + "nonce": 12 + }, + "ethSignData": { + "stringAmount": "1000000000000", + "stringToken": "ETH", + "stringFee": "1000000", + "to": "0x19aa2ed8712072e918632259780e587698ef58df", + "accountId": 44, + "nonce": 12 + } + }, + "outputs": { + "signBytes": "0x050000002ccdb6aaa2607df186f7dd2d8eb4ee60f83720b04519aa2ed8712072e918632259780e587698ef58df00004a817c80027d030000000c", + "signature": { + "pubKey": "40771354dc314593e071eaf4d0f42ccb1fad6c7006c57464feeb7ab5872b7490", + "signature": "cfcc4ce57f7915657e742c700bb835ee9a783ce0640f4f6f06b096c1e0d7f88cadfb3ae9ac5ac7d315a24173d612aac68c2709fe1e643deb8d51ad134959cb05" + }, + "ethSignMessage": "Transfer 1000000000000 ETH\nTo: 0x19aa2ed8712072e918632259780e587698ef58df\nNonce: 12\nFee: 1000000 ETH\nAccount Id: 44", + "ethSignature": "0x6de424125c2a65a42812c3e9249bdcf98cbe4d0328a616f32ed23fa86a780be6088cb62149f98f33b9433cdcfe900500e7bf6d6909560ba1a8de99d42521162a1b" + } + }, + { + "inputs": { + "type": "ChangePubKey", + "ethPrivateKey": "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + "data": { + "accountId": 55, + "account": "0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045", + "newPkHash": "sync:18e8446d7748f2de52b28345bdbc76160e6b35eb", + "feeTokenId": 0, + "fee": "1000000000", + "nonce": 13 + }, + "ethSignData": { + "pubKeyHash": "sync:18e8446d7748f2de52b28345bdbc76160e6b35eb", + "accountId": 55, + "nonce": 13 
+ } + }, + "outputs": { + "signBytes": "0x0700000037cdb6aaa2607df186f7dd2d8eb4ee60f83720b04518e8446d7748f2de52b28345bdbc76160e6b35eb00007d060000000d", + "signature": { + "pubKey": "40771354dc314593e071eaf4d0f42ccb1fad6c7006c57464feeb7ab5872b7490", + "signature": "d88da8d25514ea380931fa4bb2be0189c98bff9a541be660f3125b0caa241809bcf81de215f08eedb3815a3694cfb041c0e031bb1183cb8f0c3e61863e98d200" + }, + "ethSignMessage": "Register zkSync pubkey:\n\n18e8446d7748f2de52b28345bdbc76160e6b35eb\nnonce: 0x0000000d\naccount id: 0x00000037\n\nOnly sign this message for a trusted client!", + "ethSignature": "0xe062aca0dd8438174f424a26f3dd528ca9bd98366b2dafd6c6735eeaccd9e787245ac7dbbe2a37e3a74f168e723c5a2c613de25795a056bc81ff4c8d4106e56f1c" + } + }, + { + "inputs": { + "type": "Withdraw", + "ethPrivateKey": "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + "data": { + "accountId": 44, + "from": "0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045", + "ethAddress": "0x19aa2ed8712072e918632259780e587698ef58df", + "tokenId": 0, + "amount": "1000000000000", + "fee": "1000000", + "nonce": 12 + }, + "ethSignData": { + "stringAmount": "1000000000000", + "stringToken": "ETH", + "stringFee": "1000000", + "ethAddress": "0x19aa2ed8712072e918632259780e587698ef58df", + "accountId": 44, + "nonce": 12 + } + }, + "outputs": { + "signBytes": "0x030000002ccdb6aaa2607df186f7dd2d8eb4ee60f83720b04519aa2ed8712072e918632259780e587698ef58df00000000000000000000000000e8d4a510007d030000000c", + "signature": { + "pubKey": "40771354dc314593e071eaf4d0f42ccb1fad6c7006c57464feeb7ab5872b7490", + "signature": "a194482cacc1e6962944fe12859d92ed53e6a489b3fd62666d7871a0a64a42a2bfa197448ed217e2a77cd60caf1cc4d48d1e5c619aa57dbba0d9efe5f5261900" + }, + "ethSignMessage": "Withdraw 1000000000000 ETH\nTo: 0x19aa2ed8712072e918632259780e587698ef58df\nNonce: 12\nFee: 1000000 ETH\nAccount Id: 44", + "ethSignature": "0x006ae08bbdf02f23dfd7965c2bb15b7365fe447f51b5fbf1428ca6ff54457bbc489488f3dc5f10c123796bca0ea4a155b2f7c3933f34432c155c14fb9caefb841c" + } + }, + { + "inputs": { + "type": "ForcedExit", + "ethPrivateKey": "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", + "data": { + "initiatorAccountId": 44, + "from": "0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045", + "target": "0x19aa2ed8712072e918632259780e587698ef58df", + "tokenId": 0, + "fee": "1000000", + "nonce": 12 + }, + "ethSignData": null + }, + "outputs": { + "signBytes": "0x080000002c19aa2ed8712072e918632259780e587698ef58df00007d030000000c", + "signature": { + "pubKey": "40771354dc314593e071eaf4d0f42ccb1fad6c7006c57464feeb7ab5872b7490", + "signature": "62fa1d2f56e1d9a422fbf689cc27ff9da6a33ee9add6d47d4afdf4657b0a93146da6f720e06e2b3894b7ab645eb07d3cd1576710157848f5cb04809e0e2f3a04" + }, + "ethSignMessage": null, + "ethSignature": null + } + } + ] + }, + "utils": { + "amountPacking": { + "description": "Checks for amount packing", + "items": [ + { + "inputs": { + "value": "0" + }, + "outputs": { + "packable": true, + "closestPackable": "0", + "packedValue": "0x0000000000" + } + }, + { + "inputs": { + "value": "1000" + }, + "outputs": { + "packable": true, + "closestPackable": "1000", + "packedValue": "0x0000007d00" + } + }, + { + "inputs": { + "value": "1111" + }, + "outputs": { + "packable": true, + "closestPackable": "1111", + "packedValue": "0x0000008ae0" + } + }, + { + "inputs": { + "value": "474732833474" + }, + "outputs": { + "packable": false, + "closestPackable": "474732833400", + "packedValue": "0x235ecf69c2" + } + }, + { + "inputs": { + "value": "474732833400" 
+ }, + "outputs": { + "packable": true, + "closestPackable": "474732833400", + "packedValue": "0x235ecf69c2" + } + }, + { + "inputs": { + "value": "10000000000000" + }, + "outputs": { + "packable": true, + "closestPackable": "10000000000000", + "packedValue": "0x4a817c8003" + } + } + ] + }, + "feePacking": { + "description": "Checks for fee packing", + "items": [ + { + "inputs": { + "value": "0" + }, + "outputs": { + "packable": true, + "closestPackable": "0", + "packedValue": "0x0000" + } + }, + { + "inputs": { + "value": "1000" + }, + "outputs": { + "packable": true, + "closestPackable": "1000", + "packedValue": "0x7d00" + } + }, + { + "inputs": { + "value": "1111" + }, + "outputs": { + "packable": true, + "closestPackable": "1111", + "packedValue": "0x8ae0" + } + }, + { + "inputs": { + "value": "474732833474" + }, + "outputs": { + "packable": false, + "closestPackable": "474000000000", + "packedValue": "0x3b49" + } + }, + { + "inputs": { + "value": "474732833400" + }, + "outputs": { + "packable": false, + "closestPackable": "474000000000", + "packedValue": "0x3b49" + } + }, + { + "inputs": { + "value": "10000000000000" + }, + "outputs": { + "packable": true, + "closestPackable": "10000000000000", + "packedValue": "0x7d0a" + } + } + ] + }, + "tokenFormatting": { + "description": "Checks for token amount formatting", + "items": [ + { + "inputs": { + "token": "NNM", + "decimals": 0, + "amount": "1000000000000000100000" + }, + "outputs": { + "formatted": "1000000000000000100000.0 NNM" + } + }, + { + "inputs": { + "token": "DAI", + "decimals": 6, + "amount": "1000000" + }, + "outputs": { + "formatted": "1.0 DAI" + } + }, + { + "inputs": { + "token": "ZRO", + "decimals": 11, + "amount": "0" + }, + "outputs": { + "formatted": "0.0 ZRO" + } + }, + { + "inputs": { + "token": "ETH", + "decimals": 18, + "amount": "1000000000000000100000" + }, + "outputs": { + "formatted": "1000.0000000000001 ETH" + } + } + ] + } + } +} diff --git a/infrastructure/analytics/tests/commands.test.ts b/infrastructure/analytics/tests/commands.test.ts index 1d6c3171e0..338564ae43 100644 --- a/infrastructure/analytics/tests/commands.test.ts +++ b/infrastructure/analytics/tests/commands.test.ts @@ -6,6 +6,8 @@ import { Config, Network } from '../src/types'; import { loadConfig } from '../src/config'; import { TimePeriod } from '../src/utils'; import * as commands from '../src/commands'; +import * as fs from 'fs'; +import * as path from 'path'; use(chaiAsPromised); @@ -17,12 +19,15 @@ describe('Tests', () => { before('prepare auxiliary data & create new zksync account, make transfer', async () => { config = loadConfig(network); + const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); + const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + const timeFrom = new Date().toISOString(); const ethProvider = new ethers.providers.JsonRpcProvider(); const zksProvider = await zksync.getDefaultProvider(network, 'HTTP'); - const ethWallet = ethers.Wallet.fromMnemonic(process.env.TEST_MNEMONIC as string, "m/44'/60'/0'/0/0").connect( + const ethWallet = ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic as string, "m/44'/60'/0'/0/0").connect( ethProvider ); diff --git a/infrastructure/fee-seller/index.ts b/infrastructure/fee-seller/index.ts index 384eb27991..3d0f34ff8a 100644 --- a/infrastructure/fee-seller/index.ts +++ b/infrastructure/fee-seller/index.ts @@ -4,7 +4,7 @@ * Selling is done in steps: * Step 1 - token is withdrawn to the ETH 
account * Step 2 - token is swapped for ETH using 1inch - * Step 3 - ETH is transferred to the FEE_ACCUMULATOR_ADDRESS + * Step 3 - ETH is transferred to the OPERATOR_FEE_ETH_ADDRESS * * Each step happens one after another without waiting for previous to complete * so this script should be run frequently (e.g. once every 15 min). @@ -28,7 +28,7 @@ import { /** Env parameters. */ const FEE_ACCOUNT_PRIVATE_KEY = process.env.FEE_ACCOUNT_PRIVATE_KEY; const MAX_LIQUIDATION_FEE_PERCENT = parseInt(process.env.MAX_LIQUIDATION_FEE_PERCENT); -const FEE_ACCUMULATOR_ADDRESS = process.env.FEE_ACCUMULATOR_ADDRESS; +const OPERATOR_FEE_ETH_ADDRESS = process.env.OPERATOR_FEE_ETH_ADDRESS; const ETH_NETWORK = process.env.ETH_NETWORK as any; const WEB3_URL = process.env.WEB3_URL; const MAX_LIQUIDATION_FEE_SLIPPAGE = parseInt(process.env.MAX_LIQUIDATION_FEE_SLIPPAGE) || 5; @@ -196,8 +196,8 @@ async function sendETH(zksWallet: zksync.Wallet) { const ethTransferFee = BigNumber.from('21000').mul(await ethProvider.getGasPrice()); const ethToSend = ethBalance.sub(ETH_TRANSFER_THRESHOLD); if (isOperationFeeAcceptable(ethToSend, ethTransferFee, MAX_LIQUIDATION_FEE_PERCENT)) { - console.log(`Sending ${fmtToken(zksWallet.provider, 'ETH', ethToSend)} to ${FEE_ACCUMULATOR_ADDRESS}`); - const tx = await ethWallet.sendTransaction({ to: FEE_ACCUMULATOR_ADDRESS, value: ethToSend }); + console.log(`Sending ${fmtToken(zksWallet.provider, 'ETH', ethToSend)} to ${OPERATOR_FEE_ETH_ADDRESS}`); + const tx = await ethWallet.sendTransaction({ to: OPERATOR_FEE_ETH_ADDRESS, value: ethToSend }); console.log(`Tx hash: ${tx.hash}`); await sendNotification( diff --git a/infrastructure/grafana/.gitignore b/infrastructure/grafana/.gitignore new file mode 100644 index 0000000000..b56ef53254 --- /dev/null +++ b/infrastructure/grafana/.gitignore @@ -0,0 +1,2 @@ +*.json +grafonnet-lib diff --git a/infrastructure/grafana/README.md b/infrastructure/grafana/README.md new file mode 100644 index 0000000000..addc81db9e --- /dev/null +++ b/infrastructure/grafana/README.md @@ -0,0 +1,40 @@ +# Generating Grafana dashboards with `jsonnet` + +## Motivation + +Configuring Grafana's dashboards by hand can be very time-consuming, especially when we have ~170 metrics. Besides, it +is preferable to keep the configs in code anyway. + +Although Grafana stores all its dashboards in `json`, the data there is hardly readable and very repetitive. + +## Solution: `jsonnet` + +[`jsonnet`](https://jsonnet.org) is a superset of `json` that aims to be an easy language for generating `json`. + +Grafana supports `jsonnet` for configuration via the library [`grafonnet`](https://github.com/grafana/grafonnet-lib), +which is used to configure our dashboards. + +You can familiarize yourself with `jsonnet` on their official website, although it is not necessary (assuming the goal +is to add/change a metric) given the simplicity of the language. + +## Usage + +**Dependencies**: `jsonnet`, `jq` + +Adding a metric is trivial: there are plenty of examples in the `dashboards/` folder. Simply add the metric name to the +`metrics` array. + +To create a new dashboard, assuming it will contain graphs of running averages of metrics provided by +`metrics::histogram!`, create a new `.jsonnet` file in the `dashboards/` folder. Use `G.panel` and `G.dashboard` +functions to configure your dashboard. 
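For orientation, here is a minimal TypeScript sketch of the query each generated panel runs (the `runningAverageQuery` helper is hypothetical; the PromQL shape mirrors the `rate(%s_sum)/rate(%s_count)` template in `generator.libsonnet` further below):

```typescript
// Hypothetical sketch of the query each G.panel emits: a running average of a
// `metrics::histogram!` metric, computed as rate(sum) / rate(count).
function runningAverageQuery(metric: string, span: string = '1h'): string {
    // Prometheus metric names use '_' where the zkSync metric names use '.'.
    const formatted = metric.split('.').join('_');
    return `rate(${formatted}_sum[${span}]) / rate(${formatted}_count[${span}])`;
}

// e.g. the "api.rpc.tx_submit" panel queries:
// rate(api_rpc_tx_submit_sum[1h]) / rate(api_rpc_tx_submit_count[1h])
console.log(runningAverageQuery('api.rpc.tx_submit'));
```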
+ +To (re)build and (re)deploy the dashboard, run the `./generate.sh` script with the `$AUTH` env variable set to your Grafana +credentials, like so: + +``` +$ AUTH=login:password ./generate.sh +Building metrics.jsonnet ... Done +Deploying metrics.json ... "success" +``` + +If you don't see the message that the dashboard was deployed, `touch` the source file and try again. diff --git a/infrastructure/grafana/dashboards/api_rpc.jsonnet b/infrastructure/grafana/dashboards/api_rpc.jsonnet new file mode 100644 index 0000000000..2cffe3d38b --- /dev/null +++ b/infrastructure/grafana/dashboards/api_rpc.jsonnet @@ -0,0 +1,24 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "api.rpc.account_info", + "api.rpc.contract_address", + "api.rpc.ethop_info", + "api.rpc.get_eth_tx_for_withdrawal", + "api.rpc.get_token_price", + "api.rpc.get_tx_fee", + "api.rpc.get_txs_batch_fee_in_wei", + "api.rpc.submit_txs_batch", + "api.rpc.tokens", + "api.rpc.tx_info", + "api.rpc.tx_submit", + "api.rpc.get_ongoing_deposits", + "api.rpc.get_executed_priority_operation", + "api.rpc.get_block_info", + "api.rpc.get_tx_receipt", + "api.rpc.get_account_state", +]; + +G.dashboard( + 'Metrics / rpc', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/api_v01.jsonnet b/infrastructure/grafana/dashboards/api_v01.jsonnet new file mode 100644 index 0000000000..c465d36a6c --- /dev/null +++ b/infrastructure/grafana/dashboards/api_v01.jsonnet @@ -0,0 +1,23 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "api.v01.block_by_id", + "api.v01.block_transactions", + "api.v01.block_tx", + "api.v01.blocks", + "api.v01.executed_tx_by_hash", + "api.v01.explorer_search", + "api.v01.priority_op", + "api.v01.status", + "api.v01.testnet_config", + "api.v01.tokens", + "api.v01.tx_by_hash", + "api.v01.tx_history", + "api.v01.tx_history_newer_than", + "api.v01.tx_history_older_than", + "api.v01.withdrawal_processing_time", +]; + +G.dashboard( + 'Metrics / api v0.1', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/chain_block.jsonnet b/infrastructure/grafana/dashboards/chain_block.jsonnet new file mode 100644 index 0000000000..65236039c9 --- /dev/null +++ b/infrastructure/grafana/dashboards/chain_block.jsonnet @@ -0,0 +1,26 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "sql.chain.block.find_block_by_height_or_hash", + "sql.chain.block.get_last_verified_confirmed_block", + "sql.chain.block.load_storage_pending_block", + "sql.chain.block.execute_operation", + "sql.chain.block.get_block", + "sql.chain.block.get_block_executed_ops", + "sql.chain.block.get_block_operations", + "sql.chain.block.get_block_transactions", + "sql.chain.block.get_last_committed_block", + "sql.chain.block.get_last_verified_block", + "sql.chain.block.get_storage_block", + "sql.chain.block.load_block_range", + "sql.chain.block.load_commit_op", + "sql.chain.block.load_pending_block", + "sql.chain.block.load_pending_block", + "sql.chain.block.save_block", + "sql.chain.block.save_block_transactions", + "sql.chain.block.store_account_tree_cache", +]; + +G.dashboard( + 'Metrics / sql / chain / block', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/chain_mempool.jsonnet b/infrastructure/grafana/dashboards/chain_mempool.jsonnet new file mode 100644 index 0000000000..a985091df2 --- /dev/null +++ b/infrastructure/grafana/dashboards/chain_mempool.jsonnet @@ -0,0 +1,14 @@ +local G = import 
'../generator.libsonnet'; +local metrics = [ + "sql.chain.mempool.collect_garbage", + "sql.chain.mempool.insert_batch", + "sql.chain.mempool.insert_tx", + "sql.chain.mempool.load_txs", + "sql.chain.mempool.remove_tx", + "sql.chain.mempool.remove_txs", +]; + +G.dashboard( + 'Metrics / sql / chain / mempool', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/chain_misc.jsonnet b/infrastructure/grafana/dashboards/chain_misc.jsonnet new file mode 100644 index 0000000000..014cfd3629 --- /dev/null +++ b/infrastructure/grafana/dashboards/chain_misc.jsonnet @@ -0,0 +1,14 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "sql.chain.account.account_state_by_address", + "sql.chain.account.get_account_and_last_block", + "sql.chain.account.last_committed_state_for_account", + "sql.chain.account.last_verified_state_for_account", + "sql.chain.stats.count_outstanding_proofs", + "sql.chain.stats.count_total_transactions", +]; + +G.dashboard( + 'Metrics / sql / chain / account & stats', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/chain_operations.jsonnet b/infrastructure/grafana/dashboards/chain_operations.jsonnet new file mode 100644 index 0000000000..e9bf17ef29 --- /dev/null +++ b/infrastructure/grafana/dashboards/chain_operations.jsonnet @@ -0,0 +1,27 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "sql.chain.operations.add_complete_withdrawals_transaction", + "sql.chain.operations.add_pending_withdrawal", + "sql.chain.operations.eth_tx_for_withdrawal", + "sql.chain.operations.get_executed_operation", + "sql.chain.operations.get_executed_priority_operation", + "sql.chain.operations.get_executed_priority_operation_by_hash", + "sql.chain.operations.get_last_block_by_action", + "sql.chain.operations.store_executed_priority_op", + "sql.chain.operations.confirm_operation", + "sql.chain.operations.get_operation", + "sql.chain.operations.store_executed_tx", + "sql.chain.operations.store_operation", + "sql.chain.operations_ext.account_created_on", + "sql.chain.operations_ext.find_priority_op_by_hash", + "sql.chain.operations_ext.get_account_transactions_history", + "sql.chain.operations_ext.get_account_transactions_history_from", + "sql.chain.operations_ext.get_priority_op_receipt", + "sql.chain.operations_ext.find_tx_by_hash", + "sql.chain.operations_ext.tx_receipt", +]; + +G.dashboard( + 'Metrics / sql / chain / operations', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/chain_state.jsonnet b/infrastructure/grafana/dashboards/chain_state.jsonnet new file mode 100644 index 0000000000..d757077cac --- /dev/null +++ b/infrastructure/grafana/dashboards/chain_state.jsonnet @@ -0,0 +1,14 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "sql.chain.state.apply_state_update", + "sql.chain.state.commit_state_update", + "sql.chain.state.load_committed_state", + "sql.chain.state.load_state_diff", + "sql.chain.state.load_state_diff", + "sql.chain.state.load_verified_state", +]; + +G.dashboard( + 'Metrics / sql / chain / state', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/data_restore.jsonnet b/infrastructure/grafana/dashboards/data_restore.jsonnet new file mode 100644 index 0000000000..4ce742fb61 --- /dev/null +++ b/infrastructure/grafana/dashboards/data_restore.jsonnet @@ -0,0 +1,17 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + 
"sql.data_restore.load_last_watched_block_number", + "sql.data_restore.update_last_watched_block_number", + "sql.data_restore.initialize_eth_stats", + "sql.data_restore.load_events_state", + "sql.data_restore.load_rollup_ops_blocks", + "sql.data_restore.load_storage_state", + "sql.data_restore.save_rollup_ops", + "sql.data_restore.update_block_events", + "sql.data_restore.update_storage_state", +]; + +G.dashboard( + 'Metrics / data_restore', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/eth.jsonnet b/infrastructure/grafana/dashboards/eth.jsonnet new file mode 100644 index 0000000000..01723e9c38 --- /dev/null +++ b/infrastructure/grafana/dashboards/eth.jsonnet @@ -0,0 +1,15 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "eth_watcher.get_complete_withdrawals_event", + "eth_watcher.get_priority_op_events_with_blocks", + "eth_watcher.get_priority_op_events", + "eth_watcher.poll_eth_node", + "eth_sender.load_new_operations", + "eth_sender.perform_commitment_step", + "eth_sender.proceed_next_operations", +]; + +G.dashboard( + 'Metrics / eth_sender & eth_watcher', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/event_fetcher.jsonnet b/infrastructure/grafana/dashboards/event_fetcher.jsonnet new file mode 100644 index 0000000000..92d15c211b --- /dev/null +++ b/infrastructure/grafana/dashboards/event_fetcher.jsonnet @@ -0,0 +1,14 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "api.event_fetcher.last_committed_block", + "api.event_fetcher.last_verified_block", + "api.event_fetcher.load_operation", + "api.event_fetcher.load_pending_block", + "api.event_fetcher.send_operations", + "api.event_fetcher.update_pending_block", +]; + +G.dashboard( + 'Metrics / event_fetcher', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/misc.jsonnet b/infrastructure/grafana/dashboards/misc.jsonnet new file mode 100644 index 0000000000..cb2c58d8d5 --- /dev/null +++ b/infrastructure/grafana/dashboards/misc.jsonnet @@ -0,0 +1,18 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + 'committer.commit_block', + 'committer.save_pending_block', + 'witness_generator.prepare_witness_and_save_it', + 'witness_generator.load_account_tree', + 'root_hash', + 'mempool.propose_new_block', + 'signature_checker.verify_eth_signature_single_tx', + 'signature_checker.verify_eth_signature_txs_batch', + "sql.load_config", + "sql.connection_acquire", +]; + +G.dashboard( + 'Metrics / miscellaneous', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/notifier.jsonnet b/infrastructure/grafana/dashboards/notifier.jsonnet new file mode 100644 index 0000000000..03a8563a79 --- /dev/null +++ b/infrastructure/grafana/dashboards/notifier.jsonnet @@ -0,0 +1,18 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "api.notifier.add_account_update_sub", + "api.notifier.add_priority_op_sub", + "api.notifier.add_transaction_sub", + "api.notifier.handle_executed_operations", + "api.notifier.handle_new_block", + "api.notifier.get_tx_receipt", + "api.notifier.get_block_info", + "api.notifier.get_executed_priority_operation", + "api.notifier.get_account_info", + "api.notifier.get_account_state", +]; + +G.dashboard( + 'Metrics / notifier', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/sql_ethereum.jsonnet b/infrastructure/grafana/dashboards/sql_ethereum.jsonnet new file 
mode 100644 index 0000000000..f5f4a4e0f9 --- /dev/null +++ b/infrastructure/grafana/dashboards/sql_ethereum.jsonnet @@ -0,0 +1,19 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "sql.ethereum.add_hash_entry", + "sql.ethereum.confirm_eth_tx", + "sql.ethereum.get_eth_op_id", + "sql.ethereum.get_next_nonce", + "sql.ethereum.initialize_eth_data", + "sql.ethereum.load_unconfirmed_operations", + "sql.ethereum.load_unprocessed_operations", + "sql.ethereum.report_created_operation", + "sql.ethereum.save_new_eth_tx", + "sql.ethereum.update_eth_tx", + "sql.ethereum.update_gas_price", +]; + +G.dashboard( + 'Metrics / sql / ethereum', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/sql_prover.jsonnet b/infrastructure/grafana/dashboards/sql_prover.jsonnet new file mode 100644 index 0000000000..3cc7899d2b --- /dev/null +++ b/infrastructure/grafana/dashboards/sql_prover.jsonnet @@ -0,0 +1,20 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "sql.prover.get_existing_prover_run", + "sql.prover.get_witness", + "sql.prover.load_proof", + "sql.prover.pending_jobs_count", + "sql.prover.prover_by_id", + "sql.prover.prover_run_for_next_commit", + "sql.prover.record_prover_is_working", + "sql.prover.record_prover_stop", + "sql.prover.register_prover", + "sql.prover.store_proof", + "sql.prover.store_witness", + "sql.prover.unstarted_jobs_count", +]; + +G.dashboard( + 'Metrics / sql / prover', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/sql_token.jsonnet b/infrastructure/grafana/dashboards/sql_token.jsonnet new file mode 100644 index 0000000000..012a67f3a1 --- /dev/null +++ b/infrastructure/grafana/dashboards/sql_token.jsonnet @@ -0,0 +1,14 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "sql.token.get_count", + "sql.token.get_historical_ticker_price", + "sql.token.get_token", + "sql.token.load_tokens", + "sql.token.store_token", + "sql.token.update_historical_ticker_price", +]; + +G.dashboard( + 'Metrics / sql / token', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/state.jsonnet b/infrastructure/grafana/dashboards/state.jsonnet new file mode 100644 index 0000000000..14c1283358 --- /dev/null +++ b/infrastructure/grafana/dashboards/state.jsonnet @@ -0,0 +1,16 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "state.change_pubkey", + "state.deposit", + "state.forced_exit", + "state.full_exit", + "state.transfer", + "state.transfer_to_new", + "state.transfer_to_self", + "state.withdraw", +]; + +G.dashboard( + 'Metrics / plasma (state)', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/state_keeper.jsonnet b/infrastructure/grafana/dashboards/state_keeper.jsonnet new file mode 100644 index 0000000000..14fa75cc14 --- /dev/null +++ b/infrastructure/grafana/dashboards/state_keeper.jsonnet @@ -0,0 +1,16 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "state_keeper.apply_batch", + "state_keeper.apply_priority_op", + "state_keeper.apply_tx", + // "state_keeper.create_genesis_block", + "state_keeper.execute_proposed_block", + // "state_keeper.initialize", + "state_keeper.seal_pending_block", + "state_keeper.store_pending_block", +]; + +G.dashboard( + 'Metrics / state_keeper', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/dashboards/statistics.jsonnet b/infrastructure/grafana/dashboards/statistics.jsonnet new 
file mode 100644 index 0000000000..2c8939dd6c --- /dev/null +++ b/infrastructure/grafana/dashboards/statistics.jsonnet @@ -0,0 +1,25 @@ +local G = import '../generator.libsonnet'; + +local gauge(title, metric) = + G.grafana.statPanel.new( + title, + datasource = 'Prometheus', + reducerFunction = 'last' + ).addTarget( + G.grafana.prometheus.target( + metric, + legendFormat = '{{namespace}}' + ) + ) + { gridPos: { h: G.height, w: G.width } }; + +G.dashboard( + 'Metrics / statistics', + [ + gauge('COMMIT not confirmed operations', 'count_operations{action="COMMIT", confirmed="false"}'), + gauge('VERIFY not confirmed operations', 'count_operations{action="VERIFY", confirmed="false"}'), + gauge('COMMIT confirmed operations', 'count_operations{action="COMMIT", confirmed="true"}'), + gauge('VERIFY confirmed operations', 'count_operations{action="VERIFY", confirmed="true"}'), + gauge('Transaction batch sizes', 'tx_batch_size'), + ] +) + diff --git a/infrastructure/grafana/dashboards/ticker.jsonnet b/infrastructure/grafana/dashboards/ticker.jsonnet new file mode 100644 index 0000000000..857adb8c36 --- /dev/null +++ b/infrastructure/grafana/dashboards/ticker.jsonnet @@ -0,0 +1,12 @@ +local G = import '../generator.libsonnet'; +local metrics = [ + "ticker.get_gas_price_wei", + "ticker.get_historical_ticker_price", + "ticker.get_last_quote", + "ticker.get_token", +]; + +G.dashboard( + 'Metrics / ticker', + [ G.panel(metric) for metric in metrics ] +) diff --git a/infrastructure/grafana/generate.sh b/infrastructure/grafana/generate.sh new file mode 100755 index 0000000000..9c7857ac58 --- /dev/null +++ b/infrastructure/grafana/generate.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +set -e + +if ! [ -d grafonnet-lib ]; then + git clone https://github.com/grafana/grafonnet-lib +fi + +mkdir -p build + +# AUTH must be in the form `login:password` +# We should move to using API Keys later +[ -z "$AUTH" ] && echo 'Set $AUTH to deploy dashboards' + +for template in $(ls dashboards); do + dashboard=$(basename $template net) + # check if source is newer than target, otherwise we don't have to do anything + [ "build/$dashboard" -nt "dashboards/$template" ] && continue + echo -n "Building $template ... " + jsonnet dashboards/$template > build/$dashboard + echo Done + [ -z "$AUTH" ] && continue + echo -n "Deploying $dashboard ... 
" + curl -X POST -H "Content-Type: application/json" \ + -d "$(jq '{"folderId": 0, "overwrite": true, "dashboard": .}' build/$dashboard)" \ + https://$AUTH@grafana.test.zksync.dev/api/dashboards/db 2> /dev/null | jq .status +done diff --git a/infrastructure/grafana/generator.libsonnet b/infrastructure/grafana/generator.libsonnet new file mode 100644 index 0000000000..f0dcd927d3 --- /dev/null +++ b/infrastructure/grafana/generator.libsonnet @@ -0,0 +1,27 @@ +{ + grafana:: import 'grafonnet-lib/grafonnet/grafana.libsonnet', + width:: 1337, + height:: 10, + + panel(metric, span = '1h'):: + local formatted = std.strReplace(metric, '.', '_'); + $.grafana.graphPanel.new( + title = metric, + datasource = 'Prometheus', + ).addTarget( + $.grafana.prometheus.target( + 'rate(%s_sum[%s]) / rate(%s_count[%s])' + % [formatted, span, formatted, span], + legendFormat = '{{namespace}}' + ) + ) + { gridPos: { h: $.height, w: $.width } }, + + dashboard(title = '', panels = []):: + $.grafana.dashboard.new( + title, + schemaVersion = 18, + editable = true, + refresh = '1m', + tags = ['prometheus'] + ).addPanels(panels) +} diff --git a/infrastructure/sdk-test-vector-generator/.gitignore b/infrastructure/sdk-test-vector-generator/.gitignore new file mode 100644 index 0000000000..8b7ec87e5b --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/.gitignore @@ -0,0 +1 @@ +test-vectors.json diff --git a/infrastructure/sdk-test-vector-generator/README.md b/infrastructure/sdk-test-vector-generator/README.md new file mode 100644 index 0000000000..7d15f762e2 --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/README.md @@ -0,0 +1,13 @@ +# zkSync SDK test vector generator + +An utility to generate deterministic test vectors for various SDK. By having all the SDK share the same test vectors, +it's easier to ensure that behavior of all the implementations is correct and consistent. + +## Launching + +```bash +yarn +yarn generate +``` + +Result test vector will be created in the package directory. Output file name is `test-vectors.json`. 
diff --git a/infrastructure/sdk-test-vector-generator/package.json b/infrastructure/sdk-test-vector-generator/package.json new file mode 100644 index 0000000000..cf27f4008d --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/package.json @@ -0,0 +1,17 @@ +{ + "name": "sdk-test-vector-generator", + "version": "1.0.0", + "description": "Generator for test vectors for the various zkSync SDKs", + "repository": "https://github.com/matter-labs/zksync", + "author": "Matter Labs team", + "license": "MIT", + "private": false, + "dependencies": { + "ethers": "^5.0.19", + "ts-node": "^9.0.0", + "zksync": "link:../../sdk/zksync.js" + }, + "scripts": { + "generate": "yarn ts-node src/main.ts" + } +} diff --git a/infrastructure/sdk-test-vector-generator/src/main.ts b/infrastructure/sdk-test-vector-generator/src/main.ts new file mode 100644 index 0000000000..fc3bb13f4d --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/src/main.ts @@ -0,0 +1,26 @@ +// Generator for test vectors to be used by the various SDKs +import * as fs from 'fs'; + +import { generateCryptoTestVectors } from './vectors/crypto-vector'; +import { generateTxEncodingVectors } from './vectors/tx-vector'; +import { generateUtilsVectors } from './vectors/utils-vector'; + +export async function generateSDKTestVectors(outputFile: string = 'test-vectors.json') { + const cryptoVectors = await generateCryptoTestVectors(); + const txVectors = await generateTxEncodingVectors(); + const utilsVectors = generateUtilsVectors(); + + const resultTestVector = { + cryptoPrimitivesTest: cryptoVectors, + txTest: txVectors, + utils: utilsVectors + }; + + const testVectorJSON = JSON.stringify(resultTestVector, null, 2); + + fs.writeFileSync(outputFile, testVectorJSON); +} + +(async () => { + await generateSDKTestVectors(); +})(); diff --git a/infrastructure/sdk-test-vector-generator/src/types.ts b/infrastructure/sdk-test-vector-generator/src/types.ts new file mode 100644 index 0000000000..82f7dde045 --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/src/types.ts @@ -0,0 +1,11 @@ +import * as zksync from 'zksync'; + +export interface TestVectorEntry { + inputs: any; + outputs: any; +} + +export interface TestVector<T extends TestVectorEntry> { + description: string; + items: T[]; +} diff --git a/infrastructure/sdk-test-vector-generator/src/utils.ts b/infrastructure/sdk-test-vector-generator/src/utils.ts new file mode 100644 index 0000000000..9a47b165df --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/src/utils.ts @@ -0,0 +1,11 @@ +/** + * Generates a filled data array. + */ +export function generateArray(length: number): Uint8Array { + const data = new Uint8Array(length); + for (let i = 0; i < length; i++) { + data[i] = i % 255; + } + + return data; +} diff --git a/infrastructure/sdk-test-vector-generator/src/vectors/crypto-vector.ts b/infrastructure/sdk-test-vector-generator/src/vectors/crypto-vector.ts new file mode 100644 index 0000000000..920a21d503 --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/src/vectors/crypto-vector.ts @@ -0,0 +1,53 @@ +import { TestVector, TestVectorEntry } from '../types'; +import * as zksync from 'zksync'; +import { utils } from 'ethers'; +import { generateArray } from '../utils'; + +/** + * Interface for the crypto primitives test vector. + */ +export interface CryptoPrimitivesTestEntry extends TestVectorEntry { + inputs: { + // Seed to generate private key. + seed: string; + // Message to be signed. + message: string; + }; + outputs: { + // Private key to be obtained from seed. 
+ privateKey: string; + // Hash of a public key corresponding to the generated private key. + pubKeyHash: string; + // Signature obtained using private key and message. + signature: string; + }; +} + +/** + * Returns the test vector to generate cryptographic primitives. + * All the data fields are represented in a hexadecimal form. + */ +export async function generateCryptoTestVectors(): Promise<TestVector<CryptoPrimitivesTestEntry>> { + const seed = generateArray(32); + const bytesToSign = generateArray(64); + + const privateKey = await zksync.crypto.privateKeyFromSeed(seed); + const { pubKey, signature } = await zksync.crypto.signTransactionBytes(privateKey, bytesToSign); + + const item = { + inputs: { + seed: utils.hexlify(seed), + message: utils.hexlify(bytesToSign) + }, + outputs: { + privateKey: utils.hexlify(privateKey), + pubKeyHash: pubKey, + signature: signature + } + }; + + return { + description: 'Contains the seed for private key and the message for signing', + items: [item] + }; +} diff --git a/infrastructure/sdk-test-vector-generator/src/vectors/tx-vector.ts b/infrastructure/sdk-test-vector-generator/src/vectors/tx-vector.ts new file mode 100644 index 0000000000..a2d6e36a03 --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/src/vectors/tx-vector.ts @@ -0,0 +1,220 @@ +import { utils } from 'ethers'; +import * as ethers from 'ethers'; +import * as zksync from 'zksync'; +import { TestVector, TestVectorEntry } from '../types'; +import { generateArray } from '../utils'; + +/** + * Interface for the transactions test vector. + */ +export interface TxTestEntry extends TestVectorEntry { + inputs: { + // Type of transaction. Valid values are: `Transfer`, `Withdraw`, `ChangePubKey`, `ForcedExit`. + type: string; + // Ethereum private key. zkSync private key should be derived from it. + ethPrivateKey: string; + // Transaction-specific input. + data: any; + // Transaction-specific input to generate Ethereum signature. + // Can be `null` if Ethereum signature is not required for the transaction. + ethSignData: any | null; + }; + outputs: { + // Encoded transaction bytes to be used for signing. + signBytes: string; + // Transaction zkSync signature. + signature: zksync.types.Signature; + // Message to be used to provide Ethereum signature. `null` if `inputs.ethSignData` is `null`. + ethSignMessage: string | null; + // Ethereum signature for a transaction. `null` if `inputs.ethSignData` is `null`. + ethSignature: string | null; + }; +} + +/** + * Returns the test vector containing the transaction input data and the outputs: encoded transaction bytes, + * message for Ethereum signature, and both zkSync and Ethereum signatures. + * All the byte array data fields are represented in a hexadecimal form. 
+ */ +export async function generateTxEncodingVectors(): Promise<TestVector<TxTestEntry>> { + const ethPrivateKey = generateArray(32); + const ethSigner = new ethers.Wallet(ethPrivateKey); + const { signer } = await zksync.Signer.fromETHSignature(ethSigner); + const ethMessageSigner = new zksync.EthMessageSigner(ethSigner, { + verificationMethod: 'ECDSA', + isSignedMsgPrefixed: true + }); + + const transferItem = await getTransferSignVector(ethPrivateKey, signer, ethMessageSigner); + const changePubKeyItem = await getChangePubKeySignVector(ethPrivateKey, signer, ethMessageSigner); + const withdrawItem = await getWithdrawSignVector(ethPrivateKey, signer, ethMessageSigner); + const forcedExitItem = await getForcedExitSignVector(ethPrivateKey, signer); + + const items = [transferItem, changePubKeyItem, withdrawItem, forcedExitItem]; + + return { + description: 'Contains various zkSync transactions as inputs and zkSync and Ethereum signature data as outputs', + items: items + }; +} + +async function getTransferSignVector( + ethPrivateKey: Uint8Array, + signer: zksync.Signer, + ethMessageSigner: zksync.EthMessageSigner +): Promise<TxTestEntry> { + const transferData = { + accountId: 44, + from: '0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045', + to: '0x19aa2ed8712072e918632259780e587698ef58df', + tokenId: 0, + amount: '1000000000000', + fee: '1000000', + nonce: 12 + }; + const transferSignBytes = signer.transferSignBytes(transferData); + const transferSignature = (await signer.signSyncTransfer(transferData)).signature; + const transferEthSignInput = { + stringAmount: '1000000000000', + stringToken: 'ETH', + stringFee: '1000000', + to: transferData.to, + accountId: transferData.accountId, + nonce: transferData.nonce + }; + const transferEthSignMessage = ethMessageSigner.getTransferEthSignMessage(transferEthSignInput); + const transferEthSignature = await ethMessageSigner.ethSignTransfer(transferEthSignInput); + + const transferItem = { + inputs: { + type: 'Transfer', + ethPrivateKey: utils.hexlify(ethPrivateKey), + data: transferData, + ethSignData: transferEthSignInput + }, + outputs: { + signBytes: utils.hexlify(transferSignBytes), + signature: transferSignature, + ethSignMessage: transferEthSignMessage, + ethSignature: transferEthSignature.signature + } + }; + + return transferItem; +} + +async function getChangePubKeySignVector( + ethPrivateKey: Uint8Array, + signer: zksync.Signer, + ethMessageSigner: zksync.EthMessageSigner +): Promise<TxTestEntry> { + const changePubKeyData = { + accountId: 55, + account: '0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045', + newPkHash: await signer.pubKeyHash(), + feeTokenId: 0, + fee: '1000000000', + nonce: 13 + }; + const changePubKeySignBytes = signer.changePubKeySignBytes(changePubKeyData); + const changePubKeySignature = (await signer.signSyncChangePubKey(changePubKeyData)).signature; + const changePubKeyEthSignInput = { + pubKeyHash: changePubKeyData.newPkHash, + accountId: changePubKeyData.accountId, + nonce: changePubKeyData.nonce + }; + const changePubKeyEthSignMessage = ethMessageSigner.getChangePubKeyEthSignMessage(changePubKeyEthSignInput); + const changePubKeyEthSignature = await ethMessageSigner.ethSignChangePubKey(changePubKeyEthSignInput); + + const changePubKeyItem = { + inputs: { + type: 'ChangePubKey', + ethPrivateKey: utils.hexlify(ethPrivateKey), + data: changePubKeyData, + ethSignData: changePubKeyEthSignInput + }, + outputs: { + signBytes: utils.hexlify(changePubKeySignBytes), + signature: changePubKeySignature, + ethSignMessage: changePubKeyEthSignMessage, + ethSignature: 
changePubKeyEthSignature.signature + } + }; + + return changePubKeyItem; +} + +async function getWithdrawSignVector( + ethPrivateKey: Uint8Array, + signer: zksync.Signer, + ethMessageSigner: zksync.EthMessageSigner +): Promise<TxTestEntry> { + const withdrawData = { + accountId: 44, + from: '0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045', + ethAddress: '0x19aa2ed8712072e918632259780e587698ef58df', + tokenId: 0, + amount: '1000000000000', + fee: '1000000', + nonce: 12 + }; + const withdrawSignBytes = signer.withdrawSignBytes(withdrawData); + const withdrawSignature = (await signer.signSyncWithdraw(withdrawData)).signature; + const withdrawEthSignInput = { + stringAmount: '1000000000000', + stringToken: 'ETH', + stringFee: '1000000', + ethAddress: withdrawData.ethAddress, + accountId: withdrawData.accountId, + nonce: withdrawData.nonce + }; + const withdrawEthSignMessage = ethMessageSigner.getWithdrawEthSignMessage(withdrawEthSignInput); + const withdrawEthSignature = await ethMessageSigner.ethSignWithdraw(withdrawEthSignInput); + + const withdrawItem = { + inputs: { + type: 'Withdraw', + ethPrivateKey: utils.hexlify(ethPrivateKey), + data: withdrawData, + ethSignData: withdrawEthSignInput + }, + outputs: { + signBytes: utils.hexlify(withdrawSignBytes), + signature: withdrawSignature, + ethSignMessage: withdrawEthSignMessage, + ethSignature: withdrawEthSignature.signature + } + }; + + return withdrawItem; +} + +async function getForcedExitSignVector(ethPrivateKey: Uint8Array, signer: zksync.Signer): Promise<TxTestEntry> { + const forcedExitData = { + initiatorAccountId: 44, + from: '0xcdb6aaa2607df186f7dd2d8eb4ee60f83720b045', + target: '0x19aa2ed8712072e918632259780e587698ef58df', + tokenId: 0, + fee: '1000000', + nonce: 12 + }; + const forcedExitSignBytes = signer.forcedExitSignBytes(forcedExitData); + const forcedExitSignature = (await signer.signSyncForcedExit(forcedExitData)).signature; + + const forcedExitItem = { + inputs: { + type: 'ForcedExit', + ethPrivateKey: utils.hexlify(ethPrivateKey), + data: forcedExitData, + ethSignData: null + }, + outputs: { + signBytes: utils.hexlify(forcedExitSignBytes), + signature: forcedExitSignature, + ethSignMessage: null, + ethSignature: null + } + }; + + return forcedExitItem; +} diff --git a/infrastructure/sdk-test-vector-generator/src/vectors/utils-vector.ts b/infrastructure/sdk-test-vector-generator/src/vectors/utils-vector.ts new file mode 100644 index 0000000000..533e613d65 --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/src/vectors/utils-vector.ts @@ -0,0 +1,143 @@ +import * as zksync from 'zksync'; +import { TestVector, TestVectorEntry } from '../types'; +import { utils } from 'ethers'; + +/** + * The utilities test vector consists of several independent test vectors. + */ +export interface UtilsVectors { + amountPacking: TestVector<PackingTestEntry>; + feePacking: TestVector<PackingTestEntry>; + tokenFormatting: TestVector<TokenFormattingEntry>; +} + +/** + * Test vector for packability checks. + */ +export interface PackingTestEntry extends TestVectorEntry { + inputs: { + // Value that should be checked to be packable. + value: string; + }; + outputs: { + // Whether the provided amount is packable or not. + packable: boolean; + // Closest packable value. May be the same as `inputs.value` if value is packable. + closestPackable: string; + // Closest packable value packed into a byte array. Represented as a hexadecimal string. + packedValue: string; + }; +} + +/** + * Test vector for token formatting. + * Token formatting must be the same across SDKs, since this algorithm is used in the Ethereum signature messages. 
+ */ +export interface TokenFormattingEntry extends TestVectorEntry { + inputs: { + // Token symbol, e.g. "ETH" + token: string; + // Number of token decimals, e.g. 6 or 18 + decimals: number; + // Amount of the token as a string. + amount: string; + }; + outputs: { + // Formatted string, e.g. `0.001 ETH`. + formatted: string; + }; +} + +export function generateUtilsVectors(): UtilsVectors { + const amountPacking = generateAmountPackingVector(); + const feePacking = generateFeePackingVector(); + const tokenFormatting = generateTokenFormattingVector(); + + return { + amountPacking, + feePacking, + tokenFormatting + }; +} + +function generateFeePackingVector(): TestVector<PackingTestEntry> { + const test_vector = ['0', '1000', '1111', '474732833474', '474732833400', '10000000000000']; + const items = []; + for (let value of test_vector) { + const packable = zksync.utils.isTransactionFeePackable(value); + const closestPackable = zksync.utils.closestPackableTransactionFee(value); + const packed = zksync.utils.packFeeChecked(closestPackable); + items.push({ + inputs: { + value + }, + outputs: { + packable, + closestPackable: closestPackable.toString(), + packedValue: utils.hexlify(packed) + } + }); + } + + const vector = { + description: 'Checks for fee packing', + items + }; + + return vector; +} + +function generateAmountPackingVector(): TestVector<PackingTestEntry> { + const testVector = ['0', '1000', '1111', '474732833474', '474732833400', '10000000000000']; + const items = []; + for (const value of testVector) { + const packable = zksync.utils.isTransactionAmountPackable(value); + const closestPackable = zksync.utils.closestPackableTransactionAmount(value); + const packed = zksync.utils.packAmountChecked(closestPackable); + items.push({ + inputs: { + value + }, + outputs: { + packable, + closestPackable: closestPackable.toString(), + packedValue: utils.hexlify(packed) + } + }); + } + + const vector = { + description: 'Checks for amount packing', + items + }; + + return vector; +} + +function generateTokenFormattingVector(): TestVector<TokenFormattingEntry> { + const testVector = [ + { token: 'NNM', decimals: 0, amount: '1000000000000000100000', formatted: '1000000000000000100000.0 NNM' }, + { token: 'DAI', decimals: 6, amount: '1000000', formatted: '1.0 DAI' }, + { token: 'ZRO', decimals: 11, amount: '0', formatted: '0.0 ZRO' }, + { token: 'ETH', decimals: 18, amount: '1000000000000000100000', formatted: '1000.0000000000001 ETH' } + ]; + + const items = []; + for (const value of testVector) { + items.push({ + inputs: { + token: value.token, + decimals: value.decimals, + amount: value.amount + }, + outputs: { + formatted: value.formatted + } + }); + } + + return { + description: 'Checks for token amount formatting', + items + }; +} diff --git a/infrastructure/sdk-test-vector-generator/tsconfig.json b/infrastructure/sdk-test-vector-generator/tsconfig.json new file mode 100644 index 0000000000..6c8907a860 --- /dev/null +++ b/infrastructure/sdk-test-vector-generator/tsconfig.json @@ -0,0 +1,9 @@ +{ + "compilerOptions": { + "target": "es2019", + "module": "commonjs", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true + } +} diff --git a/infrastructure/zcli/test/commands.test.ts b/infrastructure/zcli/test/commands.test.ts index c788c3581f..2859d50f7a 100644 --- a/infrastructure/zcli/test/commands.test.ts +++ b/infrastructure/zcli/test/commands.test.ts @@ -1,6 +1,7 @@ import { expect, use } from 'chai'; import chaiAsPromised from 'chai-as-promised'; import fs from 'fs'; +import * as path from 'path'; import mock from 'mock-fs'; import type
{ Network, Config } from '../src/types'; import * as ethers from 'ethers'; @@ -10,6 +11,9 @@ import { saveConfig, loadConfig, configLocation, DEFAULT_CONFIG } from '../src/c use(chaiAsPromised); +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); + describe('Fetching Information', () => { let ethDepositor: string; let alice: ethers.Wallet; @@ -21,7 +25,7 @@ describe('Fetching Information', () => { before('make some deposits & transactions', async () => { const ethProvider = new ethers.providers.JsonRpcProvider(); const syncProvider = await zksync.getDefaultProvider('localhost', 'HTTP'); - const ethWallet = ethers.Wallet.fromMnemonic(process.env.TEST_MNEMONIC as string, "m/44'/60'/0'/0/0").connect( + const ethWallet = ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic as string, "m/44'/60'/0'/0/0").connect( ethProvider ); ethDepositor = ethWallet.address; @@ -255,7 +259,7 @@ describe('Config Management', () => { }); describe('Making Transactions', () => { - const rich = ethers.Wallet.fromMnemonic(process.env.TEST_MNEMONIC as string, "m/44'/60'/0'/0/0"); + const rich = ethers.Wallet.fromMnemonic(ethTestConfig.test_mnemonic as string, "m/44'/60'/0'/0/0"); const poor1 = ethers.Wallet.createRandom(); const poor2 = ethers.Wallet.createRandom(); diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index a1db422125..eb2ef74e79 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -1,7 +1,18 @@ import { Command } from 'commander'; import * as utils from './utils'; -const IGNORED = ['target', 'node_modules', 'volumes', 'build', 'dist', '.git']; +const IGNORED = [ + 'target', + 'node_modules', + 'volumes', + 'build', + 'dist', + '.git', + // Below are generated contracts. 
+ 'generated', + 'KeysWithPlonkVerifier.sol', + 'TokenInit.sol' +]; const EXTENSIONS = ['ts', 'md', 'sol']; // If you wonder why this is written so obscurely through find and not through .prettierignore and globs, diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts index 510bfa56fd..a3dd63ce00 100644 --- a/infrastructure/zk/src/run/run.ts +++ b/infrastructure/zk/src/run/run.ts @@ -2,6 +2,7 @@ import { Command } from 'commander'; import * as utils from '../utils'; import { Wallet } from 'ethers'; import fs from 'fs'; +import * as path from 'path'; import * as verifyKeys from './verify-keys'; import * as dataRestore from './data-restore'; @@ -81,11 +82,13 @@ export async function catLogs(exitCode?: number) { } export async function testAccounts() { + const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); + const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); const NUM_TEST_WALLETS = 10; const baseWalletPath = "m/44'/60'/0'/0/"; const walletKeys = []; for (let i = 0; i < NUM_TEST_WALLETS; ++i) { - const ethWallet = Wallet.fromMnemonic(process.env.TEST_MNEMONIC as string, baseWalletPath + i); + const ethWallet = Wallet.fromMnemonic(ethTestConfig.test_mnemonic as string, baseWalletPath + i); walletKeys.push({ address: ethWallet.address, privateKey: ethWallet.privateKey diff --git a/infrastructure/zk/src/test/test.ts b/infrastructure/zk/src/test/test.ts index 7f60412e3a..847137aa63 100644 --- a/infrastructure/zk/src/test/test.ts +++ b/infrastructure/zk/src/test/test.ts @@ -82,7 +82,7 @@ command .option('--no-reset', 'do not reset the database before test starting') .allowUnknownOption() .action(async (cmd: Command, options: string[] | undefined) => { - await db(!cmd.reset, ...(options || [])); + await db(cmd.reset, ...(options || [])); }); command diff --git a/package.json b/package.json index 312a260ff2..43cfb6c757 100644 --- a/package.json +++ b/package.json @@ -9,6 +9,7 @@ "contracts", "infrastructure/analytics", "infrastructure/fee-seller", + "infrastructure/sdk-test-vector-generator", "infrastructure/zcli", "infrastructure/explorer", "infrastructure/zk", diff --git a/sdk/zksync-crypto/Cargo.toml b/sdk/zksync-crypto/Cargo.toml index c0e9141176..fa1766a5d7 100644 --- a/sdk/zksync-crypto/Cargo.toml +++ b/sdk/zksync-crypto/Cargo.toml @@ -30,7 +30,7 @@ sha2 = "0.8" # logging them with `console.error`. This is great for development, but requires # all the `std::fmt` and `std::panicking` infrastructure, so isn't great for # code size when deploying. -console_error_panic_hook = { version = "0.1.1", optional = true } +console_error_panic_hook = { version = "0.1.6", optional = true } # `wee_alloc` is a tiny allocator for wasm that is only ~1K in code size # compared to the default allocator's ~10K. It is slower than the default diff --git a/sdk/zksync-crypto/src/lib.rs b/sdk/zksync-crypto/src/lib.rs index 7db4899da3..e7e769466f 100644 --- a/sdk/zksync-crypto/src/lib.rs +++ b/sdk/zksync-crypto/src/lib.rs @@ -36,7 +36,7 @@ use sha2::{Digest, Sha256}; #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; -#[wasm_bindgen] +#[wasm_bindgen(start)] /// This method initializes params for current thread, otherwise they will be initialized when signing /// first message. 
pub fn zksync_crypto_init() { @@ -46,9 +46,9 @@ pub fn zksync_crypto_init() { } #[wasm_bindgen(js_name = privateKeyFromSeed)] -pub fn private_key_from_seed(seed: &[u8]) -> Vec<u8> { +pub fn private_key_from_seed(seed: &[u8]) -> Result<Vec<u8>, JsValue> { if seed.len() < 32 { - panic!("Seed is too short"); + return Err(JsValue::from_str("Seed is too short")); }; let sha256_bytes = |input: &[u8]| -> Vec<u8> { @@ -66,45 +66,47 @@ pub fn private_key_from_seed(seed: &[u8]) -> Vec<u8> { .read_be(&raw_priv_key[..]) .expect("failed to read raw_priv_key"); if Fs::from_repr(fs_repr).is_ok() { - return raw_priv_key; + return Ok(raw_priv_key); } else { effective_seed = raw_priv_key; } } } -fn read_signing_key(private_key: &[u8]) -> PrivateKey<Engine> { +fn read_signing_key(private_key: &[u8]) -> Result<PrivateKey<Engine>, JsValue> { let mut fs_repr = FsRepr::default(); fs_repr .read_be(private_key) - .expect("couldn't read private key repr"); - PrivateKey::<Engine>(Fs::from_repr(fs_repr).expect("couldn't read private key from repr")) + .map_err(|_| JsValue::from_str("couldn't read private key repr"))?; + Ok(PrivateKey::<Engine>( + Fs::from_repr(fs_repr).expect("couldn't read private key from repr"), + )) } -fn privkey_to_pubkey_internal(private_key: &[u8]) -> PublicKey<Engine> { +fn privkey_to_pubkey_internal(private_key: &[u8]) -> Result<PublicKey<Engine>, JsValue> { let p_g = FixedGenerators::SpendingKeyGenerator; - let sk = read_signing_key(private_key); + let sk = read_signing_key(private_key)?; - JUBJUB_PARAMS.with(|params| PublicKey::from_private(&sk, p_g, params)) + Ok(JUBJUB_PARAMS.with(|params| PublicKey::from_private(&sk, p_g, params))) } #[wasm_bindgen] -pub fn private_key_to_pubkey_hash(private_key: &[u8]) -> Vec<u8> { - pub_key_hash(&privkey_to_pubkey_internal(private_key)) +pub fn private_key_to_pubkey_hash(private_key: &[u8]) -> Result<Vec<u8>, JsValue> { + Ok(pub_key_hash(&privkey_to_pubkey_internal(private_key)?)) } #[wasm_bindgen] -pub fn private_key_to_pubkey(private_key: &[u8]) -> Vec<u8> { +pub fn private_key_to_pubkey(private_key: &[u8]) -> Result<Vec<u8>, JsValue> { let mut pubkey_buf = Vec::with_capacity(PACKED_POINT_SIZE); - let pubkey = privkey_to_pubkey_internal(private_key); + let pubkey = privkey_to_pubkey_internal(private_key)?; pubkey .write(&mut pubkey_buf) .expect("failed to write pubkey to buffer"); - pubkey_buf + Ok(pubkey_buf) } #[wasm_bindgen] @@ -114,11 +116,11 @@ pub fn private_key_to_pubkey(private_key: &[u8]) -> Vec<u8> { /// [0..32] - packed public key of signer. /// [32..64] - packed r point of the signature. /// [64..96] - s point of the signature. 
-pub fn sign_musig(private_key: &[u8], msg: &[u8]) -> Vec<u8> { +pub fn sign_musig(private_key: &[u8], msg: &[u8]) -> Result<Vec<u8>, JsValue> { let mut packed_full_signature = Vec::with_capacity(PACKED_POINT_SIZE + PACKED_SIGNATURE_SIZE); // let p_g = FixedGenerators::SpendingKeyGenerator; - let private_key = read_signing_key(private_key); + let private_key = read_signing_key(private_key)?; { let public_key = @@ -152,5 +154,5 @@ pub fn sign_musig(private_key: &[u8], msg: &[u8]) -> Vec<u8> { "incorrect signature size when signing" ); - packed_full_signature + Ok(packed_full_signature) } diff --git a/sdk/zksync-crypto/src/tests.rs b/sdk/zksync-crypto/src/tests.rs index c88fa7d71d..7c8d0d4cc6 100644 --- a/sdk/zksync-crypto/src/tests.rs +++ b/sdk/zksync-crypto/src/tests.rs @@ -23,7 +23,7 @@ fn gen_private_key_and_its_be_bytes() -> (PrivateKey<Engine>, Vec<u8>) { fn test_private_key_read() { let (zksync_types_pk, serialized_pk) = gen_private_key_and_its_be_bytes(); - let wasm_pk = read_signing_key(&serialized_pk); + let wasm_pk = read_signing_key(&serialized_pk).unwrap(); assert_eq!(ff::to_hex(&wasm_pk.0), ff::to_hex(&zksync_types_pk.0)); } @@ -31,7 +31,7 @@ fn test_private_key_read() { fn test_pubkey_hash() { let (pk, serialized_pk) = gen_private_key_and_its_be_bytes(); - let wasm_pubkey_hash = private_key_to_pubkey_hash(&serialized_pk); + let wasm_pubkey_hash = private_key_to_pubkey_hash(&serialized_pk).unwrap(); let zksync_types_pubkey_hash = PubKeyHash::from_privkey(&pk).data.to_vec(); assert_eq!(wasm_pubkey_hash, zksync_types_pubkey_hash); } @@ -47,7 +47,7 @@ fn test_signature() { for msg_len in &[0, 2, 4, 5, 32, 128] { let msg = random_msg(*msg_len); - let wasm_signature = sign_musig(&serialized_pk, &msg); + let wasm_signature = sign_musig(&serialized_pk, &msg).unwrap(); let wasm_unpacked_signature = TxSignature::deserialize_from_packed_bytes(&wasm_signature) .expect("failed to unpack signature"); diff --git a/sdk/zksync-rs/src/error.rs b/sdk/zksync-rs/src/error.rs index 002ac5c9d8..e4b6fff31d 100644 --- a/sdk/zksync-rs/src/error.rs +++ b/sdk/zksync-rs/src/error.rs @@ -6,7 +6,7 @@ use zksync_eth_signer::error::SignerError; pub enum ClientError { #[error("Network '{0}' is not supported")] NetworkNotSupported(String), - #[error("Unable to decode server response")] + #[error("Unable to decode server response: {0}")] MalformedResponse(String), #[error("RPC error: {0:?}")] RpcError(RpcFailure), diff --git a/sdk/zksync.js/package.json b/sdk/zksync.js/package.json index 4d2d64a286..ebd385354a 100644 --- a/sdk/zksync.js/package.json +++ b/sdk/zksync.js/package.json @@ -1,6 +1,6 @@ { "name": "zksync", - "version": "0.8.1", + "version": "0.8.3", "license": "MIT", "main": "build/index.js", "types": "build/index.d.ts", @@ -8,7 +8,7 @@ "axios": "^0.21.0", "websocket": "^1.0.30", "websocket-as-promised": "^1.1.0", - "zksync-crypto": "^0.4.1" + "zksync-crypto": "^0.4.2" }, "peerDependencies": { "@ethersproject/logger": "^5.0.0", @@ -37,6 +37,7 @@ "tests": "mocha -r ts-node/register tests/**/*.test.ts", "build": "tsc", "watch": "tsc --watch", - "prepublish": "yarn build && rollup -c" + "prepublish": "yarn build && rollup -c", + "generate-test-vectors": "yarn ts-node tests/test-generator.ts" } } diff --git a/sdk/zksync.js/src/eth-message-signer.ts b/sdk/zksync.js/src/eth-message-signer.ts new file mode 100644 index 0000000000..246f294e93 --- /dev/null +++ b/sdk/zksync.js/src/eth-message-signer.ts @@ -0,0 +1,98 @@ +import * as ethers from 'ethers'; +import { TxEthSignature, EthSignerType, PubKeyHash } from './types'; +import { 
getSignedBytesFromMessage, signMessagePersonalAPI, getChangePubkeyMessage } from './utils'; + +/** + * Wrapper around `ethers.Signer` which provides convenient methods to get and sign messages required for zkSync. + */ +export class EthMessageSigner { + constructor(private ethSigner: ethers.Signer, private ethSignerType?: EthSignerType) {} + + async getEthMessageSignature(message: ethers.utils.BytesLike): Promise<TxEthSignature> { + if (this.ethSignerType == null) { + throw new Error('ethSignerType is unknown'); + } + + const signedBytes = getSignedBytesFromMessage(message, !this.ethSignerType.isSignedMsgPrefixed); + + const signature = await signMessagePersonalAPI(this.ethSigner, signedBytes); + + return { + type: this.ethSignerType.verificationMethod === 'ECDSA' ? 'EthereumSignature' : 'EIP1271Signature', + signature + }; + } + + getTransferEthSignMessage(transfer: { + stringAmount: string; + stringToken: string; + stringFee: string; + to: string; + nonce: number; + accountId: number; + }): string { + const humanReadableTxInfo = + `Transfer ${transfer.stringAmount} ${transfer.stringToken}\n` + + `To: ${transfer.to.toLowerCase()}\n` + + `Nonce: ${transfer.nonce}\n` + + `Fee: ${transfer.stringFee} ${transfer.stringToken}\n` + + `Account Id: ${transfer.accountId}`; + + return humanReadableTxInfo; + } + + async ethSignTransfer(transfer: { + stringAmount: string; + stringToken: string; + stringFee: string; + to: string; + nonce: number; + accountId: number; + }): Promise<TxEthSignature> { + const message = this.getTransferEthSignMessage(transfer); + return await this.getEthMessageSignature(message); + } + + getWithdrawEthSignMessage(withdraw: { + stringAmount: string; + stringToken: string; + stringFee: string; + ethAddress: string; + nonce: number; + accountId: number; + }): string { + const humanReadableTxInfo = + `Withdraw ${withdraw.stringAmount} ${withdraw.stringToken}\n` + + `To: ${withdraw.ethAddress.toLowerCase()}\n` + + `Nonce: ${withdraw.nonce}\n` + + `Fee: ${withdraw.stringFee} ${withdraw.stringToken}\n` + + `Account Id: ${withdraw.accountId}`; + + return humanReadableTxInfo; + } + + async ethSignWithdraw(withdraw: { + stringAmount: string; + stringToken: string; + stringFee: string; + ethAddress: string; + nonce: number; + accountId: number; + }): Promise<TxEthSignature> { + const message = this.getWithdrawEthSignMessage(withdraw); + return await this.getEthMessageSignature(message); + } + + getChangePubKeyEthSignMessage(changePubKey: { pubKeyHash: PubKeyHash; nonce: number; accountId: number }): string { + return getChangePubkeyMessage(changePubKey.pubKeyHash, changePubKey.nonce, changePubKey.accountId); + } + + async ethSignChangePubKey(changePubKey: { + pubKeyHash: PubKeyHash; + nonce: number; + accountId: number; + }): Promise<TxEthSignature> { + const message = this.getChangePubKeyEthSignMessage(changePubKey); + return await this.getEthMessageSignature(message); + } +} diff --git a/sdk/zksync.js/src/index.ts b/sdk/zksync.js/src/index.ts index 7c8aaa3d61..f2a67147a2 100644 --- a/sdk/zksync.js/src/index.ts +++ b/sdk/zksync.js/src/index.ts @@ -2,6 +2,7 @@ import { Wallet } from './wallet'; import { Provider, ETHProxy, getDefaultProvider } from './provider'; import { Signer } from './signer'; import { closestPackableTransactionAmount, closestPackableTransactionFee } from './utils'; +import { EthMessageSigner } from './eth-message-signer'; import * as wallet from './wallet'; import * as types from './types'; @@ -13,6 +14,7 @@ export { Signer, Provider, ETHProxy, + EthMessageSigner, closestPackableTransactionFee, 
closestPackableTransactionAmount, getDefaultProvider, diff --git a/sdk/zksync.js/src/provider.ts b/sdk/zksync.js/src/provider.ts index ae33a98a3d..0099f2c523 100644 --- a/sdk/zksync.js/src/provider.ts +++ b/sdk/zksync.js/src/provider.ts @@ -11,14 +11,12 @@ import { TokenAddress, TxEthSignature, Fee, - ChangePubKeyFee + ChangePubKeyFee, + Network } from './types'; import { isTokenETH, sleep, SYNC_GOV_CONTRACT_INTERFACE, SYNC_MAIN_CONTRACT_INTERFACE, TokenSet } from './utils'; -export async function getDefaultProvider( - network: 'localhost' | 'rinkeby' | 'ropsten' | 'mainnet', - transport: 'WS' | 'HTTP' = 'WS' -): Promise { +export async function getDefaultProvider(network: Network, transport: 'WS' | 'HTTP' = 'WS'): Promise { if (network === 'localhost') { if (transport === 'WS') { return await Provider.newWebsocketProvider('ws://127.0.0.1:3031'); diff --git a/sdk/zksync.js/src/signer.ts b/sdk/zksync.js/src/signer.ts index 45804499b7..8dcc2aa4e2 100644 --- a/sdk/zksync.js/src/signer.ts +++ b/sdk/zksync.js/src/signer.ts @@ -15,17 +15,17 @@ import { import { Address, EthSignerType, PubKeyHash, Transfer, Withdraw, ForcedExit, ChangePubKey } from './types'; export class Signer { - readonly privateKey: Uint8Array; + readonly #privateKey: Uint8Array; private constructor(privKey: Uint8Array) { - this.privateKey = privKey; + this.#privateKey = privKey; } async pubKeyHash(): Promise { - return await privateKeyToPubKeyHash(this.privateKey); + return await privateKeyToPubKeyHash(this.#privateKey); } - async signSyncTransfer(transfer: { + transferSignBytes(transfer: { accountId: number; from: Address; to: Address; @@ -33,7 +33,7 @@ export class Signer { amount: BigNumberish; fee: BigNumberish; nonce: number; - }): Promise { + }): Uint8Array { const type = new Uint8Array([5]); // tx type const accountId = serializeAccountId(transfer.accountId); const from = serializeAddress(transfer.from); @@ -44,7 +44,20 @@ export class Signer { const nonce = serializeNonce(transfer.nonce); const msgBytes = ethers.utils.concat([type, accountId, from, to, token, amount, fee, nonce]); - const signature = await signTransactionBytes(this.privateKey, msgBytes); + return msgBytes; + } + + async signSyncTransfer(transfer: { + accountId: number; + from: Address; + to: Address; + tokenId: number; + amount: BigNumberish; + fee: BigNumberish; + nonce: number; + }): Promise { + const msgBytes = this.transferSignBytes(transfer); + const signature = await signTransactionBytes(this.#privateKey, msgBytes); return { type: 'Transfer', @@ -59,7 +72,7 @@ export class Signer { }; } - async signSyncWithdraw(withdraw: { + withdrawSignBytes(withdraw: { accountId: number; from: Address; ethAddress: string; @@ -67,7 +80,7 @@ export class Signer { amount: BigNumberish; fee: BigNumberish; nonce: number; - }): Promise { + }): Uint8Array { const typeBytes = new Uint8Array([3]); const accountId = serializeAccountId(withdraw.accountId); const accountBytes = serializeAddress(withdraw.from); @@ -86,7 +99,21 @@ export class Signer { feeBytes, nonceBytes ]); - const signature = await signTransactionBytes(this.privateKey, msgBytes); + + return msgBytes; + } + + async signSyncWithdraw(withdraw: { + accountId: number; + from: Address; + ethAddress: string; + tokenId: number; + amount: BigNumberish; + fee: BigNumberish; + nonce: number; + }): Promise { + const msgBytes = this.withdrawSignBytes(withdraw); + const signature = await signTransactionBytes(this.#privateKey, msgBytes); return { type: 'Withdraw', @@ -101,13 +128,13 @@ export class Signer { }; } - 
async signSyncForcedExit(forcedExit: { + forcedExitSignBytes(forcedExit: { initiatorAccountId: number; target: Address; tokenId: number; fee: BigNumberish; nonce: number; - }): Promise { + }): Uint8Array { const typeBytes = new Uint8Array([8]); const initiatorAccountIdBytes = serializeAccountId(forcedExit.initiatorAccountId); const targetBytes = serializeAddress(forcedExit.target); @@ -122,7 +149,19 @@ export class Signer { feeBytes, nonceBytes ]); - const signature = await signTransactionBytes(this.privateKey, msgBytes); + + return msgBytes; + } + + async signSyncForcedExit(forcedExit: { + initiatorAccountId: number; + target: Address; + tokenId: number; + fee: BigNumberish; + nonce: number; + }): Promise { + const msgBytes = this.forcedExitSignBytes(forcedExit); + const signature = await signTransactionBytes(this.#privateKey, msgBytes); return { type: 'ForcedExit', initiatorAccountId: forcedExit.initiatorAccountId, @@ -134,14 +173,14 @@ export class Signer { }; } - async signSyncChangePubKey(changePubKey: { + changePubKeySignBytes(changePubKey: { accountId: number; account: Address; newPkHash: PubKeyHash; feeTokenId: number; fee: BigNumberish; nonce: number; - }): Promise { + }): Uint8Array { const typeBytes = new Uint8Array([7]); // Tx type (1 byte) const accountIdBytes = serializeAccountId(changePubKey.accountId); const accountBytes = serializeAddress(changePubKey.account); @@ -158,7 +197,20 @@ export class Signer { feeBytes, nonceBytes ]); - const signature = await signTransactionBytes(this.privateKey, msgBytes); + + return msgBytes; + } + + async signSyncChangePubKey(changePubKey: { + accountId: number; + account: Address; + newPkHash: PubKeyHash; + feeTokenId: number; + fee: BigNumberish; + nonce: number; + }): Promise { + const msgBytes = this.changePubKeySignBytes(changePubKey); + const signature = await signTransactionBytes(this.#privateKey, msgBytes); return { type: 'ChangePubKey', accountId: changePubKey.accountId, diff --git a/sdk/zksync.js/src/types.ts b/sdk/zksync.js/src/types.ts index 950a57f22e..93667600fe 100644 --- a/sdk/zksync.js/src/types.ts +++ b/sdk/zksync.js/src/types.ts @@ -14,6 +14,8 @@ export type TokenAddress = string; export type Nonce = number | 'committed'; +export type Network = 'localhost' | 'rinkeby' | 'ropsten' | 'mainnet'; + export interface AccountState { address: Address; id?: number; diff --git a/sdk/zksync.js/src/wallet.ts b/sdk/zksync.js/src/wallet.ts index 00da3fc6cd..8896b2c7f6 100644 --- a/sdk/zksync.js/src/wallet.ts +++ b/sdk/zksync.js/src/wallet.ts @@ -1,5 +1,6 @@ import { BigNumber, BigNumberish, Contract, ContractTransaction, ethers } from 'ethers'; import { ErrorCode } from '@ethersproject/logger'; +import { EthMessageSigner } from './eth-message-signer'; import { ETHProxy, Provider } from './provider'; import { Signer } from './signer'; import { BatchBuilder } from './batch-builder'; @@ -46,6 +47,7 @@ export class Wallet { private constructor( public ethSigner: ethers.Signer, + public ethMessageSigner: EthMessageSigner, public cachedAddress: Address, public signer?: Signer, public accountId?: number, @@ -72,7 +74,15 @@ export class Wallet { throw new Error('If you passed signer, you must also pass ethSignerType.'); } - const wallet = new Wallet(ethWallet, await ethWallet.getAddress(), signer, accountId, ethSignerType); + const ethMessageSigner = new EthMessageSigner(ethWallet, ethSignerType); + const wallet = new Wallet( + ethWallet, + ethMessageSigner, + await ethWallet.getAddress(), + signer, + accountId, + ethSignerType + ); 
wallet.connect(provider); return wallet; @@ -84,7 +94,15 @@ export class Wallet { accountId?: number, ethSignerType?: EthSignerType ): Promise { - const wallet = new Wallet(ethWallet, await ethWallet.getAddress(), undefined, accountId, ethSignerType); + const ethMessageSigner = new EthMessageSigner(ethWallet, ethSignerType); + const wallet = new Wallet( + ethWallet, + ethMessageSigner, + await ethWallet.getAddress(), + undefined, + accountId, + ethSignerType + ); wallet.connect(provider); return wallet; } @@ -148,14 +166,14 @@ export class Wallet { const stringAmount = this.provider.tokenSet.formatToken(transfer.token, transfer.amount); const stringFee = this.provider.tokenSet.formatToken(transfer.token, transfer.fee); const stringToken = this.provider.tokenSet.resolveTokenSymbol(transfer.token); - const humanReadableTxInfo = - `Transfer ${stringAmount} ${stringToken}\n` + - `To: ${transfer.to.toLowerCase()}\n` + - `Nonce: ${transfer.nonce}\n` + - `Fee: ${stringFee} ${stringToken}\n` + - `Account Id: ${this.accountId}`; - - const txMessageEthSignature = await this.getEthMessageSignature(humanReadableTxInfo); + const txMessageEthSignature = await this.ethMessageSigner.ethSignTransfer({ + stringAmount, + stringFee, + stringToken, + to: transfer.to, + nonce: transfer.nonce, + accountId: this.accountId + }); return { tx: signedTransferTransaction, ethereumSignature: txMessageEthSignature @@ -269,7 +287,7 @@ export class Wallet { } const hash = ethers.utils.keccak256(bytes).slice(2); const message = Uint8Array.from(Buffer.from(hash, 'hex')); - const ethSignature = await this.getEthMessageSignature(message); + const ethSignature = await this.ethMessageSigner.getEthMessageSignature(message); const transactionHashes = await this.provider.submitTxsBatch(batch, ethSignature); return transactionHashes.map((txHash, idx) => new Transaction(batch[idx], txHash, this.provider)); @@ -330,14 +348,14 @@ export class Wallet { const stringAmount = this.provider.tokenSet.formatToken(withdraw.token, withdraw.amount); const stringFee = this.provider.tokenSet.formatToken(withdraw.token, withdraw.fee); const stringToken = this.provider.tokenSet.resolveTokenSymbol(withdraw.token); - const humanReadableTxInfo = - `Withdraw ${stringAmount} ${stringToken}\n` + - `To: ${withdraw.ethAddress.toLowerCase()}\n` + - `Nonce: ${withdraw.nonce}\n` + - `Fee: ${stringFee} ${stringToken}\n` + - `Account Id: ${this.accountId}`; - - const txMessageEthSignature = await this.getEthMessageSignature(humanReadableTxInfo); + const txMessageEthSignature = await this.ethMessageSigner.ethSignWithdraw({ + stringAmount, + stringFee, + stringToken, + ethAddress: withdraw.ethAddress, + nonce: withdraw.nonce, + accountId: this.accountId + }); return { tx: signedWithdrawTransaction, diff --git a/yarn.lock b/yarn.lock index 5732b41710..8ddffd6607 100644 --- a/yarn.lock +++ b/yarn.lock @@ -14571,15 +14571,15 @@ yorkie@^2.0.0: normalize-path "^1.0.0" strip-indent "^2.0.0" -zksync-crypto@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/zksync-crypto/-/zksync-crypto-0.4.1.tgz#1093f2dac2f6c126effa878f25fa44fa48f12246" - integrity sha512-y8GwqIKEbnJjnHKYBffnhvSmkxp43lLOApI2VuZ3kzIni+HFTlSsNDDSJAvXv10GRldMomv2MzTJq/LxZJgKmA== +zksync-crypto@^0.4.2: + version "0.4.2" + resolved "https://registry.yarnpkg.com/zksync-crypto/-/zksync-crypto-0.4.2.tgz#58dd8629ce222f14b3ad83f2c50a68fec6363974" + integrity sha512-PQw0e7t80uP7c5LHonEh4dS4wGpK2TC4uYR/16jcDCaCS2wY/exF37hAOeQiskb0hnU2C6/jqtvBMbsMTzVh7Q== "zksync@link:sdk/zksync.js": - version 
"0.8.1" + version "0.8.3" dependencies: axios "^0.21.0" websocket "^1.0.30" websocket-as-promised "^1.1.0" - zksync-crypto "^0.4.1" + zksync-crypto "^0.4.2"