diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 29ec6b018d..86d8b74b31 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -141,6 +141,9 @@ jobs: - name: integration-rust-sdk run: ci_run zk test i rust-sdk + + - name: integration-withdrawal-helpers + run: ci_run zk test i withdrawal-helpers - name: Show logs run: | diff --git a/Cargo.lock b/Cargo.lock index 6a8995e5f0..bd2416045f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6173,6 +6173,33 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_forced_exit_requests" +version = "1.0.0" +dependencies = [ + "actix-web", + "anyhow", + "async-trait", + "chrono", + "ethabi", + "hex", + "log 0.4.11", + "metrics", + "num", + "tokio 0.2.22", + "vlog", + "web3", + "zksync_api", + "zksync_config", + "zksync_contracts", + "zksync_core", + "zksync_crypto", + "zksync_eth_signer", + "zksync_storage", + "zksync_test_account", + "zksync_types", +] + [[package]] name = "zksync_prometheus_exporter" version = "1.0.0" @@ -6263,6 +6290,7 @@ dependencies = [ "zksync_core", "zksync_crypto", "zksync_eth_sender", + "zksync_forced_exit_requests", "zksync_prometheus_exporter", "zksync_prover", "zksync_storage", diff --git a/Cargo.toml b/Cargo.toml index ea4e1e8408..9ab826e689 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "core/bin/zksync_core", "core/bin/zksync_eth_sender", "core/bin/zksync_witness_generator", + "core/bin/zksync_forced_exit_requests", # Libraries "core/lib/circuit", diff --git a/changelog/core.md b/changelog/core.md index 02be5f4c29..911c612c69 100644 --- a/changelog/core.md +++ b/changelog/core.md @@ -43,6 +43,8 @@ All notable changes to the core components will be documented in this file. - Added a stressing dev fee ticker scenario to the loadtest. - Added a `--sloppy` mode to the `dev-fee-ticker-server` to simulate bad networks with the random delays and fails. 
+- Added `forced_exit_requests` functionality, which allows users to pay for ForcedExits from L1. Note that a few env + variables were added that control the behaviour of the tool. - Possibility to use CREATE2 ChangePubKey and Transfer in a single batch. ### Fixed diff --git a/contracts/contracts/ForcedExit.sol b/contracts/contracts/ForcedExit.sol new file mode 100644 index 0000000000..af62d2e5d9 --- /dev/null +++ b/contracts/contracts/ForcedExit.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.7.0; + +pragma experimental ABIEncoderV2; + +import "./Utils.sol"; +import "./Ownable.sol"; +import "./ReentrancyGuard.sol"; + +contract ForcedExit is Ownable, ReentrancyGuard { + // This is the role of the zkSync server + // that will be able to withdraw the funds + address payable public receiver; + + bool public enabled = true; + + constructor(address _master, address _receiver) Ownable(_master) { + initializeReentrancyGuard(); + + // The master is the default receiver + receiver = payable(_receiver); + } + + event FundsReceived(uint256 _amount); + + function setReceiver(address payable _newReceiver) external { + requireMaster(msg.sender); + + receiver = _newReceiver; + } + + function withdrawPendingFunds(address payable _to) external nonReentrant { + require( + msg.sender == receiver || msg.sender == getMaster(), + "Only the receiver or master can withdraw funds from the smart contract" + ); + + uint256 balance = address(this).balance; + + (bool success, ) = _to.call{value: balance}(""); + require(success, "ETH withdraw failed"); + } + + // We have to use fallback instead of `receive` since the ethabi + // library can't decode the receive function: + // https://github.com/rust-ethereum/ethabi/issues/185 + fallback() external payable { + emit FundsReceived(msg.value); + } +} diff --git a/contracts/contracts/dev-contracts/SelfDestruct.sol b/contracts/contracts/dev-contracts/SelfDestruct.sol new file mode 100644 index 
0000000000..e168be2e78 --- /dev/null +++ b/contracts/contracts/dev-contracts/SelfDestruct.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.7.0; + +pragma experimental ABIEncoderV2; + +contract SelfDestruct { + function destroy(address payable to) external { + selfdestruct(to); + } + + // Need this to send some funds to the contract + receive() external payable {} +} diff --git a/contracts/scripts/deploy.ts b/contracts/scripts/deploy.ts index e6ac82add4..28bc1c4429 100644 --- a/contracts/scripts/deploy.ts +++ b/contracts/scripts/deploy.ts @@ -66,6 +66,10 @@ async function main() { if (args.contract === 'Proxies' || args.contract == null) { await deployer.deployProxiesAndGatekeeper({ gasPrice, nonce: args.nonce }); } + + if (args.contract === 'ForcedExit' || args.contract == null) { + await deployer.deployForcedExit({ gasPrice, nonce: args.nonce }); + } } main() diff --git a/contracts/src.ts/deploy.ts b/contracts/src.ts/deploy.ts index 14eb9f4c61..407b371a32 100644 --- a/contracts/src.ts/deploy.ts +++ b/contracts/src.ts/deploy.ts @@ -16,7 +16,9 @@ import { Verifier, VerifierFactory, ZkSync, - ZkSyncFactory + ZkSyncFactory, + ForcedExit, + ForcedExitFactory } from '../typechain'; export interface Contracts { @@ -25,6 +27,7 @@ export interface Contracts { verifier; proxy; upgradeGatekeeper; + forcedExit; } export interface DeployedAddresses { @@ -36,6 +39,7 @@ export interface DeployedAddresses { ZkSync: string; ZkSyncTarget: string; DeployFactory: string; + ForcedExit: string; } export interface DeployerConfig { @@ -58,17 +62,8 @@ export function readProductionContracts(): Contracts { zkSync: readContractCode('ZkSync'), verifier: readContractCode('Verifier'), proxy: readContractCode('Proxy'), - upgradeGatekeeper: readContractCode('UpgradeGatekeeper') - }; -} - -export function readTestContracts(): Contracts { - return { - governance: readContractCode('GovernanceTest'), - zkSync: readContractCode('ZkSyncTest'), - verifier: 
readContractCode('VerifierTest'), - proxy: readContractCode('Proxy'), - upgradeGatekeeper: readContractCode('UpgradeGatekeeperTest') + upgradeGatekeeper: readContractCode('UpgradeGatekeeper'), + forcedExit: readContractCode('ForcedExit') }; } @@ -81,7 +76,8 @@ export function deployedAddressesFromEnv(): DeployedAddresses { Verifier: process.env.CONTRACTS_VERIFIER_ADDR, VerifierTarget: process.env.CONTRACTS_VERIFIER_TARGET_ADDR, ZkSync: process.env.CONTRACTS_CONTRACT_ADDR, - ZkSyncTarget: process.env.CONTRACTS_CONTRACT_TARGET_ADDR + ZkSyncTarget: process.env.CONTRACTS_CONTRACT_TARGET_ADDR, + ForcedExit: process.env.CONTRACTS_FORCED_EXIT_ADDR }; } @@ -215,6 +211,38 @@ export class Deployer { } } + public async deployForcedExit(ethTxOptions?: ethers.providers.TransactionRequest) { + if (this.verbose) { + console.log('Deploying ForcedExit contract'); + } + + // Choose the this.deployWallet.address as the default receiver if the + // FORCED_EXIT_REQUESTS_SENDER_ACCOUNT_ADDRESS is not present + const receiver = process.env.FORCED_EXIT_REQUESTS_SENDER_ACCOUNT_ADDRESS || this.deployWallet.address; + + const forcedExitContract = await deployContract( + this.deployWallet, + this.contracts.forcedExit, + [this.deployWallet.address, receiver], + { + gasLimit: 6000000, + ...ethTxOptions + } + ); + const zksRec = await forcedExitContract.deployTransaction.wait(); + const zksGasUsed = zksRec.gasUsed; + const gasPrice = forcedExitContract.deployTransaction.gasPrice; + if (this.verbose) { + console.log(`CONTRACTS_FORCED_EXIT_ADDR=${forcedExitContract.address}`); + console.log( + `ForcedExit contract deployed, gasUsed: ${zksGasUsed.toString()}, eth spent: ${formatEther( + zksGasUsed.mul(gasPrice) + )}` + ); + } + this.addresses.ForcedExit = forcedExitContract.address; + } + public async publishSourcesToTesseracts() { console.log('Publishing ABI for UpgradeGatekeeper'); await publishAbiToTesseracts(this.addresses.UpgradeGatekeeper, this.contracts.upgradeGatekeeper); @@ -224,6 +252,8 
@@ export class Deployer { await publishAbiToTesseracts(this.addresses.Verifier, this.contracts.verifier); console.log('Publishing ABI for Governance (proxy)'); await publishAbiToTesseracts(this.addresses.Governance, this.contracts.governance); + console.log('Publishing ABI for ForcedExit'); + await publishAbiToTesseracts(this.addresses.ForcedExit, this.contracts.forcedExit); } public async publishSourcesToEtherscan() { @@ -271,6 +301,9 @@ export class Deployer { ['address'] ) ); + + console.log('Publishing sourcecode for ForcedExit', this.addresses.ForcedExit); + await publishSourceCodeToEtherscan(this.addresses.ForcedExit, 'ForcedExit', ''); } public async deployAll(ethTxOptions?: ethers.providers.TransactionRequest) { @@ -278,6 +311,7 @@ export class Deployer { await this.deployGovernanceTarget(ethTxOptions); await this.deployVerifierTarget(ethTxOptions); await this.deployProxiesAndGatekeeper(ethTxOptions); + await this.deployForcedExit(ethTxOptions); } public governanceContract(signerOrProvider: Signer | providers.Provider): Governance { @@ -295,4 +329,8 @@ export class Deployer { public upgradeGatekeeperContract(signerOrProvider: Signer | providers.Provider): UpgradeGatekeeper { return UpgradeGatekeeperFactory.connect(this.addresses.UpgradeGatekeeper, signerOrProvider); } + + public forcedExitContract(signerOrProvider: Signer | providers.Provider): ForcedExit { + return ForcedExitFactory.connect(this.addresses.ForcedExit, signerOrProvider); + } } diff --git a/contracts/test/unit_tests/forced_exit_test.ts b/contracts/test/unit_tests/forced_exit_test.ts new file mode 100644 index 0000000000..eaaf40ff95 --- /dev/null +++ b/contracts/test/unit_tests/forced_exit_test.ts @@ -0,0 +1,53 @@ +import { expect, use } from 'chai'; +import { solidity } from 'ethereum-waffle'; +import { Signer, utils } from 'ethers'; +import { ForcedExit } from '../../typechain/ForcedExit'; +import { ForcedExitFactory } from '../../typechain/ForcedExitFactory'; + +import * as hardhat from 
'hardhat'; + +const TX_AMOUNT = utils.parseEther('1.0'); + +use(solidity); + +describe('ForcedExit unit tests', function () { + this.timeout(50000); + + let forcedExitContract: ForcedExit; + let wallet1: Signer; + let wallet2: Signer; + let wallet3: Signer; + let wallet4: Signer; + + before(async () => { + [wallet1, wallet2, wallet3, wallet4] = await hardhat.ethers.getSigners(); + + const forcedExitContractFactory = await hardhat.ethers.getContractFactory('ForcedExit'); + const contract = await forcedExitContractFactory.deploy(wallet1.getAddress(), wallet2.getAddress()); + // Connecting the wallet to a potential receiver, who can withdraw the funds + // on the master's behalf + forcedExitContract = ForcedExitFactory.connect(contract.address, wallet2); + }); + + it('Check withdrawing fees', async () => { + // The test checks the ability to withdraw the funds from the contract + // after the user has sent them + + // Code style note: Could not use nested expects because + // changeEtherBalance does not allow it + + // User sends funds to the contract + const transferTxHandle = await wallet3.sendTransaction({ + to: forcedExitContract.address, + value: TX_AMOUNT + }); + // Check that the `FundsReceived` event was emitted + expect(transferTxHandle).to.emit(forcedExitContract, 'FundsReceived').withArgs(TX_AMOUNT); + + // Withdrawing the funds from the contract to the wallet4 + const withdrawTxHandle = await forcedExitContract.withdrawPendingFunds(await wallet4.getAddress()); + + // The pending funds have been received + expect(withdrawTxHandle).to.changeEtherBalance(wallet4, TX_AMOUNT); + }); +}); diff --git a/core/bin/server/Cargo.toml b/core/bin/server/Cargo.toml index 1932ad25e1..c935db67b2 100644 --- a/core/bin/server/Cargo.toml +++ b/core/bin/server/Cargo.toml @@ -15,6 +15,7 @@ zksync_api = { path = "../zksync_api", version = "1.0" } zksync_core = { path = "../zksync_core", version = "1.0" } zksync_witness_generator = { path = "../zksync_witness_generator", version 
= "1.0" } zksync_eth_sender = { path = "../zksync_eth_sender", version = "1.0" } +zksync_forced_exit_requests = { path = "../zksync_forced_exit_requests", version = "1.0" } zksync_prometheus_exporter = { path = "../../lib/prometheus_exporter", version = "1.0" } zksync_config = { path = "../../lib/config", version = "1.0" } diff --git a/core/bin/server/src/main.rs b/core/bin/server/src/main.rs index 3ff380ae8e..e382c3a3b4 100644 --- a/core/bin/server/src/main.rs +++ b/core/bin/server/src/main.rs @@ -4,6 +4,7 @@ use structopt::StructOpt; use zksync_api::run_api; use zksync_core::{genesis_init, run_core, wait_for_tasks}; use zksync_eth_sender::run_eth_sender; +use zksync_forced_exit_requests::run_forced_exit_requests_actors; use zksync_prometheus_exporter::run_prometheus_exporter; use zksync_witness_generator::run_prover_server; @@ -78,9 +79,12 @@ async fn main() -> anyhow::Result<()> { // Run prover server & witness generator. vlog::info!("Starting the Prover server actors"); - let database = zksync_witness_generator::database::Database::new(connection_pool); + let database = zksync_witness_generator::database::Database::new(connection_pool.clone()); run_prover_server(database, stop_signal_sender, ZkSyncConfig::from_env()); + vlog::info!("Starting the ForcedExitRequests actors"); + let forced_exit_requests_task_handle = run_forced_exit_requests_actors(connection_pool, config); + tokio::select! { _ = async { wait_for_tasks(core_task_handles).await } => { // We don't need to do anything here, since Core actors will panic upon future resolving. 
@@ -97,6 +101,9 @@ async fn main() -> anyhow::Result<()> { _ = async { counter_task_handle.unwrap().await } => { panic!("Operation counting actor is not supposed to finish its execution") }, + _ = async { forced_exit_requests_task_handle.await } => { + panic!("ForcedExitRequests actor is not supposed to finish its execution") + }, _ = async { stop_signal_receiver.next().await } => { vlog::warn!("Stop signal received, shutting down"); } diff --git a/core/bin/zksync_api/src/api_server/forced_exit_checker.rs b/core/bin/zksync_api/src/api_server/forced_exit_checker.rs new file mode 100644 index 0000000000..032525a3f7 --- /dev/null +++ b/core/bin/zksync_api/src/api_server/forced_exit_checker.rs @@ -0,0 +1,106 @@ +use crate::api_server::tx_sender::SubmitError; +use zksync_config::ZkSyncConfig; +use zksync_storage::StorageProcessor; +use zksync_types::Address; + +use crate::internal_error; + +use chrono::Utc; + +#[async_trait::async_trait] +pub trait ForcedExitAccountAgeChecker { + async fn check_forced_exit( + &self, + storage: &mut StorageProcessor<'_>, + target_account_address: Address, + ) -> Result; + + async fn validate_forced_exit( + &self, + storage: &mut StorageProcessor<'_>, + target_account_address: Address, + ) -> Result<(), SubmitError>; +} + +#[derive(Clone)] +pub struct ForcedExitChecker { + /// Mimimum age of the account for `ForcedExit` operations to be allowed. 
+ pub forced_exit_minimum_account_age: chrono::Duration, +} + +impl ForcedExitChecker { + pub fn new(config: &ZkSyncConfig) -> Self { + let forced_exit_minimum_account_age = chrono::Duration::seconds( + config.api.common.forced_exit_minimum_account_age_secs as i64, + ); + + Self { + forced_exit_minimum_account_age, + } + } +} + +#[async_trait::async_trait] +impl ForcedExitAccountAgeChecker for ForcedExitChecker { + async fn check_forced_exit( + &self, + storage: &mut StorageProcessor<'_>, + target_account_address: Address, + ) -> Result { + let account_age = storage + .chain() + .operations_ext_schema() + .account_created_on(&target_account_address) + .await + .map_err(|err| internal_error!(err, target_account_address))?; + + match account_age { + Some(age) if Utc::now() - age < self.forced_exit_minimum_account_age => Ok(false), + None => Err(SubmitError::invalid_params("Target account does not exist")), + + Some(..) => Ok(true), + } + } + + async fn validate_forced_exit( + &self, + storage: &mut StorageProcessor<'_>, + target_account_address: Address, + ) -> Result<(), SubmitError> { + let eligible = self + .check_forced_exit(storage, target_account_address) + .await?; + + if eligible { + Ok(()) + } else { + let msg = format!( + "Target account exists less than required minimum amount ({} hours)", + self.forced_exit_minimum_account_age.num_hours() + ); + + Err(SubmitError::InvalidParams(msg)) + } + } +} + +pub struct DummyForcedExitChecker; + +#[async_trait::async_trait] +impl ForcedExitAccountAgeChecker for DummyForcedExitChecker { + async fn check_forced_exit( + &self, + _storage: &mut StorageProcessor<'_>, + _target_account_address: Address, + ) -> Result { + Ok(true) + } + + async fn validate_forced_exit( + &self, + _storage: &mut StorageProcessor<'_>, + _target_account_address: Address, + ) -> Result<(), SubmitError> { + Ok(()) + } +} diff --git a/core/bin/zksync_api/src/api_server/mod.rs b/core/bin/zksync_api/src/api_server/mod.rs index 
4423c907b4..85cd136663 100644 --- a/core/bin/zksync_api/src/api_server/mod.rs +++ b/core/bin/zksync_api/src/api_server/mod.rs @@ -18,6 +18,7 @@ use crate::signature_checker; mod admin_server; mod event_notify; +pub mod forced_exit_checker; mod helpers; mod rest; pub mod rpc_server; diff --git a/core/bin/zksync_api/src/api_server/rest/forced_exit_requests/mod.rs b/core/bin/zksync_api/src/api_server/rest/forced_exit_requests/mod.rs new file mode 100644 index 0000000000..73a3475309 --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/forced_exit_requests/mod.rs @@ -0,0 +1,23 @@ +// External uses +use actix_web::{web, Scope}; + +// Workspace uses +pub use zksync_api_client::rest::v1::{ + Client, ClientError, FastProcessingQuery, IncomingTx, IncomingTxBatch, Pagination, + PaginationQuery, Receipt, TxData, MAX_LIMIT, +}; +use zksync_config::ZkSyncConfig; +use zksync_storage::ConnectionPool; + +// Local uses +use crate::api_server::forced_exit_checker::ForcedExitChecker; +mod v01; + +pub(crate) fn api_scope(connection_pool: ConnectionPool, config: &ZkSyncConfig) -> Scope { + let fe_age_checker = ForcedExitChecker::new(&config); + web::scope("/api/forced_exit_requests").service(v01::api_scope( + connection_pool, + config, + Box::new(fe_age_checker), + )) +} diff --git a/core/bin/zksync_api/src/api_server/rest/forced_exit_requests/v01.rs b/core/bin/zksync_api/src/api_server/rest/forced_exit_requests/v01.rs new file mode 100644 index 0000000000..c38cfd9aa6 --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/forced_exit_requests/v01.rs @@ -0,0 +1,478 @@ +//! Transactions part of API implementation. 
+ +// Built-in uses + +// External uses +use actix_web::{ + web::{self, Json}, + Scope, +}; + +use bigdecimal::{BigDecimal, FromPrimitive}; +use chrono::{Duration, Utc}; +use num::{bigint::ToBigInt, BigUint}; +use std::time::Instant; +use std::{convert::TryInto, ops::Add}; +// Workspace uses +pub use zksync_api_client::rest::forced_exit_requests::{ + ForcedExitRegisterRequest, ForcedExitRequestStatus, +}; +pub use zksync_api_client::rest::v1::{ + FastProcessingQuery, IncomingTx, IncomingTxBatch, Receipt, TxData, +}; + +use zksync_api_client::rest::forced_exit_requests::ConfigInfo; +use zksync_config::ZkSyncConfig; +use zksync_storage::ConnectionPool; +use zksync_types::{ + forced_exit_requests::{ + ForcedExitEligibilityResponse, ForcedExitRequest, ForcedExitRequestId, + SaveForcedExitRequestQuery, + }, + Address, TokenLike, +}; + +// Local uses +use crate::api_server::rest::v1::{Error as ApiError, JsonResult}; + +use crate::api_server::forced_exit_checker::ForcedExitAccountAgeChecker; + +/// Shared data between `/api/forced_exit_requests/v0.1/` endpoints. 
+pub struct ApiForcedExitRequestsData { + pub(crate) connection_pool: ConnectionPool, + pub(crate) forced_exit_checker: Box, + + pub(crate) is_enabled: bool, + pub(crate) max_tokens_per_request: u8, + pub(crate) digits_in_id: u8, + pub(crate) recomended_tx_interval_millisecs: i64, + pub(crate) max_tx_interval_millisecs: i64, + pub(crate) price_per_token: i64, + pub(crate) forced_exit_contract_address: Address, + pub(crate) wait_confirmations: u64, +} + +impl ApiForcedExitRequestsData { + fn new( + connection_pool: ConnectionPool, + config: &ZkSyncConfig, + forced_exit_checker: Box, + ) -> Self { + Self { + connection_pool, + forced_exit_checker, + + is_enabled: config.forced_exit_requests.enabled, + price_per_token: config.forced_exit_requests.price_per_token, + max_tokens_per_request: config.forced_exit_requests.max_tokens_per_request, + recomended_tx_interval_millisecs: config.forced_exit_requests.recomended_tx_interval, + max_tx_interval_millisecs: config.forced_exit_requests.max_tx_interval, + forced_exit_contract_address: config.contracts.forced_exit_addr, + digits_in_id: config.forced_exit_requests.digits_in_id, + wait_confirmations: config.forced_exit_requests.wait_confirmations, + } + } +} + +async fn get_status( + data: web::Data, +) -> JsonResult { + let start = Instant::now(); + + let response = if data.is_enabled { + ForcedExitRequestStatus::Enabled(ConfigInfo { + request_fee: BigUint::from(data.price_per_token as u64), + max_tokens_per_request: data.max_tokens_per_request, + recomended_tx_interval_millis: data.recomended_tx_interval_millisecs, + forced_exit_contract_address: data.forced_exit_contract_address, + wait_confirmations: data.wait_confirmations, + }) + } else { + ForcedExitRequestStatus::Disabled + }; + + metrics::histogram!("api.forced_exit_requests.v01.status", start.elapsed()); + Ok(Json(response)) +} + +pub async fn submit_request( + data: web::Data, + params: web::Json, +) -> JsonResult { + let start = Instant::now(); + + let mut storage 
= data + .connection_pool + .access_storage() + .await + .map_err(warn_err) + .map_err(ApiError::internal)?; + + if params.tokens.len() > data.max_tokens_per_request as usize { + return Err(ApiError::bad_request( + "Maximum number of tokens per ForcedExit request exceeded", + )); + } + + data.forced_exit_checker + .validate_forced_exit(&mut storage, params.target) + .await + .map_err(ApiError::from)?; + + let price_of_one_exit = BigDecimal::from(data.price_per_token); + let price_of_request = price_of_one_exit * BigDecimal::from_usize(params.tokens.len()).unwrap(); + + let user_fee = params.price_in_wei.to_bigint().unwrap(); + let user_fee = BigDecimal::from(user_fee); + + if user_fee != price_of_request { + return Err(ApiError::bad_request( + "The amount should be exactly the price of the supplied withdrawals", + )); + } + + let mut tokens_schema = storage.tokens_schema(); + + for token_id in params.tokens.iter() { + // The result is going nowhere. + // This is simply to make sure that the tokens + // that were supplied do indeed exist + tokens_schema + .get_token(TokenLike::Id(*token_id)) + .await + .map_err(|_| ApiError::bad_request("One of the tokens does no exist"))?; + } + + let mut fe_schema = storage.forced_exit_requests_schema(); + + let created_at = Utc::now(); + let valid_until = created_at.add(Duration::milliseconds(data.max_tx_interval_millisecs)); + + let saved_fe_request = fe_schema + .store_request(SaveForcedExitRequestQuery { + target: params.target, + tokens: params.tokens.clone(), + price_in_wei: params.price_in_wei.clone(), + created_at, + valid_until, + }) + .await + .map_err(|_| ApiError::internal(""))?; + + check_address_space_overflow(saved_fe_request.id, data.digits_in_id); + + metrics::histogram!( + "api.forced_exit_requests.v01.submit_request", + start.elapsed() + ); + Ok(Json(saved_fe_request)) +} + +pub async fn get_request_by_id( + data: web::Data, + web::Path(request_id): web::Path, +) -> JsonResult { + let start = Instant::now(); + + 
let mut storage = data + .connection_pool + .access_storage() + .await + .map_err(warn_err) + .map_err(ApiError::internal)?; + + let mut fe_requests_schema = storage.forced_exit_requests_schema(); + + metrics::histogram!( + "api.forced_exit_requests.v01.get_request_by_id", + start.elapsed() + ); + + let fe_request_from_db = fe_requests_schema + .get_request_by_id(request_id) + .await + .map_err(ApiError::internal)?; + + match fe_request_from_db { + Some(fe_request) => Ok(Json(fe_request)), + None => Err(ApiError::not_found("Request with such id does not exist")), + } +} + +// Checks if the account is eligible for forced_exit in terms of +// existing enough time +pub async fn check_account_eligibility( + data: web::Data, + web::Path(account): web::Path
, +) -> JsonResult { + let mut storage = data + .connection_pool + .access_storage() + .await + .map_err(warn_err) + .map_err(ApiError::internal)?; + + let eligible = data + .forced_exit_checker + .check_forced_exit(&mut storage, account) + .await + .map_err(ApiError::from)?; + + let result = ForcedExitEligibilityResponse { eligible }; + + Ok(Json(result)) +} + +pub fn api_scope( + connection_pool: ConnectionPool, + config: &ZkSyncConfig, + fe_checker: Box, +) -> Scope { + let data = ApiForcedExitRequestsData::new(connection_pool, config, fe_checker); + + // `enabled` endpoint should always be there + let scope = web::scope("v0.1") + .data(data) + .route("status", web::get().to(get_status)); + + if config.forced_exit_requests.enabled { + scope + .route("/submit", web::post().to(submit_request)) + .route("/requests/{id}", web::get().to(get_request_by_id)) + .route( + "/checks/eligibility/{account}", + web::get().to(check_account_eligibility), + ) + } else { + scope + } +} + +#[cfg(test)] +mod tests { + use std::ops::Mul; + use std::str::FromStr; + + use num::BigUint; + + use zksync_api_client::rest::v1::Client; + use zksync_config::ForcedExitRequestsConfig; + use zksync_storage::ConnectionPool; + use zksync_types::{Address, TokenId}; + + use super::*; + use crate::api_server::forced_exit_checker::DummyForcedExitChecker; + use crate::api_server::v1::test_utils::TestServerConfig; + + struct TestServer { + api_server: actix_web::test::TestServer, + #[allow(dead_code)] + pool: ConnectionPool, + } + + impl TestServer { + async fn from_config(cfg: TestServerConfig) -> anyhow::Result<(Client, Self)> { + let pool = cfg.pool.clone(); + + let (api_client, api_server) = + cfg.start_server_with_scope(String::from("api/forced_exit_requests"), move |cfg| { + api_scope( + cfg.pool.clone(), + &cfg.config, + Box::new(DummyForcedExitChecker {}), + ) + }); + + Ok((api_client, Self { api_server, pool })) + } + + async fn stop(self) { + self.api_server.stop().await; + } + } + + fn 
get_test_config_from_forced_exit_requests( + forced_exit_requests: ForcedExitRequestsConfig, + ) -> TestServerConfig { + let config_from_env = ZkSyncConfig::from_env(); + let config = ZkSyncConfig { + forced_exit_requests, + ..config_from_env + }; + + TestServerConfig { + config, + pool: ConnectionPool::new(Some(1)), + } + } + + #[actix_rt::test] + #[cfg_attr( + not(feature = "api_test"), + ignore = "Use `zk test rust-api` command to perform this test" + )] + async fn test_disabled_forced_exit_requests() -> anyhow::Result<()> { + let forced_exit_requests = ForcedExitRequestsConfig::from_env(); + let test_config = get_test_config_from_forced_exit_requests(ForcedExitRequestsConfig { + enabled: false, + ..forced_exit_requests + }); + + let (client, server) = TestServer::from_config(test_config).await?; + + let status = client.get_forced_exit_requests_status().await?; + + assert_eq!(status, ForcedExitRequestStatus::Disabled); + + let register_request = ForcedExitRegisterRequest { + target: Address::from_str("c0f97CC918C9d6fA4E9fc6be61a6a06589D199b2").unwrap(), + tokens: vec![TokenId(0)], + price_in_wei: BigUint::from_str("1212").unwrap(), + }; + + client + .submit_forced_exit_request(register_request) + .await + .expect_err("Forced-exit related requests don't fail when it's disabled"); + + server.stop().await; + Ok(()) + } + + #[actix_rt::test] + #[cfg_attr( + not(feature = "api_test"), + ignore = "Use `zk test rust-api` command to perform this test" + )] + async fn test_forced_exit_requests_get_fee() -> anyhow::Result<()> { + let forced_exit_requests = ForcedExitRequestsConfig::from_env(); + let test_config = get_test_config_from_forced_exit_requests(ForcedExitRequestsConfig { + price_per_token: 1000000000, + ..forced_exit_requests + }); + + let (client, server) = TestServer::from_config(test_config).await?; + + let status = client.get_forced_exit_requests_status().await?; + + match status { + ForcedExitRequestStatus::Enabled(config_info) => { + assert_eq!( + 
config_info.request_fee, + BigUint::from_u32(1000000000).unwrap() + ); + } + ForcedExitRequestStatus::Disabled => { + panic!("ForcedExitRequests feature is not disabled"); + } + } + + server.stop().await; + Ok(()) + } + + #[actix_rt::test] + #[cfg_attr( + not(feature = "api_test"), + ignore = "Use `zk test rust-api` command to perform this test" + )] + async fn test_forced_exit_requests_wrong_tokens_number() -> anyhow::Result<()> { + let forced_exit_requests_config = ForcedExitRequestsConfig::from_env(); + let test_config = get_test_config_from_forced_exit_requests(ForcedExitRequestsConfig { + max_tokens_per_request: 5, + ..forced_exit_requests_config + }); + + let (client, server) = TestServer::from_config(test_config).await?; + + let status = client.get_forced_exit_requests_status().await?; + assert_ne!(status, ForcedExitRequestStatus::Disabled); + + let price_per_token = forced_exit_requests_config.price_per_token; + // 6 tokens: + let tokens: Vec = vec![0, 1, 2, 3, 4, 5]; + let tokens: Vec = tokens.iter().map(|t| TokenId(*t)).collect(); + let price_in_wei = BigUint::from_i64(price_per_token) + .unwrap() + .mul(tokens.len()); + + let register_request = ForcedExitRegisterRequest { + target: Address::from_str("c0f97CC918C9d6fA4E9fc6be61a6a06589D199b2").unwrap(), + tokens, + price_in_wei, + }; + + client + .submit_forced_exit_request(register_request) + .await + .expect_err("Api does not take the limit on the number of tokens into account"); + + server.stop().await; + Ok(()) + } + + #[actix_rt::test] + #[cfg_attr( + not(feature = "api_test"), + ignore = "Use `zk test rust-api` command to perform this test" + )] + async fn test_forced_exit_requests_submit() -> anyhow::Result<()> { + let price_per_token: i64 = 1000000000000000000; + let max_tokens_per_request = 3; + let server_config = get_test_config_from_forced_exit_requests(ForcedExitRequestsConfig { + max_tokens_per_request, + price_per_token, + ..ForcedExitRequestsConfig::from_env() + }); + + let (client, 
server) = TestServer::from_config(server_config).await?; + + let status = client.get_forced_exit_requests_status().await?; + assert!(matches!(status, ForcedExitRequestStatus::Enabled(_))); + + let tokens: Vec = vec![0, 1, 2]; + let tokens: Vec = tokens.iter().map(|t| TokenId(*t)).collect(); + + let price_in_wei = BigUint::from_i64(price_per_token) + .unwrap() + .mul(tokens.len()); + + let target = Address::from_str("c0f97CC918C9d6fA4E9fc6be61a6a06589D199b2").unwrap(); + + let fe_request = ForcedExitRegisterRequest { + target, + tokens: tokens.clone(), + price_in_wei: price_in_wei.clone(), + }; + + let submit_result = client.submit_forced_exit_request(fe_request).await?; + + assert_eq!(submit_result.price_in_wei, price_in_wei); + assert_eq!(submit_result.tokens, tokens); + assert_eq!(submit_result.target, target); + + server.stop().await; + Ok(()) + } +} + +fn warn_err(err: T) -> T { + vlog::warn!("Internal Server Error: '{}';", err); + err +} + +// Checks if the id exceeds half of the address space +// If it exceeds the half at all the alert should be triggerred +// since it it a sign of a possible DoS attack +pub fn check_address_space_overflow(id: i64, digits_in_id: u8) { + let address_space = 10_i64.saturating_pow(digits_in_id as u32); + + let exceeding_rate = id.saturating_sub(address_space / 2); + // Need this for metrics + let exceeding_rate: u64 = exceeding_rate.max(0).try_into().unwrap(); + + metrics::histogram!( + "forced_exit_requests.address_space_overflow", + exceeding_rate + ); +} diff --git a/core/bin/zksync_api/src/api_server/rest/mod.rs b/core/bin/zksync_api/src/api_server/rest/mod.rs index e150ab2e59..fb86a46a26 100644 --- a/core/bin/zksync_api/src/api_server/rest/mod.rs +++ b/core/bin/zksync_api/src/api_server/rest/mod.rs @@ -13,6 +13,7 @@ use crate::{fee_ticker::TickerRequest, signature_checker::VerifySignatureRequest use super::tx_sender::TxSender; use zksync_config::ZkSyncConfig; +mod forced_exit_requests; mod helpers; mod v01; pub mod v02; @@ 
-37,6 +38,9 @@ async fn start_server( v1::api_scope(tx_sender, &api_v01.config) }; + let forced_exit_requests_api_scope = + forced_exit_requests::api_scope(api_v01.connection_pool.clone(), &api_v01.config); + let api_v02_scope = { let tx_sender = TxSender::new( api_v01.connection_pool.clone(), @@ -51,6 +55,7 @@ async fn start_server( .wrap(vlog::actix_middleware()) .service(api_v01.into_scope()) .service(api_v1_scope) + .service(forced_exit_requests_api_scope) .service(api_v02_scope) // Endpoint needed for js isReachable .route( diff --git a/core/bin/zksync_api/src/api_server/rest/v1/error.rs b/core/bin/zksync_api/src/api_server/rest/v1/error.rs index 83fa29c9f9..3379a3da9d 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/error.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/error.rs @@ -35,6 +35,11 @@ impl Error { Self::with_code(StatusCode::NOT_IMPLEMENTED, title) } + /// Creates a new Error with the NOT_FOUND (404) status code. + pub fn not_found(title: impl Display) -> Self { + Self::with_code(StatusCode::NOT_FOUND, title) + } + fn with_code(http_code: StatusCode, title: impl Display) -> Self { Self { http_code, diff --git a/core/bin/zksync_api/src/api_server/rest/v1/mod.rs b/core/bin/zksync_api/src/api_server/rest/v1/mod.rs index 0664c5cacb..1eaeb56b1f 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/mod.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/mod.rs @@ -6,7 +6,7 @@ use actix_web::{ Scope, }; -use Error as ApiError; +pub use Error as ApiError; // Workspace uses pub use zksync_api_client::rest::v1::{ Client, ClientError, Pagination, PaginationQuery, MAX_LIMIT, @@ -22,15 +22,15 @@ pub use self::error::{Error, ErrorBody}; pub(crate) mod accounts; mod blocks; mod config; -mod error; +pub mod error; mod operations; mod search; #[cfg(test)] -mod test_utils; +pub mod test_utils; mod tokens; mod transactions; -type JsonResult = std::result::Result, Error>; +pub type JsonResult = std::result::Result, Error>; pub(crate) fn 
api_scope(tx_sender: TxSender, zk_config: &ZkSyncConfig) -> Scope { web::scope("/api/v1") diff --git a/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs b/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs index 8a33589e2b..7904b40ade 100644 --- a/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs +++ b/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs @@ -71,13 +71,17 @@ pub struct TestTransactions { } impl TestServerConfig { - pub fn start_server(&self, scope_factory: F) -> (Client, actix_web::test::TestServer) + pub fn start_server_with_scope( + &self, + scope: String, + scope_factory: F, + ) -> (Client, actix_web::test::TestServer) where F: Fn(&TestServerConfig) -> Scope + Clone + Send + 'static, { let this = self.clone(); let server = actix_web::test::start(move || { - App::new().service(web::scope("/api/v1").service(scope_factory(&this))) + App::new().service(web::scope(scope.as_ref()).service(scope_factory(&this))) }); let url = server.url("").trim_end_matches('/').to_owned(); @@ -86,6 +90,13 @@ impl TestServerConfig { (client, server) } + pub fn start_server(&self, scope_factory: F) -> (Client, actix_web::test::TestServer) + where + F: Fn(&TestServerConfig) -> Scope + Clone + Send + 'static, + { + self.start_server_with_scope(String::from("/api/v1"), scope_factory) + } + /// Creates several transactions and the corresponding executed operations. 
pub fn gen_zk_txs(fee: u64) -> TestTransactions { Self::gen_zk_txs_for_account(AccountId(0xdead), ZkSyncAccount::rand().address, fee) diff --git a/core/bin/zksync_api/src/api_server/tx_sender.rs b/core/bin/zksync_api/src/api_server/tx_sender.rs index b81e5b0b62..e3eb398303 100644 --- a/core/bin/zksync_api/src/api_server/tx_sender.rs +++ b/core/bin/zksync_api/src/api_server/tx_sender.rs @@ -33,6 +33,7 @@ use zksync_utils::ratio_to_big_decimal; // Local uses use crate::{ + api_server::forced_exit_checker::{ForcedExitAccountAgeChecker, ForcedExitChecker}, api_server::rpc_server::types::TxWithSignature, core_api_client::CoreApiClient, fee_ticker::{ResponseBatchFee, ResponseFee, TickerRequest, TokenPriceRequestType}, @@ -49,9 +50,9 @@ pub struct TxSender { pub pool: ConnectionPool, pub tokens: TokenDBCache, + + pub forced_exit_checker: ForcedExitChecker, pub blocks: BlockDetailsCache, - /// Mimimum age of the account for `ForcedExit` operations to be allowed. - pub forced_exit_minimum_account_age: chrono::Duration, /// List of account IDs that do not have to pay fees for operations. pub fee_free_accounts: HashSet, pub enforce_pubkey_change_fee: bool, @@ -153,23 +154,24 @@ pub enum SubmitError { } impl SubmitError { - fn internal(inner: impl Into) -> Self { + pub fn internal(inner: impl Into) -> Self { Self::Internal(inner.into()) } - fn other(msg: impl Display) -> Self { + pub fn other(msg: impl Display) -> Self { Self::Other(msg.to_string()) } - fn communication_core_server(msg: impl Display) -> Self { + pub fn communication_core_server(msg: impl Display) -> Self { Self::CommunicationCoreServer(msg.to_string()) } - fn invalid_params(msg: impl Display) -> Self { + pub fn invalid_params(msg: impl Display) -> Self { Self::InvalidParams(msg.to_string()) } } +#[macro_export] macro_rules! 
internal_error { ($err:tt, $input:tt) => {{ vlog::warn!("Internal Server error: {}, input: {:?}", $err, $input); @@ -206,10 +208,6 @@ impl TxSender { ticker_request_sender: mpsc::Sender, config: &ZkSyncConfig, ) -> Self { - let forced_exit_minimum_account_age = chrono::Duration::seconds( - config.api.common.forced_exit_minimum_account_age_secs as i64, - ); - let max_number_of_transactions_per_batch = config.api.common.max_number_of_transactions_per_batch as usize; let max_number_of_authors_per_batch = @@ -223,10 +221,10 @@ impl TxSender { sign_verify_requests: sign_verify_request_sender, ticker_requests: ticker_request_sender, tokens: TokenDBCache::new(), + forced_exit_checker: ForcedExitChecker::new(config), + enforce_pubkey_change_fee: config.api.common.enforce_pubkey_change_fee, blocks: BlockDetailsCache::new(config.api.common.caches_size), - enforce_pubkey_change_fee: config.api.common.enforce_pubkey_change_fee, - forced_exit_minimum_account_age, fee_free_accounts: HashSet::from_iter(config.api.common.fee_free_accounts.clone()), max_number_of_transactions_per_batch, max_number_of_authors_per_batch, @@ -720,28 +718,9 @@ impl TxSender { .await .map_err(SubmitError::internal)?; - let target_account_address = forced_exit.target; - - let account_age = storage - .chain() - .operations_ext_schema() - .account_created_on(&target_account_address) + self.forced_exit_checker + .validate_forced_exit(&mut storage, forced_exit.target) .await - .map_err(|err| internal_error!(err, forced_exit))?; - - match account_age { - Some(age) if Utc::now() - age < self.forced_exit_minimum_account_age => { - let msg = format!( - "Target account exists less than required minimum amount ({} hours)", - self.forced_exit_minimum_account_age.num_hours() - ); - - Err(SubmitError::InvalidParams(msg)) - } - None => Err(SubmitError::invalid_params("Target account does not exist")), - - Some(..) => Ok(()), - } } /// Returns a message that user has to sign to send the transaction. 
@@ -828,7 +807,7 @@ impl TxSender { resp.map_err(|err| internal_error!(err)) } - async fn token_allowed_for_fees( + pub async fn token_allowed_for_fees( mut ticker_request_sender: mpsc::Sender, token: TokenLike, ) -> Result { @@ -846,7 +825,7 @@ impl TxSender { .map_err(SubmitError::internal) } - async fn ticker_price_request( + pub async fn ticker_price_request( mut ticker_request_sender: mpsc::Sender, token: TokenLike, req_type: TokenPriceRequestType, diff --git a/core/bin/zksync_core/src/eth_watch/client.rs b/core/bin/zksync_core/src/eth_watch/client.rs index b866e0150c..916b13a984 100644 --- a/core/bin/zksync_core/src/eth_watch/client.rs +++ b/core/bin/zksync_core/src/eth_watch/client.rs @@ -5,7 +5,9 @@ use ethabi::Hash; use std::fmt::Debug; use web3::{ contract::Options, + transports::http, types::{BlockNumber, FilterBuilder, Log}, + Web3, }; use zksync_contracts::zksync_contract; @@ -144,3 +146,9 @@ impl EthClient for EthHttpClient { .map(|res: U256| res.as_u64()) } } + +pub async fn get_web3_block_number(web3: &Web3) -> anyhow::Result { + let block_number = web3.eth().block_number().await?.as_u64(); + + Ok(block_number) +} diff --git a/core/bin/zksync_core/src/eth_watch/mod.rs b/core/bin/zksync_core/src/eth_watch/mod.rs index 781ca2d5f4..50c1a6bd15 100644 --- a/core/bin/zksync_core/src/eth_watch/mod.rs +++ b/core/bin/zksync_core/src/eth_watch/mod.rs @@ -31,7 +31,7 @@ use self::{ received_ops::{sift_outdated_ops, ReceivedPriorityOp}, }; -pub use client::EthHttpClient; +pub use client::{get_web3_block_number, EthHttpClient}; use zksync_config::ZkSyncConfig; use zksync_eth_client::ethereum_gateway::EthereumGateway; @@ -55,7 +55,7 @@ const RATE_LIMIT_DELAY: Duration = Duration::from_secs(30); /// watcher goes into "backoff" mode in which polling is disabled for a /// certain amount of time. #[derive(Debug)] -enum WatcherMode { +pub enum WatcherMode { /// ETHWatcher operates normally. Working, /// Polling is currently disabled. 
diff --git a/core/bin/zksync_forced_exit_requests/Cargo.toml b/core/bin/zksync_forced_exit_requests/Cargo.toml new file mode 100644 index 0000000000..305251d5dc --- /dev/null +++ b/core/bin/zksync_forced_exit_requests/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "zksync_forced_exit_requests" +version = "1.0.0" +edition = "2018" +authors = ["The Matter Labs Team "] +homepage = "https://zksync.io/" +repository = "https://github.com/matter-labs/zksync" +license = "Apache-2.0" +keywords = ["blockchain", "zksync"] +categories = ["cryptography"] +publish = false # We don't want to publish our binaries. + +[dependencies] +zksync_types = { path = "../../lib/types", version = "1.0" } +zksync_storage = { path = "../../lib/storage", version = "1.0" } + +zksync_config = { path = "../../lib/config", version = "1.0" } +zksync_contracts = { path = "../../lib/contracts", version = "1.0" } + +zksync_crypto = { path = "../../lib/crypto", version = "1.0" } +zksync_eth_signer = { path = "../../lib/eth_signer", version = "1.0" } +zksync_test_account = { path = "../../tests/test_account", version = "1.0" } + +vlog = { path = "../../lib/vlog", version = "1.0" } + +zksync_core = { path = "../zksync_core", version = "1.0" } +zksync_api = { path = "../zksync_api", version = "1.0" } +actix-web = "3.0.0" +ethabi = "12.0.0" +web3 = "0.13.0" +log = "0.4" +hex = "0.4" +metrics = "0.13.0-alpha.8" +chrono = { version = "0.4", features = ["serde", "rustc-serialize"] } + +tokio = { version = "0.2", features = ["full"] } +anyhow = "1.0" +async-trait = "0.1" + +num = { version = "0.3.1", features = ["serde"] } diff --git a/core/bin/zksync_forced_exit_requests/src/core_interaction_wrapper.rs b/core/bin/zksync_forced_exit_requests/src/core_interaction_wrapper.rs new file mode 100644 index 0000000000..7399961656 --- /dev/null +++ b/core/bin/zksync_forced_exit_requests/src/core_interaction_wrapper.rs @@ -0,0 +1,200 @@ +use chrono::Utc; +use num::Zero; +use zksync_config::ZkSyncConfig; +use 
zksync_storage::{chain::operations_ext::records::TxReceiptResponse, ConnectionPool}; +use zksync_types::{ + forced_exit_requests::{ForcedExitRequest, ForcedExitRequestId}, + tx::TxHash, + AccountId, Nonce, +}; + +use zksync_api::{ + api_server::forced_exit_checker::{ForcedExitAccountAgeChecker, ForcedExitChecker}, + core_api_client::CoreApiClient, +}; +use zksync_types::SignedZkSyncTx; + +// We could use `db reset` and test the db the same way as in rust_api +// but it seemed to be an overkill here, so it was decided to use +// traits for unit-testing. Also it gives a much broader level of control +// over what's going on +#[async_trait::async_trait] +pub trait CoreInteractionWrapper { + async fn get_nonce(&self, account_id: AccountId) -> anyhow::Result>; + async fn get_unconfirmed_requests(&self) -> anyhow::Result>; + async fn set_fulfilled_at(&self, id: i64) -> anyhow::Result<()>; + async fn set_fulfilled_by( + &self, + id: ForcedExitRequestId, + value: Option>, + ) -> anyhow::Result<()>; + async fn get_request_by_id(&self, id: i64) -> anyhow::Result>; + async fn get_receipt(&self, tx_hash: TxHash) -> anyhow::Result>; + async fn send_and_save_txs_batch( + &self, + request: &ForcedExitRequest, + txs: Vec, + ) -> anyhow::Result>; + async fn get_oldest_unfulfilled_request(&self) -> anyhow::Result>; + async fn delete_old_unfulfilled_requests( + &self, + deleting_threshold: chrono::Duration, + ) -> anyhow::Result<()>; + async fn check_forced_exit_request(&self, request: &ForcedExitRequest) -> anyhow::Result; +} + +#[derive(Clone)] +pub struct MempoolCoreInteractionWrapper { + core_api_client: CoreApiClient, + connection_pool: ConnectionPool, + forced_exit_checker: ForcedExitChecker, +} + +impl MempoolCoreInteractionWrapper { + pub fn new( + config: ZkSyncConfig, + core_api_client: CoreApiClient, + connection_pool: ConnectionPool, + ) -> Self { + let forced_exit_checker = ForcedExitChecker::new(&config); + Self { + core_api_client, + connection_pool, + 
forced_exit_checker, + } + } +} + +#[async_trait::async_trait] +impl CoreInteractionWrapper for MempoolCoreInteractionWrapper { + async fn get_nonce(&self, account_id: AccountId) -> anyhow::Result> { + let mut storage = self.connection_pool.access_storage().await?; + let mut account_schema = storage.chain().account_schema(); + + let sender_state = account_schema + .last_committed_state_for_account(account_id) + .await?; + + Ok(sender_state.map(|state| state.nonce)) + } + + async fn get_unconfirmed_requests(&self) -> anyhow::Result> { + let mut storage = self.connection_pool.access_storage().await?; + let mut forced_exit_requests_schema = storage.forced_exit_requests_schema(); + let requests = forced_exit_requests_schema + .get_unconfirmed_requests() + .await?; + + Ok(requests) + } + + async fn set_fulfilled_at(&self, id: i64) -> anyhow::Result<()> { + let mut storage = self.connection_pool.access_storage().await?; + let mut fe_schema = storage.forced_exit_requests_schema(); + + fe_schema.set_fulfilled_at(id, Utc::now()).await?; + + vlog::info!("ForcedExit request with id {} was fulfilled", id); + + Ok(()) + } + + async fn set_fulfilled_by( + &self, + id: ForcedExitRequestId, + value: Option>, + ) -> anyhow::Result<()> { + let mut storage = self.connection_pool.access_storage().await?; + let mut forced_exit_requests_schema = storage.forced_exit_requests_schema(); + forced_exit_requests_schema + .set_fulfilled_by(id, value) + .await?; + + Ok(()) + } + + async fn get_receipt(&self, tx_hash: TxHash) -> anyhow::Result> { + let mut storage = self.connection_pool.access_storage().await?; + let receipt = storage + .chain() + .operations_ext_schema() + .tx_receipt(tx_hash.as_ref()) + .await?; + + Ok(receipt) + } + + async fn get_request_by_id(&self, id: i64) -> anyhow::Result> { + let mut storage = self.connection_pool.access_storage().await?; + let mut fe_schema = storage.forced_exit_requests_schema(); + + let request = fe_schema.get_request_by_id(id).await?; + Ok(request) 
+ } + + async fn send_and_save_txs_batch( + &self, + request: &ForcedExitRequest, + txs: Vec, + ) -> anyhow::Result> { + let mut storage = self.connection_pool.access_storage().await?; + let mut schema = storage.forced_exit_requests_schema(); + + let hashes: Vec = txs.iter().map(|tx| tx.hash()).collect(); + self.core_api_client.send_txs_batch(txs, vec![]).await??; + + schema + .set_fulfilled_by(request.id, Some(hashes.clone())) + .await?; + + Ok(hashes) + } + + async fn get_oldest_unfulfilled_request(&self) -> anyhow::Result> { + let mut storage = self.connection_pool.access_storage().await?; + let request = storage + .forced_exit_requests_schema() + .get_oldest_unfulfilled_request() + .await?; + + Ok(request) + } + + async fn delete_old_unfulfilled_requests( + &self, + deleting_threshold: chrono::Duration, + ) -> anyhow::Result<()> { + let mut storage = self.connection_pool.access_storage().await?; + storage + .forced_exit_requests_schema() + .delete_old_unfulfilled_requests(deleting_threshold) + .await?; + + Ok(()) + } + + async fn check_forced_exit_request(&self, request: &ForcedExitRequest) -> anyhow::Result { + let mut storage = self.connection_pool.access_storage().await?; + let target = request.target; + let eligible = self + .forced_exit_checker + .check_forced_exit(&mut storage, target) + .await?; + + let mut account_schema = storage.chain().account_schema(); + + let target_state = account_schema.account_state_by_address(target).await?; + let target_nonce = target_state.committed.map(|state| state.1.nonce); + + if let Some(nonce) = target_nonce { + // The forced exit is possible if the account is eligible (existed for long enough) + // and its nonce is zero + let possible = nonce.is_zero() && eligible; + Ok(possible) + } else { + // The account does not exist.
The ForcedExit can not be applied to account + // which does not exist in the network + Ok(false) + } + } +} diff --git a/core/bin/zksync_forced_exit_requests/src/eth_watch.rs b/core/bin/zksync_forced_exit_requests/src/eth_watch.rs new file mode 100644 index 0000000000..4cbc192e25 --- /dev/null +++ b/core/bin/zksync_forced_exit_requests/src/eth_watch.rs @@ -0,0 +1,731 @@ +use chrono::{DateTime, TimeZone, Utc}; +use ethabi::{Address, Hash}; +use std::{ + convert::TryFrom, + ops::Sub, + time::{Duration, Instant}, +}; +use std::{convert::TryInto, fmt::Debug}; +use tokio::task::JoinHandle; +use tokio::time; +use web3::{ + contract::Contract, + transports::Http, + types::{BlockNumber, FilterBuilder, Log}, + Web3, +}; +use zksync_config::ZkSyncConfig; +use zksync_storage::ConnectionPool; + +use zksync_contracts::forced_exit_contract; +use zksync_types::H160; + +use zksync_api::core_api_client::CoreApiClient; +use zksync_core::eth_watch::{get_web3_block_number, WatcherMode}; +use zksync_types::forced_exit_requests::FundsReceivedEvent; + +use super::prepare_forced_exit_sender::prepare_forced_exit_sender_account; +use crate::{ + core_interaction_wrapper::{CoreInteractionWrapper, MempoolCoreInteractionWrapper}, + forced_exit_sender::MempoolForcedExitSender, +}; + +use super::ForcedExitSender; + +/// As `infura` may limit the requests, upon error we need to wait for a while +/// before repeating the request. 
+const RATE_LIMIT_DELAY: Duration = Duration::from_secs(30); + +struct ContractTopics { + pub funds_received: Hash, +} + +impl ContractTopics { + fn new(contract: ðabi::Contract) -> Self { + Self { + funds_received: contract + .event("FundsReceived") + .expect("forced_exit contract abi error") + .signature(), + } + } +} + +#[async_trait::async_trait] +pub trait EthClient { + async fn get_funds_received_events( + &self, + from: u64, + to: u64, + ) -> anyhow::Result>; + async fn block_number(&self) -> anyhow::Result; +} + +pub struct EthHttpClient { + web3: Web3, + forced_exit_contract: Contract, + topics: ContractTopics, +} + +impl EthHttpClient { + pub fn new(web3: Web3, zksync_contract_addr: H160) -> Self { + let forced_exit_contract = + Contract::new(web3.eth(), zksync_contract_addr, forced_exit_contract()); + + let topics = ContractTopics::new(forced_exit_contract.abi()); + Self { + forced_exit_contract, + web3, + topics, + } + } + + async fn get_events(&self, from: u64, to: u64, topics: Vec) -> anyhow::Result> + where + T: TryFrom, + T::Error: Debug, + { + let from = BlockNumber::from(from); + let to = BlockNumber::from(to); + get_contract_events( + &self.web3, + self.forced_exit_contract.address(), + from, + to, + topics, + ) + .await + } +} + +#[async_trait::async_trait] +impl EthClient for EthHttpClient { + async fn get_funds_received_events( + &self, + from: u64, + to: u64, + ) -> anyhow::Result> { + let start = Instant::now(); + let result = self + .get_events(from, to, vec![self.topics.funds_received]) + .await; + + metrics::histogram!( + "forced_exit_requests.get_funds_received_events", + start.elapsed() + ); + result + } + + async fn block_number(&self) -> anyhow::Result { + get_web3_block_number(&self.web3).await + } +} + +struct ForcedExitContractWatcher +where + Sender: ForcedExitSender, + Client: EthClient, + Interactor: CoreInteractionWrapper, +{ + core_interaction_wrapper: Interactor, + config: ZkSyncConfig, + eth_client: Client, + 
last_viewed_block: u64, + forced_exit_sender: Sender, + + mode: WatcherMode, + db_cleanup_interval: chrono::Duration, + last_db_cleanup_time: DateTime, +} + +// Usually blocks are created much slower (at rate 1 block per 10-20s), +// but the block time falls through time, so just to double-check +const MILLIS_PER_BLOCK_LOWER: u64 = 5000; +const MILLIS_PER_BLOCK_UPPER: u64 = 25000; + +// Returns the upper bound of the number of blocks that +// should have been created during the time +fn time_range_to_block_diff(from: DateTime, to: DateTime) -> u64 { + // Timestamps should never be negative + let millis_from: u64 = from.timestamp_millis().try_into().unwrap(); + let millis_to: u64 = to.timestamp_millis().try_into().unwrap(); + + // It does not matter whether to ceil or floor the division + millis_to.saturating_sub(millis_from) / MILLIS_PER_BLOCK_LOWER +} + +// Returns the upper bound of the time that should have +// passed between the block range +fn block_diff_to_time_range(block_from: u64, block_to: u64) -> chrono::Duration { + let block_diff = block_to.saturating_sub(block_from); + + chrono::Duration::milliseconds( + block_diff + .saturating_mul(MILLIS_PER_BLOCK_UPPER) + .try_into() + .unwrap(), + ) +} + +// Lower bound on the time when was the block created +fn lower_bound_block_time(block: u64, current_block: u64) -> DateTime { + let time_diff = block_diff_to_time_range(block, current_block); + + Utc::now().sub(time_diff) +} + +impl ForcedExitContractWatcher +where + Sender: ForcedExitSender, + Client: EthClient, + Interactor: CoreInteractionWrapper, +{ + pub fn new( + core_interaction_wrapper: Interactor, + config: ZkSyncConfig, + eth_client: Client, + forced_exit_sender: Sender, + db_cleanup_interval: chrono::Duration, + ) -> Self { + Self { + core_interaction_wrapper, + config, + eth_client, + forced_exit_sender, + + last_viewed_block: 0, + mode: WatcherMode::Working, + db_cleanup_interval, + // Zero timestamp, has never deleted anything + 
last_db_cleanup_time: Utc.timestamp(0, 0), + } + } + + pub async fn restore_state_from_eth(&mut self, block: u64) -> anyhow::Result<()> { + let oldest_request = self + .core_interaction_wrapper + .get_oldest_unfulfilled_request() + .await?; + let wait_confirmations = self.config.forced_exit_requests.wait_confirmations; + + // No oldest request means that there are no requests that were possibly ignored + let oldest_request = match oldest_request { + Some(r) => r, + None => { + self.last_viewed_block = block - wait_confirmations; + return Ok(()); + } + }; + + let block_diff = time_range_to_block_diff(oldest_request.created_at, Utc::now()); + let max_possible_viewed_block = block - wait_confirmations; + + // If the last block is too young, then we will use max_possible_viewed_block, + // otherwise we will use block - block_diff + self.last_viewed_block = std::cmp::min(block - block_diff, max_possible_viewed_block); + + Ok(()) + } + + fn is_backoff_requested(&self, error: &anyhow::Error) -> bool { + error.to_string().contains("429 Too Many Requests") + } + + fn enter_backoff_mode(&mut self) { + let backoff_until = Instant::now() + RATE_LIMIT_DELAY; + self.mode = WatcherMode::Backoff(backoff_until); + // This is needed to track how much time is spent in backoff mode + // and trigger grafana alerts + metrics::histogram!( + "forced_exit_requests.eth_watcher.enter_backoff_mode", + RATE_LIMIT_DELAY + ); + } + + fn polling_allowed(&mut self) -> bool { + match self.mode { + WatcherMode::Working => true, + WatcherMode::Backoff(delay_until) => { + if Instant::now() >= delay_until { + vlog::info!("Exiting the backoff mode"); + self.mode = WatcherMode::Working; + true + } else { + // We have to wait more until backoff is disabled. + false + } + } + } + } + + fn handle_infura_error(&mut self, error: anyhow::Error) { + if self.is_backoff_requested(&error) { + vlog::warn!( + "Rate limit was reached, as reported by Ethereum node. 
\ + Entering the backoff mode" + ); + self.enter_backoff_mode(); + } else { + // Some unexpected kind of error, we won't shutdown the node because of it, + // but rather expect node administrators to handle the situation. + vlog::error!("Failed to process new blocks {}", error); + } + } + + pub async fn delete_expired(&mut self) -> anyhow::Result<()> { + let expiration_time = chrono::Duration::milliseconds( + self.config + .forced_exit_requests + .expiration_period + .try_into() + .expect("Failed to convert expiration period to i64"), + ); + + self.core_interaction_wrapper + .delete_old_unfulfilled_requests(expiration_time) + .await + } + + pub async fn poll(&mut self) { + if !self.polling_allowed() { + // Polling is currently disabled, skip it. + return; + } + + let last_block = match self.eth_client.block_number().await { + Ok(block) => block, + Err(error) => { + self.handle_infura_error(error); + return; + } + }; + + let wait_confirmations = self.config.forced_exit_requests.wait_confirmations; + let last_confirmed_block = last_block.saturating_sub(wait_confirmations); + if last_confirmed_block <= self.last_viewed_block { + return; + }; + + let events = self + .eth_client + .get_funds_received_events(self.last_viewed_block + 1, last_confirmed_block) + .await; + + let events = match events { + Ok(e) => e, + Err(error) => { + self.handle_infura_error(error); + return; + } + }; + + for e in events { + self.forced_exit_sender + .process_request(e.amount, lower_bound_block_time(e.block_number, last_block)) + .await; + } + + self.last_viewed_block = last_confirmed_block; + + if Utc::now().sub(self.db_cleanup_interval) > self.last_db_cleanup_time { + if let Err(err) = self.delete_expired().await { + // If an error during deletion occurs we should be notified, however + // it is not a reason to panic or revert the updates from the poll + log::warn!( + "An error occured when deleting the expired requests: {}", + err + ); + } else { + self.last_db_cleanup_time =
Utc::now(); + } + } + } + + pub async fn run(mut self) { + // As infura may not be responsive, we want to retry the query until we've actually got the + // block number. + // Normally, however, this loop is not expected to last more than one iteration. + let block = loop { + let block = self.eth_client.block_number().await; + + match block { + Ok(block) => { + break block; + } + Err(error) => { + vlog::warn!( + "Unable to fetch last block number: '{}'. Retrying again in {} seconds", + error, + RATE_LIMIT_DELAY.as_secs() + ); + + time::delay_for(RATE_LIMIT_DELAY).await; + } + } + }; + + // We don't expect rate limiting to happen again + self.restore_state_from_eth(block) + .await + .expect("Failed to restore state for ForcedExit eth_watcher"); + + let mut timer = time::interval(Duration::from_secs(1)); + + loop { + timer.tick().await; + self.poll().await; + } + } +} + +pub fn run_forced_exit_contract_watcher( + core_api_client: CoreApiClient, + connection_pool: ConnectionPool, + config: ZkSyncConfig, +) -> JoinHandle<()> { + let transport = web3::transports::Http::new(&config.eth_client.web3_url[0]).unwrap(); + let web3 = web3::Web3::new(transport); + let eth_client = EthHttpClient::new(web3, config.contracts.forced_exit_addr); + + tokio::spawn(async move { + // We should not proceed if the feature is disabled + if !config.forced_exit_requests.enabled { + infinite_async_loop().await + } + + // It is fine to unwrap here, since without it there is no way we + // can be sure that the forced exit sender will work properly + let id = prepare_forced_exit_sender_account( + connection_pool.clone(), + core_api_client.clone(), + &config, + ) + .await + .unwrap(); + + let core_interaction_wrapper = MempoolCoreInteractionWrapper::new( + config.clone(), + core_api_client, + connection_pool.clone(), + ); + // It is ok to unwrap here, since if forced_exit_sender is not created, then + // the watcher is meaningless + let mut forced_exit_sender = +
MempoolForcedExitSender::new(core_interaction_wrapper.clone(), config.clone(), id); + + // In case there were some transactions which were submitted + // but were not committed we will try to wait until they are committed + forced_exit_sender.await_unconfirmed().await.expect( + "Unexpected error while trying to wait for unconfirmed forced_exit transactions", + ); + + let contract_watcher = ForcedExitContractWatcher::new( + core_interaction_wrapper, + config, + eth_client, + forced_exit_sender, + chrono::Duration::minutes(5), + ); + + contract_watcher.run().await; + }) +} + +pub async fn get_contract_events( + web3: &Web3, + contract_address: Address, + from: BlockNumber, + to: BlockNumber, + topics: Vec, +) -> anyhow::Result> +where + T: TryFrom, + T::Error: Debug, +{ + let filter = FilterBuilder::default() + .address(vec![contract_address]) + .from_block(from) + .to_block(to) + .topics(Some(topics), None, None, None) + .build(); + + web3.eth() + .logs(filter) + .await? + .into_iter() + .filter_map(|event| { + if let Ok(event) = T::try_from(event) { + Some(Ok(event)) + } else { + None + } + }) + .collect() +} + +pub async fn infinite_async_loop() { + // We use a 1 day interval instead of a simple loop to free the execution thread + let mut timer = time::interval(Duration::from_secs(60 * 60 * 24)); + loop { + timer.tick().await; + } +} + +#[cfg(test)] +mod test { + use num::{BigUint, FromPrimitive}; + use std::{str::FromStr, sync::Mutex}; + use zksync_config::ZkSyncConfig; + use zksync_types::{forced_exit_requests::ForcedExitRequest, Address, TokenId}; + + use super::*; + use crate::test::{add_request, MockCoreInteractionWrapper}; + + const TEST_FIRST_CURRENT_BLOCK: u64 = 10000000; + struct MockEthClient { + pub events: Vec, + pub current_block_number: u64, + } + + #[async_trait::async_trait] + impl EthClient for MockEthClient { + async fn get_funds_received_events( + &self, + from: u64, + to: u64, + ) -> anyhow::Result> { + let events = self + .events + .iter() + 
.filter(|&x| x.block_number >= from && x.block_number <= to) + .cloned() + .collect(); + Ok(events) + } + + async fn block_number(&self) -> anyhow::Result { + Ok(self.current_block_number) + } + } + struct DummyForcedExitSender { + pub processed_requests: Mutex)>>, + } + + impl DummyForcedExitSender { + pub fn new() -> Self { + Self { + processed_requests: Mutex::new(vec![]), + } + } + } + + #[async_trait::async_trait] + impl ForcedExitSender for DummyForcedExitSender { + async fn process_request(&self, amount: BigUint, submission_time: DateTime) { + let mut write_lock = self + .processed_requests + .lock() + .expect("Failed to get write lock for processed_requests"); + (*write_lock).push((amount, submission_time)); + } + } + + type TestForcedExitContractWatcher = + ForcedExitContractWatcher; + + fn get_test_forced_exit_contract_watcher() -> TestForcedExitContractWatcher { + let core_interaction_wrapper = MockCoreInteractionWrapper::default(); + let config = ZkSyncConfig::from_env(); + let eth_client = MockEthClient { + events: vec![], + current_block_number: TEST_FIRST_CURRENT_BLOCK, + }; + let forced_exit_sender = DummyForcedExitSender::new(); + + ForcedExitContractWatcher::new( + core_interaction_wrapper, + config, + eth_client, + forced_exit_sender, + chrono::Duration::minutes(5), + ) + } + // Unfortunately, I had to forcefully silence clippy due to + // https://github.com/rust-lang/rust-clippy/issues/6446 + // The mutexes are used only in testing, so it does not undermine unit-testing. 
+ #[allow(clippy::await_holding_lock)] + #[tokio::test] + async fn test_watcher_deleting_old_requests() { + let week = chrono::Duration::weeks(1); + let three_days = chrono::Duration::days(3); + + let mut watcher = get_test_forced_exit_contract_watcher(); + + let old_request = ForcedExitRequest { + id: 1, + target: Address::random(), + tokens: vec![TokenId(0)], + price_in_wei: BigUint::from_i64(12).unwrap(), + valid_until: Utc::now().sub(week), + // Outdated by far + created_at: Utc::now().sub(week).sub(three_days), + fulfilled_at: None, + fulfilled_by: None, + }; + + add_request( + &watcher.core_interaction_wrapper.requests, + old_request.clone(), + ); + + watcher + .restore_state_from_eth(TEST_FIRST_CURRENT_BLOCK) + .await + .expect("Failed to restore state from eth"); + + watcher.poll().await; + + let requests_lock_deleted = watcher.core_interaction_wrapper.requests.lock().unwrap(); + // The old request should have been deleted + assert_eq!(requests_lock_deleted.len(), 0); + // Need to do this to drop mutex + drop(requests_lock_deleted); + + add_request(&watcher.core_interaction_wrapper.requests, old_request); + watcher.poll().await; + + let requests_lock_stored = watcher.core_interaction_wrapper.requests.lock().unwrap(); + // Not enough time has passed. The request should not be deleted + assert_eq!(requests_lock_stored.len(), 1); + } + + #[tokio::test] + async fn test_watcher_restore_state() { + // This test should not depend on the constants or the way + // that the last calculated block works. This test is more of a sanity check: + // that both wait_confirmations and the time of creation of the oldest unfulfilled request + // is taken into account + + let confirmations_time = ZkSyncConfig::from_env() + .forced_exit_requests + .wait_confirmations; + + // Case 1. 
No requests => choose the youngest stable block + let mut watcher = get_test_forced_exit_contract_watcher(); + + watcher + .restore_state_from_eth(TEST_FIRST_CURRENT_BLOCK) + .await + .expect("Failed to restore state from ethereum"); + + assert_eq!( + watcher.last_viewed_block, + TEST_FIRST_CURRENT_BLOCK - confirmations_time + ); + + // Case 2. Very young requests => choose the youngest stable block + let mut watcher = get_test_forced_exit_contract_watcher(); + watcher.core_interaction_wrapper.requests = Mutex::new(vec![ForcedExitRequest { + id: 1, + target: Address::random(), + tokens: vec![TokenId(0)], + price_in_wei: BigUint::from_i64(12).unwrap(), + // does not matter in these tests + valid_until: Utc::now(), + // millisecond ago is quite young + created_at: Utc::now().sub(chrono::Duration::milliseconds(1)), + fulfilled_at: None, + fulfilled_by: None, + }]); + + watcher + .restore_state_from_eth(TEST_FIRST_CURRENT_BLOCK) + .await + .expect("Failed to restore state from ethereum"); + + assert_eq!( + watcher.last_viewed_block, + TEST_FIRST_CURRENT_BLOCK - confirmations_time + ); + + // Case 3. 
Very old requests => choose the old stable block + let mut watcher = get_test_forced_exit_contract_watcher(); + watcher.core_interaction_wrapper.requests = Mutex::new(vec![ForcedExitRequest { + id: 1, + target: Address::random(), + tokens: vec![TokenId(0)], + price_in_wei: BigUint::from_i64(12).unwrap(), + // does not matter in these tests + valid_until: Utc::now(), + // 1 week ago is quite old + created_at: Utc::now().sub(chrono::Duration::weeks(1)), + fulfilled_at: None, + fulfilled_by: None, + }]); + + watcher + .restore_state_from_eth(TEST_FIRST_CURRENT_BLOCK) + .await + .expect("Failed to restore state from ethereum"); + + assert!(watcher.last_viewed_block < TEST_FIRST_CURRENT_BLOCK - confirmations_time); + } + + #[tokio::test] + async fn test_watcher_processing_requests() { + // Here we have to test that events are processed + + let mut watcher = get_test_forced_exit_contract_watcher(); + + let wait_confirmations = 5; + watcher.config.forced_exit_requests.wait_confirmations = wait_confirmations; + + watcher.eth_client.events = vec![ + FundsReceivedEvent { + // Should be processed + amount: BigUint::from_str("1000000001").unwrap(), + block_number: TEST_FIRST_CURRENT_BLOCK - 2 * wait_confirmations, + }, + FundsReceivedEvent { + amount: BigUint::from_str("1000000002").unwrap(), + // Should be processed + block_number: TEST_FIRST_CURRENT_BLOCK - wait_confirmations - 1, + }, + FundsReceivedEvent { + amount: BigUint::from_str("1000000003").unwrap(), + // Should not be processed + block_number: TEST_FIRST_CURRENT_BLOCK - 1, + }, + ]; + + // 100 is just some small block number + watcher + .restore_state_from_eth(100) + .await + .expect("Failed to restore state from eth"); + + // Now it seems like a lot of new blocks have been created + watcher.eth_client.current_block_number = TEST_FIRST_CURRENT_BLOCK; + + watcher.poll().await; + + let processed_requests = watcher + .forced_exit_sender + .processed_requests + .lock() + .unwrap(); + + // The order does not really 
matter, but it is how it works in production + // and it is easier to test this way + assert_eq!(processed_requests.len(), 2); + assert_eq!( + processed_requests[0].0, + BigUint::from_str("1000000001").unwrap() + ); + assert_eq!( + processed_requests[1].0, + BigUint::from_str("1000000002").unwrap() + ); + } +} diff --git a/core/bin/zksync_forced_exit_requests/src/forced_exit_sender.rs b/core/bin/zksync_forced_exit_requests/src/forced_exit_sender.rs new file mode 100644 index 0000000000..b68c9f2111 --- /dev/null +++ b/core/bin/zksync_forced_exit_requests/src/forced_exit_sender.rs @@ -0,0 +1,388 @@ +use std::ops::AddAssign; + +use chrono::{DateTime, Utc}; +use num::BigUint; +use tokio::time; + +use zksync_config::ZkSyncConfig; + +use zksync_types::{ + forced_exit_requests::ForcedExitRequest, tx::TimeRange, tx::TxHash, AccountId, Address, Nonce, + TokenId, ZkSyncTx, +}; + +use zksync_types::ForcedExit; +use zksync_types::SignedZkSyncTx; + +use crate::{core_interaction_wrapper::CoreInteractionWrapper, utils}; + +use super::utils::{Engine, PrivateKey}; +use crate::utils::read_signing_key; + +// We try to process a request 3 times before sending warnings in the console +const PROCESSING_ATTEMPTS: u32 = 3; + +#[async_trait::async_trait] +pub trait ForcedExitSender { + async fn process_request(&self, amount: BigUint, submission_time: DateTime); +} + +pub struct MempoolForcedExitSender { + core_interaction_wrapper: T, + config: ZkSyncConfig, + forced_exit_sender_account_id: AccountId, + sender_private_key: PrivateKey, +} + +#[async_trait::async_trait] +impl ForcedExitSender for MempoolForcedExitSender { + async fn process_request(&self, amount: BigUint, submission_time: DateTime) { + let mut attempts: u32 = 0; + // Typically this should not run any longer than 1 iteration + // In case something bad happens we do not want the server to crash because + // of the forced_exit_requests component + loop { + let processing_attempt = self + .try_process_request(amount.clone(), 
submission_time) + .await; + + if processing_attempt.is_ok() { + return; + } else { + attempts += 1; + } + + if attempts >= PROCESSING_ATTEMPTS { + // We should not get stuck processing requests that possibly could never be processed + break; + } + } + } +} + +impl MempoolForcedExitSender { + pub fn new( + core_interaction_wrapper: T, + config: ZkSyncConfig, + forced_exit_sender_account_id: AccountId, + ) -> Self { + let sender_private_key = hex::decode(&config.forced_exit_requests.sender_private_key[2..]) + .expect("Decoding private key failed"); + let sender_private_key = + read_signing_key(&sender_private_key).expect("Reading private key failed"); + + Self { + core_interaction_wrapper, + forced_exit_sender_account_id, + config, + sender_private_key, + } + } + + pub fn build_forced_exit( + &self, + nonce: Nonce, + target: Address, + token: TokenId, + ) -> SignedZkSyncTx { + let tx = ForcedExit::new_signed( + self.forced_exit_sender_account_id, + target, + token, + BigUint::from(0u32), + nonce, + TimeRange::default(), + &self.sender_private_key, + ) + .expect("Failed to create signed ForcedExit transaction"); + + SignedZkSyncTx { + tx: ZkSyncTx::ForcedExit(Box::new(tx)), + eth_sign_data: None, + } + } + + pub async fn build_transactions( + &self, + // storage: &mut StorageProcessor<'_>, + fe_request: ForcedExitRequest, + ) -> anyhow::Result> { + let mut sender_nonce = self + .core_interaction_wrapper + .get_nonce(self.forced_exit_sender_account_id) + .await? 
+ .expect("Forced Exit sender account does not have nonce"); + + let mut transactions: Vec = vec![]; + + for token in fe_request.tokens.into_iter() { + transactions.push(self.build_forced_exit(sender_nonce, fe_request.target, token)); + sender_nonce.add_assign(1); + } + + Ok(transactions) + } + + // Returns the id of the request if it should be fulfilled, + // error otherwise + pub fn check_request( + &self, + amount: BigUint, + submission_time: DateTime, + request: Option, + ) -> bool { + let request = match request { + Some(r) => r, + None => { + // The request does not exist, we should not process it + return false; + } + }; + + if request.fulfilled_at.is_some() { + // We should not re-process requests that were fulfilled before + return false; + } + + request.valid_until > submission_time && request.price_in_wei == amount + } + + // Awaits until the request is complete + pub async fn await_unconfirmed_request( + &self, + request: &ForcedExitRequest, + ) -> anyhow::Result<()> { + let hashes = request.fulfilled_by.clone(); + + if let Some(hashes) = hashes { + for hash in hashes.into_iter() { + self.wait_until_comitted(hash).await?; + self.core_interaction_wrapper + .set_fulfilled_at(request.id) + .await?; + } + } + Ok(()) + } + + pub async fn await_unconfirmed(&mut self) -> anyhow::Result<()> { + let unfulfilled_requests = self + .core_interaction_wrapper + .get_unconfirmed_requests() + .await?; + + for request in unfulfilled_requests.into_iter() { + let await_result = self.await_unconfirmed_request(&request).await; + + if await_result.is_err() { + // A transaction has failed. That is not intended. + // We can safely cancel such transaction, since we will re-try to + // send it again later + vlog::error!( + "A previously sent forced exit transaction has failed. Canceling the tx."
+ ); + self.core_interaction_wrapper + .set_fulfilled_by(request.id, None) + .await?; + } + } + + Ok(()) + } + + pub async fn wait_until_comitted(&self, tx_hash: TxHash) -> anyhow::Result<()> { + let timeout_millis: u64 = 120000; + let poll_interval_millis: u64 = 200; + let poll_interval = time::Duration::from_millis(poll_interval_millis); + let mut timer = time::interval(poll_interval); + + let mut time_passed: u64 = 0; + + loop { + if time_passed >= timeout_millis { + // If a transaction takes more than 2 minutes to commit we consider the server + // broken and panic + panic!("Committing ForcedExit transaction failed!"); + } + + let receipt = self.core_interaction_wrapper.get_receipt(tx_hash).await?; + + if let Some(tx_receipt) = receipt { + if tx_receipt.success { + return Ok(()); + } else { + return Err(anyhow::Error::msg("ForcedExit transaction failed")); + } + } + + timer.tick().await; + time_passed += poll_interval_millis; + } + } + + pub async fn try_process_request( + &self, + amount: BigUint, + submission_time: DateTime, + ) -> anyhow::Result<()> { + let (id, amount) = utils::extract_id_from_amount( + amount, + self.config.forced_exit_requests.digits_in_id as u32, + ); + + let fe_request = self.core_interaction_wrapper.get_request_by_id(id).await?; + + let fe_request = if self.check_request(amount, submission_time, fe_request.clone()) { + // The self.check_request already checked that the fe_request is Some(_) + fe_request.unwrap() + } else { + // The request was not valid, that's fine + return Ok(()); + }; + + let txs = self.build_transactions(fe_request.clone()).await?; + + // Right before sending the transactions we must check if the request is possible at all + let is_request_possible = self + .core_interaction_wrapper + .check_forced_exit_request(&fe_request) + .await?; + if !is_request_possible { + // If not possible at all, return without sending any transactions + return Ok(()); + } + let hashes = self + .core_interaction_wrapper + 
.send_and_save_txs_batch(&fe_request, txs) + .await?; + + // We wait only for the first transaction to complete since the transactions + // are sent in a batch + self.wait_until_comitted(hashes[0]).await?; + self.core_interaction_wrapper.set_fulfilled_at(id).await?; + + Ok(()) + } +} +#[cfg(test)] +mod test { + use std::{ + ops::{Add, Mul}, + str::FromStr, + }; + + use zksync_config::ForcedExitRequestsConfig; + + use super::*; + use crate::test::{add_request, MockCoreInteractionWrapper}; + + // Just a random number for tests + const TEST_ACCOUNT_FORCED_EXIT_SENDER_ID: u32 = 12; + + fn get_test_forced_exit_sender( + config: Option, + ) -> MempoolForcedExitSender { + let core_interaction_wrapper = MockCoreInteractionWrapper::default(); + + let config = config.unwrap_or_else(ZkSyncConfig::from_env); + + MempoolForcedExitSender::new( + core_interaction_wrapper, + config, + AccountId(TEST_ACCOUNT_FORCED_EXIT_SENDER_ID), + ) + } + + #[tokio::test] + async fn test_forced_exit_sender() { + let day = chrono::Duration::days(1); + + let config = ZkSyncConfig::from_env(); + let forced_exit_requests = ForcedExitRequestsConfig { + // There must be 10 digits in id + digits_in_id: 10, + ..config.forced_exit_requests + }; + let config = ZkSyncConfig { + forced_exit_requests, + ..config + }; + + let forced_exit_sender = get_test_forced_exit_sender(Some(config)); + + add_request( + &forced_exit_sender.core_interaction_wrapper.requests, + ForcedExitRequest { + id: 12, + target: Address::random(), + tokens: vec![TokenId(1)], + price_in_wei: BigUint::from_str("10000000000").unwrap(), + valid_until: Utc::now().add(day), + created_at: Utc::now(), + fulfilled_by: None, + fulfilled_at: None, + }, + ); + + // Not the right amount, because not enough zeroes + forced_exit_sender + .process_request(BigUint::from_str("1000000012").unwrap(), Utc::now()) + .await; + assert_eq!( + forced_exit_sender + .core_interaction_wrapper + .sent_txs + .lock() + .unwrap() + .len(), + 0 + ); + + // Not the 
right amount, because id is not correct + forced_exit_sender + .process_request(BigUint::from_str("10000000001").unwrap(), Utc::now()) + .await; + assert_eq!( + forced_exit_sender + .core_interaction_wrapper + .sent_txs + .lock() + .unwrap() + .len(), + 0 + ); + + // The transaction is correct, but it is expired + forced_exit_sender + .process_request( + BigUint::from_str("10000000001").unwrap(), + Utc::now().add(day.mul(3)), + ) + .await; + + assert_eq!( + forced_exit_sender + .core_interaction_wrapper + .sent_txs + .lock() + .unwrap() + .len(), + 0 + ); + + // The transaction is correct + forced_exit_sender + .process_request(BigUint::from_str("10000000012").unwrap(), Utc::now()) + .await; + + assert_eq!( + forced_exit_sender + .core_interaction_wrapper + .sent_txs + .lock() + .unwrap() + .len(), + 1 + ); + } +} diff --git a/core/bin/zksync_forced_exit_requests/src/lib.rs b/core/bin/zksync_forced_exit_requests/src/lib.rs new file mode 100644 index 0000000000..1588238edc --- /dev/null +++ b/core/bin/zksync_forced_exit_requests/src/lib.rs @@ -0,0 +1,25 @@ +use tokio::task::JoinHandle; +use zksync_config::ZkSyncConfig; +use zksync_storage::ConnectionPool; + +use zksync_api::core_api_client::CoreApiClient; + +use forced_exit_sender::ForcedExitSender; + +mod core_interaction_wrapper; +pub mod eth_watch; +pub mod forced_exit_sender; +pub mod prepare_forced_exit_sender; +mod utils; + +#[cfg(test)] +pub mod test; + +#[must_use] +pub fn run_forced_exit_requests_actors( + pool: ConnectionPool, + config: ZkSyncConfig, +) -> JoinHandle<()> { + let core_api_client = CoreApiClient::new(config.api.private.url.clone()); + eth_watch::run_forced_exit_contract_watcher(core_api_client, pool, config) +} diff --git a/core/bin/zksync_forced_exit_requests/src/prepare_forced_exit_sender.rs b/core/bin/zksync_forced_exit_requests/src/prepare_forced_exit_sender.rs new file mode 100644 index 0000000000..8afa2163ce --- /dev/null +++ 
b/core/bin/zksync_forced_exit_requests/src/prepare_forced_exit_sender.rs @@ -0,0 +1,217 @@ +use num::BigUint; +use std::time::Duration; +use zksync_config::ZkSyncConfig; +use zksync_storage::{ + chain::operations_ext::records::TxReceiptResponse, ConnectionPool, StorageProcessor, +}; + +use zksync_api::core_api_client::CoreApiClient; +use zksync_types::{ + tx::{ChangePubKeyType, TimeRange, TxHash}, + AccountId, Address, PubKeyHash, ZkSyncTx, H256, +}; + +use zksync_types::{Nonce, SignedZkSyncTx, TokenId}; + +use zksync_crypto::franklin_crypto::eddsa::PrivateKey; + +use tokio::time; + +use zksync_test_account::{ZkSyncAccount, ZkSyncETHAccountData}; + +use super::utils::{read_signing_key, Engine}; + +pub async fn prepare_forced_exit_sender_account( + connection_pool: ConnectionPool, + api_client: CoreApiClient, + config: &ZkSyncConfig, +) -> anyhow::Result { + let mut storage = connection_pool + .access_storage() + .await + .expect("forced_exit_requests: Failed to get the connection to storage"); + + let sender_sk = hex::decode(&config.forced_exit_requests.sender_private_key[2..]) + .expect("Failed to decode forced_exit_sender sk"); + let sender_sk = read_signing_key(&sender_sk).expect("Failed to read forced exit sender sk"); + let sender_address = config.forced_exit_requests.sender_account_address; + let sender_eth_private_key = config.forced_exit_requests.sender_eth_private_key; + + let is_sender_prepared = + check_forced_exit_sender_prepared(&mut storage, &sender_sk, sender_address) + .await + .expect("Failed to check if the sender is prepared"); + + if let Some(id) = is_sender_prepared { + return Ok(id); + } + + // The sender is not prepared. This should not ever happen in production, but handling + // such step is vital for testing locally. 
+ + // Waiting until the sender has an id (sending funds to the account should be done by an external script) + let id = wait_for_account_id(&mut storage, sender_address) + .await + .expect("Failed to get account id for forced exit sender"); + + register_signing_key( + &mut storage, + id, + api_client, + sender_address, + sender_eth_private_key, + sender_sk, + ) + .await?; + + Ok(id) +} + +pub async fn check_forced_exit_sender_prepared( + storage: &mut StorageProcessor<'_>, + sender_sk: &PrivateKey, + sender_address: Address, +) -> anyhow::Result> { + let mut accounts_schema = storage.chain().account_schema(); + + let state = accounts_schema + .account_state_by_address(sender_address) + .await? + .committed; + + match state { + Some(account_state) => { + let pk_hash = account_state.1.pub_key_hash; + + let sk_pub_key_hash = PubKeyHash::from_privkey(sender_sk); + + if pk_hash == sk_pub_key_hash { + Ok(Some(account_state.0)) + } else { + Ok(None) + } + } + None => Ok(None), + } +} + +pub async fn wait_for_account_id( + storage: &mut StorageProcessor<'_>, + sender_address: Address, +) -> anyhow::Result { + vlog::info!("Forced exit sender account is not yet prepared. Waiting for account id..."); + + let mut account_schema = storage.chain().account_schema(); + let mut timer = time::interval(Duration::from_secs(1)); + + loop { + let account_id = account_schema.account_id_by_address(sender_address).await?; + + match account_id { + Some(id) => { + vlog::info!("Forced exit sender account has account id = {}", id); + return Ok(id); + } + None => { + timer.tick().await; + } + } + } +} + +async fn get_receipt( + storage: &mut StorageProcessor<'_>, + tx_hash: TxHash, +) -> anyhow::Result> { + storage + .chain() + .operations_ext_schema() + .tx_receipt(tx_hash.as_ref()) + .await +} + +pub async fn wait_for_change_pub_key_tx( + storage: &mut StorageProcessor<'_>, + tx_hash: TxHash, +) -> anyhow::Result<()> { + vlog::info!( + "Forced exit sender account is not yet prepared. 
Waiting for public key to be set..." + ); + + let mut timer = time::interval(Duration::from_secs(1)); + + loop { + let tx_receipt = get_receipt(storage, tx_hash) + .await + .expect("Failed to get the receipt of ChangePubKey transaction"); + + match tx_receipt { + Some(receipt) => { + if receipt.success { + vlog::info!("Public key of the forced exit sender successfully set"); + return Ok(()); + } else { + let fail_reason = receipt + .fail_reason + .unwrap_or_else(|| String::from("unknown")); + panic!( + "Failed to set public key for forced exit sender. Reason: {}", + fail_reason + ); + } + } + None => { + timer.tick().await; + } + } + } +} + +pub async fn register_signing_key( + storage: &mut StorageProcessor<'_>, + sender_id: AccountId, + api_client: CoreApiClient, + sender_address: Address, + sender_eth_private_key: H256, + sender_sk: PrivateKey, +) -> anyhow::Result<()> { + let eth_account_data = ZkSyncETHAccountData::EOA { + eth_private_key: sender_eth_private_key, + }; + + let sender_account = ZkSyncAccount::new( + sender_sk, + // The account is changing public key for the first time, so nonce is 0 + Nonce(0), + sender_address, + eth_account_data, + ); + sender_account.set_account_id(Some(sender_id)); + + let cpk = sender_account.sign_change_pubkey_tx( + Some(Nonce(0)), + true, + TokenId(0), + BigUint::from(0u8), + ChangePubKeyType::ECDSA, + TimeRange::default(), + ); + + let tx = ZkSyncTx::ChangePubKey(Box::new(cpk)); + let tx_hash = tx.hash(); + + api_client + .send_tx(SignedZkSyncTx { + tx, + eth_sign_data: None, + }) + .await + .expect("Failed to send CPK transaction") + .expect("Failed to send"); + + wait_for_change_pub_key_tx(storage, tx_hash) + .await + .expect("Failed to wait for ChangePubKey tx"); + + Ok(()) +} diff --git a/core/bin/zksync_forced_exit_requests/src/test.rs b/core/bin/zksync_forced_exit_requests/src/test.rs new file mode 100644 index 0000000000..cbbd155c4f --- /dev/null +++ b/core/bin/zksync_forced_exit_requests/src/test.rs @@ -0,0 
+1,177 @@ +use std::{ops::Sub, sync::Mutex}; + +use chrono::Utc; +use zksync_storage::chain::operations_ext::records::TxReceiptResponse; +use zksync_types::Nonce; +use zksync_types::{ + forced_exit_requests::{ForcedExitRequest, ForcedExitRequestId}, + tx::TxHash, + AccountId, SignedZkSyncTx, +}; + +use super::core_interaction_wrapper::CoreInteractionWrapper; + +pub struct MockCoreInteractionWrapper { + pub nonce: Nonce, + pub requests: Mutex>, + pub tx_receipt: Option, + pub sent_txs: Mutex>, + // It is easier when keeping track of the deleted txs + pub deleted_requests: Mutex>, +} + +impl Default for MockCoreInteractionWrapper { + fn default() -> Self { + Self { + nonce: Nonce(0), + requests: Mutex::new(vec![]), + tx_receipt: Some(TxReceiptResponse { + // All the values here don't matter except for success = true + tx_hash: String::from("1212"), + block_number: 120, + success: true, + verified: false, + fail_reason: None, + prover_run: None, + }), + sent_txs: Mutex::new(vec![]), + deleted_requests: Mutex::new(vec![]), + } + } +} + +impl MockCoreInteractionWrapper { + fn lock_requests(&self) -> std::sync::MutexGuard<'_, Vec> { + self.requests.lock().expect("Failed to get the write lock") + } + + fn get_request_index_by_id(&self, id: ForcedExitRequestId) -> anyhow::Result { + let lock = self.lock_requests(); + + let index_and_request = (*lock).iter().enumerate().find(|(_, item)| item.id == id); + + let index_option = index_and_request.map(|(index, _)| index); + + index_option.ok_or_else(|| anyhow::Error::msg("Element not found")) + } + + fn lock_sent_txs(&self) -> std::sync::MutexGuard<'_, Vec> { + self.sent_txs.lock().expect("Failed to get the write lock") + } + + fn lock_deleted_requests(&self) -> std::sync::MutexGuard<'_, Vec> { + self.deleted_requests + .lock() + .expect("Failed to allocate deleted requests") + } +} + +#[async_trait::async_trait] +impl CoreInteractionWrapper for MockCoreInteractionWrapper { + async fn get_nonce(&self, _account_id: AccountId) -> 
anyhow::Result> { + Ok(Some(self.nonce)) + } + async fn get_unconfirmed_requests(&self) -> anyhow::Result> { + let requests = self.lock_requests(); + + let unconfirmed_requests = requests + .iter() + .filter(|r| r.fulfilled_at.is_none()) + .cloned() + .collect(); + + Ok(unconfirmed_requests) + } + async fn set_fulfilled_at(&self, id: i64) -> anyhow::Result<()> { + let index = self.get_request_index_by_id(id)?; + let mut requests = self.lock_requests(); + + requests[index].fulfilled_at = Some(Utc::now()); + + Ok(()) + } + async fn set_fulfilled_by( + &self, + id: ForcedExitRequestId, + value: Option>, + ) -> anyhow::Result<()> { + let index = self.get_request_index_by_id(id)?; + let mut requests = self.lock_requests(); + + requests[index].fulfilled_by = value; + + Ok(()) + } + async fn get_request_by_id(&self, id: i64) -> anyhow::Result> { + let index = self.get_request_index_by_id(id); + + match index { + Ok(i) => { + let requests = self.lock_requests(); + Ok(Some(requests[i].clone())) + } + Err(_) => Ok(None), + } + } + + async fn get_receipt(&self, _tx_hash: TxHash) -> anyhow::Result> { + Ok(self.tx_receipt.clone()) + } + + async fn send_and_save_txs_batch( + &self, + request: &ForcedExitRequest, + mut txs: Vec, + ) -> anyhow::Result> { + let hashes: Vec = txs.iter().map(|tx| tx.hash()).collect(); + + self.lock_sent_txs().append(&mut txs); + + self.set_fulfilled_by(request.id, Some(hashes.clone())) + .await?; + + Ok(hashes) + } + + async fn get_oldest_unfulfilled_request(&self) -> anyhow::Result> { + let requests = self.lock_requests(); + let unfulfilled_requests = requests.iter().filter(|r| r.fulfilled_by.is_none()); + let oldest = unfulfilled_requests.min_by_key(|req| req.created_at); + + Ok(oldest.cloned()) + } + + async fn delete_old_unfulfilled_requests( + &self, + deleting_threshold: chrono::Duration, + ) -> anyhow::Result<()> { + let mut requests = self.lock_requests(); + let mut deleted_requests = self.lock_deleted_requests(); + + let oldest_allowed = 
Utc::now().sub(deleting_threshold); + let (mut to_delete, mut to_remain): (Vec<_>, Vec<_>) = requests + .iter() + .cloned() + .partition(|req| req.valid_until < oldest_allowed); + + requests.clear(); + requests.append(&mut to_remain); + + deleted_requests.append(&mut to_delete); + Ok(()) + } + + async fn check_forced_exit_request( + &self, + _request: &ForcedExitRequest, + ) -> anyhow::Result { + // For tests it is better to just return true all the time + Ok(true) + } +} + +pub fn add_request(requests: &Mutex>, new_request: ForcedExitRequest) { + let mut lock = requests.lock().unwrap(); + + lock.push(new_request); +} diff --git a/core/bin/zksync_forced_exit_requests/src/utils.rs b/core/bin/zksync_forced_exit_requests/src/utils.rs new file mode 100644 index 0000000000..3601defcbf --- /dev/null +++ b/core/bin/zksync_forced_exit_requests/src/utils.rs @@ -0,0 +1,85 @@ +use std::{convert::TryInto, ops::Sub}; + +use num::BigUint; +use num::FromPrimitive; +use zksync_crypto::ff::PrimeField; +pub use zksync_crypto::franklin_crypto::{eddsa::PrivateKey, jubjub::JubjubEngine}; + +pub use zksync_crypto::franklin_crypto::{ + alt_babyjubjub::fs::FsRepr, + bellman::{pairing::bn256, PrimeFieldRepr}, +}; + +pub type Engine = bn256::Bn256; + +pub type Fs = ::Fs; + +pub fn read_signing_key(private_key: &[u8]) -> anyhow::Result> { + let mut fs_repr = FsRepr::default(); + fs_repr.read_be(private_key)?; + Ok(PrivateKey::( + Fs::from_repr(fs_repr).expect("couldn't read private key from repr"), + )) +} + +pub fn extract_id_from_amount(amount: BigUint, digits_in_id: u32) -> (i64, BigUint) { + let id_space_size: i64 = 10_i64.pow(digits_in_id); + + let id_space_size = BigUint::from_i64(id_space_size).unwrap(); + + // Taking to the power of 1 and finding mod + // is the only way to find mod of BigUint + let one = BigUint::from_u8(1u8).unwrap(); + let id = amount.modpow(&one, &id_space_size); + + // After extracting the id we need to delete it + // to make sure that amount is the same as in 
the db + let amount = amount.sub(&id); + + (id.try_into().unwrap(), amount) +} + +#[cfg(test)] +mod test { + use std::ops::Add; + use std::str::FromStr; + + use num::Zero; + + use super::*; + + fn test_extraction_for_id_amount( + amount: BigUint, + digits_in_id: u32, + expected_id: i64, + expected_amount: BigUint, + ) { + let (id, remain_amount) = extract_id_from_amount(amount, digits_in_id); + + assert_eq!(id, expected_id); + assert_eq!(remain_amount, expected_amount); + } + + #[test] + fn test_extract_id_from_amount() { + // Basic extraction + test_extraction_for_id_amount( + BigUint::from_str("12211").unwrap(), + 3, + 211, + BigUint::from_str("12000").unwrap(), + ); + + // Note that there are not enough digits in the sent amount + // Thus the amount should be equal to id + test_extraction_for_id_amount(BigUint::from_str("11").unwrap(), 3, 11, BigUint::zero()); + + // Here we test with some really large number, which could not possible + // fit into 2^64 + let ten = BigUint::from_str("10").unwrap(); + let id: u32 = 211; + let expected_amount = ten.pow(100); + let amount = expected_amount.clone().add(id); + test_extraction_for_id_amount(amount, 3, id.try_into().unwrap(), expected_amount); + } +} diff --git a/core/lib/api_client/src/rest/forced_exit_requests/mod.rs b/core/lib/api_client/src/rest/forced_exit_requests/mod.rs new file mode 100644 index 0000000000..eb38fb4c3f --- /dev/null +++ b/core/lib/api_client/src/rest/forced_exit_requests/mod.rs @@ -0,0 +1,65 @@ +//! Blocks part of API implementation. + +// Built-in uses + +// External uses +use serde::{Deserialize, Serialize}; + +// Workspace uses +use zksync_types::{forced_exit_requests::ForcedExitRequest, Address, TokenId}; +use zksync_utils::BigUintSerdeAsRadix10Str; + +use num::BigUint; + +// Local uses +use crate::rest::v1::Client; +use crate::rest::v1::ClientResult; + +// Data transfer objects. 
+#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[serde(rename_all = "camelCase")] +pub struct ConfigInfo { + #[serde(with = "BigUintSerdeAsRadix10Str")] + pub request_fee: BigUint, + pub max_tokens_per_request: u8, + pub recomended_tx_interval_millis: i64, + pub forced_exit_contract_address: Address, + pub wait_confirmations: u64, +} + +#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[serde(tag = "status", rename_all = "camelCase")] +pub enum ForcedExitRequestStatus { + Enabled(ConfigInfo), + Disabled, +} + +#[derive(Deserialize, Serialize)] +pub struct ForcedExitRegisterRequest { + pub target: Address, + pub tokens: Vec, + // Even though the price is constant, we still need to specify it, + // since the price might change (with config) + #[serde(with = "BigUintSerdeAsRadix10Str")] + pub price_in_wei: BigUint, +} + +const FORCED_EXIT_REQUESTS_SCOPE: &str = "/api/forced_exit_requests/v0.1/"; + +impl Client { + pub async fn get_forced_exit_requests_status(&self) -> ClientResult { + self.get_with_scope(FORCED_EXIT_REQUESTS_SCOPE, "status") + .send() + .await + } + + pub async fn submit_forced_exit_request( + &self, + regiter_request: ForcedExitRegisterRequest, + ) -> ClientResult { + self.post_with_scope(FORCED_EXIT_REQUESTS_SCOPE, "submit") + .body(®iter_request) + .send() + .await + } +} diff --git a/core/lib/api_client/src/rest/mod.rs b/core/lib/api_client/src/rest/mod.rs index a3a6d96c3f..cf50baa86b 100644 --- a/core/lib/api_client/src/rest/mod.rs +++ b/core/lib/api_client/src/rest/mod.rs @@ -1 +1,2 @@ +pub mod forced_exit_requests; pub mod v1; diff --git a/core/lib/api_client/src/rest/v1/client.rs b/core/lib/api_client/src/rest/v1/client.rs index e6c30bbb37..733934ef81 100644 --- a/core/lib/api_client/src/rest/v1/client.rs +++ b/core/lib/api_client/src/rest/v1/client.rs @@ -52,6 +52,8 @@ pub struct Client { url: String, } +const API_V1_SCOPE: &str = "/api/v1/"; + impl Client { /// Creates a new REST API client with the specified Url. 
pub fn new(url: String) -> Self { @@ -61,13 +63,21 @@ impl Client { } } - fn endpoint(&self, method: &str) -> String { - [&self.url, "/api/v1/", method].concat() + fn endpoint(&self, scope: &str, method: &str) -> String { + [&self.url, scope, method].concat() } /// Constructs GET request for the specified method. pub(crate) fn get(&self, method: impl AsRef) -> ClientRequestBuilder { - let url = self.endpoint(method.as_ref()); + self.get_with_scope(API_V1_SCOPE, method) + } + + pub(crate) fn get_with_scope( + &self, + scope: impl AsRef, + method: impl AsRef, + ) -> ClientRequestBuilder { + let url = self.endpoint(scope.as_ref(), method.as_ref()); ClientRequestBuilder { inner: self.inner.get(&url), url, @@ -76,7 +86,15 @@ impl Client { /// Constructs POST request for the specified method. pub(crate) fn post(&self, method: impl AsRef) -> ClientRequestBuilder { - let url = self.endpoint(method.as_ref()); + self.post_with_scope(API_V1_SCOPE, method) + } + + pub(crate) fn post_with_scope( + &self, + scope: impl AsRef, + method: impl AsRef, + ) -> ClientRequestBuilder { + let url = self.endpoint(scope.as_ref(), method.as_ref()); ClientRequestBuilder { inner: self.inner.post(&url), url, diff --git a/core/lib/api_client/src/rest/v1/mod.rs b/core/lib/api_client/src/rest/v1/mod.rs index e08ad9ec4e..eeb6b01ebf 100644 --- a/core/lib/api_client/src/rest/v1/mod.rs +++ b/core/lib/api_client/src/rest/v1/mod.rs @@ -9,7 +9,7 @@ use zksync_types::BlockNumber; // Public uses pub use self::{ blocks::{BlockInfo, TransactionInfo}, - client::{Client, ClientError}, + client::{Client, ClientError, Result as ClientResult}, config::Contracts, error::ErrorBody, operations::{PriorityOpData, PriorityOpQuery, PriorityOpQueryError, PriorityOpReceipt}, diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index bd418b3322..beaab32ff5 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -16,6 +16,7 @@ pub 
struct ContractsConfig { pub governance_addr: Address, pub verifier_addr: Address, pub deploy_factory_addr: Address, + pub forced_exit_addr: Address, pub genesis_tx_hash: H256, } @@ -40,6 +41,7 @@ mod tests { governance_addr: addr("5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9"), verifier_addr: addr("DAbb67b676F5b01FcC8997Cc8439846D0d8078ca"), deploy_factory_addr: addr("FC073319977e314F251EAE6ae6bE76B0B3BAeeCF"), + forced_exit_addr: addr("9c7AeE886D6FcFc14e37784f143a6dAccEf50Db7"), genesis_tx_hash: hash( "b99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e", ), @@ -57,6 +59,7 @@ CONTRACTS_CONTRACT_ADDR="0x70a0F165d6f8054d0d0CF8dFd4DD2005f0AF6B55" CONTRACTS_GOVERNANCE_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" CONTRACTS_VERIFIER_ADDR="0xDAbb67b676F5b01FcC8997Cc8439846D0d8078ca" CONTRACTS_DEPLOY_FACTORY_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CONTRACTS_FORCED_EXIT_ADDR="0x9c7AeE886D6FcFc14e37784f143a6dAccEf50Db7" CONTRACTS_GENESIS_TX_HASH="0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" "#; set_env(config); diff --git a/core/lib/config/src/configs/forced_exit_requests.rs b/core/lib/config/src/configs/forced_exit_requests.rs new file mode 100644 index 0000000000..8a772c3ac9 --- /dev/null +++ b/core/lib/config/src/configs/forced_exit_requests.rs @@ -0,0 +1,84 @@ +use crate::envy_load; +/// External uses +use serde::Deserialize; +use zksync_types::{Address, H256}; + +// There are two types of configs: +// The original one (with tx_interval_scaling_factor) +// And the public one (with max_tx_interval) + +// It's easier for humans to think in factors +// But the rest of the codebase does not +// really care about the factor, it only needs the max_tx_interval + +#[derive(Debug, Deserialize, Clone, PartialEq)] +struct ForcedExitRequestsInternalConfig { + pub enabled: bool, + pub max_tokens_per_request: u8, + pub recomended_tx_interval: i64, + pub tx_interval_scaling_factor: f64, + pub price_per_token: i64, + pub digits_in_id: 
u8, + pub wait_confirmations: u64, + pub sender_private_key: String, + pub sender_eth_private_key: H256, + pub sender_account_address: Address, + pub expiration_period: u64, +} + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct ForcedExitRequestsConfig { + pub enabled: bool, + pub max_tokens_per_request: u8, + pub recomended_tx_interval: i64, + pub max_tx_interval: i64, + pub price_per_token: i64, + pub digits_in_id: u8, + pub wait_confirmations: u64, + pub sender_private_key: String, + pub sender_eth_private_key: H256, + pub sender_account_address: Address, + pub expiration_period: u64, +} + +// Checks that in no way the price will overlap with the requests id space +// +// The amount that the users have to send to pay for the ForcedExit request +// = (number of tokens) * (price_per_token) + id +// +// Thus we need to check that at least digits_in_id first digits +// are equal to zeroes in price_per_token +fn validate_price_with_id_space(price: i64, digits_in_id: u8) { + let id_space = (10_i64).saturating_pow(digits_in_id.into()); + + assert!( + price % id_space == 0, + "The price per token may overlap with request id" + ) +} + +impl ForcedExitRequestsConfig { + pub fn from_env() -> Self { + let config: ForcedExitRequestsInternalConfig = + envy_load!("forced_exit_requests", "FORCED_EXIT_REQUESTS_"); + + let max_tx_interval: f64 = + (config.recomended_tx_interval as f64) * config.tx_interval_scaling_factor; + + validate_price_with_id_space(config.price_per_token, config.digits_in_id); + + ForcedExitRequestsConfig { + enabled: config.enabled, + max_tokens_per_request: config.max_tokens_per_request, + recomended_tx_interval: config.recomended_tx_interval, + max_tx_interval: max_tx_interval.round() as i64, + digits_in_id: config.digits_in_id, + price_per_token: config.price_per_token, + wait_confirmations: config.wait_confirmations, + sender_private_key: config.sender_private_key, + sender_eth_private_key: config.sender_eth_private_key, + 
sender_account_address: config.sender_account_address, + expiration_period: config.expiration_period, + } + } +} diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 5aa1e9878b..43660e8896 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -2,7 +2,8 @@ pub use self::{ api::ApiConfig, chain::ChainConfig, contracts::ContractsConfig, database::DBConfig, dev_liquidity_token_watcher::DevLiquidityTokenWatcherConfig, eth_client::ETHClientConfig, - eth_sender::ETHSenderConfig, eth_watch::ETHWatchConfig, misc::MiscConfig, prover::ProverConfig, + eth_sender::ETHSenderConfig, eth_watch::ETHWatchConfig, + forced_exit_requests::ForcedExitRequestsConfig, misc::MiscConfig, prover::ProverConfig, ticker::TickerConfig, }; @@ -14,6 +15,7 @@ pub mod dev_liquidity_token_watcher; pub mod eth_client; pub mod eth_sender; pub mod eth_watch; +pub mod forced_exit_requests; pub mod misc; pub mod prover; pub mod ticker; diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index cd700ecd12..07bf5539bb 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -2,7 +2,8 @@ use serde::Deserialize; pub use crate::configs::{ ApiConfig, ChainConfig, ContractsConfig, DBConfig, DevLiquidityTokenWatcherConfig, - ETHClientConfig, ETHSenderConfig, ETHWatchConfig, MiscConfig, ProverConfig, TickerConfig, + ETHClientConfig, ETHSenderConfig, ETHWatchConfig, ForcedExitRequestsConfig, MiscConfig, + ProverConfig, TickerConfig, }; pub mod configs; @@ -19,6 +20,7 @@ pub struct ZkSyncConfig { pub eth_watch: ETHWatchConfig, pub prover: ProverConfig, pub ticker: TickerConfig, + pub forced_exit_requests: ForcedExitRequestsConfig, } impl ZkSyncConfig { @@ -33,6 +35,7 @@ impl ZkSyncConfig { eth_watch: ETHWatchConfig::from_env(), prover: ProverConfig::from_env(), ticker: TickerConfig::from_env(), + forced_exit_requests: ForcedExitRequestsConfig::from_env(), } } } diff --git a/core/lib/contracts/src/lib.rs 
b/core/lib/contracts/src/lib.rs index ae83a41d12..4a937b2ac0 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -17,6 +17,8 @@ const IEIP1271_CONTRACT_FILE: &str = "contracts/artifacts/cache/solpp-generated-contracts/dev-contracts/IEIP1271.sol/IEIP1271.json"; const UPGRADE_GATEKEEPER_CONTRACT_FILE: &str = "contracts/artifacts/cache/solpp-generated-contracts/UpgradeGatekeeper.sol/UpgradeGatekeeper.json"; +const FORCED_EXIT_CONTRACT_FILE: &str = + "contracts/artifacts/cache/solpp-generated-contracts/ForcedExit.sol/ForcedExit.json"; fn read_file_to_json_value(path: &str) -> io::Result { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); @@ -105,3 +107,12 @@ pub fn upgrade_gatekeeper() -> Contract { .to_string(); Contract::load(abi_string.as_bytes()).expect("gatekeeper contract abi") } + +pub fn forced_exit_contract() -> Contract { + let abi_string = read_file_to_json_value(FORCED_EXIT_CONTRACT_FILE) + .expect("couldn't read FORCED_EXIT_CONTRACT_FILE") + .get("abi") + .expect("couldn't get abi from FORCED_EXIT_CONTRACT_FILE") + .to_string(); + Contract::load(abi_string.as_bytes()).expect("forced_exit contract abi") +} diff --git a/core/lib/storage/migrations/2021-03-22-134435_forced_exit_requests/down.sql b/core/lib/storage/migrations/2021-03-22-134435_forced_exit_requests/down.sql new file mode 100644 index 0000000000..d033a6f858 --- /dev/null +++ b/core/lib/storage/migrations/2021-03-22-134435_forced_exit_requests/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS forced_exit_requests; diff --git a/core/lib/storage/migrations/2021-03-22-134435_forced_exit_requests/up.sql b/core/lib/storage/migrations/2021-03-22-134435_forced_exit_requests/up.sql new file mode 100644 index 0000000000..ab26a5296e --- /dev/null +++ b/core/lib/storage/migrations/2021-03-22-134435_forced_exit_requests/up.sql @@ -0,0 +1,10 @@ +CREATE TABLE forced_exit_requests ( + id BIGSERIAL PRIMARY KEY, + target TEXT NOT NULL, + tokens TEXT NOT NULL, 
-- comma-separated list of TokenIds + price_in_wei NUMERIC NOT NULL, + valid_until TIMESTAMP with time zone NOT NULL, + created_at TIMESTAMP with time zone NOT NULL, + fulfilled_by TEXT, -- comma-separated list of the hashes of ForcedExit transactions + fulfilled_at TIMESTAMP with time zone +); diff --git a/core/lib/storage/sqlx-data.json b/core/lib/storage/sqlx-data.json index 5392d9fcd2..1761eb8589 100644 --- a/core/lib/storage/sqlx-data.json +++ b/core/lib/storage/sqlx-data.json @@ -406,6 +406,66 @@ ] } }, + "0e43c955bab97c4e3c2d8566c1c32c8448e27f658db0dea9540679b903dcdfd7": { + "query": "\n SELECT * FROM forced_exit_requests\n WHERE fulfilled_at IS NULL AND fulfilled_by IS NOT NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "target", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "tokens", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "price_in_wei", + "type_info": "Numeric" + }, + { + "ordinal": 4, + "name": "valid_until", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "fulfilled_by", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "fulfilled_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true + ] + } + }, "0fbc25e0f2aab2b56acf7e09d75690a78f7c2df7cec0644a8e45461ee9aab75b": { "query": "SELECT * FROM data_restore_rollup_ops\n ORDER BY id ASC", "describe": { @@ -647,6 +707,19 @@ "nullable": [] } }, + "1e491f4afb54c10a9e4f2ea467bd7f219e7a32bdf741691cb6f350d50caae417": { + "query": "\n UPDATE forced_exit_requests\n SET fulfilled_at = $1\n WHERE id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Timestamptz", + "Int8" + ] + }, + "nullable": [] + } + }, "222e3946401772e3f6e0d9ce9909e8e7ac2dc830c5ecfcd522f56b3bf70fd679": { 
"query": "INSERT INTO data_restore_storage_state_update (storage_state) VALUES ($1)", "describe": { @@ -1603,6 +1676,66 @@ ] } }, + "502e94a5b03c686539721f133998c66fa53f50a620167666d2e1b6084d3832b9": { + "query": "\n SELECT * FROM forced_exit_requests\n WHERE fulfilled_at IS NULL AND created_at = (\n SELECT MIN(created_at) FROM forced_exit_requests\n WHERE fulfilled_at IS NULL\n )\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "target", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "tokens", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "price_in_wei", + "type_info": "Numeric" + }, + { + "ordinal": 4, + "name": "valid_until", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "fulfilled_by", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "fulfilled_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true + ] + } + }, "50a7a224aeba0065b57858fc989c3a09d45f833b68fbc9909a73817f782dd3c3": { "query": "\n WITH aggr_exec AS (\n SELECT \n aggregate_operations.confirmed, \n execute_aggregated_blocks_binding.block_number \n FROM aggregate_operations\n INNER JOIN execute_aggregated_blocks_binding ON aggregate_operations.id = execute_aggregated_blocks_binding.op_id\n WHERE aggregate_operations.confirmed = true \n ),\n transactions AS (\n SELECT\n *\n FROM (\n SELECT\n concat_ws(',', block_number, block_index) AS tx_id,\n tx,\n 'sync-tx:' || encode(tx_hash, 'hex') AS hash,\n null as pq_id,\n null as eth_block,\n success,\n fail_reason,\n block_number,\n created_at\n FROM\n executed_transactions\n WHERE\n from_account = $1\n or\n to_account = $1\n or\n primary_account_address = $1\n union all\n select\n concat_ws(',', block_number, block_index) as tx_id,\n 
operation as tx,\n '0x' || encode(eth_hash, 'hex') as hash,\n priority_op_serialid as pq_id,\n eth_block,\n true as success,\n null as fail_reason,\n block_number,\n created_at\n from \n executed_priority_operations\n where \n from_account = $1\n or\n to_account = $1) t\n order by\n block_number desc, created_at desc\n offset \n $2\n limit \n $3\n )\n select\n tx_id as \"tx_id!\",\n hash as \"hash?\",\n eth_block as \"eth_block?\",\n pq_id as \"pq_id?\",\n tx as \"tx!\",\n success as \"success?\",\n fail_reason as \"fail_reason?\",\n true as \"commited!\",\n coalesce(verified.confirmed, false) as \"verified!\",\n created_at as \"created_at!\"\n from transactions\n LEFT JOIN aggr_exec verified ON transactions.block_number = verified.block_number\n order by transactions.block_number desc, created_at desc\n ", "describe": { @@ -2137,6 +2270,19 @@ ] } }, + "7bc4a6d9e909dce159213d0826726c10c7ec4008db2a4f05cbe613aa849e8a40": { + "query": "\n UPDATE forced_exit_requests\n SET fulfilled_by = $1\n WHERE id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + }, + "nullable": [] + } + }, "7c51337430beeb0ed6e1f244da727797194ab44b5049b15cd2bcba4fc4642fb9": { "query": "SELECT * FROM server_config", "describe": { @@ -2201,6 +2347,68 @@ "nullable": [] } }, + "7dfa76c3e12c301dc3d7fbf820ecf0be45e0b1c5f01ce13f7cdc1a82880804c1": { + "query": "\n SELECT * FROM forced_exit_requests\n WHERE id = $1\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "target", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "tokens", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "price_in_wei", + "type_info": "Numeric" + }, + { + "ordinal": 4, + "name": "valid_until", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "fulfilled_by", + "type_info": "Text" + }, + { + 
"ordinal": 7, + "name": "fulfilled_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true + ] + } + }, "7ff98a4fddc441ea83f72a4a75a7caf53b9661c37f26a90984a349bfa5aeab70": { "query": "INSERT INTO eth_aggregated_ops_binding (op_id, eth_op_id) VALUES ($1, $2)", "describe": { @@ -2490,6 +2698,18 @@ "nullable": [] } }, + "963cad1979935b50bc5c2bbe174f5d94fbd5c38ea752d304f987229c89e6070a": { + "query": "\n DELETE FROM forced_exit_requests\n WHERE fulfilled_by IS NULL AND valid_until < $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Timestamptz" + ] + }, + "nullable": [] + } + }, "98f87793202531586603307eab53987f75f4e07614af8706e6180413f808a1b4": { "query": "INSERT INTO txs_batches_signatures VALUES($1, $2)", "describe": { @@ -3701,6 +3921,72 @@ "nullable": [] } }, + "dbd7cc6b289ab3a15781dac965f9e6f026c8e647b480b5dd0c3820948d6ba4ed": { + "query": "\n INSERT INTO forced_exit_requests ( target, tokens, price_in_wei, created_at, valid_until )\n VALUES ( $1, $2, $3, $4, $5 )\n RETURNING *\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "target", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "tokens", + "type_info": "Text" + }, + { + "ordinal": 3, + "name": "price_in_wei", + "type_info": "Numeric" + }, + { + "ordinal": 4, + "name": "valid_until", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "fulfilled_by", + "type_info": "Text" + }, + { + "ordinal": 7, + "name": "fulfilled_at", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Numeric", + "Timestamptz", + "Timestamptz" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true + ] + } + }, 
"debbe23f0c730c331482c798387d1739911923edcafc2bd80463464ff98f3b71": { "query": "SELECT * from mempool_txs\n WHERE tx_hash = $1", "describe": { diff --git a/core/lib/storage/src/forced_exit_requests/mod.rs b/core/lib/storage/src/forced_exit_requests/mod.rs new file mode 100644 index 0000000000..be4ac7c893 --- /dev/null +++ b/core/lib/storage/src/forced_exit_requests/mod.rs @@ -0,0 +1,220 @@ +use chrono::{DateTime, Utc}; +// Built-in deps +use num::BigInt; +use sqlx::types::BigDecimal; +use std::{ops::Sub, time::Instant}; +// External imports +// Workspace imports +// Local imports +use crate::{QueryResult, StorageProcessor}; +use zksync_types::forced_exit_requests::{ + ForcedExitRequest, ForcedExitRequestId, SaveForcedExitRequestQuery, +}; + +use zksync_types::tx::TxHash; + +pub mod records; + +mod utils; + +use records::DbForcedExitRequest; + +use crate::utils::address_to_stored_string; + +/// ForcedExitRequests schema handles the `forced_exit_requests` table, providing methods to +#[derive(Debug)] +pub struct ForcedExitRequestsSchema<'a, 'c>(pub &'a mut StorageProcessor<'c>); + +impl<'a, 'c> ForcedExitRequestsSchema<'a, 'c> { + pub async fn store_request( + &mut self, + request: SaveForcedExitRequestQuery, + ) -> QueryResult { + let start = Instant::now(); + let price_in_wei = BigDecimal::from(BigInt::from(request.price_in_wei.clone())); + + let target_str = address_to_stored_string(&request.target); + + let tokens = utils::vec_to_comma_list(request.tokens.clone()); + + let stored_request: DbForcedExitRequest = sqlx::query_as!( + DbForcedExitRequest, + r#" + INSERT INTO forced_exit_requests ( target, tokens, price_in_wei, created_at, valid_until ) + VALUES ( $1, $2, $3, $4, $5 ) + RETURNING * + "#, + target_str, + &tokens, + price_in_wei, + // It is possible to generate created_at inside the db + // However, since the valid_until is generated outside the db (using config params) + // it was decided to set both values in the server for consistency + 
request.created_at, + request.valid_until + ) + .fetch_one(self.0.conn()) + .await?; + + metrics::histogram!("sql.forced_exit_requests.store_request", start.elapsed()); + Ok(stored_request.into()) + } + + pub async fn get_request_by_id( + &mut self, + id: ForcedExitRequestId, + ) -> QueryResult> { + let start = Instant::now(); + let request: Option = sqlx::query_as!( + DbForcedExitRequest, + r#" + SELECT * FROM forced_exit_requests + WHERE id = $1 + LIMIT 1 + "#, + id + ) + .fetch_optional(self.0.conn()) + .await? + .map(|r| r.into()); + + metrics::histogram!( + "sql.forced_exit_requests.get_request_by_id", + start.elapsed() + ); + + Ok(request) + } + + pub async fn set_fulfilled_at( + &mut self, + id: ForcedExitRequestId, + fulfilled_at: DateTime, + ) -> QueryResult<()> { + let start = Instant::now(); + + sqlx::query!( + r#" + UPDATE forced_exit_requests + SET fulfilled_at = $1 + WHERE id = $2 + "#, + fulfilled_at, + id + ) + .execute(self.0.conn()) + .await?; + + metrics::histogram!("sql.forced_exit_requests.set_fulfilled_at", start.elapsed()); + + Ok(()) + } + + pub async fn get_oldest_unfulfilled_request( + &mut self, + ) -> QueryResult> { + let start = Instant::now(); + + let request: Option = sqlx::query_as!( + DbForcedExitRequest, + r#" + SELECT * FROM forced_exit_requests + WHERE fulfilled_at IS NULL AND created_at = ( + SELECT MIN(created_at) FROM forced_exit_requests + WHERE fulfilled_at IS NULL + ) + LIMIT 1 + "# + ) + .fetch_optional(self.0.conn()) + .await? 
+ .map(|r| r.into()); + + metrics::histogram!( + "sql.forced_exit_requests.get_oldest_unfulfilled_request", + start.elapsed() + ); + + Ok(request) + } + + pub async fn set_fulfilled_by( + &mut self, + id: ForcedExitRequestId, + tx_hashes: Option>, + ) -> QueryResult<()> { + let start = Instant::now(); + + let hash_str = tx_hashes.map(utils::vec_to_comma_list); + + sqlx::query!( + r#" + UPDATE forced_exit_requests + SET fulfilled_by = $1 + WHERE id = $2 + "#, + hash_str, + id + ) + .execute(self.0.conn()) + .await?; + + metrics::histogram!("sql.forced_exit_requests.set_fulfilled_by", start.elapsed()); + Ok(()) + } + + // Normally this function should not return any more + // than one request, but it was decided to make to more + // general from the start + pub async fn get_unconfirmed_requests(&mut self) -> QueryResult> { + let start = Instant::now(); + + let requests: Vec = sqlx::query_as!( + DbForcedExitRequest, + r#" + SELECT * FROM forced_exit_requests + WHERE fulfilled_at IS NULL AND fulfilled_by IS NOT NULL + "# + ) + .fetch_all(self.0.conn()) + .await? 
+ .into_iter() + .map(|rec| rec.into()) + .collect(); + + metrics::histogram!( + "sql.forced_exit_requests.get_unconfirmed_requests", + start.elapsed() + ); + + Ok(requests) + } + + pub async fn delete_old_unfulfilled_requests( + &mut self, + // The time that has to be passed since the + // request has been considered invalid to delete it + deleting_threshold: chrono::Duration, + ) -> QueryResult<()> { + let start = Instant::now(); + + let oldest_allowed = Utc::now().sub(deleting_threshold); + + sqlx::query!( + r#" + DELETE FROM forced_exit_requests + WHERE fulfilled_by IS NULL AND valid_until < $1 + "#, + oldest_allowed + ) + .execute(self.0.conn()) + .await?; + + metrics::histogram!( + "sql.forced_exit_requests.delete_old_unfulfilled_requests", + start.elapsed() + ); + + Ok(()) + } +} diff --git a/core/lib/storage/src/forced_exit_requests/records.rs b/core/lib/storage/src/forced_exit_requests/records.rs new file mode 100644 index 0000000000..63499ee2af --- /dev/null +++ b/core/lib/storage/src/forced_exit_requests/records.rs @@ -0,0 +1,67 @@ +use crate::utils::{address_to_stored_string, stored_str_address_to_address}; +use chrono::{DateTime, Utc}; +use num::{bigint::ToBigInt, BigInt}; +use sqlx::types::BigDecimal; +use zksync_basic_types::TokenId; +use zksync_types::forced_exit_requests::ForcedExitRequest; +use zksync_types::tx::TxHash; + +use super::utils; + +#[derive(Debug, Clone)] +pub struct DbForcedExitRequest { + pub id: i64, + pub target: String, + pub tokens: String, + pub price_in_wei: BigDecimal, + pub valid_until: DateTime, + pub created_at: DateTime, + pub fulfilled_by: Option, + pub fulfilled_at: Option>, +} + +impl From for DbForcedExitRequest { + fn from(request: ForcedExitRequest) -> Self { + let price_in_wei = BigDecimal::from(BigInt::from(request.price_in_wei.clone())); + + let tokens = utils::vec_to_comma_list(request.tokens); + let fulfilled_by = request.fulfilled_by.map(utils::vec_to_comma_list); + Self { + id: request.id, + target: 
address_to_stored_string(&request.target), + tokens, + price_in_wei, + valid_until: request.valid_until, + created_at: request.created_at, + fulfilled_at: request.fulfilled_at, + fulfilled_by, + } + } +} + +impl Into for DbForcedExitRequest { + fn into(self) -> ForcedExitRequest { + let price_in_wei = self + .price_in_wei + .to_bigint() + .map(|int| int.to_biguint()) + .flatten() + // The fact that the request was found, but could not be convert into the ForcedExitRequest + // means that invalid data is stored in the DB + .expect("Invalid forced exit request has been stored"); + + let tokens: Vec = utils::comma_list_to_vec(self.tokens); + let fulfilled_by: Option> = self.fulfilled_by.map(utils::comma_list_to_vec); + + ForcedExitRequest { + id: self.id, + target: stored_str_address_to_address(&self.target), + tokens, + price_in_wei, + created_at: self.created_at, + valid_until: self.valid_until, + fulfilled_at: self.fulfilled_at, + fulfilled_by, + } + } +} diff --git a/core/lib/storage/src/forced_exit_requests/utils.rs b/core/lib/storage/src/forced_exit_requests/utils.rs new file mode 100644 index 0000000000..57bf3f333c --- /dev/null +++ b/core/lib/storage/src/forced_exit_requests/utils.rs @@ -0,0 +1,18 @@ +use std::fmt::Debug; +use std::{str::FromStr, string::ToString}; + +pub fn vec_to_comma_list(elems: Vec) -> String { + let strs: Vec = elems.iter().map(|elem| (*elem).to_string()).collect(); + + strs.join(",") +} + +pub fn comma_list_to_vec(elems: String) -> Vec +where + ::Err: Debug, +{ + elems + .split(',') + .map(|str| T::from_str(str).expect("Failed to deserialize stored item")) + .collect() +} diff --git a/core/lib/storage/src/lib.rs b/core/lib/storage/src/lib.rs index 2fec9005a3..b24fa7cf02 100644 --- a/core/lib/storage/src/lib.rs +++ b/core/lib/storage/src/lib.rs @@ -92,9 +92,13 @@ pub mod connection; pub mod data_restore; pub mod diff; pub mod ethereum; +pub mod forced_exit_requests; pub mod prover; pub mod test_data; pub mod tokens; +mod utils; + +use 
forced_exit_requests::ForcedExitRequestsSchema; pub use crate::connection::ConnectionPool; pub type QueryResult = Result; @@ -212,6 +216,10 @@ impl<'a> StorageProcessor<'a> { tokens::TokensSchema(self) } + pub fn forced_exit_requests_schema(&mut self) -> ForcedExitRequestsSchema<'_, 'a> { + ForcedExitRequestsSchema(self) + } + fn conn(&mut self) -> &mut PgConnection { match &mut self.conn { ConnectionHolder::Pooled(conn) => conn, diff --git a/core/lib/storage/src/tests/forced_exit_requests.rs b/core/lib/storage/src/tests/forced_exit_requests.rs new file mode 100644 index 0000000000..a461d26ec5 --- /dev/null +++ b/core/lib/storage/src/tests/forced_exit_requests.rs @@ -0,0 +1,203 @@ +use std::{ + ops::{Mul, Sub}, + str::FromStr, +}; + +use crate::forced_exit_requests::ForcedExitRequestsSchema; +use crate::tests::db_test; +use crate::QueryResult; +use crate::StorageProcessor; +use chrono::{Duration, Timelike, Utc}; +use num::{BigUint, FromPrimitive}; +use zksync_basic_types::Address; +use zksync_types::{ + forced_exit_requests::{ForcedExitRequest, SaveForcedExitRequestQuery}, + tx::TxHash, +}; + +use std::ops::Add; + +use zksync_types::TokenId; + +// Accepts an array of requests and stores them in the db +pub async fn store_requests( + storage: &mut StorageProcessor<'_>, + requests: Vec, +) -> Vec { + let mut stored_requests: Vec = vec![]; + for req in requests.into_iter() { + stored_requests.push( + ForcedExitRequestsSchema(storage) + .store_request(req) + .await + .unwrap(), + ); + } + stored_requests +} + +#[db_test] +async fn get_oldest_unfulfilled_request(mut storage: StorageProcessor<'_>) -> QueryResult<()> { + let mut now = Utc::now().with_nanosecond(0).unwrap(); + + // The requests have dummy created_at and valid_until values + // They will reassigned in the future cycle + let requests = vec![ + SaveForcedExitRequestQuery { + target: Address::from_str("c0f97CC918C9d6fA4E9fc6be61a6a06589D199b2").unwrap(), + tokens: vec![TokenId(1)], + price_in_wei: 
BigUint::from_i32(212).unwrap(), + created_at: now, + valid_until: now, + }, + SaveForcedExitRequestQuery { + target: Address::from_str("c0f97CC918C9d6fA4E9fc6be61a6a06589D199b2").unwrap(), + tokens: vec![TokenId(1)], + price_in_wei: BigUint::from_i32(1).unwrap(), + created_at: now, + valid_until: now, + }, + SaveForcedExitRequestQuery { + target: Address::from_str("c0f97CC918C9d6fA4E9fc6be61a6a06589D199b2").unwrap(), + tokens: vec![TokenId(20)], + price_in_wei: BigUint::from_str("1000000000000000").unwrap(), + created_at: now, + valid_until: now, + }, + ]; + + let mut stored_requests: Vec = vec![]; + let interval = chrono::Duration::seconds(1); + + for req in requests.into_iter() { + now = now.add(interval); + let created_at = now; + let valid_until = now.add(chrono::Duration::hours(32)); + + stored_requests.push( + ForcedExitRequestsSchema(&mut storage) + .store_request(SaveForcedExitRequestQuery { + created_at, + valid_until, + ..req + }) + .await + .unwrap(), + ); + } + + ForcedExitRequestsSchema(&mut storage) + .set_fulfilled_at(stored_requests[0].id, Utc::now()) + .await?; + + let oldest_unfulfilled_request = ForcedExitRequestsSchema(&mut storage) + .get_oldest_unfulfilled_request() + .await? + .unwrap(); + // The first request has been fulfilled. Thus, the second one should be the oldest + assert_eq!(oldest_unfulfilled_request.id, stored_requests[1].id); + + // Now filling all the remaining requests + ForcedExitRequestsSchema(&mut storage) + .set_fulfilled_at(stored_requests[1].id, Utc::now()) + .await?; + ForcedExitRequestsSchema(&mut storage) + .set_fulfilled_at(stored_requests[2].id, Utc::now()) + .await?; + + let oldest_unfulfilled_request = ForcedExitRequestsSchema(&mut storage) + .get_oldest_unfulfilled_request() + .await?; + // The first request has been fulfilled. 
Since all the requests have been fulfilled by now, no request should be returned
now.sub(day.mul(3)).add(minute.mul(5)), + }, + ]; + + let stored_requests = store_requests(&mut storage, requests).await; + + // This a hash of a random transaction + let transaction_hash = TxHash::from_str( + "sync-tx:796018689b3e323894f44fb0093856ec3832908c626dea357a9bd1b25f9d11bf", + ) + .unwrap(); + + // Setting fullfilled_by for the oldest request + // so that it should not be deleted + ForcedExitRequestsSchema(&mut storage) + .set_fulfilled_by(stored_requests[0].id, Some(vec![transaction_hash])) + .await?; + + ForcedExitRequestsSchema(&mut storage) + .delete_old_unfulfilled_requests(deleting_threshold) + .await?; + + // true means should not have been deleted + // false means should have been deleted + // Note that we have set the fulfilled_by for the first tx, that's why it should + // not have been deleted + let should_remain = vec![true, false, true, true]; + + for (i, request) in stored_requests.into_iter().enumerate() { + let stored = ForcedExitRequestsSchema(&mut storage) + .get_request_by_id(request.id) + .await?; + + let processed_correctly = if should_remain[i] { + stored.is_some() + } else { + stored.is_none() + }; + + assert!(processed_correctly, "Deletion was not processed correctly"); + } + + Ok(()) +} diff --git a/core/lib/storage/src/tests/mod.rs b/core/lib/storage/src/tests/mod.rs index 7492f8c1cf..3fbb7bd25a 100644 --- a/core/lib/storage/src/tests/mod.rs +++ b/core/lib/storage/src/tests/mod.rs @@ -32,6 +32,7 @@ pub(crate) mod chain; mod config; mod data_restore; mod ethereum; +mod forced_exit_requests; mod prover; mod tokens; diff --git a/core/lib/storage/src/tokens/mod.rs b/core/lib/storage/src/tokens/mod.rs index 1a0f206828..df2568f84a 100644 --- a/core/lib/storage/src/tokens/mod.rs +++ b/core/lib/storage/src/tokens/mod.rs @@ -8,12 +8,11 @@ use zksync_types::{Token, TokenId, TokenLike, TokenPrice}; use zksync_utils::ratio_to_big_decimal; // Local imports use self::records::{DBMarketVolume, DbTickerPrice, DbToken}; -use 
crate::tokens::utils::address_to_stored_string; +use crate::utils::address_to_stored_string; use crate::{QueryResult, StorageProcessor}; use zksync_types::tokens::TokenMarketVolume; pub mod records; -mod utils; /// Precision of the USD price per token pub(crate) const STORED_USD_PRICE_PRECISION: usize = 6; diff --git a/core/lib/storage/src/tokens/records.rs b/core/lib/storage/src/tokens/records.rs index 321c7dcdbd..da07881301 100644 --- a/core/lib/storage/src/tokens/records.rs +++ b/core/lib/storage/src/tokens/records.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use sqlx::{types::BigDecimal, FromRow}; // Workspace imports // Local imports -use crate::tokens::utils::{address_to_stored_string, stored_str_address_to_address}; +use crate::utils::{address_to_stored_string, stored_str_address_to_address}; use chrono::{DateTime, Utc}; use zksync_types::tokens::{TokenMarketVolume, TokenPrice}; use zksync_types::{Token, TokenId}; diff --git a/core/lib/storage/src/tokens/utils.rs b/core/lib/storage/src/utils.rs similarity index 100% rename from core/lib/storage/src/tokens/utils.rs rename to core/lib/storage/src/utils.rs diff --git a/core/lib/types/src/forced_exit_requests.rs b/core/lib/types/src/forced_exit_requests.rs new file mode 100644 index 0000000000..15a85d4b10 --- /dev/null +++ b/core/lib/types/src/forced_exit_requests.rs @@ -0,0 +1,75 @@ +use chrono::{DateTime, Utc}; +use num::BigUint; +use zksync_basic_types::{Address, TokenId}; +use zksync_utils::BigUintSerdeAsRadix10Str; + +use serde::{Deserialize, Serialize}; + +pub type ForcedExitRequestId = i64; + +use anyhow::format_err; +use ethabi::{decode, ParamType}; +use std::convert::TryFrom; +use zksync_basic_types::Log; + +use crate::tx::TxHash; + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +#[serde(rename_all = "camelCase")] +pub struct ForcedExitRequest { + pub id: ForcedExitRequestId, + pub target: Address, + pub tokens: Vec, + #[serde(with = "BigUintSerdeAsRadix10Str")] + pub 
price_in_wei: BigUint, + pub valid_until: DateTime, + pub created_at: DateTime, + pub fulfilled_by: Option>, + pub fulfilled_at: Option>, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] +pub struct SaveForcedExitRequestQuery { + pub target: Address, + pub tokens: Vec, + #[serde(with = "BigUintSerdeAsRadix10Str")] + pub price_in_wei: BigUint, + pub created_at: DateTime, + pub valid_until: DateTime, +} + +#[derive(Debug, Clone)] +pub struct FundsReceivedEvent { + pub amount: BigUint, + pub block_number: u64, +} + +#[derive(Serialize, Deserialize)] +pub struct ForcedExitEligibilityResponse { + pub eligible: bool, +} + +impl TryFrom for FundsReceivedEvent { + type Error = anyhow::Error; + + fn try_from(event: Log) -> Result { + let mut dec_ev = decode( + &[ + ParamType::Uint(256), // amount + ], + &event.data.0, + ) + .map_err(|e| format_err!("Event data decode: {:?}", e))?; + + let amount = dec_ev.remove(0).to_uint().unwrap(); + let block_number = event + .block_number + .expect("Trying to access pending block") + .as_u64(); + + Ok(FundsReceivedEvent { + amount: BigUint::from(amount.as_u128()), + block_number, + }) + } +} diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index a5ae051661..423d11e4ee 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -42,6 +42,7 @@ pub mod block; pub mod config; pub mod ethereum; pub mod fee; +pub mod forced_exit_requests; pub mod gas_counter; pub mod helpers; pub mod mempool; diff --git a/core/tests/ts-tests/tests/forced-exit-requests.ts b/core/tests/ts-tests/tests/forced-exit-requests.ts new file mode 100644 index 0000000000..65bf098ef4 --- /dev/null +++ b/core/tests/ts-tests/tests/forced-exit-requests.ts @@ -0,0 +1,165 @@ +import { Tester } from './tester'; +import { expect } from 'chai'; +import fs from 'fs'; +import fetch from 'node-fetch'; +import { Wallet, types, utils, wallet } from 'zksync'; +import { BigNumber, BigNumberish, ethers } from 'ethers'; +import * 
as path from 'path'; + +import { Address } from 'zksync/build/types'; +import { sleep } from 'zksync/build/utils'; + +import './transfer'; + +const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); +const apiTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/api.json`, { encoding: 'utf-8' })); +const apiUrl = `${apiTestConfig.rest_api_url}/api/forced_exit_requests/v0.1`; + +type TokenLike = types.TokenLike; + +declare module './tester' { + interface Tester { + testForcedExitRequestMultipleTokens( + from: Wallet, + payer: ethers.Signer, + to: Address, + tokens: TokenLike[], + value: BigNumber[] + ): Promise; + } +} + +interface StatusResponse { + status: 'enabled' | 'disabled'; + requestFee: string; + maxTokensPerRequest: number; + recomendedTxIntervalMillis: number; + forcedExitContractAddress: Address; + waitConfirmations: number; +} + +Tester.prototype.testForcedExitRequestMultipleTokens = async function ( + from: Wallet, + payer: ethers.Signer, + to: Address, + tokens: TokenLike[], + amounts: BigNumber[] +) { + const tokenAddresses = tokens.map((token) => this.syncProvider.tokenSet.resolveTokenAddress(token)); + + const toBalancesBeforePromises = tokens.map((token, i) => { + return getFullOnchainBalance(this, to, tokenAddresses[i]); + }); + + let toBalancesBefore = await Promise.all(toBalancesBeforePromises); + + const batchBuilder = from.batchBuilder(); + tokens.forEach((token, i) => { + batchBuilder.addTransfer({ + to, + token, + amount: amounts[i] + }); + }); + const batch = await batchBuilder.build('ETH'); + const handles = await wallet.submitSignedTransactionsBatch(from.provider, batch.txs, [batch.signature]); + + // Waiting only for the first tx since we send the transactions in batch + await handles[0].awaitReceipt(); + + const status = await getStatus(); + + expect(status.status).to.eq('enabled', 'Forced exit requests status is disabled'); + + const tokenIds = tokens.map((token) => 
this.syncProvider.tokenSet.resolveTokenId(token)); + + const requestPrice = BigNumber.from(status.requestFee).mul(tokens.length); + const request = await submitRequest(to, tokenIds, requestPrice.toString()); + + const contractAddress = status.forcedExitContractAddress; + + const amountToPay = requestPrice.add(BigNumber.from(request.id)); + + const gasPrice = (await payer.provider?.getGasPrice()) as BigNumberish; + + const txHandle = await payer.sendTransaction({ + value: amountToPay, + gasPrice, + to: contractAddress, + // Even though the standard payment gasLimit is 21k, the gasLimit needed for + // smart contract calls (even simply sending ether) is roughly 32k + // This is the restriction that all the ERC-1271 wallets face, so we consider it + // safe to assume that the gas limit is at least 32k + gasLimit: BigNumber.from('32000') + }); + + await txHandle.wait(); + + // We have to wait for verification and execution of the + // block with the forced exit, so waiting for a while is fine + const timeout = 60000; + const interval = 500; + + const iterations = timeout / interval; + + let expectedToBalance = toBalancesBefore.map((balance, i) => balance.add(amounts[i])); + for (let i = 0; i < iterations; i++) { + const balancesPromises = tokenAddresses.map((address) => getFullOnchainBalance(this, to, address)); + const balances = await Promise.all(balancesPromises); + + const allExpected = balances.every((bal, i) => bal.eq(expectedToBalance[i])); + + if (allExpected) { + break; + } + + await sleep(interval); + } + + const balancesPromises = tokenAddresses.map((address) => getFullOnchainBalance(this, to, address)); + const balances = await Promise.all(balancesPromises); + const allExpected = balances.every((bal, i) => bal.eq(expectedToBalance[i])); + + expect(allExpected, 'The ForcedExit has not completed').to.be.true; +}; + +async function getStatus() { + const endpoint = `${apiUrl}/status`; + + const response = await fetch(endpoint); + + return (await response.json()) as 
StatusResponse; +} + +async function submitRequest(address: string, tokens: number[], price_in_wei: string) { + const endpoint = `${apiUrl}/submit`; + + const data = { + target: address, + tokens, + price_in_wei + }; + + const response = await fetch(endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + redirect: 'follow', + body: JSON.stringify(data) + }); + + return await response.json(); +} + +async function getFullOnchainBalance(tester: Tester, address: Address, tokenAddress: Address) { + const onchainBalance = await utils.getEthereumBalance( + tester.ethProvider, + tester.syncProvider, + address, + tokenAddress + ); + const pendingToBeOnchain = await tester.contract.getPendingBalance(address, tokenAddress); + + return BigNumber.from(onchainBalance).add(BigNumber.from(pendingToBeOnchain)); +} diff --git a/core/tests/ts-tests/tests/withdrawal-helpers.test.ts b/core/tests/ts-tests/tests/withdrawal-helpers.test.ts index 82eb3d2ff8..d54a6c62c6 100644 --- a/core/tests/ts-tests/tests/withdrawal-helpers.test.ts +++ b/core/tests/ts-tests/tests/withdrawal-helpers.test.ts @@ -4,11 +4,12 @@ import { utils } from 'ethers'; import './priority-ops'; import './change-pub-key'; import './withdrawal-helpers'; +import './forced-exit-requests'; import { loadTestConfig } from 'reading-tool'; -const TX_AMOUNT = utils.parseEther('1'); -const DEPOSIT_AMOUNT = TX_AMOUNT.mul(200); +const TX_AMOUNT = utils.parseEther('0.1'); +const DEPOSIT_AMOUNT = TX_AMOUNT.mul(2000); const TEST_CONFIG = loadTestConfig(true); @@ -18,10 +19,14 @@ const erc20Token = 'wBTC'; describe('Withdrawal helpers tests', () => { let tester: Tester; let alice: Wallet; + let bob: Wallet; + let chuck: Wallet; before('create tester and test wallets', async () => { tester = await Tester.init('localhost', 'HTTP'); alice = await tester.fundedWallet('10.0'); + bob = await tester.fundedWallet('10.0'); + chuck = await tester.emptyWallet(); for (const token of ['ETH', erc20Token]) { await 
tester.testDeposit(alice, token, DEPOSIT_AMOUNT, true); @@ -60,4 +65,14 @@ describe('Withdrawal helpers tests', () => { [TX_AMOUNT, TX_AMOUNT] ); }); + + it('forced_exit_request should recover mutiple tokens', async () => { + await tester.testForcedExitRequestMultipleTokens( + alice, + bob.ethSigner, + chuck.address(), + ['ETH', erc20Token], + [TX_AMOUNT, TX_AMOUNT.mul(2)] + ); + }); }); diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index 2e9f9d24cc..c06487d2d1 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -9,6 +9,7 @@ CONTRACT_TARGET_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" CONTRACT_ADDR="0x70a0F165d6f8054d0d0CF8dFd4DD2005f0AF6B55" GOVERNANCE_ADDR="0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" VERIFIER_ADDR="0xDAbb67b676F5b01FcC8997Cc8439846D0d8078ca" +FORCED_EXIT_ADDR="0x9c7AeE886D6FcFc14e37784f143a6dAccEf50Db7" DEPLOY_FACTORY_ADDR="0xFC073319977e314F251EAE6ae6bE76B0B3BAeeCF" GENESIS_TX_HASH="0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" GENESIS_ROOT="0x2d5ab622df708ab44944bb02377be85b6f27812e9ae520734873b7a193898ba4" diff --git a/etc/env/base/forced_exit_requests.toml b/etc/env/base/forced_exit_requests.toml new file mode 100644 index 0000000000..c0c6689955 --- /dev/null +++ b/etc/env/base/forced_exit_requests.toml @@ -0,0 +1,38 @@ +# Options for L1-based ForcedExit utility +[forced_exit_requests] +# Whether the feature is enabled. 
Used to be able to quickly stop serving ForcedExit requests +# in times of attacks or upgrades +enabled=true + +max_tokens_per_request=10 + +# Recommended interval to send the transaction in milliseconds +recomended_tx_interval=300 + +# How many times the maximum acceptable interval will be longer +# than the recommended interval +tx_interval_scaling_factor=1.5 + +# Number of digits in id +digits_in_id=13 + +# Price per exit in wei (currently it's 0.03 ETH) +price_per_token=30000000000000000 + +# Wait confirmations +wait_confirmations=1 + +# The account of the ForcedExit sender +sender_account_address="0xe1faB3eFD74A77C23B426c302D96372140FF7d0C" + +# The time after which an invalid request is deleted in milliseconds +expiration_period=3000 + +# The minimum amount of ETH in wei that needs to be stored on the forced exit smart contract +# until it is ok to withdraw the funds from it +withdrawal_threshold=1000000000000000000 + +# The address which will receive the fees from ForcedExit automation +# Here it is set for some random account for the purpose of testing, but usually it is preferred +# to set the same account as the one that sends the txs for retrieving the fees from the smart contract +fee_receiver="0x1963917ba0b44A879cf6248387C1d51A0F11669d" diff --git a/etc/env/base/private.toml b/etc/env/base/private.toml index dc31bcb7e1..3ed0892a0e 100644 --- a/etc/env/base/private.toml +++ b/etc/env/base/private.toml @@ -28,3 +28,9 @@ secret_auth="sample" [misc] # Private key for the fee seller account fee_account_private_key="0x27593fea79697e947890ecbecce7901b0008345e5d7259710d0dd5e500d040be" + +[forced_exit_requests] +# L2 private key of the account that sends ForcedExits +sender_private_key="0x0092788f3890ed50dcab7f72fb574a0a9d30b1bc778ba076c609c311a8555352" +# L1 private key of the account that sends ForcedExits +sender_eth_private_key="0x0559b9f000b4e4bbb7fe02e1374cef9623c2ab7c3791204b490e1f229191d104" diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml 
index b2c6be10f8..e9914ead52 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -22,7 +22,8 @@ exodus_test=info,\ loadtest=debug,\ kube=debug,\ dev_ticker=info,\ -block_sizes_test=info\ +block_sizes_test=info,\ +zksync_forced_exit_requests=info\ """ # `RUST_BACKTRACE` variable diff --git a/infrastructure/zk/src/config.ts b/infrastructure/zk/src/config.ts index c57adac568..1b65decb18 100644 --- a/infrastructure/zk/src/config.ts +++ b/infrastructure/zk/src/config.ts @@ -18,7 +18,8 @@ const CONFIG_FILES = [ 'dev_liquidity_token_watcher.toml', 'prover.toml', 'rust.toml', - 'private.toml' + 'private.toml', + 'forced_exit_requests.toml' ]; async function getEnvironment(): Promise { diff --git a/infrastructure/zk/src/contract.ts b/infrastructure/zk/src/contract.ts index b1f0a4f986..00204f46aa 100644 --- a/infrastructure/zk/src/contract.ts +++ b/infrastructure/zk/src/contract.ts @@ -44,6 +44,7 @@ export async function deploy() { 'CONTRACTS_VERIFIER_ADDR', 'CONTRACTS_UPGRADE_GATEKEEPER_ADDR', 'CONTRACTS_DEPLOY_FACTORY_ADDR', + 'CONTRACTS_FORCED_EXIT_ADDR', 'CONTRACTS_GENESIS_TX_HASH' ]; let updatedContracts = ''; diff --git a/infrastructure/zk/src/server.ts b/infrastructure/zk/src/server.ts index f9506aefde..137b2afa99 100644 --- a/infrastructure/zk/src/server.ts +++ b/infrastructure/zk/src/server.ts @@ -4,6 +4,8 @@ import * as env from './env'; import fs from 'fs'; import * as db from './db/db'; +import { ethers } from 'ethers'; + export async function server() { let child = utils.background('cargo run --bin zksync_server --release'); @@ -11,6 +13,12 @@ process.on('SIGINT', () => { child.kill('SIGINT'); }); + + // By the time this function is run the server is most likely not running yet + // However, it does not matter, since the only thing the function does is depositing + // to the forced exit sender account, and server should be capable of recognizing + // priority operations that happened before it was booted + 
await prepareForcedExitRequestAccount(); } export async function genesis() { @@ -34,6 +42,49 @@ env.modify_contracts_toml('CONTRACTS_GENESIS_ROOT', genesisRoot); } +// This function deposits funds onto the forced exit sender account +// This is needed to make sure that it has the account id +async function prepareForcedExitRequestAccount() { + console.log('Depositing to the forced exit sender account'); + const forcedExitAccount = process.env.FORCED_EXIT_REQUESTS_SENDER_ACCOUNT_ADDRESS as string; + + // This is the private key of the first test account + const ethProvider = new ethers.providers.JsonRpcProvider(process.env.ETH_CLIENT_WEB3_URL); + const ethRichWallet = new ethers.Wallet( + '0x7726827caac94a7f9e1b160f7ea819f172f7b6f9d2a97f992c38edeab82d4110' + ).connect(ethProvider); + + const gasPrice = await ethProvider.getGasPrice(); + + const topupTransaction = await ethRichWallet.sendTransaction({ + to: forcedExitAccount, + // The amount for deposit should be enough to send at least + // one transaction to retrieve the funds from the forced exit smart contract + value: ethers.utils.parseEther('100.0'), + gasPrice + }); + + await topupTransaction.wait(); + + const mainZkSyncContract = new ethers.Contract( + process.env.CONTRACTS_CONTRACT_ADDR as string, + await utils.readZkSyncAbi(), + ethRichWallet + ); + + const depositTransaction = (await mainZkSyncContract.depositETH(forcedExitAccount, { + // Here the amount to deposit does not really matter, as it is done purely + // to guarantee that the account exists in the network + value: ethers.utils.parseEther('1.0'), + gasLimit: ethers.BigNumber.from('200000'), + gasPrice + })) as ethers.ContractTransaction; + + await depositTransaction.wait(); + + console.log('Deposit to the forced exit sender account has been successfully completed'); +} + export const command = new Command('server') .description('start zksync server') .option('--genesis', 'generate genesis data via server') diff 
--git a/infrastructure/zk/src/utils.ts b/infrastructure/zk/src/utils.ts index 02a9ecedc7..181e75ece2 100644 --- a/infrastructure/zk/src/utils.ts +++ b/infrastructure/zk/src/utils.ts @@ -133,3 +133,14 @@ export function web3Url() { // @ts-ignore return process.env.ETH_CLIENT_WEB3_URL.split(',')[0] as string; } + +export async function readZkSyncAbi() { + const zksync = process.env.ZKSYNC_HOME; + const path = `${zksync}/contracts/artifacts/cache/solpp-generated-contracts/ZkSync.sol/ZkSync.json`; + + const fileContent = (await fs.promises.readFile(path)).toString(); + + const abi = JSON.parse(fileContent).abi; + + return abi; +} diff --git a/yarn.lock b/yarn.lock index 6497082e32..bec13d0835 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1654,7 +1654,7 @@ dependencies: "@types/chai" "*" -"@types/chai@*", "@types/chai@^4.2.12", "@types/chai@^4.2.14": +"@types/chai@*", "@types/chai@^4.2.14": version "4.2.15" resolved "https://registry.yarnpkg.com/@types/chai/-/chai-4.2.15.tgz#b7a6d263c2cecf44b6de9a051cf496249b154553" integrity sha512-rYff6FI+ZTKAPkJUoyz7Udq3GaoDZnxYDEvdEdFZASiA7PoErltHezDishqQiSDWrGxvxmplH304jyzQmjp0AQ== @@ -1788,7 +1788,7 @@ resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-8.2.0.tgz#3eb56d13a1de1d347ecb1957c6860c911704bc44" integrity sha512-/Sge3BymXo4lKc31C8OINJgXLaw+7vL1/L1pGiBNpGrBiT8FQiaFpSYV0uhTaG4y78vcMBTMFsWaHDvuD+xGzQ== -"@types/mock-fs@^4.10.0", "@types/mock-fs@^4.13.0": +"@types/mock-fs@^4.13.0": version "4.13.0" resolved "https://registry.yarnpkg.com/@types/mock-fs/-/mock-fs-4.13.0.tgz#b8b01cd2db588668b2532ecd21b1babd3fffb2c0" integrity sha512-FUqxhURwqFtFBCuUj3uQMp7rPSQs//b3O9XecAVxhqS9y4/W8SIJEZFq2mmpnFVZBXwR/2OyPLE97CpyYiB8Mw== @@ -4537,7 +4537,7 @@ commander@^2.12.1, commander@^2.18.0, commander@^2.19.0, commander@^2.20.0: resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" integrity 
sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== -commander@^6.0.0, commander@^6.1.0, commander@^6.2.0: +commander@^6.0.0, commander@^6.2.0: version "6.2.1" resolved "https://registry.yarnpkg.com/commander/-/commander-6.2.1.tgz#0792eb682dfbc325999bb2b84fddddba110ac73c" integrity sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA== @@ -6273,7 +6273,7 @@ ethereumjs-wallet@0.6.5: utf8 "^3.0.0" uuid "^3.3.2" -ethers@^5.0.0, ethers@^5.0.1, ethers@^5.0.12, ethers@^5.0.18, ethers@^5.0.19, ethers@^5.0.2, ethers@^5.0.26: +ethers@^5.0.0, ethers@^5.0.1, ethers@^5.0.18, ethers@^5.0.19, ethers@^5.0.2, ethers@^5.0.26: version "5.0.31" resolved "https://registry.yarnpkg.com/ethers/-/ethers-5.0.31.tgz#60e3b1425864fe5d2babc147ede01be8382a7d2a" integrity sha512-zpq0YbNFLFn+t+ibS8UkVWFeK5w6rVMSvbSHrHAQslfazovLnQ/mc2gdN5+6P45/k8fPgHrfHrYvJ4XvyK/S1A== @@ -8447,14 +8447,6 @@ isobject@^3.0.0, isobject@^3.0.1: resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= -isomorphic-fetch@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz#611ae1acf14f5e81f729507472819fe9733558a9" - integrity sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk= - dependencies: - node-fetch "^1.0.1" - whatwg-fetch ">=0.10.0" - isomorphic-fetch@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/isomorphic-fetch/-/isomorphic-fetch-3.0.0.tgz#0267b005049046d2421207215d45d6a262b8b8b4" @@ -9730,7 +9722,7 @@ mocha@^7.1.2: yargs-parser "13.1.2" yargs-unparser "1.6.0" -mocha@^8.1.3, mocha@^8.2.0: +mocha@^8.2.0: version "8.3.0" resolved "https://registry.yarnpkg.com/mocha/-/mocha-8.3.0.tgz#a83a7432d382ae1ca29686062d7fdc2c36f63fe5" integrity sha512-TQqyC89V1J/Vxx0DhJIXlq9gbbL9XFNdeLQ1+JsnZsVaSOV1z3tWfw0qZmQJGQRIfkvZcs7snQnZnOCKoldq1Q== @@ -9947,7 +9939,12 @@ node-fetch@2.1.2: resolved 
"https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.1.2.tgz#ab884e8e7e57e38a944753cec706f788d1768bb5" integrity sha1-q4hOjn5X44qUR1POxwb3iNF2i7U= -node-fetch@^1.0.1, node-fetch@~1.7.1: +node-fetch@^2.6.0, node-fetch@^2.6.1: + version "2.6.1" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" + integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== + +node-fetch@~1.7.1: version "1.7.3" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.3.tgz#980f6f72d85211a5347c6b2bc18c5b84c3eb47ef" integrity sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ== @@ -9955,11 +9952,6 @@ node-fetch@^1.0.1, node-fetch@~1.7.1: encoding "^0.1.11" is-stream "^1.0.1" -node-fetch@^2.6.0, node-fetch@^2.6.1: - version "2.6.1" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" - integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== - node-forge@^0.10.0: version "0.10.0" resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3" @@ -14399,7 +14391,7 @@ whatwg-fetch@2.0.4: resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.4.tgz#dde6a5df315f9d39991aa17621853d720b85566f" integrity sha512-dcQ1GWpOD/eEQ97k66aiEVpNnapVj90/+R+SXTPYGHpYBBypfKJEQjLrvMZ7YXbKm21gXd4NcuxUTjiv1YtLng== -whatwg-fetch@>=0.10.0, whatwg-fetch@^3.4.1: +whatwg-fetch@^3.4.1: version "3.5.0" resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.5.0.tgz#605a2cd0a7146e5db141e29d1c62ab84c0c4c868" integrity sha512-jXkLtsR42xhXg7akoDKvKWE40eJeI+2KZqcp2h3NsOrRnDvtWX36KcKl30dy+hxECivdk2BVUHVNrPtoMBUx6A== @@ -14735,28 +14727,13 @@ yocto-queue@^0.1.0: resolved 
"https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== -zksync-crypto@^0.4.2: - version "0.4.5" - resolved "https://registry.yarnpkg.com/zksync-crypto/-/zksync-crypto-0.4.5.tgz#2941ea8dbada9178390afdcc3c93430f31b00784" - integrity sha512-XhhnNhc85BgfmLEAmA7YqrblUltfykrPxlbayGe0dQowroco+1ply3VEZbs0tSEDW5ssBwJHzRSYm3+dOCnzxQ== - zksync-crypto@^0.5.4: version "0.5.4" resolved "https://registry.yarnpkg.com/zksync-crypto/-/zksync-crypto-0.5.4.tgz#6b3ca224ce35bbf1843f20ffa526651c45000e03" integrity sha512-E5TrDnOijfbyqt3J38iYtqsSdytB68FsEEgiTd/YPVeF6Q1Fp+4ecVlUs+FlelY/OTRpWNkSLD9HGug1GpXHcw== -zksync@^0.9.0: - version "0.9.0" - resolved "https://registry.yarnpkg.com/zksync/-/zksync-0.9.0.tgz#7550f81a193e124464582e33062a03625a5e9b7f" - integrity sha512-5pRSsml/0fTNgkcmvTWi+Ar9+XFtdbpa6GSzx/DCCoHKvNLUUwEYIVYi5azTNnm7R1DEsJY1cWHyBhlyqI2RvA== - dependencies: - axios "^0.21.1" - websocket "^1.0.30" - websocket-as-promised "^1.1.0" - zksync-crypto "^0.4.2" - "zksync@link:sdk/zksync.js": - version "0.10.4" + version "0.10.7" dependencies: axios "^0.21.1" websocket "^1.0.30"