Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat(indexes): remove sync-v1 indexes #1200

Open
wants to merge 17 commits into
base: feat/consensus-change
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 3 additions & 8 deletions docs/event-queue-feature.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,19 +15,14 @@ When the Event Queue feature is enabled, the full node will generate specific ev

## Enabling the Event Queue

To enable the Event Queue feature, you must add two CLI options when running the full node:

1. Add `--unsafe-mode [network_name]`
2. Add `--x-enable-event-queue`
To enable the Event Queue feature, you must add this CLI option when running the full node: `--enable-event-queue`.

For example:

```bash
poetry run hathor-cli run_node --memory-storage --status 8080 --testnet --unsafe-mode testnet-golf --x-enable-event-queue
poetry run hathor-cli run_node --memory-storage --status 8080 --testnet --enable-event-queue
```

**ATTENTION**: While the Event Queue is in beta, it's considered unsafe. You must not use it in production environments.

### First run

If this is the first time your full node is running with the event queue enabled, there are 3 possibilities:
Expand All @@ -45,7 +40,7 @@ For case 2.2, an extra loading step will be performed during full node initializ

After running the full node with the Event Queue enabled, if you restart your full node (that is, stop it and then run it again), there are 2 possibilities:

1. You run the full node with the `--x-enable-event-queue` CLI option, that is, you keep the Event Queue enabled, or
1. You run the full node with the `--enable-event-queue` CLI option, that is, you keep the Event Queue enabled, or
2. You run the full node without the CLI option, that is, you don't enable it, but you **have to clear the event data in the database**.

For case 1, the full node will start normally, and continue to generate new events for synced vertices from where it stopped in the previous run.
Expand Down
24 changes: 2 additions & 22 deletions hathor/builder/builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,8 +182,6 @@ def __init__(self) -> None:

self._enable_stratum_server: Optional[bool] = None

self._full_verification: Optional[bool] = None

self._soft_voided_tx_ids: Optional[set[bytes]] = None

self._execution_manager: ExecutionManager | None = None
Expand Down Expand Up @@ -239,9 +237,6 @@ def build(self) -> BuildArtifacts:

kwargs: dict[str, Any] = {}

if self._full_verification is not None:
kwargs['full_verification'] = self._full_verification

if self._enable_event_queue is not None:
kwargs['enable_event_queue'] = self._enable_event_queue

Expand Down Expand Up @@ -372,8 +367,7 @@ def _get_or_create_consensus(self) -> ConsensusAlgorithm:
if self._consensus is None:
soft_voided_tx_ids = self._get_soft_voided_tx_ids()
pubsub = self._get_or_create_pubsub()
execution_manager = self._get_or_create_execution_manager()
self._consensus = ConsensusAlgorithm(soft_voided_tx_ids, pubsub, execution_manager=execution_manager)
self._consensus = ConsensusAlgorithm(soft_voided_tx_ids, pubsub)

return self._consensus

Expand Down Expand Up @@ -611,6 +605,7 @@ def _get_or_create_vertex_handler(self) -> VertexHandler:
verification_service=self._get_or_create_verification_service(),
consensus=self._get_or_create_consensus(),
feature_service=self._get_or_create_feature_service(),
execution_manager=self._get_or_create_execution_manager(),
pubsub=self._get_or_create_pubsub(),
wallet=self._get_or_create_wallet(),
)
Expand Down Expand Up @@ -778,21 +773,6 @@ def disable_sync_v2(self) -> 'Builder':
self._sync_v2_support = SyncSupportLevel.DISABLED
return self

def set_full_verification(self, full_verification: bool) -> 'Builder':
    """Record whether the node should run a full verification on startup.

    Returns this builder so calls can be chained.
    """
    self.check_if_can_modify()
    self._full_verification = full_verification
    return self

def enable_full_verification(self) -> 'Builder':
    """Turn on full verification; chainable."""
    return self.set_full_verification(True)

def disable_full_verification(self) -> 'Builder':
    """Turn off full verification; chainable."""
    return self.set_full_verification(False)

def enable_ipv6(self) -> 'Builder':
self.check_if_can_modify()
self._enable_ipv6 = True
Expand Down
37 changes: 13 additions & 24 deletions hathor/builder/cli_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,11 @@ def check_or_raise(self, condition: bool, message: str) -> None:
if not condition:
raise BuilderError(message)

def check_or_warn(self, condition: bool, message: str) -> None:
    """Emit `message` as a warning when `condition` does not hold."""
    if condition:
        return
    self.log.warn(message)

def create_manager(self, reactor: Reactor) -> HathorManager:
import hathor
from hathor.builder import SyncSupportLevel
Expand Down Expand Up @@ -190,20 +195,13 @@ def create_manager(self, reactor: Reactor) -> HathorManager:

hostname = self.get_hostname()

if self._args.sync_bridge:
raise BuilderError('--sync-bridge was removed')
elif self._args.sync_v1_only:
raise BuilderError('--sync-v1-only was removed')
elif self._args.sync_v2_only:
self.log.warn('--sync-v2-only is the default, this parameter has no effect')
elif self._args.x_remove_sync_v1:
self.log.warn('--x-remove-sync-v1 is deprecated and has no effect')
elif self._args.x_sync_bridge:
raise BuilderError('--x-sync-bridge was removed')
elif self._args.x_sync_v1_only:
raise BuilderError('--x-sync-v1-only was removed')
elif self._args.x_sync_v2_only:
self.log.warn('--x-sync-v2-only is deprecated and will be removed')
self.check_or_raise(not self._args.sync_bridge, '--sync-bridge was removed')
self.check_or_raise(not self._args.sync_v1_only, '--sync-v1-only was removed')
self.check_or_raise(not self._args.x_sync_bridge, '--x-sync-bridge was removed')
self.check_or_raise(not self._args.x_sync_v1_only, '--x-sync-v1-only was removed')
self.check_or_warn(not self._args.sync_v2_only, '--sync-v2-only is the default, this parameter has no effect')
self.check_or_warn(not self._args.x_remove_sync_v1, '--x-remove-sync-v1 is deprecated and has no effect')
self.check_or_warn(not self._args.x_sync_v2_only, '--x-sync-v2-only is deprecated and will be removed')

pubsub = PubSubManager(reactor)

Expand Down Expand Up @@ -236,19 +234,10 @@ def create_manager(self, reactor: Reactor) -> HathorManager:
self.log.debug('enable utxo index')
tx_storage.indexes.enable_utxo_index()

full_verification = False
if self._args.x_full_verification:
self.check_or_raise(
not self._args.x_enable_event_queue and not self._args.enable_event_queue,
'--x-full-verification cannot be used with --enable-event-queue'
)
full_verification = True

soft_voided_tx_ids = set(settings.SOFT_VOIDED_TX_IDS)
consensus_algorithm = ConsensusAlgorithm(
soft_voided_tx_ids,
pubsub=pubsub,
execution_manager=execution_manager
)

if self._args.x_enable_event_queue or self._args.enable_event_queue:
Expand Down Expand Up @@ -307,6 +296,7 @@ def create_manager(self, reactor: Reactor) -> HathorManager:
consensus=consensus_algorithm,
feature_service=self.feature_service,
pubsub=pubsub,
execution_manager=execution_manager,
wallet=self.wallet,
log_vertex_bytes=self._args.log_vertex_bytes,
)
Expand Down Expand Up @@ -339,7 +329,6 @@ def create_manager(self, reactor: Reactor) -> HathorManager:
wallet=self.wallet,
checkpoints=settings.CHECKPOINTS,
environment_info=get_environment_info(args=str(self._args), peer_id=str(peer.id)),
full_verification=full_verification,
enable_event_queue=self._args.x_enable_event_queue or self._args.enable_event_queue,
bit_signaling_service=bit_signaling_service,
verification_service=verification_service,
Expand Down
1 change: 1 addition & 0 deletions hathor/builder/resources_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,7 @@ def create_resources(self) -> server.Site:

# Set websocket factory in metrics. It'll be started when the manager is started.
self.manager.websocket_factory = ws_factory
self.manager.metrics.websocket_factory = ws_factory

self._built_status = True
return status_server
1 change: 0 additions & 1 deletion hathor/cli/events_simulator/events_simulator.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,6 @@ def execute(args: Namespace, reactor: 'ReactorProtocol') -> None:
simulator = Simulator(args.seed)
simulator.start()
builder = simulator.get_default_builder() \
.disable_full_verification() \
.enable_event_queue()

manager = simulator.create_peer(builder)
Expand Down
2 changes: 1 addition & 1 deletion hathor/cli/events_simulator/scenario.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ def simulate_invalid_mempool_transaction(simulator: 'Simulator', manager: 'Hatho
simulator.run(60)

# the transaction should have been removed from the mempool and the storage after the re-org
assert tx not in manager.tx_storage.iter_mempool_from_best_index()
assert tx not in manager.tx_storage.iter_mempool()
assert not manager.tx_storage.transaction_exists(tx.hash)
assert bool(tx.get_metadata().voided_by)
balance_per_address = manager.wallet.get_balance_per_address(settings.HATHOR_TOKEN_UID)
Expand Down
2 changes: 1 addition & 1 deletion hathor/cli/openapi_files/openapi_base.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
],
"info": {
"title": "Hathor API",
"version": "0.63.0"
"version": "0.63.1"
},
"consumes": [
"application/json"
Expand Down
17 changes: 7 additions & 10 deletions hathor/cli/run_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,6 @@ def create_parser(cls) -> ArgumentParser:
parser.add_argument('--cache-interval', type=int, help='Cache flush interval')
parser.add_argument('--recursion-limit', type=int, help='Set python recursion limit')
parser.add_argument('--allow-mining-without-peers', action='store_true', help='Allow mining without peers')
fvargs = parser.add_mutually_exclusive_group()
fvargs.add_argument('--x-full-verification', action='store_true', help='Fully validate the local database')
parser.add_argument('--procname-prefix', help='Add a prefix to the process name', default='')
parser.add_argument('--allow-non-standard-script', action='store_true', help='Accept non-standard scripts on '
'/push-tx API')
Expand All @@ -134,14 +132,13 @@ def create_parser(cls) -> ArgumentParser:
parser.add_argument('--sentry-dsn', help='Sentry DSN')
parser.add_argument('--enable-debug-api', action='store_true', help='Enable _debug/* endpoints')
parser.add_argument('--enable-crash-api', action='store_true', help='Enable _crash/* endpoints')
sync_args = parser.add_mutually_exclusive_group()
sync_args.add_argument('--sync-bridge', action='store_true', help=SUPPRESS) # deprecated
sync_args.add_argument('--sync-v1-only', action='store_true', help=SUPPRESS) # deprecated
sync_args.add_argument('--sync-v2-only', action='store_true', help=SUPPRESS) # deprecated
sync_args.add_argument('--x-remove-sync-v1', action='store_true', help=SUPPRESS) # deprecated
sync_args.add_argument('--x-sync-v1-only', action='store_true', help=SUPPRESS) # deprecated
sync_args.add_argument('--x-sync-v2-only', action='store_true', help=SUPPRESS) # deprecated
sync_args.add_argument('--x-sync-bridge', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--sync-bridge', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--sync-v1-only', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--sync-v2-only', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--x-remove-sync-v1', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--x-sync-v1-only', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--x-sync-v2-only', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--x-sync-bridge', action='store_true', help=SUPPRESS) # deprecated
parser.add_argument('--x-localhost-only', action='store_true', help='Only connect to peers on localhost')
parser.add_argument('--x-rocksdb-indexes', action='store_true', help=SUPPRESS)
parser.add_argument('--x-enable-event-queue', action='store_true',
Expand Down
1 change: 0 additions & 1 deletion hathor/cli/run_node_args.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,6 @@ class RunNodeArgs(BaseModel, extra=Extra.allow):
cache_interval: Optional[int]
recursion_limit: Optional[int]
allow_mining_without_peers: bool
x_full_verification: bool
procname_prefix: str
allow_non_standard_script: bool
max_output_script_size: Optional[int]
Expand Down
4 changes: 0 additions & 4 deletions hathor/conf/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,10 +359,6 @@ def GENESIS_TX2_TIMESTAMP(self) -> int:
# Amount in which tx min weight reaches the middle point between the minimum and maximum weight
MIN_TX_WEIGHT_K: int = 100

# When the node is being initialized (with a full verification) we don't verify
# the difficulty of all blocks, we execute the validation every N blocks only
VERIFY_WEIGHT_EVERY_N_BLOCKS: int = 1000

# Capabilities
CAPABILITY_WHITELIST: str = 'whitelist'
CAPABILITY_SYNC_VERSION: str = 'sync-version'
Expand Down
75 changes: 23 additions & 52 deletions hathor/consensus/block_consensus.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.

from itertools import chain
from typing import TYPE_CHECKING, Any, Iterable, Optional, cast
from typing import TYPE_CHECKING, Any, Iterable, Optional

from structlog import get_logger

Expand Down Expand Up @@ -145,10 +145,6 @@ def update_voided_info(self, block: Block) -> None:
meta = block.get_metadata()
if not meta.voided_by:
storage.indexes.height.add_new(block.get_height(), block.hash, block.timestamp)
storage.update_best_block_tips_cache([block.hash])
# The following assert must be true, but it is commented out for performance reasons.
if self._settings.SLOW_ASSERTS:
assert len(storage.get_best_block_tips(skip_cache=True)) == 1
else:
# Resolve all other cases, but (i).
log = self.log.new(block=block.hash_hex)
Expand All @@ -161,16 +157,9 @@ def update_voided_info(self, block: Block) -> None:
self.mark_as_voided(block, skip_remove_first_block_markers=True)

# Get the score of the best chains.
heads = [cast(Block, storage.get_transaction(h)) for h in storage.get_best_block_tips()]
best_score: int | None = None
for head in heads:
head_meta = head.get_metadata(force_reload=True)
if best_score is None:
best_score = head_meta.score
else:
# All heads must have the same score.
assert best_score == head_meta.score
assert best_score is not None
head = storage.get_best_block()
head_meta = head.get_metadata(force_reload=True)
best_score = head_meta.score

# Calculate the score.
# We cannot calculate score before getting the heads.
Expand All @@ -185,20 +174,27 @@ def update_voided_info(self, block: Block) -> None:
# Either everyone has the same score or there is a winner.

valid_heads = []
for head in heads:
meta = head.get_metadata()
if not meta.voided_by:
valid_heads.append(head)
if not head_meta.voided_by:
valid_heads.append(head)

# We must have at most one valid head.
# Either we have a single best chain or all chains have already been voided.
assert len(valid_heads) <= 1, 'We must never have more than one valid head'

# Add voided_by to all heads.
common_block = self._find_first_parent_in_best_chain(block)
self.add_voided_by_to_multiple_chains(block, heads, common_block)

winner = False
if score > best_score:
winner = True
else:
min_hash: bytes = head.hash
if block.hash < min_hash:
winner = True

if winner:
# Add voided_by to all heads.
self.add_voided_by_to_multiple_chains(block, [head], common_block)

# We have a new winner candidate.
self.update_score_and_mark_as_the_best_chain_if_possible(block)
# As `update_score_and_mark_as_the_best_chain_if_possible` may affect `voided_by`,
Expand All @@ -209,16 +205,9 @@ def update_voided_info(self, block: Block) -> None:
self.log.debug('index new winner block', height=height, block=block.hash_hex)
# We update the height cache index with the new winner chain
storage.indexes.height.update_new_chain(height, block)
storage.update_best_block_tips_cache([block.hash])
# It is only a re-org if common_block not in heads
if common_block not in heads:
if common_block != head:
self.context.mark_as_reorg(common_block)
else:
best_block_tips = [blk.hash for blk in heads]
best_block_tips.append(block.hash)
storage.update_best_block_tips_cache(best_block_tips)
if not meta.voided_by:
self.context.mark_as_reorg(common_block)

def union_voided_by_from_parents(self, block: Block) -> set[bytes]:
"""Return the union of the voided_by of block's parents.
Expand Down Expand Up @@ -286,31 +275,13 @@ def update_score_and_mark_as_the_best_chain_if_possible(self, block: Block) -> N
self.update_score_and_mark_as_the_best_chain(block)
self.remove_voided_by_from_chain(block)

best_score: int
if self.update_voided_by_from_parents(block):
storage = block.storage
heads = [cast(Block, storage.get_transaction(h)) for h in storage.get_best_block_tips()]
best_score = 0
best_heads: list[Block]
for head in heads:
head_meta = head.get_metadata(force_reload=True)
if head_meta.score < best_score:
continue

if head_meta.score > best_score:
best_heads = [head]
best_score = head_meta.score
else:
assert best_score == head_meta.score
best_heads.append(head)
assert isinstance(best_score, int) and best_score > 0

assert len(best_heads) > 0
first_block = self._find_first_parent_in_best_chain(best_heads[0])
self.add_voided_by_to_multiple_chains(best_heads[0], [block], first_block)
if len(best_heads) == 1:
assert best_heads[0].hash != block.hash
self.update_score_and_mark_as_the_best_chain_if_possible(best_heads[0])
head = storage.get_best_block()
first_block = self._find_first_parent_in_best_chain(head)
self.add_voided_by_to_multiple_chains(head, [block], first_block)
assert head.hash != block.hash
self.update_score_and_mark_as_the_best_chain_if_possible(head)

def update_score_and_mark_as_the_best_chain(self, block: Block) -> None:
""" Update score and mark the chain as the best chain.
Expand Down
Loading