diff --git a/.github/ISSUE_TEMPLATE/ask-a-question.md b/.github/ISSUE_TEMPLATE/ask-a-question.md
index e503998..1bb285b 100644
--- a/.github/ISSUE_TEMPLATE/ask-a-question.md
+++ b/.github/ISSUE_TEMPLATE/ask-a-question.md
@@ -6,5 +6,4 @@ labels: 'type:docs'
assignees: ''
---
-This should only be used in very rare cases e.g. if you are not 100% sure if something is a bug or asking a question that leads to improving the documentation. For general questions please use [Discord](https://discord.gg/cGKSsRVCGm) or [Telegram](https://t.me/TronOfficialDevelopersGroupEn).
-
+If you are not 100% sure whether something is a bug, or you are asking a question that leads to improving the documentation, you can use this template. For general questions please use [Discord](https://discord.gg/cGKSsRVCGm) or [Telegram](https://t.me/TronOfficialDevelopersGroupEn).
diff --git a/.github/ISSUE_TEMPLATE/report-a-bug.md b/.github/ISSUE_TEMPLATE/report-a-bug.md
index a0d6812..c5c62d0 100644
--- a/.github/ISSUE_TEMPLATE/report-a-bug.md
+++ b/.github/ISSUE_TEMPLATE/report-a-bug.md
@@ -14,7 +14,7 @@ assignees: ''
#### Software Versions
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/conf/checkstyle/checkStyleAll.xml b/conf/checkstyle/checkStyleAll.xml
new file mode 100644
index 0000000..5ba7ec1
--- /dev/null
+++ b/conf/checkstyle/checkStyleAll.xml
@@ -0,0 +1,237 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/conf/main_net_config.conf b/conf/main_net_config.conf
index 31bb8ac..671117b 100644
--- a/conf/main_net_config.conf
+++ b/conf/main_net_config.conf
@@ -229,7 +229,7 @@ node {
# Transactions can only be broadcast if the number of effective connections is reached.
minEffectiveConnection = 1
-
+
# The switch of the reflection service, effective for all gRPC services
# reflectionService = true
}
@@ -733,4 +733,4 @@ event.subscribe = {
"" // contract topic you want to subscribe, if it's set to "", you will receive contract logs/events with any contract topic.
]
}
-}
\ No newline at end of file
+}
diff --git a/conf/nile_net_config.conf b/conf/nile_net_config.conf
index e86ba0d..460adcc 100644
--- a/conf/nile_net_config.conf
+++ b/conf/nile_net_config.conf
@@ -565,4 +565,4 @@ event.subscribe = {
"" // contract topic you want to subscribe, if it's set to "", you will receive contract logs/events with any contract topic.
]
}
-}
\ No newline at end of file
+}
diff --git a/conf/private_net_config_witness1.conf b/conf/private_net_config_witness1.conf
index 15328a2..f4688c6 100644
--- a/conf/private_net_config_witness1.conf
+++ b/conf/private_net_config_witness1.conf
@@ -96,7 +96,7 @@ node {
isOpenFullTcpDisconnect = true
p2p {
- version = 1 # 11111: mainnet; 20180622: testnet; you can set other number when you deploy one private net, but the node must have the same number in some private net.
+ version = 1 # 11111: mainnet; 20180622: testnet; you can set other number when you deploy one private net, but the node must have the same number in some private net.
}
active = [
@@ -276,7 +276,7 @@ localwitness = [
]
#localwitnesskeystore = [
-# "src/main/resources/localwitnesskeystore.json" # if you do not set the localwitness above, you must set this value.Otherwise,your SuperNode can not produce the block.
+# "src/main/resources/localwitnesskeystore.json" # if you do not set the localwitness above, you must set this value.Otherwise,your SuperNode can not produce the block.
#]
block = {
diff --git a/conf/private_net_config_witness2.conf b/conf/private_net_config_witness2.conf
index 2b2f72f..1c2dcb1 100644
--- a/conf/private_net_config_witness2.conf
+++ b/conf/private_net_config_witness2.conf
@@ -96,7 +96,7 @@ node {
isOpenFullTcpDisconnect = true
p2p {
- version = 1 # 11111: mainnet; 20180622: testnet; you can set other number when you deploy one private net, but the node must have the same number in some private net.
+ version = 1 # 11111: mainnet; 20180622: testnet; you can set other number when you deploy one private net, but the node must have the same number in some private net.
}
active = [
@@ -278,7 +278,7 @@ localwitness = [
]
#localwitnesskeystore = [
-# "src/main/resources/localwitnesskeystore.json" # if you do not set the localwitness above, you must set this value.Otherwise,your SuperNode can not produce the block.
+# "src/main/resources/localwitnesskeystore.json" # if you do not set the localwitness above, you must set this value.Otherwise,your SuperNode can not produce the block.
#]
block = {
diff --git a/images/github_issue_ask_a_question.png b/images/github_issue_ask_a_question.png
new file mode 100644
index 0000000..a9b22a9
Binary files /dev/null and b/images/github_issue_ask_a_question.png differ
diff --git a/images/pr_compare_forks.png b/images/pr_compare_forks.png
new file mode 100644
index 0000000..e53492a
Binary files /dev/null and b/images/pr_compare_forks.png differ
diff --git a/images/shadow-fork.png b/images/shadow-fork.png
new file mode 100644
index 0000000..4624099
Binary files /dev/null and b/images/shadow-fork.png differ
diff --git a/metric_monitor/README.md b/metric_monitor/README.md
index ccf9833..c35518e 100644
--- a/metric_monitor/README.md
+++ b/metric_monitor/README.md
@@ -24,7 +24,7 @@ It will start a TRON FullNode that connects to the Mainnet, along with Prometheu
```
metrics{
prometheus{
- enable=true
+ enable=true
port="9527"
}
}
@@ -34,10 +34,10 @@ metrics{
The Prometheus service will use the configuration file [prometheus.yml](metric_conf/prometheus.yml). It uses the configuration below to add targets for monitoring.
```
- targets:
- - tron_node1:9527 # use container name or local IP address
+ - tron_node1:9527 # use container name
labels:
- group: group-tron
- instance: fullnode-01
+ group: group-tron
+ instance: fullnode-01
```
You can view the running status of the Prometheus service at `http://localhost:9090/`. Click on "Status" -> "Configuration" to check whether the configuration file used by the container is correct.
@@ -54,7 +54,7 @@ Click the **Connections** on the left side of the main page and select "Data Sou
![image](../images/grafana_data_source.png)
#### Import dashboard
-For the convenience of java-tron node deployers, the TRON community provides a comprehensive dashboard configuration file [grafana_dashboard_tron_server.json](metric_conf/grafana_dashboard_tron_server.json).
+For the convenience of java-tron node deployers, the TRON community provides a comprehensive dashboard configuration file [grafana_dashboard_tron_server.json](metric_conf/grafana_dashboard_tron_server.json).
Click the Grafana dashboards icon on the left, then select "New" and "Import", then click "Upload JSON file" to import the downloaded dashboard configuration file:
![image](../images/grafana_dashboard.png)
@@ -105,30 +105,30 @@ Follow the example dashboard to add more panels.
### Block status
Used to check the block process performance from TronNetDelegate:
-- `tron:block_process_latency_seconds_bucket`: Cumulative counters
-- `tron:block_process_latency_seconds_count`: Count of events
+- `tron:block_process_latency_seconds_bucket`: Cumulative counters
+- `tron:block_process_latency_seconds_count`: Count of events
- `tron:block_process_latency_seconds_sum`: Total sum of all observed values
Used to check the block processing latency from the Manager, which is invoked by TronNetDelegate:
-- `tron:block_push_latency_seconds_bucket`: Cumulative counters
-- `tron:block_push_latency_seconds_count`: Count of events
-- `tron:block_push_latency_seconds_sum`: Total sum of all observed values
+- `tron:block_push_latency_seconds_bucket`: Cumulative counters
+- `tron:block_push_latency_seconds_count`: Count of events
+- `tron:block_push_latency_seconds_sum`: Total sum of all observed values
When handling the above block push logic, TRON's processing logic needs to acquire a synchronization lock. The `lock_acquire_latency_seconds_x` metric is used to indicate the latency.
-- `tron:lock_acquire_latency_seconds_bucket`: Cumulative counters
-- `tron:lock_acquire_latency_seconds_count`: Count of events
+- `tron:lock_acquire_latency_seconds_bucket`: Cumulative counters
+- `tron:lock_acquire_latency_seconds_count`: Count of events
- `tron:lock_acquire_latency_seconds_sum`: Total sum of all observed values
Used to check the block latency received from peers and not from sync requests:
-- `tron:block_fetch_latency_seconds_bucket`: Cumulative counters
-- `tron:block_fetch_latency_seconds_count`: Count of events
+- `tron:block_fetch_latency_seconds_bucket`: Cumulative counters
+- `tron:block_fetch_latency_seconds_count`: Count of events
- `tron:block_fetch_latency_seconds_sum`: Total sum of all observed values
- `tron:block_receive_delay_seconds_bucket/count/sum`
Verify the latency of all transactions' signatures when processing a block:
-- `tron:verify_sign_latency_seconds_bucket`: Cumulative counters for
-- `tron:verify_sign_latency_seconds_count`: Count of events
-- `tron:verify_sign_latency_seconds_sum`: Total sum of all observed values
+- `tron:verify_sign_latency_seconds_bucket`: Cumulative counters for
+- `tron:verify_sign_latency_seconds_count`: Count of events
+- `tron:verify_sign_latency_seconds_sum`: Total sum of all observed values
Check the usage from dashboard panel (enter edit mode), or by searching in [grafana_dashboard_tron_server.json](metric_conf/grafana_dashboard_tron_server.json).
![image](../images/metric_block_latency.png)
@@ -140,7 +140,7 @@ Check the usage from dashboard panel (enter edit mode), or by searching in [graf
Average transaction processing time:
- `tron:process_transaction_latency_seconds_bucket`: Cumulative counters
- `tron:process_transaction_latency_seconds_count`: Count of event
-- `tron:process_transaction_latency_seconds_sum`: Total sum of all observed values
+- `tron:process_transaction_latency_seconds_sum`: Total sum of all observed values
### Network peer status
@@ -149,7 +149,7 @@ TRON peers info and abnormal statistic metrics:
- `tron:p2p_disconnect_total`
- `tron:p2p_error_total`
-The latency exceeds 50ms to process a message from a peer will be logged by the below metrics:
+If the latency to process a message from a peer exceeds 50ms, it will be logged by the metrics below:
- `tron:message_process_latency_seconds_bucket`: Cumulative counters for
- `tron:message_process_latency_seconds_count`: Count of events
- `tron:message_process_latency_seconds_sum`: Total sum of all observed values
@@ -174,9 +174,9 @@ Http request data traffic statistics:
- `tron:http_bytes_count`:Count of events
- `tron:http_bytes_sum`: Total sum of all observed values
-Http/GRPC request latency metrics:
-- `tron:http_service_latency_seconds_bucket`: Cumulative counters
-- `tron:http_service_latency_seconds_count`: Count of events
+Http/GRPC request latency metrics:
+- `tron:http_service_latency_seconds_bucket`: Cumulative counters
+- `tron:http_service_latency_seconds_count`: Count of events
- `tron:http_service_latency_seconds_sum`: Total sum of all observed values
- `tron:grpc_service_latency_seconds_bucket/count/sum`
- `tron:internal_service_latency_seconds_bucket/count/sum`
@@ -207,7 +207,7 @@ Currently, for `db` values of above metrics TRON has below possible objects:
- trans
- contract
- storage-row
-- block
+- block
- exchange
- DelegatedResource
- tree-block-index
@@ -280,11 +280,8 @@ Currently, for `db` values of above metrics TRON has below possible objects:
* `jvm_memory_pool_collection_used_bytes`: Used bytes after last collection of a given JVM memory pool
### Other metrics
-Beside above metrics, there are also metrics to measure the duration of a scrape process, which is useful for monitoring and understanding the performance of your Prometheus server and the targets it scrapes.
+Beside above metrics, there are also metrics to measure the duration of a scrape process, which is useful for monitoring and understanding the performance of your Prometheus server and the targets it scrapes.
- `scrape_duration_seconds`: It measures the time taken (in seconds) for Prometheus to scrape a target. This includes the entire process of making an HTTP request to the target, receiving the response, and processing the metrics.
- `scrape_samples_post_metric_relabeling`
- `scrape_samples_scraped`
-- `scrape_series_added`
-
-
-
+- `scrape_series_added`
diff --git a/metric_monitor/docker-compose.yml b/metric_monitor/docker-compose.yml
index 7e3cc63..4d9b57c 100644
--- a/metric_monitor/docker-compose.yml
+++ b/metric_monitor/docker-compose.yml
@@ -1,5 +1,4 @@
version: '3.8'
-
services:
tron_node1:
image: tronprotocol/java-tron:latest
@@ -11,15 +10,13 @@ services:
limits:
memory: 16g
ports:
- - "8090:8090" # for external http API request
- - "9527:9527" # used for metric API
+ - "8090:8090" # for external http API request
+ - "9527:9527" # used for metric API
volumes:
- ../conf:/java-tron/conf
- ./datadir:/java-tron/data # mount a local directory to make the blocks data persistent.
command: >
- -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC -XX:+PrintGC}"
- -c ./conf/main_net_config.conf
- -d /java-tron/data
+ -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC -XX:+PrintGC}" -c ./conf/main_net_config.conf -d /java-tron/data
prometheus:
image: prom/prometheus:latest
@@ -34,7 +31,6 @@ services:
- "9090:9090" # used for external collect metrics
volumes:
- ./metric_conf/prometheus.yml:/etc/prometheus/prometheus.yml
-
grafana:
image: grafana/grafana-oss
container_name: grafana
@@ -46,7 +42,6 @@ services:
memory: 1g
ports:
- "3000:3000"
-
networks:
tron_network:
- driver: bridge
\ No newline at end of file
+ driver: bridge
diff --git a/metric_monitor/metric_conf/grafana_dashboard_tron_server.json b/metric_monitor/metric_conf/grafana_dashboard_tron_server.json
index 65a4724..32d27fd 100644
--- a/metric_monitor/metric_conf/grafana_dashboard_tron_server.json
+++ b/metric_monitor/metric_conf/grafana_dashboard_tron_server.json
@@ -4906,4 +4906,4 @@
"uid": "WAY66gQ07X",
"version": 46,
"weekStart": ""
-}
\ No newline at end of file
+}
diff --git a/metric_monitor/metric_conf/prometheus.yml b/metric_monitor/metric_conf/prometheus.yml
index 3d4e9b5..bb432e2 100644
--- a/metric_monitor/metric_conf/prometheus.yml
+++ b/metric_monitor/metric_conf/prometheus.yml
@@ -20,4 +20,4 @@ scrape_configs:
# - tron_node2:9527
# labels:
# group: group-tron
-# instance: fullnode-01
\ No newline at end of file
+# instance: fullnode-01
diff --git a/private_net/README.md b/private_net/README.md
index d7c6c44..c52f25c 100644
--- a/private_net/README.md
+++ b/private_net/README.md
@@ -77,7 +77,7 @@ For all configurations, you need to set `node.p2p.version` to the same value and
```
node {
p2p {
- version = 1 # 11111: mainnet; 20180622: nilenet; others for private networks.
+ version = 1 # 11111: mainnet; 20180622: nilenet; others for private networks.
}
...
}
@@ -112,7 +112,7 @@ localwitness = [
genesis.block {
assets = [ # set account initial balance
...
- {
+ {
accountName = "TestE"
accountType = "AssetIssue"
address = "TCjptjyjenNKB2Y6EwyVT43DQyUUorxKWi"
@@ -127,17 +127,17 @@ genesis.block {
voteCount = 5000
}
]
-
+
```
**P2P node discovery setting**
In witness configure file, make sure `node.listen.port` is set for p2p peer discovery.
```
-node {
+node {
listen.port = 18888
- ...
-}
+ ...
+}
```
Then, in other configuration files, add witness `container_name:port` to connect to the newly added witness fullnodes.
@@ -147,7 +147,7 @@ seed.node = {
# used for docker deployment, to connect containers in tron_witness defined in docker-compose.yml
"tron_witness1:18888",
"tron_witness2:18888",
- ...
+ ...
]
}
```
@@ -169,11 +169,11 @@ block = {
maintenanceTimeInterval = 300000 # 5mins, default is 6 hours
proposalExpireTime = 600000 # 10mins, default is 3 days
}
-```
+```
You could also disable/enable the following committee-approved settings with `0` or `1`:
```
committee = {
- allowCreationOfContracts = 1
+ allowCreationOfContracts = 1
allowAdaptiveEnergy = 0
allowMultiSign = 1
allowDelegateResource = 1
diff --git a/private_net/docker-compose.yml b/private_net/docker-compose.yml
index bb1332f..d03857f 100644
--- a/private_net/docker-compose.yml
+++ b/private_net/docker-compose.yml
@@ -1,5 +1,4 @@
version: '3.8'
-
services:
tron_witness1:
image: tronprotocol/java-tron:latest
@@ -11,33 +10,29 @@ services:
limits:
memory: 16g
ports:
- - "8090:8090" # for external http API request
- - "50051:50051" # for external rpc API request
+ - "8090:8090" # for external http API request
+ - "50051:50051" # for external rpc API request
volumes:
- ../conf:/java-tron/conf
- ./datadir:/java-tron/data # mount a local directory to make the blocks data persistent.
- command: >
- -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}"
- -c /java-tron/conf/private_net_config_witness1.conf
- -d /java-tron/data
- -w
-
-# tron_witness2:
-# image: tronprotocol/java-tron:latest
-# container_name: tron_witness2 # change container_name
-# networks:
-# - tron_private_network
-# deploy:
-# resources:
-# limits:
-# memory: 16g
-# volumes:
-# - ../conf:/java-tron/conf
-# command: >
-# -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}"
-# -c /java-tron/conf/private_net_config_witness2.conf
-# -w
+ command: >
+ -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}" -c /java-tron/conf/private_net_config_witness1.conf -d /java-tron/data -w
+ # tron_witness2:
+ # image: tronprotocol/java-tron:latest
+ # container_name: tron_witness2 # change container_name
+ # networks:
+ # - tron_private_network
+ # deploy:
+ # resources:
+ # limits:
+ # memory: 16g
+ # volumes:
+ # - ../conf:/java-tron/conf
+ # command: >
+ # -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}"
+ # -c /java-tron/conf/private_net_config_witness2.conf
+ # -w
tron_node1:
image: tronprotocol/java-tron:latest
container_name: tron_node1
@@ -50,8 +45,7 @@ services:
volumes:
- ../conf:/java-tron/conf
command: >
- -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}"
- -c /java-tron/conf/private_net_config_others.conf
+ -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}" -c /java-tron/conf/private_net_config_others.conf
# tron_node2:
# image: tronprotocol/java-tron:latest
@@ -67,7 +61,6 @@ services:
# command: >
# -jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}"
# -c /java-tron/conf/private_net_config_others.conf
-
networks:
tron_private_network:
driver: bridge
diff --git a/run-checkstyle.sh b/run-checkstyle.sh
new file mode 100755
index 0000000..3d574ff
--- /dev/null
+++ b/run-checkstyle.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+# Define the URL for the Checkstyle JAR file
+CHECKSTYLE_VERSION="8.42"
+CHECKSTYLE_JAR="checkstyle-${CHECKSTYLE_VERSION}-all.jar"
+CHECKSTYLE_URL="https://github.com/checkstyle/checkstyle/releases/download/checkstyle-${CHECKSTYLE_VERSION}/${CHECKSTYLE_JAR}"
+
+# Define the directory where the JAR file should be stored
+LIB_DIR="libs"
+
+# Create the directory if it does not exist
+mkdir -p "$LIB_DIR"
+
+# Define the full path to the JAR file
+CHECKSTYLE_PATH="${LIB_DIR}/${CHECKSTYLE_JAR}"
+
+# Check if the JAR file already exists
+if [ -f "$CHECKSTYLE_PATH" ]; then
+  echo "Checkstyle JAR file already exists at ${CHECKSTYLE_PATH}"
+else
+  echo "Checkstyle JAR file not found. Downloading from ${CHECKSTYLE_URL}..."
+  curl -L -o "$CHECKSTYLE_PATH" "$CHECKSTYLE_URL" && echo "downloaded successfully"
+fi
+
+# Add the lib directory to .gitignore if it's not already present
+GITIGNORE_FILE=".gitignore"
+
+if ! grep -q "^${LIB_DIR}/$" "$GITIGNORE_FILE"; then
+  echo "Adding ${LIB_DIR}/ to ${GITIGNORE_FILE}"
+  echo "${LIB_DIR}/" >> "$GITIGNORE_FILE"
+else
+  echo "${LIB_DIR}/ is already in ${GITIGNORE_FILE}"
+fi
+
+# Check if there are any Java files in the project
+JAVA_FILES_FOUND=$(find . -name "*.java")
+
+# if there is java file go ahead to trigger checkStyle for all java files
+if [ -z "$JAVA_FILES_FOUND" ]; then
+  echo "No Java files found in the project."
+  exit 0
+else
+  echo "Java files found in the project. Now run further checks."
+fi
+
+# Path to your Checkstyle configuration file
+CHECKSTYLE_CONFIG="./conf/checkstyle/checkStyleAll.xml"
+
+# shellcheck disable=SC2027
+# shellcheck disable=SC2046
+
+All_Java_Files=$(find . -name "*.java")
+
+# Run Checkstyle on all Java files in the repository. $All_Java_Files is
+# deliberately unquoted so each path becomes a separate argument.
+# shellcheck disable=SC2086
+java -jar "$CHECKSTYLE_PATH" -c "$CHECKSTYLE_CONFIG" $All_Java_Files
+# Capture the exit code of Checkstyle immediately, before echo overwrites $?.
+STATUS=$?
+
+echo "finish ...."
+
+# Exit with the same status code as Checkstyle
+exit $STATUS
diff --git a/single_node/README.md b/single_node/README.md
index 59306df..3b06b38 100644
--- a/single_node/README.md
+++ b/single_node/README.md
@@ -61,10 +61,10 @@ You can run the following command to start java-tron:
```
docker run -it --name tron -d --memory="16g" \
-p 8090:8090 -p 8091:8091 -p 18888:18888 -p 18888:18888/udp -p 50051:50051 \
-tronprotocol/java-tron
+tronprotocol/java-tron
```
The `-p` flag specifies the ports that the container needs to map to the host machine.
-`--memory="16g"` sets the memory limit to 16GB, ensuring that the TRON container gets enough memory.
+`--memory="16g"` sets the memory limit to 16GB, ensuring that the TRON container gets enough memory.
By default, it will use the [configuration](https://github.com/tronprotocol/java-tron/blob/develop/framework/src/main/resources/config.conf),
which sets the fullNode to connect to the mainnet with genesis block settings in `genesis.block`.
@@ -77,8 +77,8 @@ For abnormal cases, please check the troubleshooting section below.
This image also supports customizing some startup parameters. Here is an example for running a FullNode as a witness with a customized configuration file:
```
docker run -it --name tron -d -p 8090:8090 -p 8091:8091 -p 18888:18888 -p 18888:18888/udp -p 50051:50051 --memory="16g" \
- -v /host/path/java-tron/conf:/java-tron/conf \
- -v /host/path/java-tron/datadir:/java-tron/data \
+ -v /host/path/java-tron/conf:/java-tron/conf \
+ -v /host/path/java-tron/datadir:/java-tron/data \
tronprotocol/java-tron \
-jvm "{-Xmx16g -Xms12g -XX:+UseConcMarkSweepGC}" \
-c /java-tron/conf/config-localtest.conf \
diff --git a/tools/dbfork/README.md b/tools/dbfork/README.md
new file mode 100644
index 0000000..fee2cc6
--- /dev/null
+++ b/tools/dbfork/README.md
@@ -0,0 +1,150 @@
+## Database Fork Tool
+Database fork tool can help launch a private java-tron FullNode or network based on the state of public chain database to support shadow fork testing.
+The public chain database can come from the Mainnet, Nile Testnet, or Shasta Testnet.
+
+Database fork tool provides the ability to modify the witnesses and other related data in the database to
+implement shadow fork testing, which includes:
+- Erase the historical witnesses and active witnesses
+- Write new witnesses to the state and update new active witnesses
+- Write new balance for new addresses
+- Modify the owner permission of existing account to simulate the account operation
+- Set the new `latestBlockHeaderTimestamp` to avoid the delay in producing blocks
+- Set the new `maintenanceTimeInterval` and `nextMaintenanceTime` optionally to facilitate testing
+
+After launching the shadow fork FullNode or network,
+developers can connect and interact with the node by [wallet-cli](https://tronprotocol.github.io/documentation-en/clients/wallet-cli/),
+[TronBox](https://developers.tron.network/reference/what-is-tronbox), [Tron-IDE](https://developers.tron.network/docs/tron-ide) or other tools, and execute the shadow fork testing.
+
+The whole procedure of shadow fork is described in the following figure:
+
+![image](../../images/shadow-fork.png)
+
+### Obtain the state data
+To use the DBFork tool, we need to obtain the state data of the public chain first. There are three possible ways:
+
+- Download the [Lite FullNode](https://tronprotocol.github.io/documentation-en/using_javatron/backup_restore/#lite-fullnode-data-snapshot) data snapshot;
+
+- Download the [Full Node](https://tronprotocol.github.io/documentation-en/using_javatron/backup_restore/#fullnode-data-snapshot) data snapshot;
+
+- Launch the FullNode and sync directly.
+
+If we want the state data of specified block height, we can modify the `node.shutdown.BlockHeight` option in the [config](https://github.com/tronprotocol/tron-deployment/blob/master/main_net_config.conf) to make the FullNode sync to the target height after downloading the snapshot.
+```conf
+node.shutdown = {
+# BlockTime = "54 59 08 * * ?" # if block header time in persistent db matched.
+ BlockHeight = 33350800 # if block header height in persistent db matched.
+# BlockCount = 12 # block sync count after node start.
+}
+```
+
+Please refer [Startup a fullnode](https://tronprotocol.github.io/documentation-en/using_javatron/installing_javatron/#startup-a-fullnode) to launch the FullNode and sync to the specified block number. The FullNode will halt when it syncs to the target block height.
+
+**Note**: `node.shutdown.BlockHeight` is the solidified block height. You can use the `/wallet/getnowblock` api to check the latest block height, which should be `node.shutdown.BlockHeight + 20` when the FullNode halts.
+
+If you need to perform multiple shadow fork tests, you'd better backup the `output-directory` using the [Toolkit data copy](https://tronprotocol.github.io/documentation-en/using_javatron/toolkit/#data-copy) tool.
+```shell
+java -jar Toolkit.jar db copy output-directory output-directory-bak
+```
+
+
+### Run the DBFork tool
+Run the DBFork tool in the Toolkit to modify the related data. The available parameters are:
+- `-c | --config=`: configure the new witnesses, balances, etc. for shadow
+  fork. Default: fork.conf
+- `-d | --database-directory=`: java-tron database directory path. Default: output-directory
+- `--db-engine=`:
+ database engine: leveldb or rocksdb. Default: leveldb
+- `-h | --help`
+- `-r | --retain-witnesses`: retain the previous witnesses and active witnesses. Default: false
+
+The example of `fork.conf` can be:
+
+```conf
+witnesses = [
+ {
+ address = "TS1hu4ZCcwBFYpQqUGoWy1GWBzamqxiT5W"
+ url = "http://meme5.com"
+ voteCount = 100000036
+ },
+ {
+ address = "TRY18iTFy6p8yhWiCt1dhd2gz2c15ungq3"
+ voteCount = 100000035
+ }
+]
+
+accounts = [
+ {
+ address = "TS1hu4ZCcwBFYpQqUGoWy1GWBzamqxiT5W"
+ accountName = "Meme"
+ balance = 99000000000000000
+ },
+ {
+ address = "TRY18iTFy6p8yhWiCt1dhd2gz2c15ungq3"
+ accountType = "Normal"
+ balance = 99000000000000000
+ },
+ {
+ address = "TLLM21wteSPs4hKjbxgmH1L6poyMjeTbHm"
+ owner = "TS1hu4ZCcwBFYpQqUGoWy1GWBzamqxiT5W"
+ }
+]
+
+latestBlockHeaderTimestamp = 1735628883000
+maintenanceTimeInterval = 21600000
+nextMaintenanceTime = 1735628894000
+```
+
+For the `witnesses`, we can configure the following properties:
+- `address`: add the new witness address
+- `url`: set the URL of the new witness
+- `voteCount`: set the vote count of the witness
+
+For the `accounts`, we can configure the following properties:
+- `address`: add or modify the account address
+- `accountName`: set the name of the account
+- `accountType`: set the account type, namely `Normal`, `AssetIssue` and `Contract`
+- `balance`: set the balance of the account
+- `owner`: set the owner permission of the account
+
+*Note*: If you need to add a new address, you can use [TronLink](https://www.tronlink.org/) or [wallet-cli](https://github.com/tronprotocol/wallet-cli?tab=readme-ov-file#account-related-commands) to
+generate the private key and address.
+
+Set `latestBlockHeaderTimestamp` to the current time in milliseconds to avoid delays in producing blocks.
+
+Optionally, set `maintenanceTimeInterval` and `nextMaintenanceTime` to facilitate testing.
+
+
+Execute database fork command:
+```shell script
+# clone the tron-docker
+git clone https://github.com/tronprotocol/tron-docker.git
+# enter the directory
+cd tron-docker/tools/gradlew
+# compile the database fork tool
+./gradlew :dbfork:build
+# execute full command
+java -jar ../dbfork/build/libs/dbfork.jar -c /path/to/fork.conf -d /path/to/output-directory
+```
+
+### Launch the FullNode
+Launch the FullNode against the modified state. To launch the node smoothly, we may need to change some parameters in the [config](https://github.com/tronprotocol/tron-deployment/blob/master/main_net_config.conf):
+```config
+needSyncCheck = false
+minParticipationRate = 0
+minEffectiveConnection = 0
+node.p2p.version = 202501 // arbitrary number except for 11111(mainnet) and 20180622(testnet)
+```
+*Note*: please remember to comment `node.shutdown.BlockHeight` in the config if you have modified it previously.
+
+To isolate the network from the Mainnet and other testnets, `node.p2p.version` can be any arbitrary number different from those used by the Mainnet and testnets.
+
+To produce the blocks, we also need to configure the private keys of the witness addresses in the config and run the FullNode with the `--witness` parameter, please refer [startup a fullnode that produces blocks](https://tronprotocol.github.io/documentation-en/using_javatron/installing_javatron/#startup-a-fullnode-that-produces-blocks).
+```config
+localwitness = [
+]
+```
+
+If another node wants to join the shadow fork network, it needs to execute the above steps, or it copies the state data from the first shadow fork node directly. They need to configure the same `node.p2p.version` and add the `seed.node` in the config, then they can sync and produce blocks to form a local testnet.
+
+At last, developers can connect and interact with the node by [wallet-cli](https://tronprotocol.github.io/documentation-en/clients/wallet-cli/),
+[TronBox](https://developers.tron.network/reference/what-is-tronbox), [Tron-IDE](https://developers.tron.network/docs/tron-ide) or other tools, and execute the shadow fork testing.
diff --git a/tools/dbfork/build.gradle b/tools/dbfork/build.gradle
new file mode 100644
index 0000000..201ac53
--- /dev/null
+++ b/tools/dbfork/build.gradle
@@ -0,0 +1,83 @@
+plugins {
+ // Apply the application plugin to add support for building a CLI application in Java.
+ id 'java'
+ id 'application'
+ id 'checkstyle'
+ id 'com.github.johnrengelman.shadow' version '7.1.2'
+}
+
+sourceCompatibility = JavaVersion.VERSION_1_8
+targetCompatibility = JavaVersion.VERSION_1_8
+[compileJava, compileTestJava]*.options*.encoding = 'UTF-8'
+
+repositories {
+ mavenLocal()
+ mavenCentral()
+ maven { url 'https://repo.spring.io/plugins-release' }
+ maven { url 'https://jitpack.io' }
+}
+
+tasks.named('jar') {
+ enabled = false
+ dependsOn shadowJar
+}
+
+shadowJar {
+ archiveBaseName.set('dbfork')
+ archiveClassifier.set('')
+ archiveVersion.set('')
+ mergeServiceFiles()
+}
+
+dependencies {
+ // Use JUnit test framework.
+ testImplementation 'junit:junit:4.13.2'
+
+ implementation('com.github.tronprotocol.java-tron:chainbase:GreatVoyage-v4.7.7') {
+ exclude group: 'com.fasterxml.jackson', module: 'jackson-bom'
+ exclude group: "pull-parser", module: "pull-parser" // https://www.jianshu.com/p/ef6936448aa6
+ }
+ implementation platform('com.fasterxml.jackson:jackson-bom:2.14.0')
+ implementation group: 'info.picocli', name: 'picocli', version: '4.6.3'
+
+ compileOnly 'org.projectlombok:lombok:1.18.12'
+ annotationProcessor 'org.projectlombok:lombok:1.18.12'
+ testCompileOnly 'org.projectlombok:lombok:1.18.12'
+ testAnnotationProcessor 'org.projectlombok:lombok:1.18.12'
+}
+
+application {
+ // Define the main class for the application.
+ mainClass = 'org.tron.DBFork'
+}
+
+jar {
+ manifest {
+ attributes(
+ 'Main-Class': 'org.tron.DBFork'
+ )
+ }
+}
+
+checkstyle {
+ toolVersion = '8.41'
+ configFile = file("../../conf/checkstyle/checkStyle.xml")
+}
+
+task checkstyleDBForkMain(type: Checkstyle) {
+ source 'src/main/java'
+ include '**/*.java'
+ exclude '**/generated/**'
+ classpath = files()
+ ignoreFailures = false
+}
+
+task checkstyleDBForkTest(type: Checkstyle) {
+ source 'src/test/java'
+ include '**/*.java'
+ exclude '**/generated/**'
+ classpath = files()
+ ignoreFailures = false
+}
+
+check.dependsOn checkstyleDBForkMain, checkstyleDBForkTest
diff --git a/tools/dbfork/src/main/java/org/tron/DBFork.java b/tools/dbfork/src/main/java/org/tron/DBFork.java
new file mode 100644
index 0000000..b9fb45f
--- /dev/null
+++ b/tools/dbfork/src/main/java/org/tron/DBFork.java
@@ -0,0 +1,263 @@
+package org.tron;
+
+import com.google.protobuf.ByteString;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Callable;
+import java.util.stream.Collectors;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.ArrayUtils;
+import org.tron.common.utils.ByteArray;
+import org.tron.common.utils.Commons;
+import org.tron.core.capsule.AccountCapsule;
+import org.tron.core.capsule.WitnessCapsule;
+import org.tron.db.TronDatabase;
+import org.tron.protos.Protocol.Account;
+import org.tron.protos.Protocol.AccountType;
+import org.tron.protos.Protocol.Permission;
+
+import static org.tron.utils.Constant.*;
+
+import org.tron.utils.Utils;
+import picocli.CommandLine;
+import picocli.CommandLine.Command;
+
+@Slf4j(topic = "dbfork")
+@Command(name = "dbfork", mixinStandardHelpOptions = true, version = "DBFork 1.0",
+ description = "Modify the database of java-tron for shadow fork testing.",
+ exitCodeListHeading = "Exit Codes:%n",
+ exitCodeList = {
+ "0:Successful",
+ "n:Internal error: exception occurred,please check logs/dbfork.log"})
+public class DBFork implements Callable {
+
+ private TronDatabase witnessStore;
+ private TronDatabase witnessScheduleStore;
+ private TronDatabase accountStore;
+ private TronDatabase dynamicPropertiesStore;
+
+ @CommandLine.Spec
+ CommandLine.Model.CommandSpec spec;
+
+ @CommandLine.Option(names = {"-d", "--database-directory"},
+ defaultValue = "output-directory",
+ description = "java-tron database directory path. Default: ${DEFAULT-VALUE}")
+ private String database;
+
+ @CommandLine.Option(names = {"-c", "--config"},
+ defaultValue = "fork.conf",
+ description = "config the new witnesses, balances, etc for shadow fork."
+ + " Default: ${DEFAULT-VALUE}")
+ private String config;
+
+ @CommandLine.Option(names = {"--db-engine"},
+ defaultValue = "leveldb",
+ description = "database engine: leveldb or rocksdb. Default: ${DEFAULT-VALUE}")
+ private String dbEngine;
+
+ @CommandLine.Option(names = {"-r", "--retain-witnesses"},
+ description = "retain the previous witnesses and active witnesses.")
+ private boolean retain;
+
+ @CommandLine.Option(names = {"-h", "--help"})
+ private boolean help;
+
+ public static void main(String[] args) {
+ int exitCode = new CommandLine(new DBFork()).execute(args);
+ System.exit(exitCode);
+ }
+
+ private void initStore() {
+ witnessStore = new TronDatabase(database, WITNESS_STORE, dbEngine);
+ witnessScheduleStore = new TronDatabase(database, WITNESS_SCHEDULE_STORE,
+ dbEngine);
+ accountStore = new TronDatabase(database, ACCOUNT_STORE, dbEngine);
+ dynamicPropertiesStore = new TronDatabase(database, DYNAMIC_PROPERTY_STORE,
+ dbEngine);
+ }
+
+ private void closeStore() {
+ witnessStore.close();
+ witnessScheduleStore.close();
+ accountStore.close();
+ dynamicPropertiesStore.close();
+ }
+
+ @Override
+ public Integer call() throws Exception {
+ if (help) {
+ spec.commandLine().usage(System.out);
+ return 0;
+ }
+
+ File dbFile = Paths.get(database).toFile();
+ if (!dbFile.exists() || !dbFile.isDirectory()) {
+ throw new IOException("Database [" + database + "] not exist!");
+ }
+ File tmp = Paths.get(database, "database", "tmp").toFile();
+ if (tmp.exists()) {
+ Utils.deleteDir(tmp);
+ }
+
+ Config forkConfig;
+ File file = Paths.get(config).toFile();
+ if (file.exists() && file.isFile()) {
+ forkConfig = ConfigFactory.parseFile(Paths.get(config).toFile());
+ } else {
+ throw new IOException("Fork config file [" + config + "] not exist!");
+ }
+
+ initStore();
+
+ log.info("Choose the DB engine: {}.", dbEngine);
+ spec.commandLine().getOut().format("Choose the DB engine: %s.", dbEngine).println();
+
+ if (!retain) {
+ log.info("Erase the previous witnesses and active witnesses.");
+ spec.commandLine().getOut().println("Erase the previous witnesses and active witnesses.");
+ witnessScheduleStore.delete(ACTIVE_WITNESSES);
+ witnessStore.reset();
+ } else {
+ log.warn("Keep the previous witnesses and active witnesses.");
+ spec.commandLine().getOut().println("Keep the previous witnesses and active witnesses.");
+ }
+
+ if (forkConfig.hasPath(WITNESS_KEY)) {
+ List extends Config> witnesses = forkConfig.getConfigList(WITNESS_KEY);
+ if (witnesses.isEmpty()) {
+ spec.commandLine().getOut().println("no witness listed in the config.");
+ }
+ witnesses = witnesses.stream()
+ .filter(c -> c.hasPath(WITNESS_ADDRESS))
+ .collect(Collectors.toList());
+
+ if (witnesses.isEmpty()) {
+ spec.commandLine().getOut().println("no witness listed in the config.");
+ }
+
+ List witnessList = new ArrayList<>();
+ witnesses.stream().forEach(
+ w -> {
+ ByteString address = ByteString.copyFrom(
+ Commons.decodeFromBase58Check(w.getString(WITNESS_ADDRESS)));
+ WitnessCapsule witness = new WitnessCapsule(address);
+ witness.setIsJobs(true);
+ if (w.hasPath(WITNESS_VOTE) && w.getLong(WITNESS_VOTE) > 0) {
+ witness.setVoteCount(w.getLong(WITNESS_VOTE));
+ }
+ if (w.hasPath(WITNESS_URL)) {
+ witness.setUrl(w.getString(WITNESS_URL));
+ }
+ witnessStore.put(address.toByteArray(), witness.getData());
+ witnessList.add(witness.getAddress());
+ });
+
+ witnessList.sort(Comparator.comparingLong((ByteString b) ->
+ new WitnessCapsule(witnessStore.get(b.toByteArray())).getVoteCount())
+ .reversed()
+ .thenComparing(Comparator.comparingInt(ByteString::hashCode).reversed()));
+ List activeWitnesses = witnessList.subList(0,
+ witnesses.size() >= MAX_ACTIVE_WITNESS_NUM ? MAX_ACTIVE_WITNESS_NUM : witnessList.size());
+ witnessScheduleStore.put(ACTIVE_WITNESSES, Utils.getActiveWitness(activeWitnesses));
+ log.info("{} witnesses and {} active witnesses have been modified.",
+ witnesses.size(), activeWitnesses.size());
+ spec.commandLine().getOut().format("%d witnesses and %d active witnesses have been modified.",
+ witnesses.size(), activeWitnesses.size()).println();
+ }
+
+ if (forkConfig.hasPath(ACCOUNTS_KEY)) {
+ List extends Config> accounts = forkConfig.getConfigList(ACCOUNTS_KEY);
+ if (accounts.isEmpty()) {
+ spec.commandLine().getOut().println("no account listed in the config.");
+ }
+
+ accounts = accounts.stream()
+ .filter(c -> c.hasPath(ACCOUNT_ADDRESS))
+ .collect(Collectors.toList());
+
+ if (accounts.isEmpty()) {
+ spec.commandLine().getOut().println("no account listed in the config.");
+ }
+
+ accounts.stream().forEach(
+ a -> {
+ byte[] address = Commons.decodeFromBase58Check(a.getString(ACCOUNT_ADDRESS));
+ byte[] value = accountStore.get(address);
+ AccountCapsule accountCapsule =
+ ArrayUtils.isEmpty(value) ? null : new AccountCapsule(value);
+ if (Objects.isNull(accountCapsule)) {
+ ByteString byteAddress = ByteString.copyFrom(
+ Commons.decodeFromBase58Check(a.getString(ACCOUNT_ADDRESS)));
+ Account account = Account.newBuilder().setAddress(byteAddress).build();
+ accountCapsule = new AccountCapsule(account);
+ }
+
+ if (a.hasPath(ACCOUNT_BALANCE) && a.getLong(ACCOUNT_BALANCE) > 0) {
+ accountCapsule.setBalance(a.getLong(ACCOUNT_BALANCE));
+ }
+ if (a.hasPath(ACCOUNT_NAME)) {
+ accountCapsule.setAccountName(
+ ByteArray.fromString(a.getString(ACCOUNT_NAME)));
+ }
+ if (a.hasPath(ACCOUNT_TYPE)) {
+ accountCapsule.updateAccountType(
+ AccountType.valueOf(a.getString(ACCOUNT_TYPE)));
+ }
+
+ if (a.hasPath(ACCOUNT_OWNER)) {
+ byte[] owner = Commons.decodeFromBase58Check(a.getString(ACCOUNT_OWNER));
+ Permission ownerPermission = AccountCapsule
+ .createDefaultOwnerPermission(ByteString.copyFrom(owner));
+ accountCapsule.updatePermissions(ownerPermission, null, null);
+ }
+
+ accountStore.put(address, accountCapsule.getData());
+ });
+ log.info("{} accounts have been modified.", accounts.size());
+ spec.commandLine().getOut().format("%d accounts have been modified.", accounts.size())
+ .println();
+ }
+
+ if (forkConfig.hasPath(LATEST_BLOCK_TIMESTAMP)
+ && forkConfig.getLong(LATEST_BLOCK_TIMESTAMP) > 0) {
+ long latestBlockHeaderTimestamp = forkConfig.getLong(LATEST_BLOCK_TIMESTAMP);
+ dynamicPropertiesStore
+ .put(LATEST_BLOCK_HEADER_TIMESTAMP, ByteArray.fromLong(latestBlockHeaderTimestamp));
+ log.info("The latest block header timestamp has been modified as {}.",
+ latestBlockHeaderTimestamp);
+ spec.commandLine().getOut().format("The latest block header timestamp has been modified "
+ + "as %d.", latestBlockHeaderTimestamp).println();
+ }
+
+ if (forkConfig.hasPath(MAINTENANCE_INTERVAL)
+ && forkConfig.getLong(MAINTENANCE_INTERVAL) > 0) {
+ long maintenanceTimeInterval = forkConfig.getLong(MAINTENANCE_INTERVAL);
+ dynamicPropertiesStore
+ .put(MAINTENANCE_TIME_INTERVAL, ByteArray.fromLong(maintenanceTimeInterval));
+ log.info("The maintenance time interval has been modified as {}.",
+ maintenanceTimeInterval);
+ spec.commandLine().getOut().format("The maintenance time interval has been modified as %d.",
+ maintenanceTimeInterval).println();
+ }
+
+ if (forkConfig.hasPath(NEXT_MAINTENANCE_TIME)
+ && forkConfig.getLong(NEXT_MAINTENANCE_TIME) > 0) {
+ long nextMaintenanceTime = forkConfig.getLong(NEXT_MAINTENANCE_TIME);
+ dynamicPropertiesStore.put(MAINTENANCE_TIME, ByteArray.fromLong(nextMaintenanceTime));
+ log.info("The next maintenance time has been modified as {}.",
+ nextMaintenanceTime);
+ spec.commandLine().getOut().format("The next maintenance time has been modified as %d.",
+ nextMaintenanceTime).println();
+ }
+
+ closeStore();
+ return 0;
+ }
+}
diff --git a/tools/dbfork/src/main/java/org/tron/db/LevelDbDataSourceImpl.java b/tools/dbfork/src/main/java/org/tron/db/LevelDbDataSourceImpl.java
new file mode 100644
index 0000000..dbd781e
--- /dev/null
+++ b/tools/dbfork/src/main/java/org/tron/db/LevelDbDataSourceImpl.java
@@ -0,0 +1,508 @@
+/*
+ * Copyright (c) [2016] [ <ether.camp> ] This file is part of the ethereumJ library.
+ *
+ * The ethereumJ library is free software: you can redistribute it and/or modify it under the terms
+ * of the GNU Lesser General Public License as published by the Free Software Foundation, either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * The ethereumJ library is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License along with the ethereumJ
+ * library. If not, see <http://www.gnu.org/licenses/>.
+ */
+package org.tron.db;
+
+import static org.fusesource.leveldbjni.JniDBFactory.factory;
+
+import com.google.common.collect.Sets;
+import com.google.common.primitives.Bytes;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+import lombok.NoArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.iq80.leveldb.DB;
+import org.iq80.leveldb.DBIterator;
+import org.iq80.leveldb.Logger;
+import org.iq80.leveldb.Options;
+import org.iq80.leveldb.ReadOptions;
+import org.iq80.leveldb.WriteBatch;
+import org.iq80.leveldb.WriteOptions;
+import org.slf4j.LoggerFactory;
+import org.tron.common.storage.WriteOptionsWrapper;
+import org.tron.common.storage.metric.DbStat;
+import org.tron.common.utils.FileUtil;
+import org.tron.common.utils.StorageUtils;
+import org.tron.core.db.common.DbSourceInter;
+import org.tron.core.db.common.iterator.StoreIterator;
+import org.tron.core.db2.common.Instance;
+import org.tron.core.db2.common.WrappedByteArray;
+
+@Slf4j(topic = "DB")
+@NoArgsConstructor
+public class LevelDbDataSourceImpl extends DbStat implements DbSourceInter,
+ Iterable>, Instance {
+
+ private String dataBaseName;
+ private DB database;
+ private volatile boolean alive;
+ private String parentPath;
+ private Options options;
+ private WriteOptions writeOptions;
+ private ReadWriteLock resetDbLock = new ReentrantReadWriteLock();
+ private static final String LEVELDB = "LEVELDB";
+ private static final org.slf4j.Logger innerLogger = LoggerFactory.getLogger(LEVELDB);
+ private Logger leveldbLogger = new Logger() {
+ @Override
+ public void log(String message) {
+ innerLogger.info("{} {}", dataBaseName, message);
+ }
+ };
+
+ /**
+ * constructor.
+ */
+ public LevelDbDataSourceImpl(String parentPath, String dataBaseName, Options options,
+ WriteOptions writeOptions) {
+ this.parentPath = Paths.get(
+ parentPath,
+ "database"
+ ).toString();
+ this.dataBaseName = dataBaseName;
+ this.options = options.logger(leveldbLogger);
+ this.writeOptions = writeOptions;
+ initDB();
+ }
+
+ public LevelDbDataSourceImpl(String parentPath, String dataBaseName) {
+ this.parentPath = Paths.get(
+ parentPath,
+ "database"
+ ).toString();
+
+ this.dataBaseName = dataBaseName;
+ options = new Options().logger(leveldbLogger);
+ writeOptions = new WriteOptions();
+ }
+
+ @Override
+ public void initDB() {
+ resetDbLock.writeLock().lock();
+ try {
+ log.debug("Init DB: {}.", dataBaseName);
+
+ if (isAlive()) {
+ return;
+ }
+
+ if (dataBaseName == null) {
+ throw new IllegalArgumentException("No name set to the dbStore");
+ }
+
+ try {
+ openDatabase(options);
+ alive = true;
+ } catch (IOException ioe) {
+ throw new RuntimeException(String.format("Can't initialize database, %s", dataBaseName),
+ ioe);
+ }
+ log.debug("Init DB {} done.", dataBaseName);
+ } finally {
+ resetDbLock.writeLock().unlock();
+ }
+ }
+
+ private void openDatabase(Options dbOptions) throws IOException {
+ final Path dbPath = getDbPath();
+ if (dbPath == null || dbPath.getParent() == null) {
+ return;
+ }
+ if (!Files.isSymbolicLink(dbPath.getParent())) {
+ Files.createDirectories(dbPath.getParent());
+ }
+ try {
+ database = factory.open(dbPath.toFile(), dbOptions);
+ if (!this.getDBName().startsWith("checkpoint")) {
+ log
+ .info("DB {} open success with writeBufferSize {} M, cacheSize {} M, maxOpenFiles {}.",
+ this.getDBName(), dbOptions.writeBufferSize() / 1024 / 1024,
+ dbOptions.cacheSize() / 1024 / 1024, dbOptions.maxOpenFiles());
+ }
+ } catch (IOException e) {
+ if (e.getMessage().contains("Corruption:")) {
+ log.error("Database {} corrupted, please delete database directory({}) and restart.",
+ dataBaseName, parentPath, e);
+ } else {
+ log.error("Open Database {} failed", dataBaseName, e);
+ }
+ System.exit(1);
+ }
+ }
+
+ public Path getDbPath() {
+ return Paths.get(parentPath, dataBaseName);
+ }
+
+ /**
+ * reset database.
+ */
+ public void resetDb() {
+ resetDbLock.writeLock().lock();
+ try {
+ closeDB();
+ FileUtil.recursiveDelete(getDbPath().toString());
+ initDB();
+ } finally {
+ resetDbLock.writeLock().unlock();
+ }
+ }
+
+ @Override
+ public boolean isAlive() {
+ return alive;
+ }
+
+ @Override
+ public String getDBName() {
+ return dataBaseName;
+ }
+
+ @Override
+ public void setDBName(String name) {
+ this.dataBaseName = name;
+ }
+
+ @Override
+ public byte[] getData(byte[] key) {
+ resetDbLock.readLock().lock();
+ try {
+ return database.get(key);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void putData(byte[] key, byte[] value) {
+ resetDbLock.readLock().lock();
+ try {
+ database.put(key, value, writeOptions);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void deleteData(byte[] key) {
+ resetDbLock.readLock().lock();
+ try {
+ database.delete(key, writeOptions);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Deprecated
+ @Override
+ public Set allKeys() {
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ Set result = Sets.newHashSet();
+ for (iterator.seekToFirst(); iterator.hasNext(); iterator.next()) {
+ result.add(iterator.peekNext().getKey());
+ }
+ return result;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Deprecated
+ @Override
+ public Set allValues() {
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ Set result = Sets.newHashSet();
+ for (iterator.seekToFirst(); iterator.hasNext(); iterator.next()) {
+ result.add(iterator.peekNext().getValue());
+ }
+ return result;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public Set getlatestValues(long limit) {
+ if (limit <= 0) {
+ return Sets.newHashSet();
+ }
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ Set result = Sets.newHashSet();
+ long i = 0;
+ iterator.seekToLast();
+ if (iterator.hasNext()) {
+ result.add(iterator.peekNext().getValue());
+ i++;
+ }
+ for (; iterator.hasPrev() && i++ < limit; iterator.prev()) {
+ result.add(iterator.peekPrev().getValue());
+ }
+ return result;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public Set getValuesNext(byte[] key, long limit) {
+ if (limit <= 0) {
+ return Sets.newHashSet();
+ }
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ Set result = Sets.newHashSet();
+ long i = 0;
+ for (iterator.seek(key); iterator.hasNext() && i++ < limit; iterator.next()) {
+ result.add(iterator.peekNext().getValue());
+ }
+ return result;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public List getKeysNext(byte[] key, long limit) {
+ if (limit <= 0) {
+ return new ArrayList<>();
+ }
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ List result = new ArrayList<>();
+ long i = 0;
+ for (iterator.seek(key); iterator.hasNext() && i++ < limit; iterator.next()) {
+ result.add(iterator.peekNext().getKey());
+ }
+ return result;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public Map getNext(byte[] key, long limit) {
+ if (limit <= 0) {
+ return Collections.emptyMap();
+ }
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ Map result = new HashMap<>();
+ long i = 0;
+ for (iterator.seek(key); iterator.hasNext() && i++ < limit; iterator.next()) {
+ Entry entry = iterator.peekNext();
+ result.put(entry.getKey(), entry.getValue());
+ }
+ return result;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public Map prefixQuery(byte[] key) {
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ Map result = new HashMap<>();
+ for (iterator.seek(key); iterator.hasNext(); iterator.next()) {
+ Entry entry = iterator.peekNext();
+ if (Bytes.indexOf(entry.getKey(), key) == 0) {
+ result.put(WrappedByteArray.of(entry.getKey()), entry.getValue());
+ } else {
+ return result;
+ }
+ }
+ return result;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public long getTotal() throws RuntimeException {
+ resetDbLock.readLock().lock();
+ try (DBIterator iterator = getDBIterator()) {
+ long total = 0;
+ for (iterator.seekToFirst(); iterator.hasNext(); iterator.next()) {
+ total++;
+ }
+ return total;
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ private void updateByBatchInner(Map rows) throws Exception {
+ try (WriteBatch batch = database.createWriteBatch()) {
+ innerBatchUpdate(rows, batch);
+ database.write(batch, writeOptions);
+ }
+ }
+
+ private void updateByBatchInner(Map rows, WriteOptions options) throws Exception {
+ try (WriteBatch batch = database.createWriteBatch()) {
+ innerBatchUpdate(rows, batch);
+ database.write(batch, options);
+ }
+ }
+
+ private void innerBatchUpdate(Map rows, WriteBatch batch) {
+ rows.forEach((key, value) -> {
+ if (value == null) {
+ batch.delete(key);
+ } else {
+ batch.put(key, value);
+ }
+ });
+ }
+
+ @Override
+ public void updateByBatch(Map rows, WriteOptionsWrapper options) {
+ resetDbLock.readLock().lock();
+ try {
+ updateByBatchInner(rows, options.level);
+ } catch (Exception e) {
+ try {
+ updateByBatchInner(rows, options.level);
+ } catch (Exception e1) {
+ throw new RuntimeException(e);
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void updateByBatch(Map rows) {
+ resetDbLock.readLock().lock();
+ try {
+ updateByBatchInner(rows);
+ } catch (Exception e) {
+ try {
+ updateByBatchInner(rows);
+ } catch (Exception e1) {
+ throw new RuntimeException(e);
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public boolean flush() {
+ return false;
+ }
+
+ @Override
+ public void closeDB() {
+ resetDbLock.writeLock().lock();
+ try {
+ if (!isAlive()) {
+ return;
+ }
+ database.close();
+ alive = false;
+ } catch (IOException e) {
+ log.error("Failed to find the dbStore file on the closeDB: {}.", dataBaseName, e);
+ } finally {
+ resetDbLock.writeLock().unlock();
+ }
+ }
+
+ @Override
+ public org.tron.core.db.common.iterator.DBIterator iterator() {
+ return new StoreIterator(getDBIterator());
+ }
+
+ public Stream> stream() {
+ return StreamSupport.stream(spliterator(), false);
+ }
+
+ @Override
+ public LevelDbDataSourceImpl newInstance() {
+ return new LevelDbDataSourceImpl(StorageUtils.getOutputDirectoryByDbName(dataBaseName),
+ dataBaseName, options, writeOptions);
+ }
+
+ private DBIterator getDBIterator() {
+ ReadOptions readOptions = new ReadOptions().fillCache(false);
+ return database.iterator(readOptions);
+ }
+
+
+ /**
+ * Compactions Level Files Size(MB) Time(sec) Read(MB) Write(MB)
+ * --------------------------------------------------
+ * 1 2 2 0 0 2 2 1 1 0 0
+ * 1
+ */
+ @Override
+ public List getStats() throws Exception {
+ resetDbLock.readLock().lock();
+ try {
+ if (!isAlive()) {
+ return Collections.emptyList();
+ }
+ String stat = database.getProperty("leveldb.stats");
+ String[] stats = stat.split("\n");
+ return Arrays.stream(stats).skip(3).collect(Collectors.toList());
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public String getEngine() {
+ return LEVELDB;
+ }
+
+ @Override
+ public String getName() {
+ return this.dataBaseName;
+ }
+
+ @Override
+ public void stat() {
+ this.statProperty();
+ }
+
+}
diff --git a/tools/dbfork/src/main/java/org/tron/db/RocksDbDataSourceImpl.java b/tools/dbfork/src/main/java/org/tron/db/RocksDbDataSourceImpl.java
new file mode 100644
index 0000000..0eacfa7
--- /dev/null
+++ b/tools/dbfork/src/main/java/org/tron/db/RocksDbDataSourceImpl.java
@@ -0,0 +1,585 @@
+package org.tron.db;
+
+import com.google.common.collect.Sets;
+import com.google.common.primitives.Bytes;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+import lombok.NoArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.rocksdb.BlockBasedTableConfig;
+import org.rocksdb.BloomFilter;
+import org.rocksdb.Checkpoint;
+import org.rocksdb.DirectComparator;
+import org.rocksdb.InfoLogLevel;
+import org.rocksdb.Logger;
+import org.rocksdb.Options;
+import org.rocksdb.ReadOptions;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.RocksIterator;
+import org.rocksdb.Statistics;
+import org.rocksdb.Status;
+import org.rocksdb.WriteBatch;
+import org.rocksdb.WriteOptions;
+import org.slf4j.LoggerFactory;
+import org.tron.common.setting.RocksDbSettings;
+import org.tron.common.storage.WriteOptionsWrapper;
+import org.tron.common.storage.metric.DbStat;
+import org.tron.common.utils.FileUtil;
+import org.tron.common.utils.PropUtil;
+import org.tron.core.db.common.DbSourceInter;
+import org.tron.core.db.common.iterator.RockStoreIterator;
+import org.tron.core.db2.common.Instance;
+import org.tron.core.db2.common.WrappedByteArray;
+
+
+@Slf4j(topic = "DB")
+@NoArgsConstructor
+public class RocksDbDataSourceImpl extends DbStat implements DbSourceInter,
+ Iterable>, Instance {
+
+ ReadOptions readOpts;
+ private String dataBaseName;
+ private RocksDB database;
+ private volatile boolean alive;
+ private String parentPath;
+ private ReadWriteLock resetDbLock = new ReentrantReadWriteLock();
+ private static final String KEY_ENGINE = "ENGINE";
+ private static final String ROCKSDB = "ROCKSDB";
+ private DirectComparator comparator;
+ private static final org.slf4j.Logger rocksDbLogger = LoggerFactory.getLogger(ROCKSDB);
+
+ // Opens the store immediately: installs the supplied settings process-wide
+ // via RocksDbSettings.setRocksDbSettings, then calls initDB(). The
+ // comparator customises key ordering.
+ public RocksDbDataSourceImpl(String parentPath, String name, RocksDbSettings settings,
+ DirectComparator comparator) {
+ this.dataBaseName = name;
+ this.parentPath = parentPath;
+ this.comparator = comparator;
+ RocksDbSettings.setRocksDbSettings(settings);
+ initDB();
+ }
+
+ // Same as the four-argument constructor but with RocksDB's default key ordering.
+ public RocksDbDataSourceImpl(String parentPath, String name, RocksDbSettings settings) {
+ this.dataBaseName = name;
+ this.parentPath = parentPath;
+ RocksDbSettings.setRocksDbSettings(settings);
+ initDB();
+ }
+
+ // Stores the paths only; unlike the other constructors this does NOT open
+ // the database — the caller must invoke initDB().
+ public RocksDbDataSourceImpl(String parentPath, String name) {
+ this.parentPath = parentPath;
+ this.dataBaseName = name;
+ }
+
+ // Full on-disk path of this store. NOTE(review): unlike the LevelDB
+ // implementation, no "database" sub-directory is inserted here — confirm
+ // callers pass the directory that already contains it.
+ public Path getDbPath() {
+ return Paths.get(parentPath, dataBaseName);
+ }
+
+ // Raw RocksDB handle.
+ public RocksDB getDatabase() {
+ return database;
+ }
+
+ // True between a successful initDB() and closeDB().
+ public boolean isAlive() {
+ return alive;
+ }
+
+ // Idempotent close: no-op when not alive. Takes the write lock so in-flight
+ // reads/writes (which hold the read lock) drain first.
+ @Override
+ public void closeDB() {
+ resetDbLock.writeLock().lock();
+ try {
+ if (!isAlive()) {
+ return;
+ }
+ database.close();
+ alive = false;
+ } catch (Exception e) {
+ log.error("Failed to find the dbStore file on the closeDB: {}.", dataBaseName, e);
+ } finally {
+ resetDbLock.writeLock().unlock();
+ }
+ }
+
+ // Drops all data: close, recursively delete the store directory, reopen empty.
+ @Override
+ public void resetDb() {
+ resetDbLock.writeLock().lock();
+ try {
+ closeDB();
+ FileUtil.recursiveDelete(getDbPath().toString());
+ initDB();
+ } finally {
+ resetDbLock.writeLock().unlock();
+ }
+ }
+
+ // Early-out guard used by every data operation: returns true (after logging
+ // a warning) when the database is not open.
+ private boolean quitIfNotAlive() {
+ if (!isAlive()) {
+ log.warn("DB {} is not alive.", dataBaseName);
+ }
+ return !isAlive();
+ }
+
+ // Full key scan. Note: returns null (not an empty set) when the DB is not
+ // alive — callers must handle that.
+ @Override
+ public Set allKeys() throws RuntimeException {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return null;
+ }
+ Set result = Sets.newHashSet();
+ try (final RocksIterator iter = getRocksIterator()) {
+ for (iter.seekToFirst(); iter.isValid(); iter.next()) {
+ result.add(iter.key());
+ }
+ return result;
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ // Not implemented for RocksDB; always returns null.
+ @Override
+ public Set allValues() throws RuntimeException {
+ return null;
+ }
+
+ // Not implemented for RocksDB; always returns 0.
+ @Override
+ public long getTotal() throws RuntimeException {
+ return 0;
+ }
+
+ // Name of this column store, fixed at construction.
+ @Override
+ public String getDBName() {
+ return this.dataBaseName;
+ }
+
+ // Intentional no-op: the name given at construction is kept.
+ @Override
+ public void setDBName(String name) {
+ }
+
+ // Ensures <dbPath>/engine.properties exists and records ENGINE=ROCKSDB,
+ // writing the property on first use. Returns false when the directory or
+ // file cannot be created, or when the recorded engine is not ROCKSDB
+ // (i.e. the directory was created by another storage engine).
+ public boolean checkOrInitEngine() {
+ String dir = getDbPath().toString();
+ String enginePath = dir + File.separator + "engine.properties";
+
+ if (FileUtil.createDirIfNotExists(dir)) {
+ if (!FileUtil.createFileIfNotExists(enginePath)) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+
+ // for the first init engine
+ String engine = PropUtil.readProperty(enginePath, KEY_ENGINE);
+ if (engine.isEmpty() && !PropUtil.writeProperty(enginePath, KEY_ENGINE, ROCKSDB)) {
+ return false;
+ }
+ engine = PropUtil.readProperty(enginePath, KEY_ENGINE);
+
+ return ROCKSDB.equals(engine);
+ }
+
+ // Refuses to open a directory previously created by another engine, then
+ // opens with the process-wide settings.
+ public void initDB() {
+ if (!checkOrInitEngine()) {
+ throw new RuntimeException(
+ String.format("failed to check database: %s, engine do not match", dataBaseName));
+ }
+ initDB(RocksDbSettings.getSettings());
+ }
+
+ // Opens the database with the supplied settings. Safe to call when already
+ // alive (returns early). On open failure it logs (distinguishing the
+ // Corruption status) and terminates the JVM with System.exit(1).
+ public void initDB(RocksDbSettings settings) {
+ resetDbLock.writeLock().lock();
+ try {
+ if (isAlive()) {
+ return;
+ }
+ if (dataBaseName == null) {
+ throw new IllegalArgumentException("No name set to the dbStore");
+ }
+
+ try (Options options = new Options()) {
+
+ // most of these options are suggested by https://github
+ //.com/facebook/rocksdb/wiki/Set-Up-Options
+
+ // general options
+ if (settings.isEnableStatistics()) {
+ options.setStatistics(new Statistics());
+ options.setStatsDumpPeriodSec(60);
+ }
+ options.setCreateIfMissing(true);
+ options.setIncreaseParallelism(1);
+ options.setLevelCompactionDynamicLevelBytes(true);
+ options.setMaxOpenFiles(settings.getMaxOpenFiles());
+
+ // general options supported user config
+ options.setNumLevels(settings.getLevelNumber());
+ options.setMaxBytesForLevelMultiplier(settings.getMaxBytesForLevelMultiplier());
+ options.setMaxBytesForLevelBase(settings.getMaxBytesForLevelBase());
+ options.setMaxBackgroundCompactions(settings.getCompactThreads());
+ options.setLevel0FileNumCompactionTrigger(settings.getLevel0FileNumCompactionTrigger());
+ options.setTargetFileSizeMultiplier(settings.getTargetFileSizeMultiplier());
+ options.setTargetFileSizeBase(settings.getTargetFileSizeBase());
+ if (comparator != null) {
+ options.setComparator(comparator);
+ }
+ // Route RocksDB's internal log lines into slf4j, tagged with the db name.
+ options.setLogger(new Logger(options) {
+ @Override
+ protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+ rocksDbLogger.info("{} {}", dataBaseName, logMsg);
+ }
+ });
+
+ // table options
+ final BlockBasedTableConfig tableCfg;
+ options.setTableFormatConfig(tableCfg = new BlockBasedTableConfig());
+ tableCfg.setBlockSize(settings.getBlockSize());
+ tableCfg.setBlockCache(RocksDbSettings.getCache());
+ tableCfg.setCacheIndexAndFilterBlocks(true);
+ tableCfg.setPinL0FilterAndIndexBlocksInCache(true);
+ tableCfg.setFilter(new BloomFilter(10, false));
+
+ // read options
+ readOpts = new ReadOptions();
+ readOpts = readOpts.setPrefixSameAsStart(true)
+ .setVerifyChecksums(false);
+
+ try {
+ log.debug("Opening database {}.", dataBaseName);
+ final Path dbPath = getDbPath();
+
+ if (!Files.isSymbolicLink(dbPath.getParent())) {
+ Files.createDirectories(dbPath.getParent());
+ }
+
+ try {
+ database = RocksDB.open(options, dbPath.toString());
+ } catch (RocksDBException e) {
+ if (Objects.equals(e.getStatus().getCode(), Status.Code.Corruption)) {
+ log.error("Database {} corrupted, please delete database directory({}) " +
+ "and restart.", dataBaseName, parentPath, e);
+ } else {
+ log.error("Open Database {} failed", dataBaseName, e);
+ }
+ // NOTE(review): exits the whole JVM on open failure rather than
+ // throwing — confirm this is intended for the CLI tool.
+ System.exit(1);
+ }
+
+ alive = true;
+ } catch (IOException ioe) {
+ throw new RuntimeException(
+ String.format("failed to init database: %s", dataBaseName), ioe);
+ }
+
+ log.debug("Init DB {} done.", dataBaseName);
+ }
+ } finally {
+ resetDbLock.writeLock().unlock();
+ }
+ }
+
+ // Read lock only: RocksDB handles concurrent access internally; the write
+ // lock is reserved for close/reset. Wraps RocksDBException in a
+ // RuntimeException tagged with the db name.
+ @Override
+ public void putData(byte[] key, byte[] value) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return;
+ }
+ database.put(key, value);
+ } catch (RocksDBException e) {
+ throw new RuntimeException(dataBaseName, e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ // Returns the stored value, null when the key is absent or the DB is not alive.
+ @Override
+ public byte[] getData(byte[] key) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return null;
+ }
+ return database.get(key);
+ } catch (RocksDBException e) {
+ throw new RuntimeException(dataBaseName, e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ // Deletes the key; silently returns when the DB is not alive.
+ @Override
+ public void deleteData(byte[] key) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return;
+ }
+ database.delete(key);
+ } catch (RocksDBException e) {
+ throw new RuntimeException(dataBaseName, e);
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public boolean flush() {
+ return false;
+ }
+
+ @Override
+ public org.tron.core.db.common.iterator.DBIterator iterator() {
+ return new RockStoreIterator(getRocksIterator());
+ }
+
+ private void updateByBatchInner(Map<byte[], byte[]> rows) throws Exception {
+ if (quitIfNotAlive()) {
+ return;
+ }
+ try (WriteBatch batch = new WriteBatch()) {
+ for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
+ if (entry.getValue() == null) {
+ batch.delete(entry.getKey());
+ } else {
+ batch.put(entry.getKey(), entry.getValue());
+ }
+ }
+ database.write(new WriteOptions(), batch);
+ }
+ }
+
+ private void updateByBatchInner(Map<byte[], byte[]> rows, WriteOptions options)
+ throws Exception {
+ if (quitIfNotAlive()) {
+ return;
+ }
+ try (WriteBatch batch = new WriteBatch()) {
+ for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
+ if (entry.getValue() == null) {
+ batch.delete(entry.getKey());
+ } else {
+ batch.put(entry.getKey(), entry.getValue());
+ }
+ }
+ database.write(options, batch);
+ }
+ }
+
+ @Override
+ public void updateByBatch(Map<byte[], byte[]> rows, WriteOptionsWrapper optionsWrapper) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return;
+ }
+ updateByBatchInner(rows, optionsWrapper.rocks);
+ } catch (Exception e) {
+ try {
+ updateByBatchInner(rows);
+ } catch (Exception e1) {
+ throw new RuntimeException(dataBaseName, e1);
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public void updateByBatch(Map<byte[], byte[]> rows) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return;
+ }
+ updateByBatchInner(rows);
+ } catch (Exception e) {
+ try {
+ updateByBatchInner(rows);
+ } catch (Exception e1) {
+ throw new RuntimeException(dataBaseName, e1);
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public List<byte[]> getKeysNext(byte[] key, long limit) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return new ArrayList<>();
+ }
+ if (limit <= 0) {
+ return new ArrayList<>();
+ }
+
+ try (RocksIterator iter = getRocksIterator()) {
+ List<byte[]> result = new ArrayList<>();
+ long i = 0;
+ for (iter.seek(key); iter.isValid() && i < limit; iter.next(), i++) {
+ result.add(iter.key());
+ }
+ return result;
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public Map<byte[], byte[]> getNext(byte[] key, long limit) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return null;
+ }
+ if (limit <= 0) {
+ return Collections.emptyMap();
+ }
+ try (RocksIterator iter = getRocksIterator()) {
+ Map<byte[], byte[]> result = new HashMap<>();
+ long i = 0;
+ for (iter.seek(key); iter.isValid() && i < limit; iter.next(), i++) {
+ result.put(iter.key(), iter.value());
+ }
+ return result;
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public Map<WrappedByteArray, byte[]> prefixQuery(byte[] key) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return null;
+ }
+ try (RocksIterator iterator = getRocksIterator()) {
+ Map<WrappedByteArray, byte[]> result = new HashMap<>();
+ for (iterator.seek(key); iterator.isValid(); iterator.next()) {
+ if (Bytes.indexOf(iterator.key(), key) == 0) {
+ result.put(WrappedByteArray.of(iterator.key()), iterator.value());
+ } else {
+ return result;
+ }
+ }
+ return result;
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public Set<byte[]> getlatestValues(long limit) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return null;
+ }
+ if (limit <= 0) {
+ return Sets.newHashSet();
+ }
+ try (RocksIterator iter = getRocksIterator()) {
+ Set<byte[]> result = Sets.newHashSet();
+ long i = 0;
+ for (iter.seekToLast(); iter.isValid() && i < limit; iter.prev(), i++) {
+ result.add(iter.value());
+ }
+ return result;
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+
+ public Set<byte[]> getValuesNext(byte[] key, long limit) {
+ resetDbLock.readLock().lock();
+ try {
+ if (quitIfNotAlive()) {
+ return null;
+ }
+ if (limit <= 0) {
+ return Sets.newHashSet();
+ }
+ try (RocksIterator iter = getRocksIterator()) {
+ Set<byte[]> result = Sets.newHashSet();
+ long i = 0;
+ for (iter.seek(key); iter.isValid() && i < limit; iter.next(), i++) {
+ result.add(iter.value());
+ }
+ return result;
+ }
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ public void backup(String dir) throws RocksDBException {
+ Checkpoint cp = Checkpoint.create(database);
+ cp.createCheckpoint(dir + this.getDBName());
+ }
+
+ private RocksIterator getRocksIterator() {
+ try (ReadOptions readOptions = new ReadOptions().setFillCache(false)) {
+ return database.newIterator(readOptions);
+ }
+ }
+
+ public boolean deleteDbBakPath(String dir) {
+ return FileUtil.deleteDir(new File(dir + this.getDBName()));
+ }
+
+ @Override
+ public RocksDbDataSourceImpl newInstance() {
+ return new RocksDbDataSourceImpl(parentPath, dataBaseName, RocksDbSettings.getSettings());
+ }
+
+
+ /**
+ * Level Files Size(MB) -------------------- 0 5 10 1 134 254 2 1311
+ * 2559 3 1976 4005 4 0 0 5 0 0 6 0 0
+ */
+ @Override
+ public List<String> getStats() throws Exception {
+ resetDbLock.readLock().lock();
+ try {
+ if (!isAlive()) {
+ return Collections.emptyList();
+ }
+ String stat = database.getProperty("rocksdb.levelstats");
+ String[] stats = stat.split("\n");
+ return Arrays.stream(stats).skip(2).collect(Collectors.toList());
+ } finally {
+ resetDbLock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public String getEngine() {
+ return ROCKSDB;
+ }
+
+ @Override
+ public String getName() {
+ return this.dataBaseName;
+ }
+
+ @Override
+ public void stat() {
+ this.statProperty();
+ }
+}
diff --git a/tools/dbfork/src/main/java/org/tron/db/TronDatabase.java b/tools/dbfork/src/main/java/org/tron/db/TronDatabase.java
new file mode 100644
index 0000000..03de378
--- /dev/null
+++ b/tools/dbfork/src/main/java/org/tron/db/TronDatabase.java
@@ -0,0 +1,93 @@
+package org.tron.db;
+
+import java.nio.file.Paths;
+import java.util.Objects;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+import org.iq80.leveldb.WriteOptions;
+import org.rocksdb.DirectComparator;
+import org.tron.common.setting.RocksDbSettings;
+import org.tron.common.utils.DbOptionalsUtils;
+import org.tron.core.db.common.DbSourceInter;
+
+@Slf4j(topic = "DB")
+public class TronDatabase {
+
+ protected DbSourceInter<byte[]> dbSource;
+ @Getter
+ private String dbName;
+
+ public TronDatabase(String outputDirectory, String dbName, String dbEngine) {
+ this.dbName = dbName;
+
+ if ("LEVELDB".equalsIgnoreCase(dbEngine)) {
+ dbSource =
+ new LevelDbDataSourceImpl(outputDirectory,
+ dbName,
+ DbOptionalsUtils.createDefaultDbOptions(),
+ new WriteOptions().sync(false));
+ } else if ("ROCKSDB".equalsIgnoreCase(dbEngine)) {
+ String parentName = Paths.get(outputDirectory,
+ "database").toString();
+ dbSource =
+ new RocksDbDataSourceImpl(parentName, dbName, RocksDbSettings.getDefaultSettings(),
+ getDirectComparator());
+ } else {
+ log.error("invalid db engine: {}", dbEngine);
+ System.exit(-1);
+ }
+
+ dbSource.initDB();
+ }
+
+ protected DirectComparator getDirectComparator() {
+ return null;
+ }
+
+ public void put(byte[] key, byte[] value) {
+ if (Objects.isNull(key) || Objects.isNull(value)) {
+ return;
+ }
+
+ dbSource.putData(key, value);
+ }
+
+ public byte[] get(byte[] key) {
+ if (Objects.isNull(key)) {
+ return null;
+ }
+
+ return dbSource.getData(key);
+ }
+
+ public void delete(byte[] key) {
+ if (Objects.isNull(key)) {
+ return;
+ }
+
+ dbSource.deleteData(key);
+ }
+
+ /**
+ * reset the database.
+ */
+ public void reset() {
+ dbSource.resetDb();
+ }
+
+ public void close() {
+ log.info("******** Begin to close {}. ********", getName());
+ try {
+ dbSource.closeDB();
+ } catch (Exception e) {
+ log.warn("Failed to close {}.", getName(), e);
+ } finally {
+ log.info("******** End to close {}. ********", getName());
+ }
+ }
+
+ public String getName() {
+ return this.getClass().getSimpleName();
+ }
+
+}
diff --git a/tools/dbfork/src/main/java/org/tron/utils/Constant.java b/tools/dbfork/src/main/java/org/tron/utils/Constant.java
new file mode 100644
index 0000000..01e2ba1
--- /dev/null
+++ b/tools/dbfork/src/main/java/org/tron/utils/Constant.java
@@ -0,0 +1,31 @@
+package org.tron.utils;
+
+public class Constant {
+
+ public static final String WITNESS_KEY = "witnesses";
+ public static final String WITNESS_ADDRESS = "address";
+ public static final String WITNESS_URL = "url";
+ public static final String WITNESS_VOTE = "voteCount";
+ public static final String ACCOUNTS_KEY = "accounts";
+ public static final String ACCOUNT_NAME = "accountName";
+ public static final String ACCOUNT_TYPE = "accountType";
+ public static final String ACCOUNT_ADDRESS = "address";
+ public static final String ACCOUNT_BALANCE = "balance";
+ public static final String ACCOUNT_OWNER = "owner";
+ public static final String LATEST_BLOCK_TIMESTAMP = "latestBlockHeaderTimestamp";
+ public static final String MAINTENANCE_INTERVAL = "maintenanceTimeInterval";
+ public static final String NEXT_MAINTENANCE_TIME = "nextMaintenanceTime";
+ public static final int MAX_ACTIVE_WITNESS_NUM = 27;
+
+ public static final String WITNESS_STORE = "witness";
+ public static final String WITNESS_SCHEDULE_STORE = "witness_schedule";
+ public static final String ACCOUNT_STORE = "account";
+ public static final String DYNAMIC_PROPERTY_STORE = "properties";
+
+ public static final byte[] LATEST_BLOCK_HEADER_TIMESTAMP = "latest_block_header_timestamp"
+ .getBytes();
+ public static final byte[] MAINTENANCE_TIME_INTERVAL = "MAINTENANCE_TIME_INTERVAL".getBytes();
+ public static final byte[] MAINTENANCE_TIME = "NEXT_MAINTENANCE_TIME".getBytes();
+ public static final byte[] ACTIVE_WITNESSES = "active_witnesses".getBytes();
+ public static final int ADDRESS_BYTE_ARRAY_LENGTH = 21;
+}
diff --git a/tools/dbfork/src/main/java/org/tron/utils/Utils.java b/tools/dbfork/src/main/java/org/tron/utils/Utils.java
new file mode 100644
index 0000000..6722cbe
--- /dev/null
+++ b/tools/dbfork/src/main/java/org/tron/utils/Utils.java
@@ -0,0 +1,32 @@
+package org.tron.utils;
+
+import com.google.protobuf.ByteString;
+import java.io.File;
+import java.util.List;
+
+public class Utils {
+
+ public static boolean deleteDir(File dir) {
+ if (dir.isDirectory()) {
+ String[] children = dir.list();
+ for (int i = 0; i < children.length; i++) {
+ boolean success = deleteDir(new File(dir, children[i]));
+ if (!success) {
+ return false;
+ }
+ }
+ }
+ return dir.delete();
+ }
+
+ public static byte[] getActiveWitness(List<ByteString> witnesses) {
+ byte[] ba = new byte[witnesses.size() * Constant.ADDRESS_BYTE_ARRAY_LENGTH];
+ int i = 0;
+ for (ByteString address : witnesses) {
+ System.arraycopy(address.toByteArray(), 0,
+ ba, i * Constant.ADDRESS_BYTE_ARRAY_LENGTH, Constant.ADDRESS_BYTE_ARRAY_LENGTH);
+ i++;
+ }
+ return ba;
+ }
+}
diff --git a/tools/dbfork/src/main/resources/logback.xml b/tools/dbfork/src/main/resources/logback.xml
new file mode 100644
index 0000000..10cd6fa
--- /dev/null
+++ b/tools/dbfork/src/main/resources/logback.xml
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+
+
+
+ %d{HH:mm:ss.SSS} %-5level [%t] [%c{1}]\(%F:%L\) %m%n
+
+
+ INFO
+
+
+
+
+ ./logs/dbfork.log
+
+
+ ./logs/dbfork-%d{yyyy-MM-dd}.%i.log.gz
+
+ 500MB
+ 10
+ 50GB
+
+
+ %d{HH:mm:ss.SSS} %-5level [%t] [%c{1}]\(%F:%L\) %m%n
+
+
+ TRACE
+
+
+
+
+
+ 0
+
+
+
+ 100
+ true
+
+
+
+
+
+
+
+
+
diff --git a/tools/dbfork/src/test/java/org/tron/DBForkTest.java b/tools/dbfork/src/test/java/org/tron/DBForkTest.java
new file mode 100644
index 0000000..6c670ee
--- /dev/null
+++ b/tools/dbfork/src/test/java/org/tron/DBForkTest.java
@@ -0,0 +1,168 @@
+package org.tron;
+
+import com.google.protobuf.ByteString;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.tron.common.utils.ByteArray;
+import org.tron.common.utils.Commons;
+import org.tron.core.capsule.AccountCapsule;
+import org.tron.core.capsule.WitnessCapsule;
+import org.tron.db.TronDatabase;
+import org.tron.utils.Utils;
+import picocli.CommandLine;
+import static org.tron.utils.Constant.*;
+
+public class DBForkTest {
+
+ private TronDatabase witnessStore;
+ private TronDatabase witnessScheduleStore;
+ private TronDatabase accountStore;
+ private TronDatabase dynamicPropertiesStore;
+
+ @Rule
+ public final TemporaryFolder folder = new TemporaryFolder();
+ private String dbPath;
+ private String forkPath;
+ private String dbEngine = "leveldb";
+
+ public void init() {
+ witnessStore = new TronDatabase(dbPath, WITNESS_STORE, dbEngine);
+ witnessScheduleStore = new TronDatabase(dbPath, WITNESS_SCHEDULE_STORE,
+ dbEngine);
+ accountStore = new TronDatabase(dbPath, ACCOUNT_STORE, dbEngine);
+ dynamicPropertiesStore = new TronDatabase(dbPath, DYNAMIC_PROPERTY_STORE,
+ dbEngine);
+ }
+
+ public void close() {
+ witnessStore.close();
+ witnessScheduleStore.close();
+ accountStore.close();
+ dynamicPropertiesStore.close();
+ }
+
+ @Test
+ public void testDbFork() throws IOException {
+ dbPath = folder.newFolder().toString();
+ forkPath = getConfig("fork.conf");
+
+ String[] args = new String[]{"-d",
+ dbPath, "-c",
+ forkPath};
+ CommandLine cli = new CommandLine(new DBFork());
+ Assert.assertEquals(0, cli.execute(args));
+
+ init();
+ Config forkConfig;
+ File file = Paths.get(forkPath).toFile();
+ if (file.exists() && file.isFile()) {
+ forkConfig = ConfigFactory.parseFile(Paths.get(forkPath).toFile());
+ } else {
+ throw new IOException("Fork config file [" + forkPath + "] not exist!");
+ }
+
+ if (forkConfig.hasPath(WITNESS_KEY)) {
+ List<? extends Config> witnesses = forkConfig.getConfigList(WITNESS_KEY);
+ if (witnesses.isEmpty()) {
+ System.out.println("no witness listed in the config.");
+ }
+ witnesses = witnesses.stream()
+ .filter(c -> c.hasPath(WITNESS_ADDRESS))
+ .collect(Collectors.toList());
+ if (witnesses.isEmpty()) {
+ System.out.println("no witness listed in the config.");
+ }
+
+ List<ByteString> witnessAddresses = witnesses.stream().map(
+ w -> {
+ ByteString address = ByteString.copyFrom(
+ Commons.decodeFromBase58Check(w.getString(WITNESS_ADDRESS)));
+ return address;
+ }
+ ).collect(Collectors.toList());
+ Assert.assertArrayEquals(Utils.getActiveWitness(witnessAddresses),
+ witnessScheduleStore.get(ACTIVE_WITNESSES));
+
+ witnesses.stream().forEach(
+ w -> {
+ WitnessCapsule witnessCapsule = new WitnessCapsule(witnessStore.get(
+ Commons.decodeFromBase58Check(w.getString(WITNESS_ADDRESS))));
+ if (w.hasPath(WITNESS_VOTE)) {
+ Assert.assertEquals(w.getLong(WITNESS_VOTE), witnessCapsule.getVoteCount());
+ }
+ if (w.hasPath(WITNESS_URL)) {
+ Assert.assertEquals(w.getString(WITNESS_URL), witnessCapsule.getUrl());
+ }
+ }
+ );
+ }
+
+ if (forkConfig.hasPath(ACCOUNTS_KEY)) {
+ List<? extends Config> accounts = forkConfig.getConfigList(ACCOUNTS_KEY);
+ if (accounts.isEmpty()) {
+ System.out.println("no account listed in the config.");
+ }
+ accounts = accounts.stream()
+ .filter(c -> c.hasPath(ACCOUNT_ADDRESS))
+ .collect(Collectors.toList());
+ if (accounts.isEmpty()) {
+ System.out.println("no account listed in the config.");
+ }
+ accounts.stream().forEach(
+ a -> {
+ byte[] address = Commons.decodeFromBase58Check(a.getString(ACCOUNT_ADDRESS));
+ AccountCapsule account = new AccountCapsule(accountStore.get(address));
+ Assert.assertNotNull(account);
+ if (a.hasPath(ACCOUNT_BALANCE)) {
+ Assert.assertEquals(a.getLong(ACCOUNT_BALANCE), account.getBalance());
+ }
+ if (a.hasPath(ACCOUNT_NAME)) {
+ Assert.assertArrayEquals(ByteArray.fromString(a.getString(ACCOUNT_NAME)),
+ account.getAccountName().toByteArray());
+ }
+ if (a.hasPath(ACCOUNT_TYPE)) {
+ Assert.assertEquals(a.getString(ACCOUNT_TYPE), account.getType().toString());
+ }
+ if (a.hasPath(ACCOUNT_OWNER)) {
+ Assert.assertArrayEquals(Commons.decodeFromBase58Check(a.getString(ACCOUNT_OWNER)),
+ account.getPermissionById(0).getKeys(0).getAddress().toByteArray());
+ }
+ }
+ );
+ }
+
+ if (forkConfig.hasPath(LATEST_BLOCK_TIMESTAMP)) {
+ long latestBlockHeaderTimestamp = forkConfig.getLong(LATEST_BLOCK_TIMESTAMP);
+ Assert.assertEquals(latestBlockHeaderTimestamp,
+ ByteArray.toLong(dynamicPropertiesStore.get(LATEST_BLOCK_HEADER_TIMESTAMP)));
+ }
+
+ if (forkConfig.hasPath(MAINTENANCE_INTERVAL)) {
+ long maintenanceTimeInterval = forkConfig.getLong(MAINTENANCE_INTERVAL);
+ Assert.assertEquals(maintenanceTimeInterval,
+ ByteArray.toLong(dynamicPropertiesStore.get(MAINTENANCE_TIME_INTERVAL)));
+ }
+
+ if (forkConfig.hasPath(NEXT_MAINTENANCE_TIME)) {
+ long nextMaintenanceTime = forkConfig.getLong(NEXT_MAINTENANCE_TIME);
+ Assert.assertEquals(nextMaintenanceTime,
+ ByteArray.toLong(dynamicPropertiesStore.get(MAINTENANCE_TIME)));
+ }
+ close();
+ }
+
+ private static String getConfig(String config) {
+ URL path = DBForkTest.class.getClassLoader().getResource(config);
+ return path == null ? null : path.getPath();
+ }
+}
diff --git a/tools/dbfork/src/test/resources/fork.conf b/tools/dbfork/src/test/resources/fork.conf
new file mode 100644
index 0000000..b14a27d
--- /dev/null
+++ b/tools/dbfork/src/test/resources/fork.conf
@@ -0,0 +1,38 @@
+witnesses = [
+ {
+ address = "TS1hu4ZCcwBFYpQqUGoWy1GWBzamqxiT5W"
+ url = "http://meme5.com"
+ voteCount = 100000036
+ },
+ {
+ address = "TRY18iTFy6p8yhWiCt1dhd2gz2c15ungq3"
+ voteCount = 100000035
+ },
+ {
+ address = "TKmyxLsRR2FWMVEHaQA2pZh1xB7oXPXzG1"
+ }
+]
+
+accounts = [
+ {
+ address = "TS1hu4ZCcwBFYpQqUGoWy1GWBzamqxiT5W"
+ accountName = "Meme"
+ balance = 99000000000000000
+ },
+ {
+ address = "TRY18iTFy6p8yhWiCt1dhd2gz2c15ungq3"
+ accountType = "Normal"
+ balance = 99000000000000000
+ },
+ {
+ address = "TLLM21wteSPs4hKjbxgmH1L6poyMjeTbHm"
+ owner = "TS1hu4ZCcwBFYpQqUGoWy1GWBzamqxiT5W"
+ },
+ {
+ address = "TKmyxLsRR2FWMVEHaQA2pZh1xB7oXPXzG1"
+ }
+]
+
+latestBlockHeaderTimestamp = 1735628883000
+maintenanceTimeInterval = 21600000
+nextMaintenanceTime = 1735628894000
diff --git a/tools/docker/Dockerfile b/tools/docker/Dockerfile
new file mode 100644
index 0000000..64d5026
--- /dev/null
+++ b/tools/docker/Dockerfile
@@ -0,0 +1,62 @@
+FROM ubuntu:24.04
+ARG VERSION="dev"
+ENV NO_PROXY_CACHE="-o Acquire::BrokenProxy=true -o Acquire::http::No-Cache=true -o Acquire::http::Pipeline-Depth=0"
+
+ENV TMP_DIR="/tron-build"
+ENV JDK_TAR="jdk-8u202-linux-x64.tar.gz"
+ENV JDK_DIR="jdk1.8.0_202"
+ENV JDK_MD5="0029351f7a946f6c05b582100c7d45b7"
+ENV BASE_DIR="/java-tron"
+
+# Update and install dependencies without using any cache
+RUN apt-get update $NO_PROXY_CACHE && \
+ apt-get --quiet --yes install git wget 7zip curl jq && \
+ wget -P /usr/local https://github.com/frekele/oracle-java/releases/download/8u202-b08/$JDK_TAR \
+ && echo "$JDK_MD5 /usr/local/$JDK_TAR" | md5sum -c \
+ && tar -zxf /usr/local/$JDK_TAR -C /usr/local\
+ && rm /usr/local/$JDK_TAR \
+ && export JAVA_HOME=/usr/local/$JDK_DIR \
+ && export CLASSPATH=$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar \
+ && export PATH=$PATH:$JAVA_HOME/bin \
+ && echo "git clone" \
+ && mkdir -p $TMP_DIR \
+ && cd $TMP_DIR \
+ && git clone https://github.com/tronprotocol/java-tron.git \
+ && cd java-tron \
+ && git checkout master \
+ && ./gradlew build -x test \
+ && cd build/distributions \
+ && 7z x -y java-tron-1.0.0.zip \
+ && mv java-tron-1.0.0 $BASE_DIR \
+ && rm -rf $TMP_DIR \
+ && rm -rf ~/.gradle \
+ && mv /usr/local/$JDK_DIR/jre /usr/local \
+ && rm -rf /usr/local/$JDK_DIR && \
+ # Clean apt cache
+ apt-get clean && \
+ rm -rf /var/cache/apt/archives/* /var/cache/apt/archives/partial/* && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN wget -P $BASE_DIR/config https://raw.githubusercontent.com/tronprotocol/tron-deployment/master/main_net_config.conf
+
+ENV JAVA_HOME="/usr/local/jre"
+ENV PATH=$PATH:$JAVA_HOME/bin
+
+COPY docker-entrypoint.sh $BASE_DIR/bin
+
+WORKDIR $BASE_DIR
+
+ENTRYPOINT ["./bin/docker-entrypoint.sh"]
+
+# Build-time metadata as defined at http://label-schema.org
+ARG BUILD_DATE
+ARG VCS_REF
+LABEL org.label-schema.build-date=$BUILD_DATE \
+ org.label-schema.name="Java-TRON" \
+ org.label-schema.description="TRON protocol" \
+ org.label-schema.url="https://tron.network/" \
+ org.label-schema.vcs-ref=$VCS_REF \
+ org.label-schema.vcs-url="https://github.com/tronprotocol/java-tron.git" \
+ org.label-schema.vendor="TRON protocol" \
+ org.label-schema.version=$VERSION \
+ org.label-schema.schema-version="1.0"
diff --git a/tools/docker/build.gradle b/tools/docker/build.gradle
new file mode 100644
index 0000000..77a98c5
--- /dev/null
+++ b/tools/docker/build.gradle
@@ -0,0 +1,152 @@
+/*
+ * This file was generated by the Gradle 'init' task.
+ *
+ * This is a general purpose Gradle build.
+ * Learn more about Gradle by exploring our samples at https://docs.gradle.org/7.6.4/samples
+ */
+import java.text.SimpleDateFormat
+
+//allprojects {
+// version = "1.0.0"
+// apply plugin: "java-library"
+//}
+
+version = "1.0.0"
+apply plugin: "java-library"
+
+def getGitCommitDetails(length = 8) {
+ try {
+ def gitFolder = "${rootDir}/../.git/"
+ if (!file(gitFolder).isDirectory()) {
+ gitFolder = file(gitFolder).text.substring(length).trim() + "/"
+ }
+ def takeFromHash = length
+ def head = new File(gitFolder + "HEAD").text.split(":")
+ def isCommit = head.length == 1
+
+ def commitHash, refHeadFile
+ if (isCommit) {
+ commitHash = head[0].trim().take(takeFromHash)
+ refHeadFile = new File(gitFolder + "HEAD")
+ } else {
+ refHeadFile = new File(gitFolder + head[1].trim())
+ commitHash = refHeadFile.text.trim().take(takeFromHash)
+ }
+
+ // Use head file modification time as a proxy for the build date
+ def lastModified = new Date(refHeadFile.lastModified())
+ // Format the date as "yy.M" (e.g. 24.3 for March 2024)
+ def formattedDate = new SimpleDateFormat("yy.M").format(lastModified)
+
+ return [hash: commitHash, date: formattedDate]
+ } catch (Exception e) {
+ logger.warn('Could not calculate git commit details, using defaults (run with --info for stacktrace)')
+ logger.info('Error retrieving git commit details', e)
+ return [hash: "xxxxxxxx", date: "00.0"]
+ }
+}
+
+// http://label-schema.org/rc1/
+// using the RFC3339 format "2016-04-12T23:20:50.52Z"
+def buildTime() {
+ def df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'")
+ df.setTimeZone(TimeZone.getTimeZone("UTC"))
+ return df.format(new Date())
+}
+
+// set the shell command to use according to os
+def shell = org.gradle.internal.os.OperatingSystem.current().isWindows() ? "${projectDir}\\wslsh.bat" : '/bin/bash'
+
+def dockerBuildVersion = project.hasProperty('release.releaseVersion') ? project.getProperty('release.releaseVersion') : "${version}"
+def dockerOrgName = project.hasProperty('dockerOrgName') ? project.getProperty("dockerOrgName") : "tronprotocol"
+def dockerArtifactName = project.hasProperty("dockerArtifactName") ? project.getProperty("dockerArtifactName") : "java-tron"
+def dockerImageName = "${dockerOrgName}/${dockerArtifactName}"
+
+task sourceDocker {
+ def dockerBuildDir = "${rootDir}/build/docker-tron/"
+
+ doLast {
+ copy {
+ from file("${rootDir}/../docker/Dockerfile")
+ into(dockerBuildDir)
+ }
+ copy {
+ from file("${rootDir}/../docker/docker-entrypoint.sh")
+ into "${dockerBuildDir}"
+ }
+ exec {
+ def image = "${dockerImageName}:${dockerBuildVersion}"
+ def dockerPlatform = ""
+ if (project.hasProperty('docker-platform')){
+ dockerPlatform = "--platform ${project.getProperty('docker-platform')}"
+ println "Building for platform ${project.getProperty('docker-platform')}"
+ } else {
+ dockerPlatform = "--platform linux/amd64"
+ println "Building for default linux/amd64 platform"
+ }
+ def gitDetails = getGitCommitDetails(7)
+ executable shell
+ workingDir dockerBuildDir
+ args "-c", "docker build ${dockerPlatform} --build-arg BUILD_DATE=${buildTime()} --build-arg VERSION=${dockerBuildVersion} --build-arg VCS_REF=${gitDetails.hash} -t ${image} ."
+ }
+ }
+}
+
+// Takes the version and if it contains SNAPSHOT, alpha, beta or RC in version then return true indicating an interim build
+def isInterimBuild(dockerBuildVersion) {
+ return (dockerBuildVersion ==~ /.*-SNAPSHOT/) || (dockerBuildVersion ==~ /.*-alpha/)
+ || (dockerBuildVersion ==~ /.*-beta/) || (dockerBuildVersion ==~ /.*-RC.*/)
+ || (dockerBuildVersion ==~ /.*develop.*/)
+}
+
+
+task testDocker {
+ dependsOn sourceDocker
+ def dockerReportsDir = "${rootDir}/../docker/reports/"
+
+ doFirst {
+ new File(dockerReportsDir).mkdir()
+ }
+
+ doLast {
+ exec {
+ def image = "${dockerImageName}:${dockerBuildVersion}"
+ workingDir "${rootDir}/../docker"
+ executable shell
+ args "-c", "./test.sh ${image}"
+ }
+ }
+}
+
+// Make sure to `docker login` first
+task dockerUpload {
+ dependsOn sourceDocker
+ def architecture = System.getenv('architecture')
+ if (architecture == null) {
+ architecture = "amd64" //set default as amd64
+ }
+ def image = "${dockerImageName}:${dockerBuildVersion}"
+ def additionalTags = []
+
+ if (project.hasProperty('branch') && project.property('branch') == 'main') {
+ additionalTags.add('develop')
+ }
+
+ if (!isInterimBuild(dockerBuildVersion)) {
+ additionalTags.add(dockerBuildVersion.split(/\./)[0..1].join('.'))
+ }
+
+ doLast {
+ exec {
+ def archVariantImage = "${image}-${architecture}"
+ def cmd = "docker tag '${image}' '${archVariantImage}' && docker push '${archVariantImage}'"
+ println "Executing '${cmd}'"
+ executable shell
+ args "-c", cmd
+ }
+ }
+}
+
+tasks.named('jar') {
+ enabled = false
+}
diff --git a/tools/docker/docker-entrypoint.sh b/tools/docker/docker-entrypoint.sh
new file mode 100755
index 0000000..befded7
--- /dev/null
+++ b/tools/docker/docker-entrypoint.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -eo pipefail
+shopt -s nullglob
+
+# shellcheck disable=SC2145
+echo "./bin/FullNode $@" > command.txt
+exec "./bin/FullNode" "$@"
diff --git a/tools/docker/docker.md b/tools/docker/docker.md
new file mode 100644
index 0000000..f6e0635
--- /dev/null
+++ b/tools/docker/docker.md
@@ -0,0 +1,108 @@
+# Docker Shell Guide
+
+java-tron supports containerized deployment; we maintain a Docker image built from the latest version of our master branch on DockerHub. To simplify the use of Docker and common docker commands, we also provide a shell script to help you better manage the container service. This guide describes how to use that script.
+
+
+## Prerequisites
+
+Requires a docker to be installed on the system. Docker version >=20.10.12.
+
+
+## Quick Start
+
+The script can be obtained from the java-tron project or on its own: get it from [here](https://github.com/tronprotocol/java-tron/blob/develop/docker/docker.sh) or download it via wget:
+```shell
+$ wget https://raw.githubusercontent.com/tronprotocol/java-tron/develop/docker/docker.sh
+```
+
+### Pull the mirror image
+Get the `tronprotocol/java-tron` image from DockerHub. This image contains the full JDK environment and the host network configuration file; use the script for simple docker operations.
+```shell
+$ sh docker.sh --pull
+```
+
+### Run the service
+Before running the java-tron service, make sure the required ports on your local machine are open; the image automatically exposes the following ports:
+- `8090`: used by the HTTP based JSON API
+- `50051`: used by the GRPC based API
+- `18888`: TCP and UDP, used by the P2P protocol running the network
+
+#### Full node on the main network
+
+```shell
+$ sh docker.sh --run --net main
+```
+or you can use `-p` to customize the port mapping, more custom parameters, please refer to [Options](#Options)
+
+```shell
+$ sh docker.sh --run --net main -p 8080:8090 -p 40051:50051
+```
+
+#### Full node on the nile test network
+```shell
+$ sh docker.sh --run --net test
+```
+
+#### Full node on the private network
+You can also build your own private network. The script will download a configuration file for your private network, which will be stored in your local `config` directory.
+```shell
+$ sh docker.sh --run --net private
+```
+#### Configuration
+The script will automatically download and use the corresponding configuration file from the GitHub repository according to the `--net` parameter. If you don't want to update the configuration file every time you start the service, add the following startup parameter.
+
+```shell
+$ sh docker.sh --run --update-config false
+```
+
+Or use the `-c` parameter to specify your own configuration file, which will not automatically download a new configuration file from github repository.
+
+
+### View logs
+If you want to see the logs of the java-tron service, please use the `--log` parameter
+
+```shell
+$ sh docker.sh --log | grep 'PushBlock'
+```
+### Stop the service
+
+If you want to stop the container of java-tron, you can execute
+
+```shell
+$ sh docker.sh --stop
+```
+
+## Build Image
+
+If you do not want to use the default official image, you can also compile your own local image, first you need to change some parameters in the shell script to specify your own mirror info.
+`DOCKER_REPOSITORY` is your repository name
+`DOCKER_IMAGES` is the image name
+`DOCKER_TARGET` is the version number, here is an example:
+
+```shell
+DOCKER_REPOSITORY="your_repository"
+DOCKER_IMAGES="java-tron"
+DOCKER_TARGET="1.0"
+```
+
+then execute the build:
+
+```shell
+$ sh docker.sh --build
+```
+
+## Options
+
+Parameters for all functions:
+
+* **`--build`** building a local mirror image
+* **`--pull`** download a docker mirror from **DockerHub**
+* **`--run`** run the docker mirror
+* **`--log`** exporting the java-tron run log on the container
+* **`--stop`** stopping a running container
+* **`--rm`** remove container,only deletes the container, not the image
+* **`-p`** publish a container's port to the host, format:`-p hostPort:containerPort`
+* **`-c`** specify other java-tron configuration file in the container
+* **`-v`** bind mount a volume for the container,format: `-v host-src:container-dest`, the `host-src` is an absolute path
+* **`--net`** select the network, you can join the main-net, test-net
+* **`--update-config`** update configuration file, default true
diff --git a/tools/docker/docker.sh b/tools/docker/docker.sh
new file mode 100644
index 0000000..bf4961f
--- /dev/null
+++ b/tools/docker/docker.sh
@@ -0,0 +1,291 @@
+#!/bin/bash
+#############################################################################
+#
+# GNU LESSER GENERAL PUBLIC LICENSE
+# Version 3, 29 June 2007
+#
+# Copyright (C) [2007] [TRON Foundation], Inc.
+# Everyone is permitted to copy and distribute verbatim copies
+# of this license document, but changing it is not allowed.
+#
+#
+# This version of the GNU Lesser General Public License incorporates
+# the terms and conditions of version 3 of the GNU General Public
+# License, supplemented by the additional permissions listed below.
+#
+# You can find java-tron at https://github.com/tronprotocol/java-tron/
+#
+##############################################################################
+
+BASE_DIR="/java-tron"
+DOCKER_REPOSITORY="tronprotocol"
+DOCKER_IMAGES="java-tron"
+# latest or version
+DOCKER_TARGET="latest"
+
+HOST_HTTP_PORT=8090
+HOST_RPC_PORT=50051
+HOST_LISTEN_PORT=18888
+
+DOCKER_HTTP_PORT=8090
+DOCKER_RPC_PORT=50051
+DOCKER_LISTEN_PORT=18888
+
+VOLUME=`pwd`
+CONFIG="$VOLUME/config"
+OUTPUT_DIRECTORY="$VOLUME/output-directory"
+
+CONFIG_PATH="/java-tron/config/"
+CONFIG_FILE="main_net_config.conf"
+MAIN_NET_CONFIG_FILE="main_net_config.conf"
+TEST_NET_CONFIG_FILE="test_net_config.conf"
+PRIVATE_NET_CONFIG_FILE="private_net_config.conf"
+
+# update the configuration file, if true, the configuration file will be fetched from the network every time you start
+UPDATE_CONFIG=true
+
+LOG_FILE="/logs/tron.log"
+
+JAVA_TRON_REPOSITORY="https://raw.githubusercontent.com/tronprotocol/java-tron/develop/"
+DOCKER_FILE="Dockerfile"
+ENDPOINT_SHELL="docker-entrypoint.sh"
+
+if test docker; then
+ docker -v
+else
+ echo "warning: docker must be installed, please install docker first."
+ exit
+fi
+
+docker_ps() {
+ containerID=`docker ps -a | grep "$DOCKER_REPOSITORY-$DOCKER_IMAGES" | awk '{print $1}'`
+ cid=$containerID
+}
+
+docker_image() {
+ image_name=`docker images |grep "$DOCKER_REPOSITORY/$DOCKER_IMAGES" |awk {'print $1'}| awk 'NR==1'`
+ image=$image_name
+}
+
+download_config() {
+ mkdir -p config
+ if test curl; then
+ curl -o config/$CONFIG_FILE -LO https://raw.githubusercontent.com/tronprotocol/tron-deployment/master/$CONFIG_FILE -s
+ elif test wget; then
+ wget -P -q config/ https://raw.githubusercontent.com/tronprotocol/tron-deployment/master/$CONFIG_FILE
+ fi
+}
+
+
+check_download_config() {
+ if [[ ! -d 'config' || ! -f "config/$CONFIG_FILE" ]]; then
+ mkdir -p config
+ if test curl; then
+ curl -o config/$CONFIG_FILE -LO https://raw.githubusercontent.com/tronprotocol/tron-deployment/master/$CONFIG_FILE -s
+ elif test wget; then
+ wget -P -q config/ https://raw.githubusercontent.com/tronprotocol/tron-deployment/master/$CONFIG_FILE
+ fi
+ fi
+}
+
+run() {
+ docker_image
+
+ if [ ! $image ] ; then
+ echo 'warning: no java-tron mirror image, do you need to get the mirror image?[y/n]'
+ read need
+
+ if [[ $need == 'y' || $need == 'yes' ]]; then
+ pull
+ else
+ echo "warning: no mirror image found, go ahead and download a mirror."
+ exit
+ fi
+ fi
+
+ volume=""
+ parameter=""
+ tron_parameter=""
+ if [ $# -gt 0 ]; then
+ while [ -n "$1" ]; do
+ case "$1" in
+ -v)
+ volume="$volume -v $2"
+ shift 2
+ ;;
+ -p)
+ parameter="$parameter -p $2"
+ shift 2
+ ;;
+ -c)
+ tron_parameter="$tron_parameter -c $2"
+ UPDATE_CONFIG=false
+ shift 2
+ ;;
+ --net)
+ if [[ "$2" = "main" ]]; then
+ CONFIG_FILE=$MAIN_NET_CONFIG_FILE
+ elif [[ "$2" = "test" ]]; then
+ CONFIG_FILE=$TEST_NET_CONFIG_FILE
+ elif [[ "$2" = "private" ]]; then
+ CONFIG_FILE=$PRIVATE_NET_CONFIG_FILE
+ fi
+ shift 2
+ ;;
+ --update-config)
+ UPDATE_CONFIG=$2
+ shift 2
+ ;;
+ *)
+ echo "run: arg $1 is not a valid parameter"
+ exit
+ ;;
+ esac
+ done
+ if [ $UPDATE_CONFIG = true ]; then
+ download_config
+ fi
+
+ if [ -z "$volume" ]; then
+ volume=" -v $CONFIG:/java-tron/config -v $OUTPUT_DIRECTORY:/java-tron/output-directory"
+ fi
+
+ if [ -z "$parameter" ]; then
+ parameter=" -p $HOST_HTTP_PORT:$DOCKER_HTTP_PORT -p $HOST_RPC_PORT:$DOCKER_RPC_PORT -p $HOST_LISTEN_PORT:$DOCKER_LISTEN_PORT"
+ fi
+
+ if [ -z "$tron_parameter" ]; then
+ tron_parameter=" -c $CONFIG_PATH$CONFIG_FILE"
+ fi
+
+ # Using custom parameters
+ docker run -d -it --name "$DOCKER_REPOSITORY-$DOCKER_IMAGES" \
+ $volume \
+ $parameter \
+ --restart always \
+ "$DOCKER_REPOSITORY/$DOCKER_IMAGES:$DOCKER_TARGET" \
+ $tron_parameter
+ else
+ if [ $UPDATE_CONFIG = true ]; then
+ download_config
+ fi
+ # Default parameters
+ docker run -d -it --name "$DOCKER_REPOSITORY-$DOCKER_IMAGES" \
+ -v $CONFIG:/java-tron/config \
+ -v $OUTPUT_DIRECTORY:/java-tron/output-directory \
+ -p $HOST_HTTP_PORT:$DOCKER_HTTP_PORT \
+ -p $HOST_RPC_PORT:$DOCKER_RPC_PORT \
+ -p $HOST_LISTEN_PORT:$DOCKER_LISTEN_PORT \
+ --restart always \
+ "$DOCKER_REPOSITORY/$DOCKER_IMAGES:$DOCKER_TARGET" \
+ -c "$CONFIG_PATH$CONFIG_FILE"
+ fi
+}
+
+build() {
+ echo 'docker build'
+ if [ ! -f "Dockerfile" ]; then
+ echo 'warning: Dockerfile not exists.'
+ if test curl; then
+ DOWNLOAD_CMD="curl -LJO "
+ elif test wget; then
+ DOWNLOAD_CMD="wget "
+ else
+ echo "Dockerfile cannot be downloaded, you need to install 'curl' or 'wget'!"
+ exit
+ fi
+ # download Dockerfile
+ `$DOWNLOAD_CMD "$JAVA_TRON_REPOSITORY$DOCKER_FILE"`
+ `$DOWNLOAD_CMD "$JAVA_TRON_REPOSITORY$ENDPOINT_SHELL"`
+ chmod u+rwx $ENDPOINT_SHELL
+ fi
+ docker build -t "$DOCKER_REPOSITORY/$DOCKER_IMAGES:$DOCKER_TARGET" .
+}
+
+pull() {
+ echo "docker pull $DOCKER_REPOSITORY/$DOCKER_IMAGES:$DOCKER_TARGET"
+ docker pull "$DOCKER_REPOSITORY/$DOCKER_IMAGES:$DOCKER_TARGET"
+}
+
+start() {
+ docker_ps
+ if [ $cid ]; then
+ echo "containerID: $cid"
+ echo "docker stop $cid"
+ docker start $cid
+ docker ps
+ else
+ echo "container not running!"
+ fi
+}
+
+stop() {
+ docker_ps
+ if [ $cid ]; then
+ echo "containerID: $cid"
+ echo "docker stop $cid"
+ docker stop $cid
+ docker ps
+ else
+ echo "container not running!"
+ fi
+}
+
+rm_container() {
+ stop
+ if [ $cid ]; then
+ echo "containerID: $cid"
+ echo "docker rm $cid"
+ docker rm $cid
+ docker_ps
+ else
+ echo "image not exists!"
+ fi
+}
+
+log() {
+ docker_ps
+
+ if [ $cid ]; then
+ echo "containerID: $cid"
+ docker exec -it $cid tail -100f $BASE_DIR/$LOG_FILE
+ else
+ echo "container not exists!"
+ fi
+
+}
+
+case "$1" in
+ --pull)
+ pull ${@: 2}
+ exit
+ ;;
+ --start)
+ start ${@: 2}
+ exit
+ ;;
+ --stop)
+ stop ${@: 2}
+ exit
+ ;;
+ --build)
+ build ${@: 2}
+ exit
+ ;;
+ --run)
+ run ${@: 2}
+ exit
+ ;;
+ --rm)
+ rm_container ${@: 2}
+ exit
+ ;;
+ --log)
+ log ${@: 2}
+ exit
+ ;;
+ *)
+ echo "arg: $1 is not a valid parameter"
+ exit
+ ;;
+esac
diff --git a/tools/docker/test.sh b/tools/docker/test.sh
new file mode 100755
index 0000000..fb58cc9
--- /dev/null
+++ b/tools/docker/test.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+##
+## Copyright contributors to Besu.
+##
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+## the License. You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+## an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+## specific language governing permissions and limitations under the License.
+##
+## SPDX-License-Identifier: Apache-2.0
+##
+
+export TEST_PATH=./tests
+#export GOSS_PATH=$TEST_PATH/goss-linux-${architecture} # TODO. fixed by https://github.com/goss-org/goss/tree/master/extras/dgoss#mac-osx
+export GOSS_PATH=$TEST_PATH/goss-linux-amd64
+export GOSS_OPTS="$GOSS_OPTS --format junit"
+export GOSS_FILES_STRATEGY=cp
+DOCKER_IMAGE=$1
+# shellcheck disable=SC2034
+DOCKER_FILE="${2:-$PWD/Dockerfile}"
+
+i=0
+
+# Test for normal startup with ports opened
+# we test that things listen on the right interface/port, not what interface the advertise
+# hence we dont set p2p-host=0.0.0.0 because this sets what its advertising to devp2p; the important piece is that it defaults to listening on all interfaces
+GOSS_FILES_PATH=$TEST_PATH/01 \
+bash $TEST_PATH/dgoss run --sysctl net.ipv6.conf.all.disable_ipv6=1 "$DOCKER_IMAGE" \
+#-p 8090:8090 -p 8091:8091 -p 18888:18888 -p 18888:18888/udp -p 50051:50051
+# shellcheck disable=SC2006
+# shellcheck disable=SC2003
+# shellcheck disable=SC2188
+> ./reports/01.xml || i=`expr $i + 1`
+
+exit "$i"
diff --git a/tools/docker/tests/01/goss.yaml b/tools/docker/tests/01/goss.yaml
new file mode 100644
index 0000000..cbe6dc8
--- /dev/null
+++ b/tools/docker/tests/01/goss.yaml
@@ -0,0 +1,5 @@
+---
+# runtime docker tests
+process:
+ java:
+ running: true
diff --git a/tools/docker/tests/01/goss_wait.yaml b/tools/docker/tests/01/goss_wait.yaml
new file mode 100644
index 0000000..9b5820f
--- /dev/null
+++ b/tools/docker/tests/01/goss_wait.yaml
@@ -0,0 +1,39 @@
+---
+# runtime docker tests for interfaces & ports
+port:
+ tcp:18888:
+ listening: true
+ ip:
+ - 0.0.0.0
+ udp:18888:
+ listening: true
+ ip:
+ - 0.0.0.0
+ tcp:50051:
+ listening: true
+ ip:
+ - 0.0.0.0
+ tcp:8090:
+ listening: true
+ ip:
+ - 0.0.0.0
+ tcp:8091:
+ listening: true
+ ip:
+ - 0.0.0.0
+addr:
+ tcp://localhost:8090:
+ reachable: true
+ timeout: 500
+
+http:
+ http://localhost:8090/wallet/getnowblock:
+ method: GET
+ status: 200
+ body:
+ - "\"blockID\":\"00000000000000001ebf88508a03865c71d452e25f4d51194196a1d22b6653dc\""
+
+command:
+ /goss/testSync.sh: # use absolute directory to run shell
+ exit-status: 0
+ timeout: 10000000
diff --git a/tools/docker/tests/01/testSync.sh b/tools/docker/tests/01/testSync.sh
new file mode 100755
index 0000000..c789768
--- /dev/null
+++ b/tools/docker/tests/01/testSync.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+last_syncNum=0
+
+response=$(curl -X GET http://127.0.0.1:8090/wallet/getnodeinfo)
+
+# parse response
+result=$(echo "$response" | jq -r '.beginSyncNum')
+echo "result 1: $result, response 1: $response"
+
+# shellcheck disable=SC2236
+if [ ! -z "$result" ]; then
+ last_sync=$(printf "%d" "$result")
+ last_syncNum=$last_sync
+ echo "TRON node first sync: $last_sync"
+
+else
+ alert="TRON node first sync error: : $response"
+ echo "$alert"
+ exit 1
+fi
+
+# interval as 10s
+monitor_interval=10
+# try 100 times
+count=100
+second_syncNum=0
+
+check_sync_process() {
+ response=$(curl -X GET http://127.0.0.1:8090/wallet/getnodeinfo)
+
+ # parse response
+ result=$(echo "$response" | jq -r '.beginSyncNum')
+ echo "result 2: $result, response 2: $response"
+
+ # shellcheck disable=SC2236
+ if [ ! -z "$result" ]; then
+ last_sync=$(printf "%d" "$result")
+ second_syncNum=$last_sync
+ echo "TRON node second sync: $last_sync"
+ else
+ alert="TRON node second sync error: : $response"
+ echo "$alert"
+ exit 1
+ fi
+}
+
+# shellcheck disable=SC2004
+for((i=1;i<=$count;i++)); do
+ echo "try i: $i"
+ sleep $monitor_interval
+ check_sync_process
+ if [ "$second_syncNum" -gt "$last_syncNum" ]; then
+ echo "sync increased"
+ exit 0
+ fi
+done
+echo "sync not increased"
+exit 1
diff --git a/tools/docker/tests/dgoss b/tools/docker/tests/dgoss
new file mode 100755
index 0000000..f29a9f7
--- /dev/null
+++ b/tools/docker/tests/dgoss
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -e
+
+USAGE="USAGE: $(basename "$0") [run|edit|dockerfile] "
+GOSS_FILES_PATH="${GOSS_FILES_PATH:-.}"
+
+info() {
+ echo -e "INFO: $*" >&2;
+}
+error() {
+ echo -e "ERROR: $*" >&2;
+ exit 1;
+}
+
+cleanup() {
+ set +e
+ { kill "$log_pid" && wait "$log_pid"; } 2> /dev/null
+ rm -rf "$tmp_dir"
+ if [[ $id ]];then
+ info "Deleting container"
+ docker rm -vf "$id" > /dev/null
+ fi
+}
+
+# Copy goss & any config into a folder which then gets mounted as a volume
+# Run the container and perform the checks
+setup_container(){
+ info "Setting up test dir"
+ cp "${GOSS_PATH}" "$tmp_dir/goss"
+ chmod 755 "$tmp_dir/goss"
+ [[ -e "${GOSS_FILES_PATH}/goss.yaml" ]] && cp "${GOSS_FILES_PATH}/goss.yaml" "$tmp_dir"
+ [[ -e "${GOSS_FILES_PATH}/goss_wait.yaml" ]] && cp "${GOSS_FILES_PATH}/goss_wait.yaml" "$tmp_dir"
+ [[ -e "${GOSS_FILES_PATH}/testSync.sh" ]] && cp "${GOSS_FILES_PATH}/testSync.sh" "$tmp_dir"
+ # shellcheck disable=SC2236
+ [[ ! -z "${GOSS_VARS}" ]] && [[ -e "${GOSS_FILES_PATH}/${GOSS_VARS}" ]] && cp "${GOSS_FILES_PATH}/${GOSS_VARS}" "$tmp_dir"
+ # copy the Dockerfile if path has been provided
+ # shellcheck disable=SC2236
+ [[ ! -z "${3}" ]] && [[ "${3}" == *"Dockerfile"* ]] && cp "${3}" "$tmp_dir"
+ info "Setup complete"
+
+ # Switch between mount or cp files strategy
+ GOSS_FILES_STRATEGY=${GOSS_FILES_STRATEGY:="mount"}
+ case "$GOSS_FILES_STRATEGY" in
+ mount)
+ info "Starting docker container"
+ id=$(docker run -d -v "$tmp_dir:/goss:z" "${@:2}")
+ docker logs -f "$id" > "$tmp_dir/docker_output.log" 2>&1 &
+ ;;
+ cp)
+ info "Creating docker container"
+ # shellcheck disable=SC2068
+ id=$(docker create ${@:2})
+ info "Copy goss files into container"
+ docker cp "$tmp_dir"/. "$id":/goss
+ info "Starting docker container"
+ docker start "$id" > /dev/null
+ ;;
+ *) error "Wrong goss files strategy used! Correct options are \"mount\" or \"cp\"."
+ esac
+
+ log_pid=$!
+ info "Container ID: ${id:0:8}"
+}
+
+get_file_from_docker() {
+ if docker exec "$id" sh -c "test -e $1" > /dev/null;then
+ mkdir -p "${GOSS_FILES_PATH}"
+ info "Copied '$1' from container to '${GOSS_FILES_PATH}'"
+ docker cp "$id:$1" "${GOSS_FILES_PATH}"
+ fi
+}
+
+# Main
+tmp_dir=$(mktemp -d /tmp/tmp.XXXXXXXXXX)
+chmod 777 "$tmp_dir"
+# shellcheck disable=SC2154
+trap 'ret=$?;cleanup;exit $ret' EXIT
+
+GOSS_PATH="${GOSS_PATH:-$(which goss 2> /dev/null || true)}"
+[[ $GOSS_PATH ]] || { error "Couldn't find goss installation, please set GOSS_PATH to it"; }
+[[ ${GOSS_OPTS+x} ]] || GOSS_OPTS="--color --format documentation"
+[[ ${GOSS_WAIT_OPTS+x} ]] || GOSS_WAIT_OPTS="-r 30s -s 1s > /dev/null"
+GOSS_SLEEP=${GOSS_SLEEP:-1.0}
+
+case "$1" in
+ run)
+ info "Run Docker tests"
+ setup_container "$@"
+ if [[ -e "${GOSS_FILES_PATH}/goss_wait.yaml" ]]; then
+ info "Found goss_wait.yaml, waiting for it to pass before running tests"
+ if [[ -z "${GOSS_VARS}" ]]; then
+ if ! docker exec "$id" sh -c "/goss/goss -g /goss/goss_wait.yaml validate $GOSS_WAIT_OPTS"; then
+ error "goss_wait.yaml never passed"
+ fi
+ else
+ if ! docker exec "$id" sh -c "/goss/goss -g /goss/goss_wait.yaml --vars='/goss/${GOSS_VARS}' validate $GOSS_WAIT_OPTS"; then
+ error "goss_wait.yaml never passed"
+ fi
+ fi
+ fi
+ [[ $GOSS_SLEEP ]] && { info "Sleeping for $GOSS_SLEEP"; sleep "$GOSS_SLEEP"; }
+ # info "Container health"
+ # if ! docker top $id; then
+ # docker logs $id
+ # fi
+ info "Running Tests"
+ if [[ -z "${GOSS_VARS}" ]]; then
+ docker exec "$id" sh -c "/goss/goss -g /goss/goss.yaml validate $GOSS_OPTS"
+ else
+ docker exec "$id" sh -c "/goss/goss -g /goss/goss.yaml --vars='/goss/${GOSS_VARS}' validate $GOSS_OPTS"
+ fi
+ ;;
+ dockerfile)
+ info "Run Dockerfile tests"
+ setup_container "$@"
+ docker exec "$id" sh -c "cat /goss/goss.yaml"
+ docker exec "$id" sh -c "/goss/goss -g /goss/goss.yaml validate $GOSS_OPTS"
+ ;;
+ edit)
+ setup_container "$@"
+ info "Run goss add/autoadd to add resources"
+ docker exec -it "$id" sh -c 'cd /goss; PATH="/goss:$PATH" exec sh'
+ get_file_from_docker "/goss/goss.yaml"
+ get_file_from_docker "/goss/goss_wait.yaml"
+ # shellcheck disable=SC2236
+ [[ ! -z "${GOSS_VARS}" ]] && get_file_from_docker "/goss/${GOSS_VARS}"
+ ;;
+ *)
+ error "$USAGE"
+esac
diff --git a/tools/docker/tests/goss-linux-amd64 b/tools/docker/tests/goss-linux-amd64
new file mode 100755
index 0000000..7f86a12
Binary files /dev/null and b/tools/docker/tests/goss-linux-amd64 differ
diff --git a/tools/gradlew/build.gradle b/tools/gradlew/build.gradle
new file mode 100644
index 0000000..6029ef7
--- /dev/null
+++ b/tools/gradlew/build.gradle
@@ -0,0 +1,26 @@
+plugins {
+ id 'java'
+}
+
+tasks.named('jar') {
+ enabled = false
+}
+
+task copyToParent(type: Copy) {
+ into "$buildDir/libs"
+ subprojects.each { subproject ->
+ from(subproject.tasks.withType(Jar))
+ }
+}
+
+build.finalizedBy(copyToParent)
+
+gradle.buildFinished {
+ if (project.hasProperty('cleanSubBuild')) {
+ subprojects.each { subproject ->
+ if (subproject.buildDir.exists()) {
+ subproject.buildDir.deleteDir()
+ }
+ }
+ }
+}
diff --git a/tools/gradlew/gradle/wrapper/gradle-wrapper.jar b/tools/gradlew/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000..afba109
Binary files /dev/null and b/tools/gradlew/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/tools/gradlew/gradle/wrapper/gradle-wrapper.properties b/tools/gradlew/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 0000000..c7d437b
--- /dev/null
+++ b/tools/gradlew/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,6 @@
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-7.6.4-bin.zip
+networkTimeout=10000
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
diff --git a/tools/gradlew/gradlew b/tools/gradlew/gradlew
new file mode 100755
index 0000000..79a61d4
--- /dev/null
+++ b/tools/gradlew/gradlew
@@ -0,0 +1,244 @@
+#!/bin/sh
+
+#
+# Copyright © 2015-2021 the original authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##############################################################################
+#
+# Gradle start up script for POSIX generated by Gradle.
+#
+# Important for running:
+#
+# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
+# noncompliant, but you have some other compliant shell such as ksh or
+# bash, then to run this script, type that shell name before the whole
+# command line, like:
+#
+# ksh Gradle
+#
+# Busybox and similar reduced shells will NOT work, because this script
+# requires all of these POSIX shell features:
+# * functions;
+# * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
+# «${var#prefix}», «${var%suffix}», and «$( cmd )»;
+# * compound commands having a testable exit status, especially «case»;
+# * various built-in commands including «command», «set», and «ulimit».
+#
+# Important for patching:
+#
+# (2) This script targets any POSIX shell, so it avoids extensions provided
+# by Bash, Ksh, etc; in particular arrays are avoided.
+#
+# The "traditional" practice of packing multiple parameters into a
+# space-separated string is a well documented source of bugs and security
+# problems, so this is (mostly) avoided, by progressively accumulating
+# options in "$@", and eventually passing that to Java.
+#
+# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
+# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
+# see the in-line comments for details.
+#
+# There are tweaks for specific operating systems such as AIX, CygWin,
+# Darwin, MinGW, and NonStop.
+#
+# (3) This script is generated from the Groovy template
+# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
+# within the Gradle project.
+#
+# You can find Gradle at https://github.com/gradle/gradle/.
+#
+##############################################################################
+
+# Attempt to set APP_HOME
+
+# Resolve links: $0 may be a link
+app_path=$0
+
+# Need this for daisy-chained symlinks.
+while
+ APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path
+ [ -h "$app_path" ]
+do
+ ls=$( ls -ld "$app_path" )
+ link=${ls#*' -> '}
+ case $link in #(
+ /*) app_path=$link ;; #(
+ *) app_path=$APP_HOME$link ;;
+ esac
+done
+
+# This is normally unused
+# shellcheck disable=SC2034
+APP_BASE_NAME=${0##*/}
+APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD=maximum
+
+warn () {
+ echo "$*"
+} >&2
+
+die () {
+ echo
+ echo "$*"
+ echo
+ exit 1
+} >&2
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "$( uname )" in #(
+ CYGWIN* ) cygwin=true ;; #(
+ Darwin* ) darwin=true ;; #(
+ MSYS* | MINGW* ) msys=true ;; #(
+ NONSTOP* ) nonstop=true ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD=$JAVA_HOME/jre/sh/java
+ else
+ JAVACMD=$JAVA_HOME/bin/java
+ fi
+ if [ ! -x "$JAVACMD" ] ; then
+ die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+ fi
+else
+ JAVACMD=java
+ which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
+ case $MAX_FD in #(
+ max*)
+ # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
+ # shellcheck disable=SC3045
+ MAX_FD=$( ulimit -H -n ) ||
+ warn "Could not query maximum file descriptor limit"
+ esac
+ case $MAX_FD in #(
+ '' | soft) :;; #(
+ *)
+ # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
+ # shellcheck disable=SC3045
+ ulimit -n "$MAX_FD" ||
+ warn "Could not set maximum file descriptor limit to $MAX_FD"
+ esac
+fi
+
+# Collect all arguments for the java command, stacking in reverse order:
+# * args from the command line
+# * the main class name
+# * -classpath
+# * -D...appname settings
+# * --module-path (only if needed)
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if "$cygwin" || "$msys" ; then
+ APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
+ CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
+
+ JAVACMD=$( cygpath --unix "$JAVACMD" )
+
+ # Now convert the arguments - kludge to limit ourselves to /bin/sh
+ for arg do
+ if
+ case $arg in #(
+ -*) false ;; # don't mess with options #(
+ /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath
+ [ -e "$t" ] ;; #(
+ *) false ;;
+ esac
+ then
+ arg=$( cygpath --path --ignore --mixed "$arg" )
+ fi
+ # Roll the args list around exactly as many times as the number of
+ # args, so each arg winds up back in the position where it started, but
+ # possibly modified.
+ #
+ # NB: a `for` loop captures its iteration list before it begins, so
+ # changing the positional parameters here affects neither the number of
+ # iterations, nor the values presented in `arg`.
+ shift # remove old arg
+ set -- "$@" "$arg" # push replacement arg
+ done
+fi
+
+# Collect all arguments for the java command;
+# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of
+# shell script including quotes and variable substitutions, so put them in
+# double quotes to make sure that they get re-expanded; and
+# * put everything else in single quotes, so that it's not re-expanded.
+
+set -- \
+ "-Dorg.gradle.appname=$APP_BASE_NAME" \
+ -classpath "$CLASSPATH" \
+ org.gradle.wrapper.GradleWrapperMain \
+ "$@"
+
+# Stop when "xargs" is not available.
+if ! command -v xargs >/dev/null 2>&1
+then
+ die "xargs is not available"
+fi
+
+# Use "xargs" to parse quoted args.
+#
+# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
+#
+# In Bash we could simply go:
+#
+# readarray ARGS < <( xargs -n1 <<<"$var" ) &&
+# set -- "${ARGS[@]}" "$@"
+#
+# but POSIX shell has neither arrays nor command substitution, so instead we
+# post-process each arg (as a line of input to sed) to backslash-escape any
+# character that might be a shell metacharacter, then use eval to reverse
+# that process (while maintaining the separation between arguments), and wrap
+# the whole thing up as a single "set" statement.
+#
+# This will of course break if any of these variables contains a newline or
+# an unmatched quote.
+#
+
+eval "set -- $(
+ printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
+ xargs -n1 |
+ sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
+ tr '\n' ' '
+ )" '"$@"'
+
+exec "$JAVACMD" "$@"
diff --git a/tools/gradlew/gradlew.bat b/tools/gradlew/gradlew.bat
new file mode 100644
index 0000000..93e3f59
--- /dev/null
+++ b/tools/gradlew/gradlew.bat
@@ -0,0 +1,92 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%"=="" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%"=="" set DIRNAME=.
+@rem This is normally unused
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if %ERRORLEVEL% equ 0 goto execute
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if %ERRORLEVEL% equ 0 goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+set EXIT_CODE=%ERRORLEVEL%
+if %EXIT_CODE% equ 0 set EXIT_CODE=1
+if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
+exit /b %EXIT_CODE%
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/tools/gradlew/settings.gradle b/tools/gradlew/settings.gradle
new file mode 100644
index 0000000..7e87c5e
--- /dev/null
+++ b/tools/gradlew/settings.gradle
@@ -0,0 +1,5 @@
+rootProject.name = 'tron-docker'
+include('docker')
+project(':docker').projectDir = new File(rootDir, '../docker')
+include('dbfork')
+project(':dbfork').projectDir = new File(rootDir, '../dbfork')