diff --git a/.github/workflows/build-deploy-docs.yml b/.github/workflows/build-deploy-docs.yml
new file mode 100644
index 000000000..8024b54dc
--- /dev/null
+++ b/.github/workflows/build-deploy-docs.yml
@@ -0,0 +1,204 @@
+name: 🥘 Build & Deploy Docs HB
+
+on:
+ pull_request:
+ branches:
+ - main
+ paths:
+ # Trigger on changes to docs, mkdocs config, or the workflow itself
+ - "docs/**"
+ - "mkdocs.yml"
+ - ".github/workflows/build-deploy-docs.yml"
+ push:
+ branches:
+ - main
+ paths:
+ # Trigger on changes to docs, mkdocs config, or the workflow itself
+ - "docs/**"
+ - "mkdocs.yml"
+ - ".github/workflows/build-deploy-docs.yml"
+
+ # Perform a release using a workflow dispatch
+ workflow_dispatch:
+
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ # Run the build as part of PRs to confirm the site properly builds
+ check_build:
+ if: ${{ startsWith(github.ref, 'refs/pull/') }}
+ runs-on: ubuntu-22.04
+ steps:
+ - name: ⬇️ Checkout repo
+ uses: actions/checkout@v3
+
+ # Setup Python environment
+ - name: 🐍 Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.x' # Use a recent Python 3 version
+
+ # Install Erlang OTP 27 using kerl
+ - name: Install Erlang OTP 27
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y build-essential autoconf libncurses5-dev libssl-dev
+ git clone https://github.com/kerl/kerl.git
+ ./kerl/kerl build 27.0 otp-27.0
+ ./kerl/kerl install otp-27.0 ~/otp-27.0
+ echo '. ~/otp-27.0/activate' >> ~/.bashrc
+ . ~/otp-27.0/activate
+ echo "Erlang version:"
+ erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().'
+ # Install system dependencies needed for HyperBEAM
+ - name: Install system dependencies
+ run: |
+ sudo apt-get update && sudo apt-get install -y --no-install-recommends \
+ build-essential \
+ cmake \
+ pkg-config \
+ ncurses-dev \
+ libssl-dev \
+ ca-certificates
+ # Debug step - display the region with syntax error
+ - name: Debug syntax error region
+ run: |
+ echo "Showing the region with syntax error in hb_message.erl:"
+ sed -n '1440,1460p' src/hb_message.erl || echo "File not found or cannot be read"
+ echo "Checking for syntax error fix files:"
+ find . -name "*.erl.fix" -o -name "hb_message.erl.*" | grep -v ".beam" || echo "No fix files found"
+ echo "Erlang version:"
+ . ~/otp-27.0/activate && erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().'
+ # Install rebar3
+ - name: Install rebar3
+ run: |
+ . ~/otp-27.0/activate
+ mkdir -p ~/.config/rebar3
+ curl -O https://s3.amazonaws.com/rebar3/rebar3 && chmod +x rebar3
+ sudo mv rebar3 /usr/local/bin/rebar3
+ . ~/otp-27.0/activate && rebar3 --version
+ # Install Rust toolchain (needed for WASM components)
+ - name: Install Rust and Cargo
+ run: |
+ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+ echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+ source "$HOME/.cargo/env"
+ # Setup Node.js
+ - name: ⎔ Setup Node
+ uses: actions/setup-node@v3
+ with:
+ node-version: 22 # Or your preferred version
+
+ # Install pip dependencies and cache them
+ - name: 📦 Install Python dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin
+ - name: 🛠 Build Docs
+ run: |
+ . ~/otp-27.0/activate
+ SKIP_COMPILE=1 SKIP_EDOC=1 ./docs/build-all.sh -v
+ # Build and deploy the artifacts to Arweave via ArDrive
+ deploy:
+ if: github.ref == 'refs/heads/main'
+ runs-on: ubuntu-22.04
+ # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+ # However, do NOT cancel in-progress runs as we want to allow these deployments to complete.
+ concurrency:
+ group: deploy
+ cancel-in-progress: false
+ steps:
+ - name: ⬇️ Checkout repo
+ uses: actions/checkout@v3
+
+ # Setup Python environment
+ - name: 🐍 Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.x'
+
+ # Install Erlang OTP 27 using kerl
+ - name: Install Erlang OTP 27
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y build-essential autoconf libncurses5-dev libssl-dev
+ git clone https://github.com/kerl/kerl.git
+ ./kerl/kerl build 27.0 otp-27.0
+ ./kerl/kerl install otp-27.0 ~/otp-27.0
+ echo '. ~/otp-27.0/activate' >> ~/.bashrc
+ . ~/otp-27.0/activate
+ echo "Erlang version:"
+ erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().'
+ # Install system dependencies needed for HyperBEAM
+ - name: Install system dependencies
+ run: |
+ sudo apt-get update && sudo apt-get install -y --no-install-recommends \
+ build-essential \
+ cmake \
+ pkg-config \
+ ncurses-dev \
+ libssl-dev \
+ ca-certificates
+ # Debug step - display the region with syntax error
+ - name: Debug syntax error region
+ run: |
+ echo "Showing the region with syntax error in hb_message.erl:"
+ sed -n '1440,1460p' src/hb_message.erl || echo "File not found or cannot be read"
+ echo "Checking for syntax error fix files:"
+ find . -name "*.erl.fix" -o -name "hb_message.erl.*" | grep -v ".beam" || echo "No fix files found"
+ echo "Erlang version:"
+ . ~/otp-27.0/activate && erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().'
+ # Install rebar3
+ - name: Install rebar3
+ run: |
+ . ~/otp-27.0/activate
+ mkdir -p ~/.config/rebar3
+ curl -O https://s3.amazonaws.com/rebar3/rebar3 && chmod +x rebar3
+ sudo mv rebar3 /usr/local/bin/rebar3
+ . ~/otp-27.0/activate && rebar3 --version
+ # Install Rust toolchain (needed for WASM components)
+ - name: Install Rust and Cargo
+ run: |
+ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+ echo "$HOME/.cargo/bin" >> $GITHUB_PATH
+ source "$HOME/.cargo/env"
+ # Install pip dependencies and cache them
+ - name: 📦 Install Python dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin
+ # Setup Node.js (needed for npx deploy command)
+ - name: ⎔ Setup Node
+ uses: actions/setup-node@v3
+ with:
+ node-version: 22 # Or your preferred version
+
+ - name: 👀 Env
+ run: |
+ echo "Event name: ${{ github.event_name }}"
+ echo "Git ref: ${{ github.ref }}"
+ echo "GH actor: ${{ github.actor }}"
+ echo "SHA: ${{ github.sha }}"
+ VER=`node --version`; echo "Node ver: $VER"
+ VER=`npm --version`; echo "npm ver: $VER"
+ . ~/otp-27.0/activate && erl -eval 'io:format("Erlang OTP version: ~s~n", [erlang:system_info(otp_release)]), halt().'
+ - name: 🛠 Build Docs
+ id: build_artifacts
+ run: |
+ . ~/otp-27.0/activate
+ SKIP_COMPILE=1 SKIP_EDOC=1 ./docs/build-all.sh -v
+ touch mkdocs-site/.nojekyll
+ echo "artifacts_output_dir=mkdocs-site" >> $GITHUB_OUTPUT
+ - name: 💾 Publish to Arweave
+ id: publish_artifacts
+ run: |
+ npx permaweb-deploy \
+ --arns-name=dps-testing-facility \
+ --ant-process=${{ secrets.ANT_PROCESS }} \
+ --deploy-folder=${ARTIFACTS_OUTPUT_DIR}
+ env:
+ DEPLOY_KEY: ${{ secrets.DEPLOY_KEY }}
+ ARTIFACTS_OUTPUT_DIR: ${{ steps.build_artifacts.outputs.artifacts_output_dir }}
+ ANT_PROCESS: ${{ secrets.ANT_PROCESS }}
diff --git a/.gitignore b/.gitignore
index c6262e289..2767f3291 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,7 @@ logs
*.iml
rebar3.crashdump
*~
+/venv
*.json
!.vscode/*
@@ -40,4 +41,7 @@ cache-*
cu/
mkdocs-site/
-deployment.sh
\ No newline at end of file
+mkdocs-site-id.txt
+mkdocs-site-manifest.csv
+!test/admissible-report-wallet.json
+deployment.sh
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 0e5822ac6..f8cbdec3e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,5 +1,5 @@
{
"editor.detectIndentation": false,
- "editor.insertSpaces": false,
+ "editor.insertSpaces": true,
"editor.tabSize": 4
}
diff --git a/README.md b/README.md
index ae121bf86..fc1b805bd 100644
--- a/README.md
+++ b/README.md
@@ -99,7 +99,7 @@ To start a shell with profiles:
rebar3 as rocksdb shell
# Multiple profiles
-rebar3 as rocksdb,genesis_wasm shell
+rebar3 as rocksdb, genesis_wasm shell
```
To create a release with profiles:
@@ -266,42 +266,36 @@ schedule of another execution.
Details on other devices found in the pre-loaded set can be located in their
respective documentation.
-## Contributing
+## Documentation
-HyperBEAM is developed as an open source implementation of the AO-Core protocol
-by [Forward Research](https://fwd.arweave.net). Pull Requests are always welcome!
+HyperBEAM uses [MkDocs](https://www.mkdocs.org/) with the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme to build its documentation site. All documentation source files are located in the `docs/` directory.
-To get started building on HyperBEAM, check out the [hacking on HyperBEAM](./docs/contribute/setup.md)
-guide.
+To build and view the documentation locally:
-## Documentation
+```bash
+# Create and activate a virtual environment (optional but recommended)
+python3 -m venv venv
+source venv/bin/activate # (macOS/Linux) On Windows use `venv\Scripts\activate`
-HyperBEAM uses [MkDocs](https://www.mkdocs.org/) with the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme to build its documentation site.
+# Install required packages
+pip3 install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin
-Building the documentation requires Python 3, pip, and the following packages:
-```bash
-pip3 install mkdocs mkdocs-material
-```
+# Build the docs
+./docs/build-all.sh
-- **Source Files:** All documentation source files (Markdown `.md`, images, CSS) are located in the `docs/` directory.
-- **Source Code Docs:** Erlang source code documentation is generated using `rebar3 edoc` (with the `edown_doclet` plugin) into the `docs/source-code-docs/` directory as Markdown files. These are then incorporated into the main MkDocs site.
-- **Build Script:** The entire process (compiling, generating edoc, processing source docs, building the site) is handled by the `./docs/build-all.sh` script.
+# Serve the docs
+cd mkdocs-site
+python3 -m http.server 8000
+# Then open http://127.0.0.1:8000/ in your browser
+```
-To build and view the documentation locally:
+For more details on the documentation structure, how to contribute, and other information, please see the [full documentation README](./docs/README.md).
-1. Ensure you are in the project root directory.
-2. Run the build script:
- ```bash
- ./docs/build-all.sh
- ```
+## Contributing
-This script performs the following steps:
-- Compiles the Erlang project (`rebar3 compile`).
-- Generates Markdown documentation from source code comments (`rebar3 edoc`) into `docs/source-code-docs/`.
-- Processes the generated source code Markdown files (updates index, cleans up TOCs).
-- Builds the MkDocs site into the `dist/mkdocs` directory (`mkdocs build`).
-- Starts a local development server (`mkdocs serve`) to view the site at `http://127.0.0.1:8000/`.
+HyperBEAM is developed as an open source implementation of the AO-Core protocol
+by [Forward Research](https://fwd.arweave.net). Pull Requests are always welcome!
-Press `Ctrl+C` in the terminal where the script is running to stop the local server.
+To get started building on HyperBEAM, check out the [hacking on HyperBEAM](./docs/misc/hacking-on-hyperbeam.md)
+guide.
-The final static site is generated in the `dist/mkdocs` directory, as configured in `mkdocs.yml` (`site_dir: dist/mkdocs`).
\ No newline at end of file
diff --git a/config.flat b/config.flat
index b1f25dc70..d9ed8b3a4 100644
--- a/config.flat
+++ b/config.flat
@@ -1 +1 @@
-port: 10001
+port: 10001
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 000000000..4168a5bed
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,115 @@
+
+## Documentation
+
+HyperBEAM uses [MkDocs](https://www.mkdocs.org/) with the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme to build its documentation site.
+
+Building the documentation requires Python 3 and pip. It's recommended to use a virtual environment:
+
+```bash
+# Create and activate a virtual environment (optional but recommended)
+python3 -m venv venv
+source venv/bin/activate # (macOS/Linux) On Windows use `venv\Scripts\activate`
+
+# Install required packages
+pip3 install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin
+
+# Deactivate the virtual environment when done
+# deactivate
+```
+
+- **Source Files:** All documentation source files (Markdown `.md`, images, CSS) are located in the `docs/` directory.
+- **Source Code Docs:** Erlang source code documentation is generated using `rebar3 edoc` (with the `edown_doclet` plugin) into the `docs/source-code-docs/` directory as Markdown files. These are then incorporated into the main MkDocs site.
+- **Build Script:** The entire process (compiling, generating edoc, processing source docs, building the site) is handled by the `./docs/build-all.sh` script.
+
+To build the documentation locally:
+
+1. Ensure you are in the project root directory.
+2. If using a virtual environment, make sure it's activated.
+3. Run the build script:
+ ```bash
+ ./docs/build-all.sh
+ ```
+
+This script performs the following steps:
+- Compiles the Erlang project (`rebar3 compile`).
+- Generates Markdown documentation from source code comments (`rebar3 edoc`) into `docs/source-code-docs/`.
+- Processes the generated source code Markdown files (updates index, cleans up TOCs).
+- Builds the MkDocs site into the `mkdocs-site` directory (`mkdocs build`).
+
+To view the built documentation locally:
+
+1. Navigate to the site directory:
+ ```bash
+ cd mkdocs-site
+ ```
+2. Start a simple Python HTTP server:
+ ```bash
+ python3 -m http.server 8000
+ ```
+3. Open your web browser and go to `http://127.0.0.1:8000/`.
+
+Press `Ctrl+C` in the terminal where the server is running to stop it.
+
+The final static site is generated in the `mkdocs-site` directory, as configured in `mkdocs.yml` (`site_dir: mkdocs-site`).
+
+### Contributing to the Documentation
+
+To contribute documentation to HyperBEAM, follow these steps:
+
+1. **Fork the Repository**
+ - Fork the [HyperBEAM repository](https://github.com/permaweb/HyperBEAM) to your GitHub account
+
+2. **Choose the Right Location**
+ - Review the existing documentation structure in `./docs/` to determine the appropriate location for your content
+ - Documentation is organized into several main sections:
+ - `overview/`: High-level concepts and architecture
+ - `installation-core/`: Setup and configuration guides
+ - `components/`: Detailed component documentation
+ - `usage/`: Tutorials and usage guides
+ - `resources/`: Reference materials and source code documentation
+ - `community/`: Contribution guidelines and community resources
+
+3. **Create Your Documentation**
+ - Create a new Markdown file (`.md`) in the appropriate directory
+ - Follow the existing documentation style and format
+ - Use proper Markdown syntax and include:
+ - Clear headings and subheadings
+ - Code blocks with appropriate language specification
+ - Links to related documentation
+ - Images (if needed) in the `docs/assets/` directory
+
+4. **Update the Navigation**
+ - Edit `mkdocs.yml` to add your documentation to the navigation
+ - Place your entry in the appropriate section under the `nav:` configuration
+ - Follow the existing indentation and format
+
+5. **Test Your Changes**
+ - Set up a local development environment:
+ ```bash
+ python3 -m venv venv
+ source venv/bin/activate
+ pip3 install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin
+ ```
+ - Run the build script to verify your changes:
+ ```bash
+ ./docs/build-all.sh
+ ```
+ - View the documentation locally by serving the built site (`cd mkdocs-site && python3 -m http.server 8000`), then opening `http://127.0.0.1:8000/`
+
+6. **Submit a Pull Request**
+ - Create a new branch for your documentation changes
+ - Commit your changes with a descriptive message
+ - Submit a PR with:
+ - A clear title describing the documentation addition
+ - A detailed description explaining:
+ - The purpose of the new documentation
+ - Why it should be added to the official docs
+ - Any related issues or discussions
+ - Screenshots of the rendered documentation (if applicable)
+
+7. **Review Process**
+ - The HyperBEAM team will review your PR
+ - Be prepared to make adjustments based on feedback
+ - Once approved, your documentation will be merged into the main repository
+
+For more detailed contribution guidelines, see the [Community Guidelines](./misc/community/guidelines.md) and [Development Setup](./misc/community/setup.md) documentation.
diff --git a/docs/assets/images/Power-web2-web3-fig.mp4 b/docs/assets/images/Power-web2-web3-fig.mp4
new file mode 100644
index 000000000..ff11d1511
Binary files /dev/null and b/docs/assets/images/Power-web2-web3-fig.mp4 differ
diff --git a/docs/assets/images/create-new-devices-fig.png b/docs/assets/images/create-new-devices-fig.png
new file mode 100644
index 000000000..c65714ea8
Binary files /dev/null and b/docs/assets/images/create-new-devices-fig.png differ
diff --git a/docs/assets/images/favicon.png b/docs/assets/images/favicon.png
index 70a56fd57..dedcaa2ef 100644
Binary files a/docs/assets/images/favicon.png and b/docs/assets/images/favicon.png differ
diff --git a/docs/assets/images/monetize-fig.mp4 b/docs/assets/images/monetize-fig.mp4
new file mode 100644
index 000000000..2b69faa52
Binary files /dev/null and b/docs/assets/images/monetize-fig.mp4 differ
diff --git a/docs/assets/images/monetize-your-hardware-fig.png b/docs/assets/images/monetize-your-hardware-fig.png
new file mode 100644
index 000000000..04fb303a2
Binary files /dev/null and b/docs/assets/images/monetize-your-hardware-fig.png differ
diff --git a/docs/assets/images/rock-solid-fig.png b/docs/assets/images/rock-solid-fig.png
new file mode 100644
index 000000000..5de564c07
Binary files /dev/null and b/docs/assets/images/rock-solid-fig.png differ
diff --git a/docs/assets/images/what-is-hyperbeam-fig.mp4 b/docs/assets/images/what-is-hyperbeam-fig.mp4
new file mode 100644
index 000000000..364d598f3
Binary files /dev/null and b/docs/assets/images/what-is-hyperbeam-fig.mp4 differ
diff --git a/docs/assets/style.css b/docs/assets/style.css
index fe5a0a271..faf217687 100644
--- a/docs/assets/style.css
+++ b/docs/assets/style.css
@@ -1,57 +1,880 @@
-.md-header__title{
- margin-left: 0px !important;
+/* General Text Styles */
+h1 {
+ font-size: clamp(1.6rem, 1.5vw, 1.7rem) !important;
+ color: rgba(60, 60, 67) !important;
+ font-weight: 600 !important;
}
+h2 {
+ font-size: clamp(1.2rem, 1.5vw, 1.3rem) !important;
+ color: rgba(60, 60, 67);
+ ;
+ /* Opaque dark gray */
+}
-.header-logo {
- display: flex;
- flex-direction: column;
- align-items: flex-start;
- }
-
- .logo-container {
+p {
+ font-size: clamp(0.6rem, 1.5vw, 0.7rem);
+ line-height: 1.75;
+}
+
+li {
+ font-size: clamp(0.6rem, 1.5vw, 0.75rem);
+ line-height: 1.75;
+}
+
+img {
+ user-select: none;
+}
+
+input {
+ font-size: clamp(0.47rem, 1.5vw, 0.5rem) !important;
+}
+
+body {
+ --docs-max-width: 60rem;
+ --homepage-max-width: 90rem;
+ --sections-max-width: 80rem;
+ --parallax-perspective: 2rem;
+ --md-accent-fg-color: #555555 !important;
+ --md-default-fg-color--light: #bebebe !important;
+}
+
+.md-nav__item--section>.md-nav__link {
+ color: black !important;
+ margin-bottom: 8px;
+}
+
+/*
+h1, h2, h3, h4, h5, h6 {
+ border-bottom: 1px solid #ccc;
+ padding-bottom: 10px;
+}
+*/
+
+.md-content h2,
+h3,
+h4,
+h5,
+h6 {
+ font-weight: 400 !important;
+}
+
+.md-content h2 {
+ border-top: 1px solid #ccc;
+ padding-top: 1.5rem;
+}
+
+.md-content h3,
+h4,
+h5,
+h6 {
+ border-top: 1px solid #e5e5e58a;
+ padding-top: 1rem;
+}
+
+/* Body and Header Customization */
+.md-header {
+ box-shadow: none !important;
+ z-index: 100;
+ transition: transform 0.3s ease-in-out;
+ position: fixed;
+ width: 100%;
+ transform: translateY(0);
+}
+
+.header-hidden {
+ transform: translateY(-100%);
+ transition: transform 0.15s ease-in-out;
+}
+
+.md-main {
+ transition: padding-top 0.3s ease;
+}
+
+.header-hidden + .md-main {
+ padding-top: 0;
+}
+
+[dir=ltr] .md-sidebar--primary {
+ left: -15rem !important;
+}
+
+[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary {
+ transform: translateX(15rem) !important;
+}
+
+.md-grid {
+ /* transition: all 1s; this occurs on initial load, causing strange UI artifact */
+ max-width: var(--docs-max-width);
+}
+
+.md-main__inner {
+ gap: 2.25rem;
+}
+
+.custom-homepage-header {
+ position: fixed;
+ filter: invert(1);
+ top: 0;
+ z-index: 20 !important;
+ background: linear-gradient(0deg, rgba(255, 255, 255, 0) 0%, #ffffff 100%);
+ border-bottom: 0px solid;
+}
+
+
+
+.custom-homepage-header .md-grid {
+ max-width: var(--homepage-max-width);
+}
+
+.custom-homepage-header .md-tabs {
+ background: #ffffff00 !important;
+ border-bottom: 0;
+}
+
+/* Logo Customization */
+.md-logo img {
+ height: 0.9rem !important;
+}
+
+/* Header Topic Visibility */
+.md-header__topic {
+ display: none;
+}
+
+/* Navigation Styles */
+.md-nav__title,
+.md-nav__item {
+ font-size: clamp(0.6rem, 1.5vw, 0.65rem) !important;
+ box-shadow: none !important;
+}
+
+.md-nav__title {
+ color: #000000 !important;
+}
+
+.md-nav__link {
+ padding: 4px 16px;
+ border-radius: 6px;
+ margin: 0;
+ margin-top: 4px;
+}
+
+.md-tabs__item--active .md-tabs__link {
+ position: relative;
+ font-weight: 700;
+}
+
+.md-tabs__item--active .md-tabs__link::after {
+ content: "";
+ position: absolute;
+ bottom: -9px;
+ left: 5%;
+ width: 90%;
+ height: 2px;
+ background-color: black;
+ border-radius: 1px;
+ transition: all 0.3s ease;
+}
+
+
+.md-nav__link:hover {
+ background: rgb(243, 243, 243);
+}
+
+.md-nav__link--active {
+ color: #000000 !important;
+ /* Active link color */
+ font-weight: 700;
+ background: rgb(237, 237, 237);
+}
+
+.md-sidebar {
+ width: 15rem;
+}
+
+[dir=ltr] .md-sidebar__inner {
+ padding-right: calc(100% - 15rem);
+}
+
+.md-nav--secondary {
+ border-left: 0.05rem solid lightgray !important;
+}
+
+/* Tab Navigation */
+.md-tabs__link {
+ font-size: clamp(0.65rem, 1.5vw, 0.65rem) !important;
+}
+
+.md-tabs__list {
+ justify-content: space-between;
+}
+
+.md-tabs__item {
+
+ height: 1.5rem;
+
+}
+
+.md-tabs__item a {
+ margin-top: 0px;
+}
+
+/* Search Form Customization */
+.md-search__form {
+ height: 32px !important;
+ /* Adjust search form height */
+ padding: 0 8px !important;
+}
+
+.md-search {
+ margin-left: 1.8rem;
+}
+
+.md-search__inner {
+ width: 8rem;
+}
+
+.md-search__input {
+ padding-left: 1rem !important;
+ padding-right: 1rem !important;
+}
+
+/* Search Results */
+.md-search-result__item h1,
+.md-search-result__item h2 {
+ font-size: clamp(0.7rem, 1.5vw, 0.8rem) !important;
+}
+
+.md-search-result__item h2 {
+ font-weight: 500 !important;
+}
+
+.md-search-result__item summary div {
+ font-size: clamp(0.5rem, 1.5vw, 0.55rem) !important;
+}
+
+.md-search-result__icon {
+ width: 0.8rem;
+ height: 0.8rem;
+}
+
+.md-search__icon svg {
+ width: 16px;
+ height: 16px;
+}
+
+.md-search__icon {
+ top: 0.4rem !important;
+ left: 0.3rem !important;
+}
+
+/* Source Citation Styles */
+
+.md-header__source {
+ width: 10rem;
+ padding: 0.6rem;
+}
+
+.md-source {
+ font-size: clamp(0.45rem, 1.5vw, 0.5rem);
display: flex;
+ justify-content: end;
align-items: center;
- margin-bottom: 10px;
- }
-
- .logo-stripes {
- display: flex;
- flex-direction: column;
- height: 100px;
- width: 30px;
- margin-right: 10px;
- }
-
- .stripe {
- flex: 1;
- width: 100%;
- }
-
- .green { background-color: #33F22F; }
- .yellow { background-color: #FEE55F; }
- .blue { background-color: #86DAFE; }
- .purple { background-color: #9611FF; }
- .red { background-color: #F60000; }
-
- .logo-text h1 {
- font-size: 2.5rem;
- font-weight: bold;
- margin: 0;
- color: var(--md-default-fg-color);
- }
-
- .tagline {
+}
+
+.md-source__fact {
+ font-size: clamp(0.35rem, 1.5vw, 0.4rem);
+}
+
+/* Tagline Styles */
+.tagline {
font-size: 0.9rem;
letter-spacing: 1px;
margin: 0;
color: var(--md-default-fg-color--light);
- }
-
- [data-md-color-scheme="slate"] .logo-text h1 {
+}
+
+/* Color Scheme Customization for Slate */
+[data-md-color-scheme="slate"] .logo-text h1 {
color: #FFF;
- }
-
- [data-md-color-scheme="slate"] .tagline {
+}
+
+[data-md-color-scheme="slate"] .tagline {
color: #AAA;
- }
\ No newline at end of file
+}
+
+/* Hero Customization */
+.custom-homepage-main {
+ perspective: var(--parallax-perspective);
+ overflow: hidden auto;
+ scroll-behavior: smooth;
+ height: 100vh;
+ width: 100vw;
+}
+
+.hero-container {
+ position: relative;
+ height: 160vh;
+ transform-style: preserve-3d;
+}
+
+
+.hero-content-wrapper {
+
+ height: inherit;
+}
+
+.hero-floating-wrapper {
+ display: flex;
+ flex-direction: column;
+ justify-content: end;
+ position: sticky;
+ top: 0;
+ z-index: 11;
+ height: 100vh;
+ margin-bottom: -100vh;
+
+}
+
+.hero-detail {
+ display: flex;
+ flex-direction: column;
+}
+
+.hero-detail h1 {
+ font-size: clamp(1.3rem, 1.5vw, 1.45rem) !important;
+ margin: 0 !important;
+ font-weight: 500 !important;
+ color: white !important;
+}
+
+.hero-detail span {
+ color: white !important;
+ margin: 0;
+}
+
+.hero-inner-content-middle {
+ display: flex;
+
+ margin-left: auto;
+ margin-right: auto;
+ justify-content: space-between;
+ align-items: center;
+ /* padding: 2rem 0.8rem; */
+ transition: all 1s;
+ max-width: var(--homepage-max-width);
+ width: 100%;
+ padding: 2rem 0.8rem;
+
+}
+
+.hero-inner-content-bottom {
+ display: flex;
+ max-width: var(--homepage-max-width);
+ margin-left: auto;
+ margin-right: auto;
+ transition: all 1s;
+ width: 100%;
+ padding: 1rem 0.8rem;
+}
+
+.hero-text-container {
+ display: flex;
+ flex-direction: column;
+ flex: 1 0 0;
+ transition: all 1s;
+ align-items: start;
+ justify-content: space-between;
+}
+
+.hero-button-cards-container {
+ display: flex;
+ flex: 1.5 0 0;
+ gap: 0.5rem;
+ height: 40%;
+ min-height: 7rem;
+ backdrop-filter: blur(50px);
+
+}
+
+.hero-button-card {
+ position: relative;
+ display: flex;
+ flex-direction: column;
+ flex: 1 0 0;
+ border-radius: 4px;
+ border: 1px solid #2b2b2b;
+ background: rgba(0, 0, 0, 0.75);
+ justify-content: space-between;
+ align-items: start;
+ padding: 8px;
+ cursor: pointer;
+ transition: all 300ms;
+ height: 100%;
+}
+
+.hero-button-card:hover {
+ background: rgba(0, 0, 0, 1);
+ border: 1px solid rgb(149, 149, 149);
+}
+
+.hero-main-heading h1 {
+ font-size: clamp(1.8rem, 1.5vw, 2.0rem) !important;
+ margin: 0 !important;
+ font-weight: 500 !important;
+ color: white !important;
+}
+
+.hero-button-card h2 {
+ font-size: clamp(0.9rem, 1.5vw, 1rem) !important;
+ margin: 0 !important;
+ font-weight: 500 !important;
+ color: white !important;
+}
+
+.hero-button-card p {
+ font-size: clamp(0.65rem, 1.5vw, 0.7rem) !important;
+ color: #c0c0c0 !important;
+ font-weight: 500;
+}
+
+.hero-main-heading h2,
+.hero-button-card p {
+ font-size: clamp(0.55rem, 1.5vw, 0.6rem) !important;
+ line-height: normal;
+ margin: 0;
+ font-weight: 500;
+ text-align: left;
+}
+
+.hero-main-heading h2 {
+ color: white !important;
+}
+
+/* Hero Rocks */
+
+.rocks {
+ position: absolute;
+ top: 0;
+ left: 0;
+ height: 100%;
+ width: 100%;
+ object-fit: cover;
+ background-position: center;
+ background-size: cover;
+ transform:
+ translateZ(calc(var(--parallax-perspective) * (var(--depth, 0) * -1))) scale(calc(1 + var(--depth, 1)));
+ transform-origin: 50vw 50vh;
+ will-change: transform;
+ pointer-events: none;
+ z-index: calc(10 - var(--depth, 0));
+}
+
+.rocks img {
+ width: 100%;
+ height: 100%;
+ object-fit: cover;
+}
+
+.flicker-img {
+ animation: flickerLight 10s infinite;
+ will-change: filter;
+}
+
+.foreground-rocks {
+ z-index: 3;
+ padding-bottom: 200px;
+
+}
+
+.foreground-rocks-2 {
+ z-index: 2;
+
+}
+
+.mid-rocks {
+ z-index: 2;
+
+}
+
+.background-rocks {
+ position: absolute;
+ z-index: 1;
+ height: 100%;
+ object-fit: cover;
+}
+
+.background-chroma {
+ position: absolute;
+ z-index: 0;
+ height: 100%;
+ width: 100%;
+ object-fit: cover;
+}
+
+.background-chroma img {
+ object-fit: cover;
+}
+
+.background-rocks img {
+ object-fit: cover;
+ transform: translateZ(0);
+ image-rendering: smooth;
+ /* filter: brightness(0.5); */
+}
+
+.scroll-button span {
+ margin-left: 5px;
+}
+
+
+
+.dark-bottom {
+ position: absolute;
+ bottom: 0;
+ background: linear-gradient(0deg, #000000 0%, rgba(255, 179, 0, 0) 100%);
+ height: 100vh;
+ width: 100%;
+ z-index: 10;
+}
+
+/* Sections Customization */
+.section-container {
+ position: relative;
+ display: flex;
+ width: 100%;
+}
+
+.section-container-background-primary {
+ background: white;
+}
+
+.section-container-background-secondary {
+ background: #F9F9F9;
+}
+
+.section-inner-content h1 {
+ font-size: clamp(1.1rem, 1.5vw, 1.2rem) !important;
+ margin: 0 !important;
+ font-weight: 500 !important;
+}
+
+.section-inner-content h2 {
+ font-size: clamp(0.65rem, 1.5vw, 0.7rem) !important;
+ color: #6E6E6E;
+ font-weight: 500;
+}
+
+.section-inner-content span {
+ margin: 0;
+}
+
+.section-header {
+ display: flex;
+ width: 100%;
+ justify-content: space-between;
+}
+
+.section-header h1 {
+ margin-block-start: 0.7em !important;
+}
+
+.divider {
+ height: 1px;
+ width: 100%;
+ background: #D4D4D4;
+}
+
+.section-inner-content {
+ display: flex;
+ flex-direction: column;
+ width: 100%;
+ max-width: var(--sections-max-width);
+ z-index: 2;
+ margin-left: auto;
+ margin-right: auto;
+ padding: 2rem 0.8rem;
+ transition: all 1s;
+ min-height: 70vh;
+}
+
+.section-monetize {
+ justify-content: space-between;
+ position: relative;
+ overflow: hidden;
+}
+
+.section-monetize>*:not(:last-child) {
+ z-index: 3;
+}
+
+.double-column-content {
+ display: flex;
+ width: 100%;
+ padding: 48px 0px;
+ height: 100%;
+}
+
+.double-single-grid-content {
+ display: grid;
+ grid-template-columns: repeat(2, 1fr);
+ grid-template-rows: auto auto;
+ gap: 2rem;
+ width: 100%;
+ padding: 48px 0px;
+}
+
+.column-container {
+ width: 100%;
+ height: 100%;
+ min-height: 70vh;
+}
+
+.grid-container {
+ position: relative;
+ display: flex;
+ min-height: 450px;
+ width: 100%;
+ background: white;
+ overflow: hidden;
+ border-radius: 4px;
+}
+
+.full-span {
+ grid-column: span 2;
+ min-height: 600px;
+ overflow: hidden;
+}
+
+.column-text {
+ display: flex;
+ flex-direction: column;
+ height: 100%;
+ width: 100%;
+ justify-content: space-between;
+ padding: 16px;
+}
+
+.feature-cards {
+ display: grid;
+ width: 100%;
+ grid-template-columns: repeat(3, 1fr);
+ gap: 0.5rem;
+ margin-top: 4rem;
+}
+
+.card {
+ display: flex;
+ flex-direction: column;
+ justify-content: space-between;
+ background: #F9F9F9;
+ border: 1px solid #E6E6E6;
+ border-radius: 4px;
+ min-height: 130px;
+ padding: 8px;
+ overflow: hidden;
+}
+
+.card p {
+ font-size: clamp(0.65rem, 1.5vw, 0.7rem) !important;
+ color: #6E6E6E !important;
+ font-weight: 500;
+ line-height: normal;
+}
+
+.transparent-card {
+ min-height: 125px;
+ display: flex;
+ width: 100%;
+ background: rgba(255, 255, 255, 0.559);
+}
+
+.transparent-card p {
+ line-height: normal;
+}
+
+.card-row {
+ display: flex;
+ width: 100%;
+ overflow: hidden;
+}
+
+.grid-card-header {
+ display: flex;
+ flex-direction: column;
+ gap: 4px;
+}
+
+.card span {
+ /* background: white; */
+ width: fit-content;
+}
+
+.card p {
+ margin: 0;
+}
+
+.cta-wrapper {
+ display: flex;
+ gap: 8px;
+ width: 100%;
+
+ align-items: center;
+}
+
+.cta-right {
+ justify-content: end;
+}
+
+.cta-left {
+ justify-content: start;
+}
+
+.main-button {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background: #E4EABB;
+ padding: 12.5px 60px;
+ border-radius: 4px;
+ font-size: clamp(0.6rem, 1.5vw, 0.7rem);
+ cursor: pointer;
+}
+
+.fig-container {
+ position: relative;
+ height: 100%;
+ display: flex;
+ width: 100%;
+ justify-content: center;
+ overflow: hidden;
+}
+
+
+.fig {
+ position: absolute;
+ width: 100%;
+ max-width: 400px;
+ top: 50%;
+ left: 50%;
+ transform: translate(-50%, -50%);
+}
+
+.what-is-hyperbeam-fig {
+ position: absolute;
+ width: 100%;
+ top: 50%;
+ left: 50%;
+ max-width: 600px;
+ transform: translate(-50%, -50%);
+}
+
+.power-web2-web3-fig {
+ position: absolute;
+ width: 100%;
+ top: 0%;
+ right: 0%;
+ max-width: 700px;
+ transform: translate(-10%, -10%);
+
+}
+
+
+
+.monetize-fig {
+ position: absolute;
+ top: 0%;
+ left: 0%;
+
+ transform: translate(0%, 0%);
+ z-index: 1;
+ opacity: 20%;
+
+}
+
+/* Footer Customization */
+.md-footer-meta {
+ display: none !important;
+}
+
+.md-footer__link {
+ margin: 0;
+}
+
+.md-footer {
+ border-top: .05rem solid #00000012;
+ background-color: white;
+ color: black;
+}
+
+.md-footer__direction {
+ font-size: clamp(0.35rem, 1.5vw, 0.45rem);
+}
+
+.md-footer__title {
+ font-size: clamp(0.75rem, 1.5vw, 0.85rem);
+}
+
+/* Last Updated Date */
+.md-source-file {
+ border-top: 1px solid #ddd;
+ /* Adds a divider above the text */
+ padding-top: 10px;
+ /* Adds some space above the text */
+ font-size: 0.9em;
+ /* Makes the font size slightly smaller */
+ font-style: italic;
+ /* Makes the text italic */
+ margin-top: 2rem !important;
+ margin-bottom: 2rem !important;
+}
+
+.md-source-file .md-source-file__fact {
+ display: flex;
+ align-items: center;
+ gap: 0;
+}
+
+.md-source-file .md-icon {
+ width: 1em;
+ /* Adjusts the icon size proportionally */
+ height: 1em;
+ /* Adjusts the icon size proportionally */
+ margin-right: 0.5em;
+ /* Adds space between the icon and the text */
+}
+
+.md-source-file .git-revision-date-localized-plugin::before {
+ content: "Last updated: ";
+ /* Adds the prefix before the date */
+}
+
+
+@keyframes flickerLight {
+
+ 0%,
+ 100% {
+ filter: brightness(1.2);
+ }
+
+ 10% {
+ filter: brightness(1);
+ }
+
+ 30% {
+ filter: brightness(1.1);
+ }
+
+ 50% {
+ filter: brightness(0.9);
+ }
+
+ 70% {
+ filter: brightness(1.1);
+ }
+
+ 90% {
+ filter: brightness(1.3);
+ }
+}
\ No newline at end of file
diff --git a/docs/build-all.sh b/docs/build-all.sh
index 0057af668..80f075d5c 100755
--- a/docs/build-all.sh
+++ b/docs/build-all.sh
@@ -1,134 +1,365 @@
#!/bin/bash
# Script to build HyperBEAM documentation in one seamless command
-# This includes compiling with rebar3, generating edoc, processing source code docs, and building/serving with mkdocs
+#
+# Usage: ./docs/build-all.sh [-v | --verbose]
+# -v, --verbose: Show detailed output from rebar3 and mkdocs commands.
-# Ensure we're in the root directory of the project
-ROOT_DIR="$(dirname "$(realpath "$0")")/.."
-cd "$ROOT_DIR" || { echo "Failed to change to root directory"; exit 1; }
+# --- Color Definitions ---
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+YELLOW='\033[0;33m'
+BLUE='\033[0;34m'
+BOLD='\033[1m'
+NC='\033[0m' # No Color
+
+# HyperBEAM Logo Colors
+NEON_GREEN='\033[38;5;46m'
+CYAN='\033[38;5;51m'
+BRIGHT_YELLOW='\033[38;5;226m'
+MAGENTA='\033[38;5;201m'
+BRIGHT_RED='\033[38;5;196m'
+BLACK='\033[38;5;0m'
+GRAY='\033[38;5;245m'
+
+# --- Helper Functions ---
+log_success() {
+ echo -e "${GREEN}✓ $1${NC}"
+}
+
+log_info() {
+ echo -e "${BLUE}→ $1${NC}"
+}
+
+log_step() {
+ echo -e "\n${YELLOW}${BOLD}$1${NC}"
+}
+
+log_error() {
+ echo -e "${RED}✗ $1${NC}"
+}
+
+# --- Variable Defaults ---
+VERBOSE=false
+
+# --- Parse Command Line Arguments ---
+while [[ $# -gt 0 ]]; do
+ key="$1"
-echo "Building HyperBEAM documentation from $ROOT_DIR"
+ case $key in
+ -v|--verbose)
+ VERBOSE=true
+ log_info "Verbose mode enabled"
+ shift # past argument
+ ;;
+ *)
+ # unknown option
+ log_error "Unknown option: $1"
+ # Optionally, show usage here and exit
+ exit 1
+ ;;
+ esac
+done
-# Step 1: Compile the project with rebar3
-echo "Compiling project with rebar3..."
-rebar3 compile || { echo "rebar3 compile failed"; exit 1; }
+# --- Display HyperBEAM ASCII Logo ---
+display_logo() {
+ echo -e "
+${NEON_GREEN} ++ ${BLACK}${BOLD} ${NC}
+${NEON_GREEN} +++ ${BLACK}${BOLD} _ ${NC}
+${NEON_GREEN} ++++* ${BLACK}${BOLD}| |__ _ _ _ __ ___ _ __ ${NC}
+${NEON_GREEN} :+++*${BRIGHT_YELLOW}## ${BLACK}${BOLD} | '_ \\| | | | '_ \\ / _ \\ '__| ${NC}
+${NEON_GREEN} ++**${BRIGHT_YELLOW}#### ${BLACK}${BOLD} | | | | |_| | |_) | __/ | ${NC}
+${NEON_GREEN} +++${BRIGHT_YELLOW}####${NEON_GREEN}*** ${BLACK}${BOLD} |_| |_|\\__, | .__/ \\___|_| ${NC}
+${NEON_GREEN} +*${BRIGHT_YELLOW}##${NEON_GREEN}****${MAGENTA}+-- ${BLACK}${BOLD} |___/|_| ${NC}
+${MAGENTA} -**${BRIGHT_YELLOW}##${NEON_GREEN}**${MAGENTA}+------ ${BLACK}${BOLD} BEAM.${NC}
+${MAGENTA} -##${NEON_GREEN}*+${BRIGHT_RED}---:::::::${NC}
+${GRAY} =${GRAY}%%${NEON_GREEN}*+${BRIGHT_RED}=-:::::::::${GRAY} DECENTRALIZED OPERATING SYSTEM${NC}
+"
+}
-echo "Compilation completed successfully"
+# --- Script Start ---
+display_logo
+log_step "DOCUMENTATION BUILD"
-# Step 2: Generate edoc documentation
-echo "Generating edoc documentation..."
-rebar3 edoc || { echo "rebar3 edoc failed"; exit 1; }
+# Ensure we're in the root directory of the project
+ROOT_DIR="$(dirname "$(realpath "$0")")/.."
+cd "$ROOT_DIR" || { log_error "Failed to change to root directory"; exit 1; }
-echo "Edoc generation completed successfully"
+# --- Step 1: Compile the project with rebar3 ---
+log_step "Compiling project"
+if [ "$VERBOSE" = true ]; then
+ rebar3 compile || { log_error "rebar3 compile failed"; exit 1; }
+else
+ rebar3 compile > /dev/null 2>&1 || { log_error "rebar3 compile failed"; exit 1; }
+fi
+log_success "Compilation completed"
-# Step 3: Process source code documentation
-echo "Processing source code documentation..."
-# Updated path for source code docs
-DOCS_DIR="$ROOT_DIR/docs/source-code-docs"
+# --- Step 2: Generate edoc documentation ---
+log_step "Generating edoc documentation"
+if [ "$VERBOSE" = true ]; then
+ rebar3 edoc || { log_error "rebar3 edoc failed"; exit 1; }
+else
+ rebar3 edoc > /dev/null 2>&1 || { log_error "rebar3 edoc failed"; exit 1; }
+fi
+log_success "Edoc generation completed"
+
+# --- Step 3: Process source code documentation ---
+log_step "Processing source code documentation"
+DOCS_DIR="$ROOT_DIR/docs/resources/source-code"
INDEX_FILE="$DOCS_DIR/index.md"
-# Check if the directory exists
+# Check if the directory and index file exist
if [ ! -d "$DOCS_DIR" ]; then
- echo "Error: Source code docs directory not found at $DOCS_DIR"
+ log_error "Source code docs directory not found at $DOCS_DIR"
exit 1
fi
-# Check if the index file exists
+
if [ ! -f "$INDEX_FILE" ]; then
- echo "Error: Source code index file not found at $INDEX_FILE"
+ log_error "Source code index file not found at $INDEX_FILE"
exit 1
fi
-# Step 3.1: Recreate module list in index.md
-
-# Overwrite the file with the header content
-echo "# Source Code Documentation" > "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "Welcome to the source code documentation for HyperBEAM. This section provides detailed insights into the codebase, helping developers understand the structure, functionality, and implementation details of HyperBEAM and its components." >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "## Overview" >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "HyperBEAM is built with a modular architecture to ensure scalability, maintainability, and extensibility. The source code is organized into distinct components, each serving a specific purpose within the ecosystem." >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "## Sections" >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "- **HyperBEAM Core**: The main framework that orchestrates data processing, storage, and routing." >> "$INDEX_FILE"
-echo "- **Compute Unit**: Handles computational tasks and integrates with the HyperBEAM core for distributed processing." >> "$INDEX_FILE"
-echo "- **Trusted Execution Environment (TEE)**: Ensures secure execution of sensitive operations." >> "$INDEX_FILE"
-echo "- **Client Libraries**: Tools and SDKs for interacting with HyperBEAM, including the JavaScript client." >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "## Getting Started" >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "To explore the source code, you can clone the repository from [GitHub](https://github.com/permaweb/HyperBEAM). For detailed setup instructions, refer to the [Development Setup](../contribute/setup.md) guide." >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "## Navigation" >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "Use the navigation menu to dive into specific parts of the codebase. Each module includes detailed documentation, code comments, and examples to assist in understanding and contributing to the project." >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "## Contributing" >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "We welcome contributions to HyperBEAM. If you're interested in contributing, please review the [Contribution Guidelines](../contribute/guidelines.md) for information on coding standards, pull request processes, and more. " >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-
-
-# Append the table header
-echo "<table>" >> "$INDEX_FILE"
-
-# Get list of markdown files (excluding index.md and README.md), limit to first 10 for index.md
-# Updated find command to use the correct DOCS_DIR
-MODULE_FILES=$(find "$DOCS_DIR" -maxdepth 1 -type f -name "*.md" -not -name "index.md" -not -name "README.md" | sort | head -n 10)
-
-# Reset count
-count=0
-
-# Add each module to the table for index.md
-for file in $MODULE_FILES; do
- filename=$(basename "$file")
- module_name="${filename%.md}"
- # Use relative path for link
- echo "<tr><td><a href=\"./$filename\">$module_name</a></td></tr>" >> "$INDEX_FILE"
- count=$((count + 1))
- if [ $count -eq 10 ]; then
- break
- fi
-done
-
-
-# Close the table and add note
-echo "</table>" >> "$INDEX_FILE"
-echo "" >> "$INDEX_FILE"
-echo "*Note: This is a partial list. Navigate through the menu or search for specific modules for detailed documentation.*" >> "$INDEX_FILE"
+# --- Step 3.1: Remove ToC entries for Function Index and Function Details ---
+log_info "Cleaning module files"
-echo "Updated module list in $INDEX_FILE"
-
-# Step 3.2: Remove only the ToC entries for Function Index and Function Details
-
-# Process each markdown file (excluding index.md and README.md)
-# Updated find command
-for file in $(find "$DOCS_DIR" -maxdepth 1 -type f -name "*.md" -not -name "index.md" -not -name "README.md"); do
- # Temporary file for processing
+find "$DOCS_DIR" -maxdepth 1 -type f -name "*.md" -not -name "index.md" -not -name "README.md" | while read -r file; do
TEMP_MODULE_FILE=$(mktemp)
- # Remove only the ToC entries for Function Index and Function Details
awk '
- # Skip the ToC lines for Function Index and Function Details
+ /^\* \[Description\]\(#description\)$/ { next; }
/^\* \[Function Index\]\(#index\)$/ { next; }
/^\* \[Function Details\]\(#functions\)$/ { next; }
-
- # Print all other lines
+ /^\* \[Data Types\]\(#types\)$/ { next; }
{ print; }
' "$file" > "$TEMP_MODULE_FILE"
- # Replace the original file with the cleaned-up content
mv "$TEMP_MODULE_FILE" "$file"
+done
+
+# --- Step 3.2: Add GitHub links to source code files ---
+log_info "Adding GitHub repository links to source code files"
+
+# Base GitHub repository URL
+GITHUB_BASE_URL="https://github.com/permaweb/HyperBEAM/blob/main/src"
+
+# Process only files in the resources/source-code directory
+find "$DOCS_DIR" -maxdepth 1 -type f -name "*.md" -not -name "index.md" -not -name "README.md" | while read -r file; do
+ TEMP_MODULE_FILE_CLEANED=$(mktemp)
+ TEMP_MODULE_FILE_FINAL=$(mktemp)
+
+ # Get the module name from the filename
+ module_name=$(basename "$file" .md)
+
+ # Define the exact header pattern to remove
+ # Note: Assumes module names are simple enough not to need complex regex escaping.
+ header_pattern="^# Module ${module_name} #$"
- echo "Removed ToC entries in $file"
+ # Remove the old header line using sed
+ # Use -i option for in-place editing on a temporary copy first to avoid issues with read/write on same file descriptor
+ cp "$file" "$TEMP_MODULE_FILE_CLEANED"
+ sed -i'' -e "/${header_pattern}/d" "$TEMP_MODULE_FILE_CLEANED"
+
+ # Add the new GitHub link header at the top of the final temp file
+ # Using the user's updated format with "Module" text
+ echo "# [Module $module_name.erl]($GITHUB_BASE_URL/$module_name.erl)" > "$TEMP_MODULE_FILE_FINAL"
+ echo "" >> "$TEMP_MODULE_FILE_FINAL"
+
+ # Append the cleaned content (without the old header)
+ cat "$TEMP_MODULE_FILE_CLEANED" >> "$TEMP_MODULE_FILE_FINAL"
+
+ # Replace the original file
+ mv "$TEMP_MODULE_FILE_FINAL" "$file"
+
+ # Clean up the intermediate temp file
+ rm "$TEMP_MODULE_FILE_CLEANED"
done
-echo "Source code documentation processing completed"
+log_success "GitHub links added and old headers removed"
+
+# --- Step 3.3: Update mkdocs.yml navigation with current module list ---
+# log_info "Updating mkdocs.yml navigation"
+
+# # Create temporary file for the new mkdocs.yml
+# MKDOCS_TEMP=$(mktemp)
+# MKDOCS_FILE="$ROOT_DIR/mkdocs.yml"
+
+# # Process mkdocs.yml file to remove old modules
+# awk '
+# BEGIN { in_modules = 0; skip_modules = 0; }
+# /^ *- Modules:/ {
+# print $0;
+# in_modules = 1;
+# skip_modules = 1;
+# next;
+# }
+# {
+# if (skip_modules == 0) {
+# print $0;
+# }
+# if (in_modules == 1 && $0 ~ /^ *-/) {
+# if ($0 !~ /^ *- Modules:/) {
+# in_modules = 0;
+# skip_modules = 0;
+# print $0;
+# }
+# }
+# }
+# ' "$MKDOCS_FILE" > "$MKDOCS_TEMP"
+
+# # Find the position to insert module entries
+# INSERT_LINE=$(grep -n "^ *- Modules:" "$MKDOCS_TEMP" | cut -d: -f1)
+
+# if [ -z "$INSERT_LINE" ]; then
+# log_error "Could not find '- Modules:' section in mkdocs.yml"
+# # Clean up temp file before exiting
+# rm -f "$MKDOCS_TEMP"
+# exit 1
+# fi
+
+# # Prepare head and tail parts
+# head -n "$INSERT_LINE" "$MKDOCS_TEMP" > "${MKDOCS_TEMP}.head"
+# tail -n +$((INSERT_LINE + 1)) "$MKDOCS_TEMP" > "${MKDOCS_TEMP}.tail"
+
+# # Use an associative array to track added modules
+# declare -A added_modules
+# MODULE_LINES="" # Accumulate module lines here
+
+# # Use process substitution to read modules without a subshell per iteration
+# while IFS= read -r module_file; do
+# # Check if module_file is empty or not a file (safety check)
+# if [[ -z "$module_file" || ! -f "$module_file" ]]; then
+# continue
+# fi
+
+# module_name=$(basename "$module_file" .md)
+
+# # Only add the module if its basename hasn't been added yet
+# if [[ -z "${added_modules[$module_name]}" ]]; then
+# # Append the line to a variable instead of echoing directly
+# MODULE_LINES+=" - $module_name: 'resources/source-code/$module_name.md'\n"
+# added_modules[$module_name]=1
+# fi
+# # Feed the loop using process substitution <(...)
+# done < <(find "$DOCS_DIR" -maxdepth 1 -type f -name "*.md" -not -name "index.md" -not -name "README.md" | sort -u)
+
+# # Assemble the final mkdocs.yml
+# {
+# cat "${MKDOCS_TEMP}.head"
+# # Echo the accumulated module lines (use printf for robustness)
+# printf "%b" "$MODULE_LINES"
+# cat "${MKDOCS_TEMP}.tail"
+# } > "$MKDOCS_FILE"
+
+# # Clean up temporary files
+# rm -f "$MKDOCS_TEMP" "${MKDOCS_TEMP}.head" "${MKDOCS_TEMP}.tail"
+
+# log_success "mkdocs.yml navigation updated"
+
+# --- Step 4: Build and serve mkdocs ---
+log_step "Building mkdocs documentation"
+if [ "$VERBOSE" = true ]; then
+ mkdocs build || { log_error "mkdocs build failed"; exit 1; }
+else
+ mkdocs build > /dev/null 2>&1 || { log_error "mkdocs build failed"; exit 1; }
+fi
+
+# Find the latest CSS files with their hashes
+MAIN_CSS=$(find ./mkdocs-site/assets/stylesheets -name "main.*.min.css" | sort | tail -n 1)
+PALETTE_CSS=$(find ./mkdocs-site/assets/stylesheets -name "palette.*.min.css" | sort | tail -n 1)
+
+# Extract just the filenames from the paths
+MAIN_CSS_FILE=$(basename "$MAIN_CSS")
+PALETTE_CSS_FILE=$(basename "$PALETTE_CSS")
+
+# Find all HTML files and replace the CSS references in each one
+log_info "Updating CSS references in HTML files"
+find ./mkdocs-site -type f -name "*.html" | while read -r html_file; do
+ sed -i'' -e "s|MAIN\.CSS|assets/stylesheets/$MAIN_CSS_FILE|g" "$html_file"
+ sed -i'' -e "s|MAIN_PALETTE\.CSS|assets/stylesheets/$PALETTE_CSS_FILE|g" "$html_file"
+done
+
+# Remove .html-e files
+find ./mkdocs-site -type f -name "*.html-e" -delete
+
+log_success "MkDocs build completed"
+
+# --- Step 5: Generate LLM context files ---
+log_step "Generating LLM context files"
+
+LLM_SUMMARY_FILE="$ROOT_DIR/docs/llms.txt"
+LLM_FULL_FILE="$ROOT_DIR/docs/llms-full.txt"
+DOC_DIRS=(
+ "$ROOT_DIR/docs/introduction"
+ "$ROOT_DIR/docs/run"
+ "$ROOT_DIR/docs/build"
+ "$ROOT_DIR/docs/devices"
+ "$ROOT_DIR/docs/resources"
+)
+
+# Get current timestamp
+GENERATION_TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+
+# Generate llms.txt (routes and summary)
+log_info "Creating summary and routes file"
+cat > "$LLM_SUMMARY_FILE" <<EOF
+# HyperBEAM Documentation
+
+Generated: $GENERATION_TIMESTAMP
+EOF
+
+for DOC_DIR in "${DOC_DIRS[@]}"; do
+  # Derive the section heading from the directory name (e.g. "docs/run" -> "run")
+  SECTION_HEADING=$(basename "$DOC_DIR")
+  echo "" >> "$LLM_SUMMARY_FILE"
+ echo "### $SECTION_HEADING" >> "$LLM_SUMMARY_FILE"
+ echo "" >> "$LLM_SUMMARY_FILE"
+
+ find "$DOC_DIR" -type f -name "*.md" -print |
+ sed "s|^$ROOT_DIR/||" |
+ sed 's/^docs\///' |
+ sed 's/\.md$//' |
+ sort |
+ while IFS= read -r base_path; do
+ html_path="${base_path}.html"
+ md_path_relative="docs/${base_path}.md"
+ md_file_path="$ROOT_DIR/$md_path_relative"
+
+ if [ -f "$md_file_path" ]; then
+ title=$(grep -m 1 '^# ' "$md_file_path" 2>/dev/null | sed 's/^# //')
+ else
+ title=""
+ fi
+
+ if [ -z "$title" ]; then
+ title=$(basename "$base_path" | sed -e 's/-/ /g' -e 's/\b\(.\)/\u\1/g')
+ fi
+
+ echo "* [$title](./$html_path)" >> "$LLM_SUMMARY_FILE"
+ done
+done
+
+# Generate llms-full.txt (concatenated content)
+log_info "Creating full documentation file"
+echo "Generated: $GENERATION_TIMESTAMP" > "$LLM_FULL_FILE"
+echo "" >> "$LLM_FULL_FILE"
+
+find "${DOC_DIRS[@]}" -type f -name "*.md" | sort | while read -r doc_file; do
+ relative_path="${doc_file#$ROOT_DIR/}"
+ echo "--- START OF FILE: $relative_path ---" >> "$LLM_FULL_FILE"
+ cat "$doc_file" >> "$LLM_FULL_FILE"
+ echo "" >> "$LLM_FULL_FILE"
+ echo "--- END OF FILE: $relative_path ---" >> "$LLM_FULL_FILE"
+ echo "" >> "$LLM_FULL_FILE"
+done
-# Step 4: Build and serve mkdocs
-echo "Building and serving mkdocs documentation..."
-# Run mkdocs from the root directory
-# Remove --site-dir flag to use the one specified in mkdocs.yml (which is 'site')
-mkdocs build || { echo "mkdocs build failed"; exit 1; }
-mkdocs serve || { echo "mkdocs serve failed"; exit 1; }
+log_success "LLM context files generated"
-echo "Documentation build and serve completed"
\ No newline at end of file
+# --- Final success message ---
+echo -e "\n${GREEN}${BOLD}✓ Documentation build completed successfully${NC}\n"
diff --git a/docs/build/exposing-process-state.md b/docs/build/exposing-process-state.md
new file mode 100644
index 000000000..a15e8985e
--- /dev/null
+++ b/docs/build/exposing-process-state.md
@@ -0,0 +1,109 @@
+# Exposing Process State with the Patch Device
+
+The [`~patch@1.0`](../resources/source-code/dev_patch.md) device provides a mechanism for AO processes to expose parts of their internal state, making it readable via direct HTTP GET requests along the process's HyperPATH.
+
+## Why Use the Patch Device?
+
+Standard AO process execution typically involves sending a message to a process, letting it compute, and then potentially reading results from its outbox or state after the computation is scheduled and finished. This is asynchronous.
+
+The `patch` device allows for a more direct, synchronous-like read pattern. A process can use it to "patch" specific data elements from its internal state into a location that becomes directly accessible via a HyperPATH GET request *before* the full asynchronous scheduling might complete.
+
+This is particularly useful for:
+
+* **Web Interfaces:** Building frontends that need to quickly read specific data points from an AO process without waiting for a full message round-trip.
+* **Data Feeds:** Exposing specific metrics or state variables for monitoring or integration with other systems.
+* **Caching:** Allowing frequently accessed data to be retrieved efficiently via simple HTTP GETs.
+
+## How it Works
+
+1. **Process Logic:** Inside your AO process code (e.g., in Lua or WASM), when you want to expose data, you construct an **Outbound Message** targeted at the [`~patch@1.0`](../resources/source-code/dev_patch.md) device.
+2. **Patch Message Format:** This outbound message typically includes tags that specify:
+ * `device = 'patch@1.0'`
+ * A `cache` tag containing a table. The **keys** within this table become the final segments in the HyperPATH used to access the data, and the **values** are the data itself.
+ * Example Lua using `aos`: `Send({ Target = ao.id, device = 'patch@1.0', cache = { mydatakey = MyValue } })`
+3. **HyperBEAM Execution:** When HyperBEAM executes the process schedule and encounters this outbound message:
+ * It invokes the `dev_patch` module.
+ * `dev_patch` inspects the message.
+ * It takes the keys from the `cache` table (`mydatakey` in the example) and their associated values (`MyValue`) and makes these values available under the `/cache/` path segment.
+4. **HTTP Access:** You (or any HTTP client) can now access this data directly using a GET request:
+ ```
+ GET /<process-id>~process@1.0/compute/cache/<mydatakey>
+ # Or potentially using /now/
+ GET /<process-id>~process@1.0/now/cache/<mydatakey>
+ ```
+ The HyperBEAM node serving the request will resolve the path up to `/compute/cache` (or `/now/cache`), then use the logic associated with the patched data (`mydatakey`) to return the `MyValue` directly.
+
+## Initial State Sync (Optional)
+
+It can be beneficial to expose the initial state of your process via the `patch` device as soon as the process is loaded or spawned. This makes key data points immediately accessible via HTTP GET requests without requiring an initial interaction message to trigger a `Send` to the patch device.
+
+This pattern typically involves checking a flag within your process state to ensure the initial sync only happens once. Here's an example from the Token Blueprint, demonstrating how to sync `Balances` and `TotalSupply` right after the process starts:
+
+```lua
+-- Place this logic at the top level of your process script,
+-- outside of specific handlers, so it runs on load.
+
+-- Initialize the sync flag if it doesn't exist
+InitialSync = InitialSync or 'INCOMPLETE'
+
+-- Sync state on spawn/load if not already done
+if InitialSync == 'INCOMPLETE' then
+ -- Send the relevant state variables to the patch device
+ Send({ device = 'patch@1.0', cache = { balances = Balances, totalsupply = TotalSupply } })
+ -- Update the flag to prevent re-syncing on subsequent executions
+ InitialSync = 'COMPLETE'
+ print("Initial state sync complete. Balances and TotalSupply patched.")
+end
+```
+
+**Explanation:**
+
+1. `InitialSync = InitialSync or 'INCOMPLETE'`: This line ensures the `InitialSync` variable exists in the process state, initializing it to `'INCOMPLETE'` if it's the first time the code runs.
+2. `if InitialSync == 'INCOMPLETE' then`: The code proceeds only if the initial sync hasn't been marked as complete.
+3. `Send(...)`: The relevant state (`Balances`, `TotalSupply`) is sent to the `patch` device, making it available under `/cache/balances` and `/cache/totalsupply`.
+4. `InitialSync = 'COMPLETE'`: The flag is updated, so this block won't execute again in future message handlers within the same process lifecycle.
+
+This ensures that clients or frontends can immediately query essential data like token balances as soon as the process ID is known, improving the responsiveness of applications built on AO.
+
+## Example (Lua in `aos`)
+
+```lua
+-- In your process code (e.g., loaded via .load)
+Handlers.add(
+ "PublishData",
+ Handlers.utils.hasMatchingTag("Action", "PublishData"),
+ function (msg)
+ local dataToPublish = "Some important state: " .. math.random()
+ -- Expose 'currentstatus' key under the 'cache' path
+ Send({ device = 'patch@1.0', cache = { currentstatus = dataToPublish } })
+ print("Published data to /cache/currentstatus")
+ end
+)
+
+-- Spawning and interacting
+[aos]> MyProcess = spawn(MyModule)
+
+[aos]> Send({ Target = MyProcess, Action = "PublishData" })
+-- Wait a moment for scheduling
+
+```
+
+## Avoiding Key Conflicts
+
+When defining keys within the `cache` table (e.g., `cache = { mydatakey = MyValue }`), these keys become path segments under `/cache/` (e.g., `/compute/cache/mydatakey` or `/now/cache/mydatakey`). It's important to choose keys that do not conflict with existing, reserved path segments used by HyperBEAM or the `~process` device itself for state access.
+
+Using reserved keywords as your cache keys can lead to routing conflicts or prevent you from accessing your patched data as expected. While the exact list can depend on device implementations, it's wise to avoid keys commonly associated with state access, such as: `now`, `compute`, `state`, `info`, `test`.
+
+It's recommended to use descriptive and specific keys for your cached data to prevent clashes with the underlying HyperPATH routing mechanisms. For example, instead of `cache = { state = ... }`, prefer `cache = { myappstate = ... }` or `cache = { usercount = ... }`.
+
+!!! warning
+ Be aware that HTTP path resolution is case-insensitive and automatically normalizes paths to lowercase. While the `patch` device itself stores keys with case sensitivity (e.g., distinguishing `MyKey` from `mykey`), accessing them via an HTTP GET request will treat `/cache/MyKey` and `/cache/mykey` as the same path. This means that using keys that only differ in case (like `MyKey` and `mykey` in your `cache` table) will result in unpredictable behavior or data overwrites when accessed via HyperPATH. To prevent these issues, it is **strongly recommended** to use **consistently lowercase keys** within the `cache` table (e.g., `mykey`, `usercount`, `appstate`).
+
+## Key Points
+
+* **Path Structure:** The data is exposed under the `/cache/` path segment. The tag name you use *inside* the `cache` table in the `Send` call (e.g., `currentstatus`) becomes the final segment in the accessible HyperPATH (e.g., `/compute/cache/currentstatus`).
+* **Data Types:** The `patch` device typically handles basic data types (strings, numbers) within the `cache` table effectively. Complex nested tables might require specific encoding or handling.
+* **`compute` vs `now`:** Accessing patched data via `/compute/cache/...` typically serves the last known patched value quickly. Accessing via `/now/cache/...` might involve more computation to ensure the absolute latest state before checking for the patched key under `/cache/`.
+* **Not a Replacement for State:** Patching is primarily for *exposing* reads. It doesn't replace the core state management within your process handler logic.
+
+By using the `patch` device, you can make parts of your AO process state easily and efficiently readable over standard HTTP, bridging the gap between decentralized computation and web-based applications.
\ No newline at end of file
diff --git a/docs/build/extending-hyperbeam.md b/docs/build/extending-hyperbeam.md
new file mode 100644
index 000000000..c033a8d93
--- /dev/null
+++ b/docs/build/extending-hyperbeam.md
@@ -0,0 +1,83 @@
+# Extending HyperBEAM
+
+HyperBEAM's modular design, built on AO-Core principles and Erlang/OTP, makes it highly extensible. You can add new functionalities or modify existing behaviors primarily by creating new **Devices** or implementing **Pre/Post-Processors**.
+
+!!! warning "Advanced Topic"
+ Extending HyperBEAM requires a good understanding of Erlang/OTP, the AO-Core protocol, and HyperBEAM's internal architecture. This guide provides a high-level overview; detailed implementation requires deeper exploration of the source code.
+
+## Approach 1: Creating New Devices
+
+This is the most common way to add significant new capabilities.
+A Device is essentially an Erlang module (typically named `dev_*.erl`) that processes AO-Core messages.
+
+**Steps:**
+
+1. **Define Purpose:** Clearly define what your device will do. What kind of messages will it process? What state will it manage (if any)? What functions (keys) will it expose?
+2. **Create Module:** Create a new Erlang module (e.g., `src/dev_my_new_device.erl`).
+3. **Implement `info/0..2` (Optional but Recommended):** Define an `info` function to signal capabilities and requirements to HyperBEAM (e.g., exported keys, variant/version ID).
+ ```erlang
+ info() ->
+ #{
+ variant => <<"MyNewDevice/1.0">>,
+ exports => [<<"do_something">>, <<"get_status">>]
+ }.
+ ```
+4. **Implement Key Functions:** Create Erlang functions corresponding to the keys your device exposes. These functions typically take `StateMessage`, `InputMessage`, and `Environment` as arguments and return `{ok, NewMessage}` or `{error, Reason}`.
+ ```erlang
+ do_something(StateMsg, InputMsg, Env) ->
+ % ... perform action based on InputMsg ...
+ NewState = ..., % Calculate new state
+ {ok, NewState}.
+
+ get_status(StateMsg, _InputMsg, _Env) ->
+ % ... read status from StateMsg ...
+ StatusData = ...,
+ {ok, StatusData}.
+ ```
+5. **Handle State (If Applicable):** Devices can be stateless or stateful. Stateful devices manage their state within the `StateMessage` passed between function calls.
+6. **Register Device:** Ensure HyperBEAM knows about your device. This might involve adding it to build configurations or potentially a dynamic registration mechanism if available.
+7. **Testing:** Write EUnit tests for your device's functions.
+
+**Example Idea:** A device that bridges to another blockchain network, allowing AO processes to read data or trigger transactions on that chain.
+
+## Approach 2: Building Pre/Post-Processors
+
+Pre/post-processors allow you to intercept incoming requests *before* they reach the target device/process (`preprocess`) or modify the response *after* execution (`postprocess`). These are often implemented using the `dev_stack` device or specific hooks within the request handling pipeline.
+
+**Use Cases:**
+
+* **Authentication/Authorization:** Checking signatures or permissions before allowing execution.
+* **Request Modification:** Rewriting requests, adding metadata, or routing based on specific criteria.
+* **Response Formatting:** Changing the structure or content type of the response.
+* **Metering/Logging:** Recording request details or charging for usage before or after execution.
+
+**Implementation:**
+
+Processors often involve checking specific conditions (like request path or headers) and then either:
+
+a. Passing the request through unchanged.
+b. Modifying the request/response message structure.
+c. Returning an error or redirect.
+
+
+**Example Idea:** A preprocessor that automatically adds a timestamp tag to all incoming messages for a specific process.
+
+
+## Approach 3: Custom Routing Strategies
+
+While `dev_router` provides basic strategies (round-robin, etc.), you could potentially implement a custom load balancing or routing strategy module that `dev_router` could be configured to use. This would involve understanding the interfaces expected by `dev_router`.
+
+**Example Idea:** A routing strategy that queries worker nodes for their specific capabilities before forwarding a request.
+
+## Getting Started
+
+1. **Familiarize Yourself:** Deeply understand Erlang/OTP and the HyperBEAM codebase (`src/` directory), especially [`hb_ao.erl`](../resources/source-code/hb_ao.md), [`hb_message.erl`](../resources/source-code/hb_message.md), and existing `dev_*.erl` modules relevant to your idea.
+2. **Study Examples:** Look at simple devices like `dev_patch.erl` or more complex ones like `dev_process.erl` to understand patterns.
+3. **Start Small:** Implement a minimal version of your idea first.
+4. **Test Rigorously:** Use `rebar3 eunit` extensively.
+5. **Engage Community:** Ask questions in developer channels if you get stuck.
+
+Extending HyperBEAM allows you to tailor the AO network's capabilities to specific needs, contributing to its rich and evolving ecosystem.
diff --git a/docs/build/get-started-building-on-ao-core.md b/docs/build/get-started-building-on-ao-core.md
new file mode 100644
index 000000000..117b923da
--- /dev/null
+++ b/docs/build/get-started-building-on-ao-core.md
@@ -0,0 +1,132 @@
+# Getting Started Building on AO-Core
+
+Welcome to building on AO, the decentralized supercomputer!
+
+AO combines the permanent storage of Arweave with the flexible, scalable computation enabled by the AO-Core protocol and its HyperBEAM implementation. This allows you to create truly autonomous applications, agents, and services that run trustlessly and permissionlessly.
+
+## Core Idea: Processes & Messages
+
+At its heart, building on AO involves:
+
+1. **Creating Processes:** Think of these as independent programs or stateful contracts. Each process has a unique ID and maintains its own state.
+2. **Sending Messages:** You interact with processes by sending them messages. These messages trigger computations, update state, or cause the process to interact with other processes or the outside world.
+
+Messages are processed by [Devices](../begin/ao-devices.md), which define *how* the computation happens (e.g., running WASM code, executing Lua scripts, managing state transitions).
+
+## Starting `aos`: Your Development Environment
+
+The primary tool for interacting with AO and developing processes is `aos`, a command-line interface and development environment.
+
+=== "npm"
+ ```bash
+ npm i -g https://get_ao.arweave.net
+ ```
+
+=== "bun"
+ ```bash
+ bun install -g https://get_ao.arweave.net
+ ```
+
+=== "pnpm"
+ ```bash
+ pnpm add -g https://get_ao.arweave.net
+ ```
+
+**Starting `aos`:**
+
+Simply run the command in your terminal:
+
+```bash
+aos
+```
+
+This connects you to an interactive Lua environment running within a **process** on the AO network. This process acts as your command-line interface (CLI) to the AO network, allowing you to interact with other processes, manage your wallet, and develop new AO processes. By default, it connects to a process running on the mainnet Compute Unit (CU).
+
+**What `aos` is doing:**
+
+* **Connecting:** Establishes a connection from your terminal to a remote process running the `aos` environment.
+* **Loading Wallet:** Looks for a default Arweave key file (usually `~/.aos.json` or specified via arguments) to load into the remote process context for signing outgoing messages.
+* **Providing Interface:** Gives you a Lua prompt (`[aos]>`) within the remote process where you can:
+ * Load code for new persistent processes on the network.
+ * Send messages to existing network processes.
+ * Inspect process state.
+ * Manage your local environment.
+
+## Your First Interaction: Assigning a Variable
+
+From the `aos` prompt, you can assign a variable. Let's assign a basic Lua process that just holds some data:
+
+```lua
+[aos]> myVariable = "Hello from aos!"
+-- This assigns the string "Hello from aos!" to the variable 'myVariable'
+-- within the current process's Lua environment.
+
+[aos]> myVariable
+-- Displays the content of 'myVariable'
+Hello from aos!
+```
+
+
+## Your First Handler
+
+Follow these steps to create and interact with your first message handler in AO:
+
+1. **Create a Lua File to Handle Messages:**
+ Create a new file named `main.lua` in your local directory and add the following Lua code:
+
+ ```lua
+ Handlers.add(
+ "HelloWorld",
+ function(msg)
+ -- This function gets called when a message with Action = "HelloWorld" arrives.
+ print("Handler triggered by message from: " .. msg.From)
+ -- It replies to the sender with a new message containing the specified data.
+ msg.reply({ Data = "Hello back from your process!" })
+ end
+ )
+
+ print("HelloWorld handler loaded.") -- Confirmation message
+ ```
+
+ * `Handlers.add`: Registers a function to handle incoming messages.
+ * `"HelloWorld"`: The name of this handler. It will be triggered by messages with `Action = "HelloWorld"`.
+ * `function(msg)`: The function that executes when the handler is triggered. `msg` contains details about the incoming message (like `msg.From`, the sender's process ID).
+ * `msg.reply({...})`: Sends a response message back to the original sender. The response must be a Lua table, typically containing a `Data` field.
+
+2. **Load the Handler into `aos`:**
+ From your `aos` prompt, load the handler code into your running process:
+
+ ```lua
+ [aos]> .load main.lua
+ ```
+
+3. **Send a Message to Trigger the Handler:**
+ Now, send a message to your own process (`ao.id` refers to the current process ID) with the action that matches your handler's name:
+
+ ```lua
+ [aos]> Send({ Target = ao.id, Action = "HelloWorld" })
+ ```
+
+4. **Observe the Output:**
+ You should see two things happen in your `aos` terminal:
+    * The `print` statement from your handler: `Handler triggered by message from: <your process ID>`
+    * A notification about the reply message: `New Message From <your process ID>: Data = Hello back from your process!`
+
+5. **Inspect the Reply Message:**
+ The reply message sent by your handler is now in your process's inbox. You can inspect its data like this:
+
+ ```lua
+ [aos]> Inbox[#Inbox].Data
+ ```
+ This should output: `"Hello back from your process!"`
+
+You've successfully created a handler, loaded it into your AO process, triggered it with a message, and received a reply!
+
+## Next Steps
+
+This is just the beginning. To dive deeper:
+
+* **AO Cookbook:** Explore practical examples and recipes for common tasks: [AO Cookbook](https://cookbook_ao.arweave.net/)
+* **Expose Process State:** Learn how to make your process data accessible via HTTP using the `patch` device: [Exposing Process State](./exposing-process-state.md)
+* **Serverless Compute:** Discover how to run WASM or Lua computations within your processes: [Serverless Decentralized Compute](./serverless-decentralized-compute.md)
+* **aos Documentation:** Refer to the official `aos` documentation for detailed commands and usage.
diff --git a/docs/build/serverless-decentralized-compute.md b/docs/build/serverless-decentralized-compute.md
new file mode 100644
index 000000000..cb1c589c6
--- /dev/null
+++ b/docs/build/serverless-decentralized-compute.md
@@ -0,0 +1,82 @@
+# Serverless Decentralized Compute on AO
+
+AO enables powerful "serverless" computation patterns by allowing you to run code (WASM, Lua) directly within decentralized processes, triggered by messages. Furthermore, if computations are performed on nodes running in Trusted Execution Environments (TEEs), you can obtain cryptographic attestations verifying the execution integrity.
+
+## Core Concept: Compute Inside Processes
+
+Instead of deploying code to centralized servers, you deploy code *to* the Arweave permaweb and instantiate it as an AO process. Interactions happen by sending messages to this process ID.
+
+* **Code Deployment:** Your WASM binary or Lua script is uploaded to Arweave, getting a permanent transaction ID.
+* **Process Spawning:** You create an AO process, associating it with your code's transaction ID and specifying the appropriate compute device ([`~wasm64@1.0`](../devices/wasm64-at-1-0.md) or [`~lua@5.3a`](../devices/lua-at-5-3a.md)).
+* **Execution via Messages:** Sending a message to the process ID triggers the HyperBEAM node (that picks up the message) to:
+ 1. Load the process state.
+ 2. Fetch the associated WASM/Lua code from Arweave.
+ 3. Execute the code using the relevant device ([`dev_wasm`](../resources/source-code/dev_wasm.md) or [`dev_lua`](../resources/source-code/dev_lua.md)), passing the message data and current state.
+ 4. Update the process state based on the execution results.
+
+
+## TEE Attestations (via [`~snp@1.0`](../resources/source-code/dev_snp.md))
+
+If a HyperBEAM node performing these computations runs within a supported Trusted Execution Environment (like AMD SEV-SNP), it can provide cryptographic proof of execution.
+
+* **How it works:** The [`~snp@1.0`](../resources/source-code/dev_snp.md) device interacts with the TEE hardware.
+* **Signed Responses:** When a TEE-enabled node processes your message (e.g., executes your WASM function), the HTTP response containing the result can be cryptographically signed by a key that *provably* only exists inside the TEE.
+* **Verification:** Clients receiving this response can verify the signature against the TEE platform's attestation mechanism (e.g., AMD's KDS) to gain high confidence that the computation was performed correctly and confidentially within the secure environment, untampered by the node operator.
+
+**Obtaining Attested Responses:**
+
+This usually involves interacting with nodes specifically advertised as TEE-enabled. The exact mechanism for requesting and verifying attestations depends on the specific TEE technology and node configuration.
+
+* The HTTP response headers might contain specific signature or attestation data (e.g., using HTTP Message Signatures RFC-9421 via [`dev_codec_httpsig`](../resources/source-code/dev_codec_httpsig.md)).
+* You might query the [`~snp@1.0`](../resources/source-code/dev_snp.md) device directly on the node to get its attestation report.
+
+Refer to documentation on [TEE Nodes](./run/tee-nodes.md) and the [`~snp@1.0`](../resources/source-code/dev_snp.md) device for details.
+
+By leveraging WASM, Lua, and optional TEE attestations, AO provides a powerful platform for building complex, verifiable, and truly decentralized serverless applications.
diff --git a/docs/devices/json-at-1-0.md b/docs/devices/json-at-1-0.md
new file mode 100644
index 000000000..eec5e7985
--- /dev/null
+++ b/docs/devices/json-at-1-0.md
@@ -0,0 +1,42 @@
+# Device: ~json@1.0
+
+## Overview
+
+The [`~json@1.0`](../resources/source-code/dev_json_iface.md) device provides a mechanism to interact with JSON (JavaScript Object Notation) data structures using HyperPATHs. It allows treating a JSON document or string as a stateful entity against which HyperPATH queries can be executed.
+
+This device is useful for:
+
+* Serializing and deserializing JSON data.
+* Querying and modifying JSON objects.
+* Integrating with other devices and operations via HyperPATH chaining.
+
+## Core Functions (Keys)
+
+### Serialization
+
+* **`GET /~json@1.0/serialize` (Direct Serialize Action)**
+ * **Action:** Serializes the input message or data into a JSON string.
+ * **Example:** `GET /~json@1.0/serialize` - serializes the current message as JSON.
+ * **HyperPATH:** The path segment `/serialize` directly follows the device identifier.
+
+* **`GET /<path>/~json@1.0/serialize` (Chained Serialize Action)**
+    * **Action:** Takes arbitrary data output from `<path>` (another device or operation) and returns its serialized JSON string representation.
+ * **Example:** `GET /~meta@1.0/info/~json@1.0/serialize` - fetches node info from the meta device and then pipes it to the JSON device to serialize the result as JSON.
+ * **HyperPATH:** This segment (`/~json@1.0/serialize`) is appended to a previous HyperPATH segment.
+
+## HyperPATH Chaining Example
+
+The JSON device is particularly useful in HyperPATH chains to convert output from other devices into JSON format:
+
+```
+GET /~meta@1.0/info/~json@1.0/serialize
+```
+
+This retrieves the node configuration from the meta device and serializes it to JSON.
+
+## See Also
+
+- [Message Device](../resources/source-code/dev_message.md) - Works well with JSON serialization
+- [Meta Device](../resources/source-code/dev_meta.md) - Can provide configuration data to serialize
+
+[json module](../resources/source-code/dev_codec_json.md)
\ No newline at end of file
diff --git a/docs/devices/lua-at-5-3a.md b/docs/devices/lua-at-5-3a.md
new file mode 100644
index 000000000..4c961bca2
--- /dev/null
+++ b/docs/devices/lua-at-5-3a.md
@@ -0,0 +1,70 @@
+# Device: ~lua@5.3a
+
+## Overview
+
+The [`~lua@5.3a`](../resources/source-code/dev_lua.md) device enables the execution of Lua scripts within the HyperBEAM environment. It provides an isolated sandbox where Lua code can process incoming messages, interact with other devices, and manage state.
+
+## Core Concept: Lua Script Execution
+
+This device allows processes to perform computations defined in Lua scripts. Similar to the [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device, it manages the lifecycle of a Lua execution state associated with the process.
+
+## Key Functions (Keys)
+
+These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process.
+
+* **`init`**
+ * **Action:** Initializes the Lua environment for the process. It finds and loads the Lua script(s) associated with the process, creates a `luerl` state, applies sandboxing rules if specified, installs the [`dev_lua_lib`](../resources/source-code/dev_lua_lib.md) (providing AO-specific functions like `ao.send`), and stores the initialized state in the process's private area (`priv/state`).
+ * **Inputs (Expected in Process Definition or `init` Message):**
+ * `script`: Can be:
+ * An Arweave Transaction ID of the Lua script file.
+ * A list of script IDs or script message maps.
+ * A message map containing the Lua script in its `body` tag (Content-Type `application/lua` or `text/x-lua`).
+ * A map where keys are module names and values are script IDs/messages.
+ * `sandbox`: (Optional) Controls Lua sandboxing. Can be `true` (uses default sandbox list), `false` (no sandbox), or a map/list specifying functions to disable and their return values.
+ * **Outputs (Stored in `priv/`):**
+ * `state`: The initialized `luerl` state handle.
+* **`<function_name>` (Default Handler - `compute`)**
+ * **Action:** Executes a specific function within the loaded Lua script(s). This is the default handler; if a key matching a Lua function name is called on the device, this logic runs.
+ * **Inputs (Expected in Process State or Incoming Message):**
+ * `priv/state`: The Lua state obtained during `init`.
+ * The **key** being accessed (used as the default function name).
+ * `function` or `body/function`: (Optional) Overrides the function name derived from the key.
+ * `parameters` or `body/parameters`: (Optional) Arguments to pass to the Lua function. Defaults to a list containing the process message, the request message, and an empty options map.
+ * **Response:** The results returned by the Lua function call, typically encoded. The device also updates the `priv/state` with the Lua state after execution.
+* **`snapshot`**
+ * **Action:** Captures the current state of the running Lua environment. `luerl` state is serializable.
+ * **Inputs:** `priv/state`.
+ * **Outputs:** A message containing the serialized Lua state, typically tagged with `[Prefix]/State`.
+* **`normalize` (Internal Helper)**
+ * **Action:** Ensures a consistent state representation by loading a Lua state from a snapshot (`[Prefix]/State`) if a live state (`priv/state`) isn't already present.
+* **`functions`**
+ * **Action:** Returns a list of all globally defined functions within the current Lua state.
+ * **Inputs:** `priv/state`.
+ * **Response:** A list of function names.
+
+## Sandboxing
+
+The `sandbox` option in the process definition restricts potentially harmful Lua functions (like file I/O, OS commands, loading arbitrary code). By default (`sandbox = true`), common dangerous functions are disabled. You can customize the sandbox rules.
+
+## AO Library (`dev_lua_lib`)
+
+The `init` function automatically installs a helper library ([`dev_lua_lib`](../resources/source-code/dev_lua_lib.md)) into the Lua state. This library typically provides functions for interacting with the AO environment from within the Lua script, such as:
+
+* `ao.send({ Target = ..., ... })`: To send messages from the process.
+* Access to message tags and data.
+
+## Usage within `dev_stack`
+
+Like [`~wasm64@1.0`](../resources/source-code/dev_wasm.md), the `~lua@5.3a` device is typically used within an execution stack.
+
+```text
+# Example Process Definition Snippet
+Execution-Device: stack@1.0
+Execution-Stack: scheduler@1.0, lua@5.3a
+Script: <Lua Script ID>
+Sandbox: true
+```
+
+This device offers a lightweight, integrated scripting capability for AO processes, suitable for a wide range of tasks from simple logic to more complex state management and interactions.
+
+[lua module](../resources/source-code/dev_lua.md)
diff --git a/docs/devices/message-at-1-0.md b/docs/devices/message-at-1-0.md
new file mode 100644
index 000000000..000bdb860
--- /dev/null
+++ b/docs/devices/message-at-1-0.md
@@ -0,0 +1,74 @@
+# Device: ~message@1.0
+
+## Overview
+
+The [`~message@1.0`](../resources/source-code/dev_message.md) device is a fundamental built-in device in HyperBEAM. It serves as the identity device for standard AO-Core messages, which are represented as Erlang maps internally. Its primary function is to allow manipulation and inspection of these message maps directly via HyperPATH requests, without needing a persistent process state.
+
+This device is particularly useful for:
+
+* Creating and modifying transient messages on the fly using query parameters.
+* Retrieving specific values from a message map.
+* Inspecting the keys of a message.
+* Handling message commitments and verification (though often delegated to specialized commitment devices like [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
+
+## Core Functionality
+
+The `message@1.0` device treats the message itself as the state it operates on. Key operations are accessed via path segments in the HyperPATH.
+
+### Key Access (`/key`)
+
+To retrieve the value associated with a specific key in the message map, simply append the key name to the path. Key lookup is case-insensitive.
+
+**Example:**
+
+```
+GET /~message@1.0&hello=world&Key=Value/key
+```
+
+**Response:**
+
+```
+"Value"
+```
+
+### Reserved Keys
+
+The `message@1.0` device reserves several keys for specific operations:
+
+* **`get`**: (Default operation if path segment matches a key in the map) Retrieves the value of a specified key. Behaves identically to accessing `/key` directly.
+* **`set`**: Modifies the message by adding or updating key-value pairs. Requires additional parameters (usually in the request body or subsequent path segments/query params, depending on implementation specifics).
+ * Supports deep merging of maps.
+ * Setting a key to `unset` removes it.
+ * Overwriting keys that are part of existing commitments will typically remove those commitments unless the new value matches the old one.
+* **`set_path`**: A special case for setting the `path` key itself, which cannot be done via the standard `set` operation.
+* **`remove`**: Removes one or more specified keys from the message. Requires an `item` or `items` parameter.
+* **`keys`**: Returns a list of all public (non-private) keys present in the message map.
+* **`id`**: Calculates and returns the ID (hash) of the message. Considers active commitments based on specified `committers`. May delegate ID calculation to a device specified by the message's `id-device` key or the default ([`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
+* **`commit`**: Creates a commitment (e.g., a signature) for the message. Requires parameters like `commitment-device` and potentially committer information. Delegates the actual commitment generation to the specified device (default [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
+* **`committers`**: Returns a list of committers associated with the commitments in the message. Can be filtered by request parameters.
+* **`commitments`**: Used internally and in requests to filter or specify which commitments to operate on (e.g., for `id` or `verify`).
+* **`verify`**: Verifies the commitments attached to the message. Can be filtered by `committers` or specific `commitment` IDs in the request. Delegates verification to the device specified in each commitment (`commitment-device`).
+
+### Private Keys
+
+Keys prefixed with `priv` (e.g., `priv_key`, `private.data`) are considered private and cannot be accessed or listed via standard `get` or `keys` operations.
+
+## HyperPATH Example
+
+This example demonstrates creating a transient message and retrieving a value:
+
+```
+GET /~message@1.0&hello=world&k=v/k
+```
+
+**Breakdown:**
+
+1. `~message@1.0`: Sets the root device.
+2. `&hello=world&k=v`: Query parameters create the initial message: `#{ <<"hello">> => <<"world">>, <<"k">> => <<"v">> }`.
+3. `/k`: The path segment requests the value for the key `k`.
+
+**Response:**
+
+```
+"v"
+```
\ No newline at end of file
diff --git a/docs/devices/meta-at-1-0.md b/docs/devices/meta-at-1-0.md
new file mode 100644
index 000000000..b448a135b
--- /dev/null
+++ b/docs/devices/meta-at-1-0.md
@@ -0,0 +1,55 @@
+# Device: ~meta@1.0
+
+## Overview
+
+The [`~meta@1.0`](../resources/source-code/dev_meta.md) device provides access to metadata and configuration information about the local HyperBEAM node and the broader AO network.
+
+This device is essential for:
+
+* Discovering a node's configuration, capabilities, and public address.
+* Initializing a node and updating its configuration as the operator.
+
+## Core Functions (Keys)
+
+### `info`
+
+Retrieves or modifies the node's configuration message (often referred to as `NodeMsg` internally).
+
+* **`GET /~meta@1.0/info`**
+ * **Action:** Returns the current node configuration message.
+ * **Response:** A message map containing the node's settings. Sensitive keys (like private wallets) are filtered out. Dynamically generated keys like the node's public `address` are added if a wallet is configured.
+* **`POST /~meta@1.0/info`**
+ * **Action:** Updates the node's configuration message. Requires the request to be signed by the node's configured `operator` key/address.
+ * **Request Body:** A message map containing the configuration keys and values to update.
+ * **Response:** Confirmation message indicating success or failure.
+ * **Note:** Once a node's configuration is marked as `initialized = permanent`, it cannot be changed via this method.
+
+## Key Configuration Parameters Managed by `~meta`
+
+While the `info` key is the primary interaction point, the `NodeMsg` managed by `~meta` holds crucial configuration parameters affecting the entire node's behavior, including (but not limited to):
+
+* `port`: HTTP server port.
+* `priv_wallet` / `key_location`: Path to the node's Arweave key file.
+* `operator`: The address designated as the node operator (defaults to the address derived from `priv_wallet`).
+* `initialized`: Status indicating if the node setup is temporary or permanent.
+* `preprocessor` / `postprocessor`: Optional messages defining pre/post-processing logic for requests.
+* `routes`: Routing table used by [`dev_router`](../resources/source-code/dev_router.md).
+* `store`: Configuration for data storage.
+* `trace`: Debug tracing options.
+* `p4_*`: Payment configuration.
+* `faff_*`: Access control lists.
+
+*(Refer to `hb_opts.erl` for a comprehensive list of options.)*
+
+## Utility Functions (Internal/Module Level)
+
+The [`dev_meta.erl`](../resources/source-code/dev_meta.md) module also contains helper functions used internally or callable from other Erlang modules:
+
+* `is_operator(RequestMsg, NodeMsg) -> boolean()`: Checks if the signer of `RequestMsg` matches the configured `operator` in `NodeMsg`.
+
+## Pre/Post-Processing Hooks
+
+The `~meta` device applies the node's configured `preprocessor` message before resolving the main request and the `postprocessor` message after obtaining the result, allowing for global interception and modification of requests/responses.
+
+## Initialization
+
+Before a node can process general requests, it usually needs to be initialized. Attempts to access devices other than `~meta@1.0/info` before initialization typically result in an error. Initialization often involves setting essential parameters like the operator key via a `POST` to `info`.
+
+[meta module](../resources/source-code/dev_meta.md)
\ No newline at end of file
diff --git a/docs/devices/overview.md b/docs/devices/overview.md
new file mode 100644
index 000000000..5a9ce0d1b
--- /dev/null
+++ b/docs/devices/overview.md
@@ -0,0 +1,26 @@
+# Devices
+
+Devices are the core functional units within HyperBEAM and AO-Core. They define how messages are processed and what actions can be performed.
+
+Each device listed here represents a specific capability available to AO processes and nodes. Understanding these devices is key to building complex applications and configuring your HyperBEAM node effectively.
+
+## Available Devices
+
+Below is a list of documented built-in devices. Each page details the device's purpose, available functions (keys), and usage examples where applicable.
+
+* **[`~message@1.0`](./message-at-1-0.md):** Base message handling and manipulation.
+* **[`~meta@1.0`](./meta-at-1-0.md):** Node configuration and metadata.
+* **[`~process@1.0`](./process-at-1-0.md):** Persistent, shared process execution environment.
+* **[`~scheduler@1.0`](./scheduler-at-1-0.md):** Message scheduling and execution ordering for processes.
+* **[`~wasm64@1.0`](./wasm64-at-1-0.md):** WebAssembly (WASM) execution engine.
+* **[`~lua@5.3a`](./lua-at-5-3a.md):** Lua script execution engine.
+* **[`~relay@1.0`](./relay-at-1-0.md):** Relaying messages to other nodes or HTTP endpoints.
+* **[`~json@1.0`](./json-at-1-0.md):** Provides access to JSON data structures using HyperPATHs.
+
+*(More devices will be documented here as specifications are finalized and reviewed.)*
+
+## Device Naming and Versioning
+
+Devices are typically referenced using a name and version, like `~<name>@<version>` (e.g., `~process@1.0`). The tilde (`~`) often indicates a primary, user-facing device, while internal or utility devices might use a `dev_` prefix in the source code (e.g., `dev_router`).
+
+Versioning indicates the specific interface and behavior of the device. Changes to a device that break backward compatibility usually result in a version increment.
diff --git a/docs/devices/process-at-1-0.md b/docs/devices/process-at-1-0.md
new file mode 100644
index 000000000..090d5d6d0
--- /dev/null
+++ b/docs/devices/process-at-1-0.md
@@ -0,0 +1,72 @@
+# Device: ~process@1.0
+
+## Overview
+
+The [`~process@1.0`](../resources/source-code/dev_process.md) device represents a persistent, shared execution environment within HyperBEAM, analogous to a process or actor in other systems. It allows for stateful computation and interaction over time.
+
+## Core Concept: Orchestration
+
+A message tagged with `Device: process@1.0` (the "Process Definition Message") doesn't typically perform computation itself. Instead, it defines *which other devices* should be used for key aspects of its lifecycle:
+
+* **Scheduler Device:** Determines the order of incoming messages (assignments) to be processed. (Defaults to [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md)).
+* **Execution Device:** Executes the actual computation based on the current state and the scheduled message. Often configured as [`dev_stack`](../resources/source-code/dev_stack.md) to allow multiple computational steps (e.g., running WASM, applying cron jobs, handling proofs).
+* **Push Device:** Handles the injection of new messages into the process's schedule. (Defaults to [`~push@1.0`](../resources/source-code/dev_push.md)).
+
+The `~process@1.0` device acts as a router, intercepting requests and delegating them to the appropriate configured device (scheduler, executor, etc.) by temporarily swapping the device tag on the message before resolving.
+
+## Key Functions (Keys)
+
+These keys are accessed via HyperPATHs relative to the Process Definition Message ID (`<process-id>`).
+
+* **`GET /~process@1.0/schedule`**
+ * **Action:** Delegates to the configured Scheduler Device (via the process's `schedule/3` function) to retrieve the current schedule or state.
+ * **Response:** Depends on the Scheduler Device implementation (e.g., list of message IDs).
+* **`POST /~process@1.0/schedule`**
+ * **Action:** Delegates to the configured Push Device (via the process's `push/3` function) to add a new message to the process's schedule.
+ * **Request Body:** The message to be added.
+ * **Response:** Confirmation or result from the Push Device.
+* **`GET /~process@1.0/compute/<slot_or_message_id>`**
+    * **Action:** Computes the process state up to a specific point identified by `<slot_or_message_id>` (either a slot number or a message ID within the schedule). It retrieves assignments from the Scheduler Device and applies them sequentially using the configured Execution Device.
+ * **Response:** The process state message after executing up to the target slot/message.
+ * **Caching:** Results are cached aggressively (see [`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to avoid recomputation.
+* **`GET /~process@1.0/now`**
+ * **Action:** Computes and returns the `Results` key from the *latest* known state of the process. This typically involves computing all pending assignments.
+ * **Response:** The value of the `Results` key from the final state.
+* **`GET /~process@1.0/slot`**
+ * **Action:** Delegates to the configured Scheduler Device to query information about a specific slot or the current slot number.
+ * **Response:** Depends on the Scheduler Device implementation.
+* **`GET /~process@1.0/snapshot`**
+ * **Action:** Delegates to the configured Execution Device to generate a snapshot of the current process state. This often involves running the execution stack in a specific "map" mode to gather state from different components.
+ * **Response:** A message representing the process snapshot, often marked for caching.
+
+## Process Definition Example
+
+A typical process definition message might look like this (represented conceptually):
+
+```text
+Device: process@1.0
+Scheduler-Device: [`scheduler@1.0`](../resources/source-code/dev_scheduler.md)
+Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md)
+Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "[`cron@1.0`](../resources/source-code/dev_cron.md)", "[`wasm64@1.0`](../resources/source-code/dev_wasm.md)", "[`PoDA@1.0`](../resources/source-code/dev_poda.md)"
+Cron-Frequency: 10-Minutes
+WASM-Image: <WASM Image ID>
+PoDA:
+ Device: [`PoDA/1.0`](../resources/source-code/dev_poda.md)
+ Authority: <authority-address-1>
+ Authority: <authority-address-2>
+ Quorum: 2
+```
+
+This defines a process that uses:
+* The standard scheduler.
+* A stack executor that runs scheduling logic, cron jobs, a WASM module, and a Proof-of-Data-Availability check.
+
+## State Management & Caching
+
+`~process@1.0` relies heavily on caching ([`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to optimize performance. Full state snapshots and intermediate results are cached periodically (configurable via `Cache-Frequency` and `Cache-Keys` options) to avoid recomputing the entire history for every request.
+
+## Initialization (`init`)
+
+Processes often require an initialization step before they can process messages. This is typically triggered by calling the `init` key on the configured Execution Device via the process path (`/~process@1.0/init`). This allows components within the execution stack (like WASM modules) to set up their initial state.
+
+[process module](../resources/source-code/dev_process.md)
diff --git a/docs/devices/relay-at-1-0.md b/docs/devices/relay-at-1-0.md
new file mode 100644
index 000000000..9d432568c
--- /dev/null
+++ b/docs/devices/relay-at-1-0.md
@@ -0,0 +1,46 @@
+# Device: ~relay@1.0
+
+## Overview
+
+The [`~relay@1.0`](../resources/source-code/dev_relay.md) device enables HyperBEAM nodes to send messages to external HTTP endpoints or other AO nodes.
+
+## Core Concept: Message Forwarding
+
+This device acts as an HTTP client within the AO ecosystem. It allows a node or process to make outbound HTTP requests.
+
+## Key Functions (Keys)
+
+* **`call`**
+ * **Action:** Sends an HTTP request to a specified target and waits synchronously for the response.
+ * **Inputs (from Request Message or Base Message M1):**
+ * `target`: (Optional) A message map defining the request to be sent. Defaults to the original incoming request (`Msg2` or `M1`).
+ * `relay-path` or `path`: The URL/path to send the request to.
+ * `relay-method` or `method`: The HTTP method (GET, POST, etc.).
+ * `relay-body` or `body`: The request body.
+ * `requires-sign`: (Optional, boolean) If true, the request message (`target`) will be signed using the node's key before sending. Defaults to `false`.
+ * `http-client`: (Optional) Specify a custom HTTP client module to use (defaults to node's configured `relay_http_client`).
+ * **Response:** `{ok, <Response>}` where `<Response>` is the full message received from the remote peer, or `{error, Reason}`.
+ * **Example HyperPATH:**
+ ```
+ GET /~relay@1.0/call?method=GET&path=https://example.com
+ ```
+* **`cast`**
+ * **Action:** Sends an HTTP request asynchronously. The device returns immediately after spawning a process to send the request; it does not wait for or return the response from the remote peer.
+ * **Inputs:** Same as `call`.
+ * **Response:** `{ok, <<"OK">>}`.
+* **`preprocess`**
+ * **Action:** This function is designed to be used as a node's global `preprocessor` (configured via [`~meta@1.0`](../resources/source-code/dev_meta.md)). When configured, it intercepts *all* incoming requests to the node and automatically rewrites them to be relayed via the `call` key. This effectively turns the node into a pure forwarding proxy, using its routing table ([`dev_router`](../resources/source-code/dev_router.md)) to determine the destination.
+ * **Response:** A message structure that invokes `/~relay@1.0/call` with the original request as the target body.
+
+## Use Cases
+
+* **Inter-Node Communication:** Sending messages between HyperBEAM nodes.
+* **External API Calls:** Allowing AO processes to interact with traditional web APIs.
+* **Routing Nodes:** Nodes configured with the `preprocess` key act as dedicated routers/proxies.
+* **Client-Side Relaying:** A local HyperBEAM instance can use `~relay@1.0` to forward requests to public compute nodes.
+
+## Interaction with Routing
+
+When `call` or `cast` is invoked, the actual HTTP request dispatch is handled by `hb_http:request/2`. This function often utilizes the node's routing configuration ([`dev_router`](../resources/source-code/dev_router.md)) to determine the specific peer/URL to send the request to, especially if the target path is an AO process ID or another internal identifier rather than a full external URL.
+
+[relay module](../resources/source-code/dev_relay.md)
diff --git a/docs/devices/scheduler-at-1-0.md b/docs/devices/scheduler-at-1-0.md
new file mode 100644
index 000000000..2922d699c
--- /dev/null
+++ b/docs/devices/scheduler-at-1-0.md
@@ -0,0 +1,65 @@
+# Device: ~scheduler@1.0
+
+## Overview
+
+The [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md) device manages the queueing and ordering of messages targeted at a specific process ([`~process@1.0`](../resources/source-code/dev_process.md)). It ensures that messages are processed according to defined scheduling rules.
+
+## Core Concept: Message Ordering
+
+When messages are sent to an AO process (typically via the [`~push@1.0`](../resources/source-code/dev_push.md) device or a `POST` to the process's `/schedule` endpoint), they are added to a queue managed by the Scheduler Device associated with that process. The scheduler ensures that messages are processed one after another in a deterministic order, typically based on arrival time and potentially other factors like message nonces or timestamps (depending on the specific scheduler implementation details).
+
+The [`~process@1.0`](../resources/source-code/dev_process.md) device interacts with its configured Scheduler Device (which defaults to `~scheduler@1.0`) primarily through the `next` key to retrieve the next message to be executed.
+
+## Slot System
+
+Slots are a fundamental concept in the `~scheduler@1.0` device, providing a structured mechanism for organizing and sequencing computation.
+
+* **Sequential Ordering:** Slots act as numbered containers (starting at 0) that hold specific messages or tasks to be processed in a deterministic order.
+* **State Tracking:** The `at-slot` key in a process's state (or a similar internal field like `current-slot` within the scheduler itself) tracks execution progress, indicating which messages have been processed and which are pending. The `slot` function can be used to query this.
+* **Assignment Storage:** Each slot contains an "assignment" - the cryptographically verified message waiting to be executed. These assignments are retrieved using the `schedule` function or internally via `next`.
+* **Schedule Organization:** The collection of all slots for a process forms its "schedule".
+* **Application Scenarios:**
+ * **Scheduling Messages:** When a message is posted to a process (e.g., via `register`), it's assigned to the next available slot.
+ * **Status Monitoring:** Clients can query a process's current slot (via the `slot` function) to check progress.
+ * **Task Retrieval:** Processes find their next task by requesting the next assignment via the `next` function, which implicitly uses the next slot number based on the current state.
+ * **Distributed Consistency:** Slots ensure deterministic execution order across nodes, crucial for maintaining consistency in AO.
+
+This slotting mechanism is central to AO processes built on HyperBEAM, allowing for deterministic, verifiable computation.
+
+## Key Functions (Keys)
+
+These keys are typically accessed via the [`~process@1.0`](../resources/source-code/dev_process.md) device, which delegates the calls to its configured scheduler.
+
+* **`schedule` (Handler for `GET /~process@1.0/schedule`)**
+ * **Action:** Retrieves the list of pending assignments (messages) for the process. May support cursor-based traversal for long schedules.
+ * **Response:** A message map containing the assignments, often keyed by slot number or message ID.
+* **`register` (Handler for `POST /~process@1.0/schedule`)**
+ * **Action:** Adds/registers a new message to the process's schedule. If this is the first message for a process, it might initialize the scheduler state.
+ * **Request Body:** The message to schedule.
+ * **Response:** Confirmation, potentially including the assigned slot or message ID.
+* **`slot` (Handler for `GET /~process@1.0/slot`)**
+ * **Action:** Queries the current or a specific slot number within the process's schedule.
+ * **Response:** Information about the requested slot, such as the current highest slot number.
+* **`status` (Handler for `GET /~process@1.0/status`)**
+ * **Action:** Retrieves status information about the scheduler for the process.
+ * **Response:** A status message.
+* **`next` (Internal Key used by [`~process@1.0`](../resources/source-code/dev_process.md))**
+ * **Action:** Retrieves the next assignment message from the schedule based on the process's current `at-slot` state.
+ * **State Management:** Requires the current process state (`Msg1`) containing the `at-slot` key.
+ * **Response:** `{ok, #{ "body" => <Assignment>, "state" => <UpdatedProcessState> }}` or `{error, Reason}` if no next assignment is found.
+ * **Caching & Lookahead:** The implementation uses internal caching (`dev_scheduler_cache`, `priv/assignments`) and potentially background lookahead workers to optimize fetching subsequent assignments.
+* **`init` (Internal Key)**
+ * **Action:** Initializes the scheduler state for a process, often called when the process itself is initialized.
+* **`checkpoint` (Internal Key)**
+ * **Action:** Triggers the scheduler to potentially persist its current state or perform other checkpointing operations.
+
+## Interaction with Other Components
+
+* **[`~process@1.0`](../resources/source-code/dev_process.md):** The primary user of the scheduler, calling `next` to drive process execution.
+* **[`~push@1.0`](../resources/source-code/dev_push.md):** Often used to add messages to the schedule via `POST /schedule`.
+* **`dev_scheduler_cache`:** Internal module used for caching assignments locally on the node to reduce latency.
+* **Scheduling Unit (SU):** Schedulers may interact with external entities (like Arweave gateways or dedicated SU nodes) to fetch or commit schedules, although `~scheduler@1.0` aims for a simpler, often node-local or SU-client model.
+
+`~scheduler@1.0` provides the fundamental mechanism for ordered, sequential execution within the potentially asynchronous and parallel environment of AO.
+
+[scheduler module](../resources/source-code/dev_scheduler.md)
diff --git a/docs/devices/wasm64-at-1-0.md b/docs/devices/wasm64-at-1-0.md
new file mode 100644
index 000000000..492d86e40
--- /dev/null
+++ b/docs/devices/wasm64-at-1-0.md
@@ -0,0 +1,63 @@
+# Device: ~wasm64@1.0
+
+## Overview
+
+The [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device enables the execution of 64-bit WebAssembly (WASM) code within the HyperBEAM environment. It provides a sandboxed environment for running compiled code from various languages (like Rust, C++, Go) that target WASM.
+
+## Core Concept: WASM Execution
+
+This device allows AO processes to perform complex computations defined in WASM modules, which can be written in languages like Rust, C++, C, Go, etc., and compiled to WASM.
+
+The device manages the lifecycle of a WASM instance associated with the process state.
+
+## Key Functions (Keys)
+
+These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process.
+
+* **`init`**
+ * **Action:** Initializes the WASM environment for the process. It locates the WASM image (binary), starts a WAMR instance, and stores the instance handle and helper functions (for reading/writing WASM memory) in the process's private state (`priv/...`).
+ * **Inputs (Expected in Process Definition or `init` Message):**
+ * `[Prefix]/image`: The Arweave Transaction ID of the WASM binary, or the WASM binary itself, or a message containing the WASM binary in its body.
+ * `[Prefix]/Mode`: (Optional) Specifies execution mode (`WASM` (default) or `AOT` if allowed by node config).
+ * **Outputs (Stored in `priv/`):**
+ * `[Prefix]/instance`: The handle to the running WAMR instance.
+ * `[Prefix]/write`: A function to write data into the WASM instance's memory.
+ * `[Prefix]/read`: A function to read data from the WASM instance's memory.
+ * `[Prefix]/import-resolver`: A function used to handle calls *from* the WASM module back *to* the AO environment (imports).
+* **`compute`**
+ * **Action:** Executes a function within the initialized WASM instance. It retrieves the target function name and parameters from the incoming message or process definition and calls the WASM instance via `hb_beamr`.
+ * **Inputs (Expected in Process State or Incoming Message):**
+ * `priv/[Prefix]/instance`: The handle obtained during `init`.
+ * `function` or `body/function`: The name of the WASM function to call.
+ * `parameters` or `body/parameters`: A list of parameters to pass to the WASM function.
+ * **Outputs (Stored in `results/`):**
+ * `results/[Prefix]/type`: The result type returned by the WASM function.
+ * `results/[Prefix]/output`: The actual result value returned by the WASM function.
+* **`import`**
+ * **Action:** Handles calls originating *from* the WASM module (imports). The default implementation (`default_import_resolver`) resolves these calls by treating them as sub-calls within the AO environment, allowing WASM code to invoke other AO device functions or access process state via the `hb_ao:resolve` mechanism.
+ * **Inputs (Provided by `hb_beamr`):** Module name, function name, arguments, signature.
+ * **Response:** Returns the result of the resolved AO call back to the WASM instance.
+* **`snapshot`**
+ * **Action:** Captures the current memory state of the running WASM instance. This is used for checkpointing and restoring process state.
+ * **Inputs:** `priv/[Prefix]/instance`.
+ * **Outputs:** A message containing the raw binary snapshot of the WASM memory state, typically tagged with `[Prefix]/State`.
+* **`normalize` (Internal Helper)**
+ * **Action:** Ensures a consistent state representation for computation, primarily by loading a WASM instance from a snapshot (`[Prefix]/State`) if a live instance (`priv/[Prefix]/instance`) isn't already present. This allows resuming execution from a cached state.
+* **`terminate`**
+ * **Action:** Stops and cleans up the running WASM instance associated with the process.
+ * **Inputs:** `priv/[Prefix]/instance`.
+
+## Usage within `dev_stack`
+
+The `~wasm64@1.0` device is almost always used as part of an execution stack configured in the Process Definition Message and managed by [`dev_stack`](../resources/source-code/dev_stack.md). [`dev_stack`](../resources/source-code/dev_stack.md) ensures that `init` is called on the first pass, `compute` on subsequent passes, and potentially `snapshot` or `terminate` as needed.
+
+```text
+# Example Process Definition Snippet
+Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md)
+Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "wasm64@1.0"
+WASM-Image: <wasm-image-tx-id>
+```
+
+This setup allows AO processes to leverage the computational power and language flexibility offered by WebAssembly in a decentralized, verifiable manner.
+
+[wasm module](../resources/source-code/dev_wasm.md)
diff --git a/docs/index.md b/docs/index.md
index 65c13e39f..8b1378917 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,77 +1 @@
-
-
-
-
-
-
-
-
-
-
-
-
hyperBEAM.
-
DOCUMENTATION
-
-
-
-
-!!! warning "Platform Support"
- This documentation is currently written specifically for **Ubuntu 22.04**. Support for macOS and other platforms will be added in future updates.
-
-## Overview
-
-HyperBEAM is a client implementation of the AO-Core protocol, written in Erlang. It enables a decentralized computing platform where programs run as independent processes, communicate via asynchronous message passing, and operate across a distributed network of nodes.
-
-For detailed technical information about HyperBEAM's architecture and functionality, see the [HyperBEAM Overview](hyperbeam/index.md).
-
-### What is AO-Core?
-
-AO-Core is a protocol built to enable decentralized computations, offering a series of universal primitives. Instead of enforcing a single, monolithic architecture, AO-Core provides a framework into which any number of different computational models, encapsulated as primitive and composable devices, can be attached.
-
-AO-Core's protocol is built upon the following primitives:
-
-- **Hashpaths**: A mechanism for referencing locations in a program's state-space prior to execution
-- **Unified data structure**: For representing program states as HTTP documents
-- **Attestation protocol**: For expressing attestations of states found at particular hashpaths
-- **Meta-VM**: Allowing various state transformation programs (virtual machines and computational models, expressed in the form of devices) to be executed inside the AO-Core protocol
-
-
-
-## Quick Start Guide
-
-To get started with HyperBEAM:
-
-1. [Check system requirements](getting-started/requirements.md)
-2. [Install dependencies](getting-started/installation/index.md)
-3. [Set up HyperBEAM](hyperbeam/setup.md)
-4. [Configure the Compute Unit](compute-unit/setup.md)
-5. [Verify your installation](guides/integration.md)
-
-## Documentation Structure
-
-This documentation is organized into the following sections:
-
-- **[Getting Started](getting-started/index.md)**: System requirements and installation instructions
-- **[HyperBEAM](hyperbeam/index.md)**: Core setup, configuration, and testing
-- **[Compute Unit](compute-unit/index.md)**: Setup and configuration of the CU component
-- **[Guides](guides/index.md)**: Step-by-step tutorials and walkthroughs
-- **[Reference](reference/index.md)**: API documentation and troubleshooting
-
-## Community and Support
-
-- **GitHub HyperBEAM**: [permaweb/HyperBEAM](https://github.com/permaweb/HyperBEAM)
-- **Github Local CU**: [permaweb/local-cu](https://github.com/permaweb/local-cu)
-- **Discord**: [Join the community](https://discord.gg/V3yjzrBxPM)
-- **Issues**: [File a bug report](https://github.com/permaweb/HyperBEAM/issues)
-
-## License
-
-HyperBEAM is open-source software licensed under the [MIT License](https://github.com/permaweb/HyperBEAM/blob/main/LICENSE.md).
diff --git a/docs/introduction/ao-devices.md b/docs/introduction/ao-devices.md
new file mode 100644
index 000000000..818e0d42d
--- /dev/null
+++ b/docs/introduction/ao-devices.md
@@ -0,0 +1,60 @@
+# AO Devices
+
+In AO-Core and its implementation HyperBEAM, **Devices** are modular components responsible for processing and interpreting [Messages](./what-is-ao-core.md#core-concepts). They define the specific logic for how computations are performed, data is handled, or interactions occur within the AO ecosystem.
+
+Think of Devices as specialized engines or services that can be plugged into the AO framework. This modularity is key to AO's flexibility and extensibility.
+
+## Purpose of Devices
+
+* **Define Computation:** Devices dictate *how* a message's instructions are executed. One device might run WASM code, another might manage process state, and yet another might simply relay data.
+* **Enable Specialization:** Nodes running HyperBEAM can choose which Devices to support, allowing them to specialize in certain tasks (e.g., high-compute tasks, storage-focused tasks, secure TEE operations).
+* **Promote Modularity:** New functionalities can be added to AO by creating new Devices, without altering the core protocol.
+* **Distribute Workload:** Different Devices can handle different parts of a complex task, enabling parallel processing and efficient resource utilization across the network.
+
+## Familiar Examples
+
+HyperBEAM includes many preloaded devices that provide core functionality. Some key examples include:
+
+* **[`~meta@1.0`](../devices/meta-at-1-0.md):** Configures the node itself (hardware specs, supported devices, payment info).
+* **[`~process@1.0`](../devices/process-at-1-0.md):** Manages persistent, shared computational states (like traditional smart contracts, but more flexible).
+* **[`~scheduler@1.0`](../devices/scheduler-at-1-0.md):** Handles the ordering and execution of messages within a process.
+* **[`~wasm64@1.0`](../devices/wasm64-at-1-0.md):** Executes WebAssembly (WASM) code, allowing for complex computations written in languages like Rust, C++, etc.
+* **[`~lua@5.3a`](../devices/lua-at-5-3a.md):** Executes Lua scripts.
+* **[`~relay@1.0`](../devices/relay-at-1-0.md):** Forwards messages between AO nodes or to external HTTP endpoints.
+* **[`~json@1.0`](../devices/json-at-1-0.md):** Provides access to JSON data structures using HyperPATHs.
+* **[`~message@1.0`](../devices/message-at-1-0.md):** Manages message state and processing.
+* **[`~patch@1.0`](../guides/exposing-process-state.md):** Applies state updates directly to a process, often used for migrating or managing process data.
+
+## Beyond the Basics
+
+Devices aren't limited to just computation or state management. They can represent more abstract concepts:
+
+* **Security Devices ([`~snp@1.0`](../resources/source-code/dev_snp.md), [`dev_codec_httpsig`](../resources/source-code/dev_codec_httpsig.md)):** Handle tasks related to Trusted Execution Environments (TEEs) or message signing, adding layers of security and verification.
+* **Payment/Access Control Devices ([`~p4@1.0`](../resources/source-code/dev_p4.md), [`~faff@1.0`](../resources/source-code/dev_faff.md)):** Manage metering, billing, or access control for node services.
+* **Workflow/Utility Devices ([`dev_cron`](../resources/source-code/dev_cron.md), [`dev_stack`](../resources/source-code/dev_stack.md), [`dev_monitor`](../resources/source-code/dev_monitor.md)):** Coordinate complex execution flows, schedule tasks, or monitor process activity.
+
+## Using Devices
+
+Devices are typically invoked via [HyperPATHs](./pathing-in-ao-core.md). The path specifies which Device should interpret the subsequent parts of the path or the request body.
+
+```
+# Example: Execute the 'now' key on the process device for a specific process
+/~process@1.0/now
+
+# Example: Relay a GET request via the relay device
+/~relay@1.0/call?method=GET&path=https://example.com
+```
+
+The specific functions or 'keys' available for each Device are documented individually. See the [Devices section](../devices/index.md) for details on specific built-in devices.
+
+## The Potential of Devices
+
+The modular nature of AO Devices opens up vast possibilities for future expansion and innovation. The current set of preloaded and community devices is just the beginning. As the AO ecosystem evolves, we can anticipate the development of new devices catering to increasingly specialized needs:
+
+* **Specialized Hardware Integration:** Devices could be created to interface directly with specialized hardware accelerators like GPUs (for AI/ML tasks such as running large language models), TPUs, or FPGAs, allowing AO processes to leverage high-performance computing resources securely and verifiably.
+* **Advanced Cryptography:** New devices could implement cutting-edge cryptographic techniques, such as zero-knowledge proofs (ZKPs) or fully homomorphic encryption (FHE), enabling enhanced privacy and complex computations on encrypted data.
+* **Cross-Chain & Off-Chain Bridges:** Devices could act as secure bridges to other blockchain networks or traditional Web2 APIs, facilitating seamless interoperability and data exchange between AO and the wider digital world.
+* **AI/ML Specific Devices:** Beyond raw GPU access, specialized devices could offer higher-level AI/ML functionalities, like optimized model inference engines or distributed training frameworks.
+* **Domain-Specific Logic:** Communities or organizations could develop devices tailored to specific industries or use cases, such as decentralized finance (DeFi) primitives, scientific computing libraries, or decentralized identity management systems.
+
+The Device framework ensures that AO can adapt and grow, incorporating new technologies and computational paradigms without requiring fundamental changes to the core protocol. This extensibility is key to AO's long-term vision of becoming a truly global, decentralized computer.
diff --git a/docs/introduction/pathing-in-ao-core.md b/docs/introduction/pathing-in-ao-core.md
new file mode 100644
index 000000000..2c1a82be4
--- /dev/null
+++ b/docs/introduction/pathing-in-ao-core.md
@@ -0,0 +1,132 @@
+# Pathing in AO-Core
+
+## Overview
+
+Understanding how to construct and interpret paths in AO-Core is fundamental to working with HyperBEAM. This guide explains the structure and components of AO-Core paths, enabling you to effectively interact with processes and access their data.
+
+## HyperPATH Structure
+
+Let's examine a typical HyperBEAM endpoint piece-by-piece:
+
+```
+https://router-1.forward.computer/~process@1.0/now
+```
+
+### Node URL (`router-1.forward.computer`)
+
+The HTTP response from this node includes a signature from the host's key. By accessing the [`~snp@1.0`](../resources/source-code/dev_snp.md) device, you can verify that the node is running in a genuine Trusted Execution Environment (TEE), ensuring computation integrity. You can replace `router-1.forward.computer` with any HyperBEAM TEE node operated by any party while maintaining trustless guarantees.
+
+### Process Path (`/~process@1.0`)
+
+Every path in AO-Core represents a program. Think of the URL bar as a Unix-style command-line interface, providing access to AO's trustless and verifiable compute. Each path component (between `/` characters) represents a step in the computation. In this example, we instruct the AO-Core node to:
+
+1. Load a specific message from its caches (local, another node, or Arweave)
+2. Interpret it with the [`~process@1.0`](../devices/process-at-1-0.md) device
+3. The process device implements a shared computing environment with consistent state between users
+
+### State Access (`/now` or `/compute`)
+
+Devices in AO-Core expose keys accessible via path components. Each key executes a function on the device:
+
+- `now`: Calculates real-time process state
+- `compute`: Serves the latest known state (faster than checking for new messages)
+
+Under the surface, these keys represent AO-Core messages. As we progress through the path, AO-Core applies each message to the existing state. You can access the full process state by visiting:
+```
+/~process@1.0/now
+```
+
+### State Navigation
+
+You can browse through sub-messages and data fields by accessing them as keys. For example, if a process stores its interaction count in a field named `cache`, you can access it like this:
+```
+/~process@1.0/compute/cache
+```
+This shows the 'cache' of your process. Each response is:
+
+- A message with a signature attesting to its correctness
+- A hashpath describing its generation
+- Transferable to other AO-Core nodes for uninterrupted execution
+
+### Query Parameters and Type Casting
+
+Beyond path segments, HyperBEAM URLs can include query parameters that utilize a special type casting syntax. This allows specifying the desired data type for a parameter directly within the URL using the format `key+type=value`.
+
+- **Syntax**: A `+` symbol separates the parameter key from its intended type (e.g., `count+integer=42`, `items+list="apple",7`).
+- **Mechanism**: The HyperBEAM node identifies the `+type` suffix (e.g., `+integer`, `+list`, `+map`, `+float`, `+atom`, `+resolve`). It then uses internal functions ([`hb_singleton:maybe_typed`](../resources/source-code/hb_singleton.md) and [`dev_codec_structured:decode_value`](../resources/source-code/dev_codec_structured.md)) to decode and cast the provided value string into the corresponding Erlang data type before incorporating it into the message.
+- **Supported Types**: Common types include `integer`, `float`, `list`, `map`, `atom`, `binary` (often implicit), and `resolve` (for path resolution). List values often follow the [HTTP Structured Fields format (RFC 8941)](https://www.rfc-editor.org/rfc/rfc8941.html).
+
+This powerful feature enables the expression of complex data structures directly in URLs.
+
+## Examples
+
+The following examples illustrate using HyperPATH with various AO-Core processes and devices. While these cover a few specific use cases, HyperBEAM's extensible nature allows interaction with any device or process via HyperPATH. For a deeper understanding, we encourage exploring the [source code](https://github.com/permaweb/hyperbeam) and experimenting with different paths.
+
+### Example 1: Accessing Full Process State
+
+To get the complete, real-time state of a process identified by `<process-id>`, use the `/now` path component with the [`~process@1.0`](../devices/process-at-1-0.md) device:
+
+```
+GET /<process-id>~process@1.0/now
+```
+
+This instructs the AO-Core node to load the process and execute the `now` function on the [`~process@1.0`](../devices/process-at-1-0.md) device.
+
+### Example 2: Navigating to Specific Process Data
+
+If a process maintains its state in a map and you want to access a specific field, like `cache`, using the faster `/compute` endpoint:
+
+```
+GET /~process@1.0/compute/cache
+```
+
+This accesses the `compute` key on the [`~process@1.0`](../devices/process-at-1-0.md) device and then navigates to the `cache` key within the resulting state map. Using this path, you will see the latest 'cache' of your process (the number of interactions it has received). Every piece of relevant information about your process can be accessed similarly, effectively providing a native API.
+
+(Note: This represents direct navigation within the process state structure. For accessing data specifically published via the `~patch@1.0` device, see the documentation on [Exposing Process State](../guides/exposing-process-state.md), which typically uses the `/cache/` path.)
+
+### Example 3: Basic `~message@1.0` Usage
+
+Here's a simple example of using [`~message@1.0`](../devices/message-at-1-0.md) to create a message and retrieve a value:
+
+```
+GET /~message@1.0&greeting="Hello"&count+integer=42/count
+```
+
+1. **Base:** `/` - The base URL of the HyperBEAM node.
+2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md)
+3. **Query Params:** `greeting="Hello"` (binary) and `count+integer=42` (integer), forming the message `#{ <<"greeting">> => <<"Hello">>, <<"count">> => 42 }`.
+4. **Path:** `/count` tells `~message@1.0` to retrieve the value associated with the key `count`.
+
+**Response:** The integer `42`.
+
+### Example 4: Using the `~message@1.0` Device with Type Casting
+
+The [`~message@1.0`](../devices/message-at-1-0.md) device can be used to construct and query transient messages, utilizing type casting in query parameters.
+
+Consider the following URL:
+
+```
+GET /~message@1.0&name="Alice"&age+integer=30&items+list="apple",1,"banana"&config+map=key1="val1";key2=true/[PATH]
+```
+
+HyperBEAM processes this as follows:
+
+1. **Base:** `/` - The base URL of the HyperBEAM node.
+2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md)
+3. **Query Parameters (with type casting):**
+ * `name="Alice"` -> `#{ <<"name">> => <<"Alice">> }` (binary)
+ * `age+integer=30` -> `#{ <<"age">> => 30 }` (integer)
+ * `items+list="apple",1,"banana"` -> `#{ <<"items">> => [<<"apple">>, 1, <<"banana">>] }` (list)
+ * `config+map=key1="val1";key2=true` -> `#{ <<"config">> => #{<<"key1">> => <<"val1">>, <<"key2">> => true} }` (map)
+4. **Initial Message Map:** A combination of the above key-value pairs.
+5. **Path Evaluation:**
+ * If `[PATH]` is `/items/1`, the response is the integer `1`.
+ * If `[PATH]` is `/config/key1`, the response is the binary `<<"val1">>`.
+
+## Best Practices
+
+1. Always verify cryptographic signatures on responses
+2. Use appropriate caching strategies for frequently accessed data
+3. Implement proper error handling for network requests
+4. Consider rate limits and performance implications
+5. Keep sensitive data secure and use appropriate authentication methods
\ No newline at end of file
diff --git a/docs/introduction/what-is-ao-core.md b/docs/introduction/what-is-ao-core.md
new file mode 100644
index 000000000..11e59a6bc
--- /dev/null
+++ b/docs/introduction/what-is-ao-core.md
@@ -0,0 +1,22 @@
+# What is AO-Core?
+
+AO-Core is the foundational protocol underpinning the [AO Computer](https://ao.arweave.net). It defines a minimal, generalized model for decentralized computation built around standard web technologies like HTTP. Think of it as a way to interpret the Arweave permaweb not just as static storage, but as a dynamic, programmable, and infinitely scalable computing environment.
+
+## Core Concepts
+
+AO-Core revolves around three fundamental components:
+
+1. **Messages:** The smallest units of data and computation. Messages can be simple data blobs or maps of named functions. They are the primary means of communication and triggering execution within the system. Messages are cryptographically linked, forming a verifiable computation graph.
+2. **Devices:** Modules responsible for interpreting and processing messages. Each device defines specific logic for how messages are handled (e.g., executing WASM, storing data, relaying information). This modular design allows nodes to specialize and the system to be highly extensible.
+3. **Paths:** Structures that link messages over time, creating a verifiable history of computations. Paths allow users to navigate the computation graph and access specific states or results. They leverage `HashPaths`, cryptographic fingerprints representing the sequence of operations leading to a specific message state, ensuring traceability and integrity.
+
+## Key Principles
+
+* **Minimalism:** AO-Core provides the simplest possible representation of data and computation, avoiding prescriptive consensus mechanisms or specific VM requirements.
+* **HTTP Native:** Designed for compatibility with HTTP protocols, making it accessible via standard web tools and infrastructure.
+* **Scalability:** By allowing parallel message processing and modular device execution, AO-Core enables hyper-parallel computing, overcoming the limitations of traditional sequential blockchains.
+* **Permissionlessness & Trustlessness:** While AO-Core itself is minimal, it provides the framework upon which higher-level protocols like AO can build systems that allow anyone to participate (`permissionlessness`) without needing to trust intermediaries (`trustlessness`). Users can choose their desired security and performance trade-offs.
+
+AO-Core transforms the permanent data storage of Arweave into a global, shared computation space, enabling the creation of complex, autonomous, and scalable decentralized applications.
+
+
\ No newline at end of file
diff --git a/docs/introduction/what-is-hyperbeam.md b/docs/introduction/what-is-hyperbeam.md
new file mode 100644
index 000000000..3d9f669c4
--- /dev/null
+++ b/docs/introduction/what-is-hyperbeam.md
@@ -0,0 +1,40 @@
+# What is HyperBEAM?
+
+HyperBEAM is the primary, production-ready implementation of the [AO-Core protocol](./what-is-ao-core.md), built on the robust Erlang/OTP framework. It serves as a decentralized operating system, powering the AO Computer—a scalable, trust-minimized, distributed supercomputer built on permanent storage. HyperBEAM provides the runtime environment and essential services to execute AO-Core computations across a network of distributed nodes.
+
+## Why HyperBEAM Matters
+
+HyperBEAM transforms the abstract concepts of AO-Core—such as [Messages](./what-is-ao-core.md#core-concepts), [Devices](./what-is-ao-core.md#core-concepts), and [Paths](./what-is-ao-core.md#core-concepts)—into a concrete, operational system. Here's why it's pivotal to the AO ecosystem:
+
+- **Modularity via Devices:** HyperBEAM introduces a uniquely modular architecture centered around [Devices](./ao-devices.md). These pluggable components define specific computational logic (like running WASM, managing state, or relaying data), allowing for unprecedented flexibility, specialization, and extensibility in a decentralized system.
+- **Decentralized OS:** It equips nodes with the infrastructure to join the AO network, manage resources, execute computations, and communicate seamlessly.
+- **Erlang/OTP Powerhouse:** Leveraging the BEAM virtual machine, HyperBEAM inherits Erlang's concurrency, fault tolerance, and scalability—perfect for distributed systems with lightweight processes and message passing.
+- **Hardware Independence:** It abstracts underlying hardware, allowing diverse nodes to contribute resources without compatibility issues.
+- **Node Coordination:** It governs how nodes join the network, offer services through specific Devices, and interact with one another.
+- **Verifiable Computation:** Through hashpaths and the Converge Protocol, HyperBEAM ensures computation results are cryptographically verified and trustworthy.
+
+In essence, HyperBEAM is the engine that drives the AO Computer, enabling a vision of decentralized, verifiable computing at scale.
+
+## Core Components & Features
+
+- **Pluggable Devices:** The heart of HyperBEAM's extensibility. It includes essential built-in devices like [`~meta`](../devices/meta-at-1-0.md), [`~relay`](../devices/relay-at-1-0.md), [`~process`](../devices/process-at-1-0.md), [`~scheduler`](../devices/scheduler-at-1-0.md), and [`~wasm64`](../devices/wasm64-at-1-0.md) for core functionality, but the system is designed for easy addition of new custom devices.
+- **Message System:** Everything in HyperBEAM is a "Message"—a map of named functions or binary data that can be processed, transformed, and cryptographically verified.
+- **HTTP Interface:** Nodes expose an HTTP server for interaction via standard web requests and HyperPATHs, structured URLs that represent computation paths.
+- **Modularity:** Its design supports easy extension, allowing new devices and functionalities to be added effortlessly.
+
+## Architecture
+
+* **Initialization Flow:** When a HyperBEAM node starts, it initializes the name service, scheduler registry, timestamp server, and HTTP server, establishing core services for process management, timing, communication, and storage.
+* **Compute Model:** Computation follows the pattern `Message1(Message2) => Message3`, where messages are resolved through their devices and [paths](./pathing-in-ao-core.md). The integrity and history of these computations are ensured by **hashpaths**, which serve as a cryptographic audit trail.
+* **Scheduler System:** The scheduler component manages execution order using ["slots"](../devices/scheduler-at-1-0.md#slot-system) — sequential positions that guarantee deterministic computation.
+* **Process Slots:** Each process has numbered slots starting from 0 that track message execution order, ensuring consistent computation even across distributed nodes.
+
+## HTTP API and Pathing
+
+HyperBEAM exposes a powerful HTTP API that allows for interacting with processes and accessing data through structured URL patterns. We call URLs that represent computation paths "HyperPATHs". The URL bar effectively functions as a command-line interface for AO's trustless and verifiable compute.
+
+For a comprehensive guide on constructing and interpreting paths in AO-Core, including detailed examples and best practices, see [Pathing in AO-Core](./pathing-in-ao-core.md).
+
+In essence, HyperBEAM is the engine that powers the AO Computer, enabling the vision of a scalable, trust-minimized, decentralized supercomputer built on permanent storage.
+
+*See also: [HyperBEAM GitHub Repository](https://github.com/permaweb/HyperBEAM)*
diff --git a/docs/js/custom-header.js b/docs/js/custom-header.js
new file mode 100644
index 000000000..0f84e44b1
--- /dev/null
+++ b/docs/js/custom-header.js
@@ -0,0 +1,75 @@
+(function () {
+ let currentPath = window.location.pathname;
+
+ function updateHeaderAndMainClass() {
+ const header = document.querySelector(".md-header");
+ const main = document.querySelector("main");
+ const tabs = document.querySelector(".md-tabs");
+
+ const segments = window.location.pathname.split("/").filter(Boolean);
+ const arweavePath = segments.length === 1 && segments[0].length === 43;
+ const isHomepage = segments.length === 0 || arweavePath;
+
+ if (!header || !main) return;
+
+ if (isHomepage) {
+ header.classList.add("custom-homepage-header");
+ main.classList.add("custom-homepage-main");
+ main.classList.remove("md-main");
+ if (tabs) tabs.style.display = "none";
+ } else {
+ header.classList.remove("custom-homepage-header");
+ main.classList.remove("custom-homepage-main");
+ main.classList.add("md-main");
+ if (tabs) tabs.style.display = "";
+ }
+ }
+
+ // Initial run
+ updateHeaderAndMainClass();
+
+ // Watch for URL changes
+ const observer = new MutationObserver(() => {
+ if (window.location.pathname !== currentPath) {
+ currentPath = window.location.pathname;
+ updateHeaderAndMainClass();
+ }
+ });
+
+ observer.observe(document.body, { childList: true, subtree: true });
+
+ window.addEventListener("popstate", updateHeaderAndMainClass);
+})();
+
+document.addEventListener("DOMContentLoaded", function () {
+ function updateMainClass() {
+ const mainElement = document.querySelector("main");
+ const isHomepage = window.location.pathname === "/";
+
+ // Apply the homepage class if on the homepage, else remove it
+ if (isHomepage) {
+ mainElement.classList.add("custom-homepage-main");
+ mainElement.classList.remove("md-main");
+ } else {
+ mainElement.classList.add("md-main");
+ mainElement.classList.remove("custom-homepage-main");
+ }
+ }
+
+ // Initial update on page load
+ updateMainClass();
+
+ // Listen for link clicks and update the class after navigation
+ const links = document.querySelectorAll("a");
+ links.forEach((link) => {
+ link.addEventListener("click", function (event) {
+ // Small delay to ensure the page has started loading
+ setTimeout(updateMainClass, 0);
+ });
+ });
+
+ // Listen for popstate events (back/forward navigation)
+ window.addEventListener("popstate", function () {
+ setTimeout(updateMainClass, 500);
+ });
+});
diff --git a/docs/js/header-scroll.js b/docs/js/header-scroll.js
new file mode 100644
index 000000000..1a27e8e35
--- /dev/null
+++ b/docs/js/header-scroll.js
@@ -0,0 +1,91 @@
+document.addEventListener('DOMContentLoaded', function() {
+ const header = document.querySelector('.md-header');
+ const paddingTargetElement = document.querySelector('.md-content'); // Element for padding adjustments
+ const contentVisibilityTargetElement = document.querySelector('.md-main__inner.md-grid'); // Element to hide/show with transition
+
+ if (!header || !paddingTargetElement || !contentVisibilityTargetElement) {
+ console.error('Header scroll: Required elements (.md-header, .md-content, or .md-main__inner.md-grid) not found.');
+ return;
+ }
+
+ const HIDING_CLASS = 'content--initializing';
+
+ // Function to inject CSS for transition and initial hiding
+ function injectTransitionStyles() {
+ const styleId = 'md-content-transition-style';
+ if (document.getElementById(styleId)) {
+ return; // Style already added
+ }
+ const styleElement = document.createElement('style');
+ styleElement.id = styleId;
+ styleElement.textContent = `
+ .md-main__inner.md-grid { /* Style for the element to be shown with transition */
+ opacity: 0;
+ transition: opacity 200ms ease-in-out; /* Tiny transition */
+ }
+ .${HIDING_CLASS} { /* Class to initially hide the content */
+ display: none !important;
+ opacity: 0 !important; /* Ensure opacity is 0 when hidden */
+ }
+ `;
+ document.head.appendChild(styleElement);
+ }
+
+ // Initially hide the content and set up for transition
+ injectTransitionStyles();
+ contentVisibilityTargetElement.classList.add(HIDING_CLASS);
+
+ let headerHeight = 0;
+
+ // Function to update paddings based on header state
+ function updatePaddings() {
+ const currentHeaderHeight = header.offsetHeight;
+ if (currentHeaderHeight > 0) {
+ headerHeight = currentHeaderHeight;
+ }
+
+ if (header.classList.contains('header-hidden')) {
+ if (paddingTargetElement) paddingTargetElement.style.paddingTop = '75px';
+ document.documentElement.style.scrollPaddingTop = '0';
+ } else {
+ if (paddingTargetElement) paddingTargetElement.style.paddingTop = headerHeight + 'px';
+ document.documentElement.style.scrollPaddingTop = headerHeight + 'px';
+ }
+ }
+
+ // Function to initialize header state and reveal content
+ function initializeHeaderState() {
+ headerHeight = header.offsetHeight;
+ updatePaddings(); // Apply padding to paddingTargetElement
+
+ // Make content displayable (it's still opacity 0 due to injected styles)
+ contentVisibilityTargetElement.classList.remove(HIDING_CLASS);
+
+ // Trigger the opacity transition to fade in the content
+ requestAnimationFrame(() => {
+ contentVisibilityTargetElement.style.opacity = 1;
+ });
+ }
+
+ window.addEventListener('load', initializeHeaderState);
+
+ window.addEventListener('scroll', function() {
+ const scrollTop = window.scrollY || document.documentElement.scrollTop;
+
+ if (scrollTop > 0) { // When scrolling down / header should be hidden
+ if (!header.classList.contains('header-hidden')) {
+ header.classList.add('header-hidden');
+ updatePaddings();
+ }
+ } else { // When at the top / header should be visible
+ if (header.classList.contains('header-hidden')) {
+ header.classList.remove('header-hidden');
+ updatePaddings();
+ }
+ }
+ });
+
+ window.addEventListener('resize', function() {
+ updatePaddings();
+ });
+});
\ No newline at end of file
diff --git a/docs/js/parallax.js b/docs/js/parallax.js
new file mode 100644
index 000000000..a09767c5f
--- /dev/null
+++ b/docs/js/parallax.js
@@ -0,0 +1,39 @@
+document.addEventListener("DOMContentLoaded", () => {
+ const header = document.querySelector(".custom-homepage-header");
+ const scrollContainer = document.querySelector(".custom-homepage-main");
+
+ if (!header || !scrollContainer)
+ return console.log("Missing header or scroll container");
+
+ let needsUpdate = false;
+
+ function updateHeaderFade() {
+ needsUpdate = false;
+ const scrollTop = scrollContainer.scrollTop;
+
+    const fadeStart = window.innerHeight * 1.35; // fade starts at 135% of viewport height
+    const fadeEnd = window.innerHeight * 1.45; // fade ends at 145% of viewport height
+
+ let opacity;
+ if (scrollTop <= fadeStart) {
+ opacity = 0;
+ } else if (scrollTop >= fadeEnd) {
+ opacity = 1;
+ } else {
+ opacity = (scrollTop - fadeStart) / (fadeEnd - fadeStart);
+ }
+
+ header.style.backgroundColor = `rgba(255, 255, 255, ${opacity})`;
+ header.style.filter = `invert(${1 - opacity})`;
+ }
+
+ scrollContainer.addEventListener("scroll", () => {
+ if (!needsUpdate) {
+ needsUpdate = true;
+ requestAnimationFrame(updateHeaderFade);
+ }
+ });
+
+ window.addEventListener("resize", updateHeaderFade);
+ requestAnimationFrame(updateHeaderFade); // run on load
+});
diff --git a/docs/js/toc-highlight.js b/docs/js/toc-highlight.js
new file mode 100644
index 000000000..1d4e1416d
--- /dev/null
+++ b/docs/js/toc-highlight.js
@@ -0,0 +1,124 @@
+document.addEventListener("DOMContentLoaded", function () {
+ /**
+ * Fixes navigation highlighting in MkDocs Material Theme:
+ * 1. If a list item has both an active label and an active link, remove active from label
+ * 2. If a parent item has active children, remove active from the parent's links
+ * 3. When scroll position is at the top, reactivate the parent navigation item
+ */
+ function fixNavigationHighlighting() {
+ // First fix case where both label and anchor in same item are active
+ document.querySelectorAll(".md-nav__item").forEach(function (item) {
+ const label = item.querySelector("label.md-nav__link--active");
+ const link = item.querySelector("a.md-nav__link--active");
+
+ // If both exist in the same item, keep only the link active
+ if (label && link) {
+ label.classList.remove("md-nav__link--active");
+ }
+ });
+
+ // Check if scroll position is at the top
+ const atTop = window.scrollY === 0;
+
+ // Now fix nested navigation (parent sections shouldn't be active when children are, unless at top)
+ document
+ .querySelectorAll(".md-nav__item--active")
+ .forEach(function (activeItem) {
+ // Check if this active item contains other active items
+ const hasActiveChildren = activeItem.querySelector(
+ ".md-nav__link--active",
+ );
+
+ // console.log("Has Active Children:", hasActiveChildren);
+
+ if (hasActiveChildren && !atTop) {
+ // Remove active class from parent's links
+ const parentLinks = activeItem.querySelectorAll(
+ ":scope > a.md-nav__link--active, :scope > label.md-nav__link--active",
+ );
+ parentLinks.forEach(function (link) {
+ link.classList.remove("md-nav__link--active");
+ });
+ } else if (!hasActiveChildren && atTop) {
+ // Reactivate parent link if at top and no active children
+ const parentLinks = activeItem.querySelectorAll(
+ ":scope > a.md-nav__link, :scope > label.md-nav__link",
+ );
+ parentLinks.forEach(function (link) {
+ link.classList.add("md-nav__link--active");
+ });
+ }
+ });
+ }
+
+ // Initial run
+ fixNavigationHighlighting();
+
+ // Set up a mutation observer to detect changes
+ const observer = new MutationObserver(function (mutations) {
+ let shouldUpdate = false;
+
+ for (const mutation of mutations) {
+ if (
+ mutation.type === "attributes" &&
+ mutation.attributeName === "class" &&
+ (mutation.target.classList.contains("md-nav__link--active") ||
+ mutation.target.classList.contains("md-nav__item--active"))
+ ) {
+ shouldUpdate = true;
+ break;
+ }
+ }
+
+ if (shouldUpdate) {
+ fixNavigationHighlighting();
+ }
+ });
+
+ // Observe all navigation elements
+ document
+ .querySelectorAll(".md-nav__item, .md-nav__link")
+ .forEach(function (el) {
+ observer.observe(el, { attributes: true });
+ });
+
+ // Update on navigation events
+ window.addEventListener("popstate", function () {
+ setTimeout(fixNavigationHighlighting, 100);
+ });
+
+ window.addEventListener("load", fixNavigationHighlighting);
+
+ // Update on scroll with throttling
+ let scrollTimeout;
+ window.addEventListener("scroll", function () {
+ if (!scrollTimeout) {
+ scrollTimeout = setTimeout(function () {
+ fixNavigationHighlighting();
+ scrollTimeout = null;
+ }, 50);
+ }
+ });
+
+ document.addEventListener("click", function (e) {
+ if (e.target.closest(".md-nav__link")) {
+ setTimeout(fixNavigationHighlighting, 50);
+ }
+ });
+
+ // Add click event handling for navigation tabs
+ const tabLinks = document.querySelectorAll('nav.md-tabs .md-tabs__list .md-tabs__item a');
+ tabLinks.forEach(link => {
+ link.addEventListener('click', function(event) {
+ // Basic check if it's an internal link
+ if (link.hostname === window.location.hostname || !link.hostname.length) {
+ console.log('Tab clicked, forcing full reload for:', link.href);
+ console.log('Updating navigation highlighting before navigation');
+ fixNavigationHighlighting();
+ event.preventDefault();
+ event.stopPropagation();
+ window.location.href = link.href;
+ }
+ }, true); // Use capture phase to catch the event before the theme's handler
+ });
+});
diff --git a/docs/js/utc-time.js b/docs/js/utc-time.js
new file mode 100644
index 000000000..13ab1fb74
--- /dev/null
+++ b/docs/js/utc-time.js
@@ -0,0 +1,21 @@
+window.addEventListener("DOMContentLoaded", () => {
+ const timeDiv = document.createElement("div");
+ timeDiv.id = "utc-time";
+ timeDiv.style.cssText = `
+ font-size: clamp(0.4rem, 1.5vw, 0.5rem);
+ user-select: none;
+ `;
+
+ const updateTime = () => {
+ const now = new Date();
+ timeDiv.textContent = "UTC " + now.toISOString().substring(11, 19); // HH:MM:SS
+ };
+
+ updateTime();
+ setInterval(updateTime, 1000); // update every second
+
+ const headerOptions = document.querySelector(".md-header__option");
+ if (headerOptions) {
+ headerOptions.replaceWith(timeDiv); // Replace existing element
+ }
+});
diff --git a/docs/llms-full.txt b/docs/llms-full.txt
new file mode 100644
index 000000000..29c722720
--- /dev/null
+++ b/docs/llms-full.txt
@@ -0,0 +1,21424 @@
+Generated: 2025-05-15T13:32:25Z
+
+--- START OF FILE: docs/build/exposing-process-state.md ---
+# Exposing Process State with the Patch Device
+
+The [`~patch@1.0`](../resources/source-code/dev_patch.md) device provides a mechanism for AO processes to expose parts of their internal state, making it readable via direct HTTP GET requests along the process's HyperPATH.
+
+## Why Use the Patch Device?
+
+Standard AO process execution typically involves sending a message to a process, letting it compute, and then potentially reading results from its outbox or state after the computation is scheduled and finished. This is asynchronous.
+
+The `patch` device allows for a more direct, synchronous-like read pattern. A process can use it to "patch" specific data elements from its internal state into a location that becomes directly accessible via a HyperPATH GET request *before* the full asynchronous scheduling might complete.
+
+This is particularly useful for:
+
+* **Web Interfaces:** Building frontends that need to quickly read specific data points from an AO process without waiting for a full message round-trip.
+* **Data Feeds:** Exposing specific metrics or state variables for monitoring or integration with other systems.
+* **Caching:** Allowing frequently accessed data to be retrieved efficiently via simple HTTP GETs.
+
+## How it Works
+
+1. **Process Logic:** Inside your AO process code (e.g., in Lua or WASM), when you want to expose data, you construct an **Outbound Message** targeted at the [`~patch@1.0`](../resources/source-code/dev_patch.md) device.
+2. **Patch Message Format:** This outbound message typically includes tags that specify:
+ * `device = 'patch@1.0'`
+ * A `cache` tag containing a table. The **keys** within this table become the final segments in the HyperPATH used to access the data, and the **values** are the data itself.
+ * Example Lua using `aos`: `Send({ Target = ao.id, device = 'patch@1.0', cache = { mydatakey = MyValue } })`
+3. **HyperBEAM Execution:** When HyperBEAM executes the process schedule and encounters this outbound message:
+ * It invokes the `dev_patch` module.
+ * `dev_patch` inspects the message.
+ * It takes the keys from the `cache` table (`mydatakey` in the example) and their associated values (`MyValue`) and makes these values available under the `/cache/` path segment.
+4. **HTTP Access:** You (or any HTTP client) can now access this data directly using a GET request:
+ ```
+ GET /~process@1.0/compute/cache/mydatakey
+ # Or potentially using /now/
+ GET /~process@1.0/now/cache/mydatakey
+ ```
+ The HyperBEAM node serving the request will resolve the path up to `/compute/cache` (or `/now/cache`), then use the logic associated with the patched data (`mydatakey`) to return the `MyValue` directly.
+
+## Initial State Sync (Optional)
+
+It can be beneficial to expose the initial state of your process via the `patch` device as soon as the process is loaded or spawned. This makes key data points immediately accessible via HTTP GET requests without requiring an initial interaction message to trigger a `Send` to the patch device.
+
+This pattern typically involves checking a flag within your process state to ensure the initial sync only happens once. Here's an example from the Token Blueprint, demonstrating how to sync `Balances` and `TotalSupply` right after the process starts:
+
+```lua
+-- Place this logic at the top level of your process script,
+-- outside of specific handlers, so it runs on load.
+
+-- Initialize the sync flag if it doesn't exist
+InitialSync = InitialSync or 'INCOMPLETE'
+
+-- Sync state on spawn/load if not already done
+if InitialSync == 'INCOMPLETE' then
+ -- Send the relevant state variables to the patch device
+ Send({ device = 'patch@1.0', cache = { balances = Balances, totalsupply = TotalSupply } })
+ -- Update the flag to prevent re-syncing on subsequent executions
+ InitialSync = 'COMPLETE'
+ print("Initial state sync complete. Balances and TotalSupply patched.")
+end
+```
+
+**Explanation:**
+
+1. `InitialSync = InitialSync or 'INCOMPLETE'`: This line ensures the `InitialSync` variable exists in the process state, initializing it to `'INCOMPLETE'` if it's the first time the code runs.
+2. `if InitialSync == 'INCOMPLETE' then`: The code proceeds only if the initial sync hasn't been marked as complete.
+3. `Send(...)`: The relevant state (`Balances`, `TotalSupply`) is sent to the `patch` device, making it available under `/cache/balances` and `/cache/totalsupply`.
+4. `InitialSync = 'COMPLETE'`: The flag is updated, so this block won't execute again in future message handlers within the same process lifecycle.
+
+This ensures that clients or frontends can immediately query essential data like token balances as soon as the process ID is known, improving the responsiveness of applications built on AO.
+
+## Example (Lua in `aos`)
+
+```lua
+-- In your process code (e.g., loaded via .load)
+Handlers.add(
+ "PublishData",
+ Handlers.utils.hasMatchingTag("Action", "PublishData"),
+ function (msg)
+ local dataToPublish = "Some important state: " .. math.random()
+ -- Expose 'currentstatus' key under the 'cache' path
+ Send({ device = 'patch@1.0', cache = { currentstatus = dataToPublish } })
+ print("Published data to /cache/currentstatus")
+ end
+)
+
+-- Spawning and interacting
+[aos]> MyProcess = spawn(MyModule)
+
+[aos]> Send({ Target = MyProcess, Action = "PublishData" })
+-- Wait a moment for scheduling
+
+```
+
+## Avoiding Key Conflicts
+
+When defining keys within the `cache` table (e.g., `cache = { mydatakey = MyValue }`), these keys become path segments under `/cache/` (e.g., `/compute/cache/mydatakey` or `/now/cache/mydatakey`). It's important to choose keys that do not conflict with existing, reserved path segments used by HyperBEAM or the `~process` device itself for state access.
+
+Using reserved keywords as your cache keys can lead to routing conflicts or prevent you from accessing your patched data as expected. While the exact list can depend on device implementations, it's wise to avoid keys commonly associated with state access, such as: `now`, `compute`, `state`, `info`, `test`.
+
+It's recommended to use descriptive and specific keys for your cached data to prevent clashes with the underlying HyperPATH routing mechanisms. For example, instead of `cache = { state = ... }`, prefer `cache = { myappstate = ... }` or `cache = { usercount = ... }`.
+
+!!! warning
+ Be aware that HTTP path resolution is case-insensitive and automatically normalizes paths to lowercase. While the `patch` device itself stores keys with case sensitivity (e.g., distinguishing `MyKey` from `mykey`), accessing them via an HTTP GET request will treat `/cache/MyKey` and `/cache/mykey` as the same path. This means that using keys that only differ in case (like `MyKey` and `mykey` in your `cache` table) will result in unpredictable behavior or data overwrites when accessed via HyperPATH. To prevent these issues, it is **strongly recommended** to use **consistently lowercase keys** within the `cache` table (e.g., `mykey`, `usercount`, `appstate`).
+
+## Key Points
+
+* **Path Structure:** The data is exposed under the `/cache/` path segment. The tag name you use *inside* the `cache` table in the `Send` call (e.g., `currentstatus`) becomes the final segment in the accessible HyperPATH (e.g., `/compute/cache/currentstatus`).
+* **Data Types:** The `patch` device typically handles basic data types (strings, numbers) within the `cache` table effectively. Complex nested tables might require specific encoding or handling.
+* **`compute` vs `now`:** Accessing patched data via `/compute/cache/...` typically serves the last known patched value quickly. Accessing via `/now/cache/...` might involve more computation to ensure the absolute latest state before checking for the patched key under `/cache/`.
+* **Not a Replacement for State:** Patching is primarily for *exposing* reads. It doesn't replace the core state management within your process handler logic.
+
+By using the `patch` device, you can make parts of your AO process state easily and efficiently readable over standard HTTP, bridging the gap between decentralized computation and web-based applications.
+--- END OF FILE: docs/build/exposing-process-state.md ---
+
+--- START OF FILE: docs/build/extending-hyperbeam.md ---
+# Extending HyperBEAM
+
+HyperBEAM's modular design, built on AO-Core principles and Erlang/OTP, makes it highly extensible. You can add new functionalities or modify existing behaviors primarily by creating new **Devices** or implementing **Pre/Post-Processors**.
+
+!!! warning "Advanced Topic"
+ Extending HyperBEAM requires a good understanding of Erlang/OTP, the AO-Core protocol, and HyperBEAM's internal architecture. This guide provides a high-level overview; detailed implementation requires deeper exploration of the source code.
+
+## Approach 1: Creating New Devices
+
+This is the most common way to add significant new capabilities.
+A Device is essentially an Erlang module (typically named `dev_*.erl`) that processes AO-Core messages.
+
+**Steps:**
+
+1. **Define Purpose:** Clearly define what your device will do. What kind of messages will it process? What state will it manage (if any)? What functions (keys) will it expose?
+2. **Create Module:** Create a new Erlang module (e.g., `src/dev_my_new_device.erl`).
+3. **Implement `info/0..2` (Optional but Recommended):** Define an `info` function to signal capabilities and requirements to HyperBEAM (e.g., exported keys, variant/version ID).
+ ```erlang
+ info() ->
+ #{
+ variant => <<"MyNewDevice/1.0">>,
+ exports => [<<"do_something">>, <<"get_status">>]
+ }.
+ ```
+4. **Implement Key Functions:** Create Erlang functions corresponding to the keys your device exposes. These functions typically take `StateMessage`, `InputMessage`, and `Environment` as arguments and return `{ok, NewMessage}` or `{error, Reason}`.
+ ```erlang
+ do_something(StateMsg, InputMsg, Env) ->
+ % ... perform action based on InputMsg ...
+ NewState = ..., % Calculate new state
+ {ok, NewState}.
+
+ get_status(StateMsg, _InputMsg, _Env) ->
+ % ... read status from StateMsg ...
+ StatusData = ...,
+ {ok, StatusData}.
+ ```
+5. **Handle State (If Applicable):** Devices can be stateless or stateful. Stateful devices manage their state within the `StateMessage` passed between function calls.
+6. **Register Device:** Ensure HyperBEAM knows about your device. This might involve adding it to build configurations or potentially a dynamic registration mechanism if available.
+7. **Testing:** Write EUnit tests for your device's functions.
+
+**Example Idea:** A device that bridges to another blockchain network, allowing AO processes to read data or trigger transactions on that chain.
+
+## Approach 2: Building Pre/Post-Processors
+
+Pre/post-processors allow you to intercept incoming requests *before* they reach the target device/process (`preprocess`) or modify the response *after* execution (`postprocess`). These are often implemented using the `dev_stack` device or specific hooks within the request handling pipeline.
+
+**Use Cases:**
+
+* **Authentication/Authorization:** Checking signatures or permissions before allowing execution.
+* **Request Modification:** Rewriting requests, adding metadata, or routing based on specific criteria.
+* **Response Formatting:** Changing the structure or content type of the response.
+* **Metering/Logging:** Recording request details or charging for usage before or after execution.
+
+**Implementation:**
+
+Processors often involve checking specific conditions (like request path or headers) and then either:
+
+a. Passing the request through unchanged.
+b. Modifying the request/response message structure.
+c. Returning an error or redirect.
+
+
+**Example Idea:** A preprocessor that automatically adds a timestamp tag to all incoming messages for a specific process.
+
+
+## Approach 3: Custom Routing Strategies
+
+While `dev_router` provides basic strategies (round-robin, etc.), you could potentially implement a custom load balancing or routing strategy module that `dev_router` could be configured to use. This would involve understanding the interfaces expected by `dev_router`.
+
+**Example Idea:** A routing strategy that queries worker nodes for their specific capabilities before forwarding a request.
+
+## Getting Started
+
+1. **Familiarize Yourself:** Deeply understand Erlang/OTP and the HyperBEAM codebase (`src/` directory), especially [`hb_ao.erl`](../resources/source-code/hb_ao.md), [`hb_message.erl`](../resources/source-code/hb_message.md), and existing `dev_*.erl` modules relevant to your idea.
+2. **Study Examples:** Look at simple devices like `dev_patch.erl` or more complex ones like `dev_process.erl` to understand patterns.
+3. **Start Small:** Implement a minimal version of your idea first.
+4. **Test Rigorously:** Use `rebar3 eunit` extensively.
+5. **Engage Community:** Ask questions in developer channels if you get stuck.
+
+Extending HyperBEAM allows you to tailor the AO network's capabilities to specific needs, contributing to its rich and evolving ecosystem.
+
+--- END OF FILE: docs/build/extending-hyperbeam.md ---
+
+--- START OF FILE: docs/build/get-started-building-on-ao-core.md ---
+# Getting Started Building on AO-Core
+
+Welcome to building on AO, the decentralized supercomputer!
+
+AO combines the permanent storage of Arweave with the flexible, scalable computation enabled by the AO-Core protocol and its HyperBEAM implementation. This allows you to create truly autonomous applications, agents, and services that run trustlessly and permissionlessly.
+
+## Core Idea: Processes & Messages
+
+At its heart, building on AO involves:
+
+1. **Creating Processes:** Think of these as independent programs or stateful contracts. Each process has a unique ID and maintains its own state.
+2. **Sending Messages:** You interact with processes by sending them messages. These messages trigger computations, update state, or cause the process to interact with other processes or the outside world.
+
+Messages are processed by [Devices](../begin/ao-devices.md), which define *how* the computation happens (e.g., running WASM code, executing Lua scripts, managing state transitions).
+
+## Starting `aos`: Your Development Environment
+
+The primary tool for interacting with AO and developing processes is `aos`, a command-line interface and development environment.
+
+=== "npm"
+ ```bash
+ npm i -g https://get_ao.arweave.net
+ ```
+
+=== "bun"
+ ```bash
+ bun install -g https://get_ao.arweave.net
+ ```
+
+=== "pnpm"
+ ```bash
+ pnpm add -g https://get_ao.arweave.net
+ ```
+
+**Starting `aos`:**
+
+Simply run the command in your terminal:
+
+```bash
+aos
+```
+
+This connects you to an interactive Lua environment running within a **process** on the AO network. This process acts as your command-line interface (CLI) to the AO network, allowing you to interact with other processes, manage your wallet, and develop new AO processes. By default, it connects to a process running on the mainnet Compute Unit (CU).
+
+**What `aos` is doing:**
+
+* **Connecting:** Establishes a connection from your terminal to a remote process running the `aos` environment.
+* **Loading Wallet:** Looks for a default Arweave key file (usually `~/.aos.json` or specified via arguments) to load into the remote process context for signing outgoing messages.
+* **Providing Interface:** Gives you a Lua prompt (`[aos]>`) within the remote process where you can:
+ * Load code for new persistent processes on the network.
+ * Send messages to existing network processes.
+ * Inspect process state.
+ * Manage your local environment.
+
+## Your First Interaction: Assigning a Variable
+
+From the `aos` prompt, you can assign a variable. Let's assign a basic Lua process that just holds some data:
+
+```lua
+[aos]> myVariable = "Hello from aos!"
+-- This assigns the string "Hello from aos!" to the variable 'myVariable'
+-- within the current process's Lua environment.
+
+[aos]> myVariable
+-- Displays the content of 'myVariable'
+Hello from aos!
+```
+
+
+## Your First Handler
+
+Follow these steps to create and interact with your first message handler in AO:
+
+1. **Create a Lua File to Handle Messages:**
+ Create a new file named `main.lua` in your local directory and add the following Lua code:
+
+ ```lua
+ Handlers.add(
+ "HelloWorld",
+ function(msg)
+ -- This function gets called when a message with Action = "HelloWorld" arrives.
+ print("Handler triggered by message from: " .. msg.From)
+ -- It replies to the sender with a new message containing the specified data.
+ msg.reply({ Data = "Hello back from your process!" })
+ end
+ )
+
+ print("HelloWorld handler loaded.") -- Confirmation message
+ ```
+
+ * `Handlers.add`: Registers a function to handle incoming messages.
+ * `"HelloWorld"`: The name of this handler. It will be triggered by messages with `Action = "HelloWorld"`.
+ * `function(msg)`: The function that executes when the handler is triggered. `msg` contains details about the incoming message (like `msg.From`, the sender's process ID).
+ * `msg.reply({...})`: Sends a response message back to the original sender. The response must be a Lua table, typically containing a `Data` field.
+
+2. **Load the Handler into `aos`:**
+ From your `aos` prompt, load the handler code into your running process:
+
+ ```lua
+ [aos]> .load main.lua
+ ```
+
+3. **Send a Message to Trigger the Handler:**
+ Now, send a message to your own process (`ao.id` refers to the current process ID) with the action that matches your handler's name:
+
+ ```lua
+ [aos]> Send({ Target = ao.id, Action = "HelloWorld" })
+ ```
+
+4. **Observe the Output:**
+ You should see two things happen in your `aos` terminal:
+ * The `print` statement from your handler: `Handler triggered by message from: <your-process-id>`
+ * A notification about the reply message: `New Message From <your-process-id>: Data = Hello back from your process!`
+
+5. **Inspect the Reply Message:**
+ The reply message sent by your handler is now in your process's inbox. You can inspect its data like this:
+
+ ```lua
+ [aos]> Inbox[#Inbox].Data
+ ```
+ This should output: `"Hello back from your process!"`
+
+You've successfully created a handler, loaded it into your AO process, triggered it with a message, and received a reply!
+
+## Next Steps
+
+This is just the beginning. To dive deeper:
+
+* **AO Cookbook:** Explore practical examples and recipes for common tasks: [AO Cookbook](https://cookbook_ao.arweave.net/)
+* **Expose Process State:** Learn how to make your process data accessible via HTTP using the `patch` device: [Exposing Process State](./exposing-process-state.md)
+* **Serverless Compute:** Discover how to run WASM or Lua computations within your processes: [Serverless Decentralized Compute](./serverless-decentralized-compute.md)
+* **aos Documentation:** Refer to the official `aos` documentation for detailed commands and usage.
+
+--- END OF FILE: docs/build/get-started-building-on-ao-core.md ---
+
+--- START OF FILE: docs/build/serverless-decentralized-compute.md ---
+# Serverless Decentralized Compute on AO
+
+AO enables powerful "serverless" computation patterns by allowing you to run code (WASM, Lua) directly within decentralized processes, triggered by messages. Furthermore, if computations are performed on nodes running in Trusted Execution Environments (TEEs), you can obtain cryptographic attestations verifying the execution integrity.
+
+## Core Concept: Compute Inside Processes
+
+Instead of deploying code to centralized servers, you deploy code *to* the Arweave permaweb and instantiate it as an AO process. Interactions happen by sending messages to this process ID.
+
+* **Code Deployment:** Your WASM binary or Lua script is uploaded to Arweave, getting a permanent transaction ID.
+* **Process Spawning:** You create an AO process, associating it with your code's transaction ID and specifying the appropriate compute device ([`~wasm64@1.0`](../devices/wasm64-at-1-0.md) or [`~lua@5.3a`](../devices/lua-at-5-3a.md)).
+* **Execution via Messages:** Sending a message to the process ID triggers the HyperBEAM node (that picks up the message) to:
+ 1. Load the process state.
+ 2. Fetch the associated WASM/Lua code from Arweave.
+ 3. Execute the code using the relevant device ([`dev_wasm`](../resources/source-code/dev_wasm.md) or [`dev_lua`](../resources/source-code/dev_lua.md)), passing the message data and current state.
+ 4. Update the process state based on the execution results.
+
+
+## TEE Attestations (via [`~snp@1.0`](../resources/source-code/dev_snp.md))
+
+If a HyperBEAM node performing these computations runs within a supported Trusted Execution Environment (like AMD SEV-SNP), it can provide cryptographic proof of execution.
+
+* **How it works:** The [`~snp@1.0`](../resources/source-code/dev_snp.md) device interacts with the TEE hardware.
+* **Signed Responses:** When a TEE-enabled node processes your message (e.g., executes your WASM function), the HTTP response containing the result can be cryptographically signed by a key that *provably* only exists inside the TEE.
+* **Verification:** Clients receiving this response can verify the signature against the TEE platform's attestation mechanism (e.g., AMD's KDS) to gain high confidence that the computation was performed correctly and confidentially within the secure environment, untampered by the node operator.
+
+**Obtaining Attested Responses:**
+
+This usually involves interacting with nodes specifically advertised as TEE-enabled. The exact mechanism for requesting and verifying attestations depends on the specific TEE technology and node configuration.
+
+* The HTTP response headers might contain specific signature or attestation data (e.g., using HTTP Message Signatures RFC-9421 via [`dev_codec_httpsig`](../resources/source-code/dev_codec_httpsig.md)).
+* You might query the [`~snp@1.0`](../resources/source-code/dev_snp.md) device directly on the node to get its attestation report.
+
+Refer to documentation on [TEE Nodes](../run/tee-nodes.md) and the [`~snp@1.0`](../resources/source-code/dev_snp.md) device for details.
+
+By leveraging WASM, Lua, and optional TEE attestations, AO provides a powerful platform for building complex, verifiable, and truly decentralized serverless applications.
+
+--- END OF FILE: docs/build/serverless-decentralized-compute.md ---
+
+--- START OF FILE: docs/devices/json-at-1-0.md ---
+# Device: ~json@1.0
+
+## Overview
+
+The [`~json@1.0`](../resources/source-code/dev_json_iface.md) device provides a mechanism to interact with JSON (JavaScript Object Notation) data structures using HyperPATHs. It allows treating a JSON document or string as a stateful entity against which HyperPATH queries can be executed.
+
+This device is useful for:
+
+* Serializing and deserializing JSON data.
+* Querying and modifying JSON objects.
+* Integrating with other devices and operations via HyperPATH chaining.
+
+## Core Functions (Keys)
+
+### Serialization
+
+* **`GET /~json@1.0/serialize` (Direct Serialize Action)**
+ * **Action:** Serializes the input message or data into a JSON string.
+ * **Example:** `GET /~json@1.0/serialize` - serializes the current message as JSON.
+ * **HyperPATH:** The path segment `/serialize` directly follows the device identifier.
+
+* **`GET /<path>/~json@1.0/serialize` (Chained Serialize Action)**
+ * **Action:** Takes arbitrary data output from `<path>` (another device or operation) and returns its serialized JSON string representation.
+ * **Example:** `GET /~meta@1.0/info/~json@1.0/serialize` - fetches node info from the meta device and then pipes it to the JSON device to serialize the result as JSON.
+ * **HyperPATH:** This segment (`/~json@1.0/serialize`) is appended to a previous HyperPATH segment.
+
+## HyperPATH Chaining Example
+
+The JSON device is particularly useful in HyperPATH chains to convert output from other devices into JSON format:
+
+```
+GET /~meta@1.0/info/~json@1.0/serialize
+```
+
+This retrieves the node configuration from the meta device and serializes it to JSON.
+
+## See Also
+
+- [Message Device](../resources/source-code/dev_message.md) - Works well with JSON serialization
+- [Meta Device](../resources/source-code/dev_meta.md) - Can provide configuration data to serialize
+
+[json module](../resources/source-code/dev_codec_json.md)
+--- END OF FILE: docs/devices/json-at-1-0.md ---
+
+--- START OF FILE: docs/devices/lua-at-5-3a.md ---
+# Device: ~lua@5.3a
+
+## Overview
+
+The [`~lua@5.3a`](../resources/source-code/dev_lua.md) device enables the execution of Lua scripts within the HyperBEAM environment. It provides an isolated sandbox where Lua code can process incoming messages, interact with other devices, and manage state.
+
+## Core Concept: Lua Script Execution
+
+This device allows processes to perform computations defined in Lua scripts. Similar to the [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device, it manages the lifecycle of a Lua execution state associated with the process.
+
+## Key Functions (Keys)
+
+These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process.
+
+* **`init`**
+ * **Action:** Initializes the Lua environment for the process. It finds and loads the Lua script(s) associated with the process, creates a `luerl` state, applies sandboxing rules if specified, installs the [`dev_lua_lib`](../resources/source-code/dev_lua_lib.md) (providing AO-specific functions like `ao.send`), and stores the initialized state in the process's private area (`priv/state`).
+ * **Inputs (Expected in Process Definition or `init` Message):**
+ * `script`: Can be:
+ * An Arweave Transaction ID of the Lua script file.
+ * A list of script IDs or script message maps.
+ * A message map containing the Lua script in its `body` tag (Content-Type `application/lua` or `text/x-lua`).
+ * A map where keys are module names and values are script IDs/messages.
+ * `sandbox`: (Optional) Controls Lua sandboxing. Can be `true` (uses default sandbox list), `false` (no sandbox), or a map/list specifying functions to disable and their return values.
+ * **Outputs (Stored in `priv/`):**
+ * `state`: The initialized `luerl` state handle.
+* **`<function-name>` (Default Handler - `compute`)**
+ * **Action:** Executes a specific function within the loaded Lua script(s). This is the default handler; if a key matching a Lua function name is called on the device, this logic runs.
+ * **Inputs (Expected in Process State or Incoming Message):**
+ * `priv/state`: The Lua state obtained during `init`.
+ * The **key** being accessed (used as the default function name).
+ * `function` or `body/function`: (Optional) Overrides the function name derived from the key.
+ * `parameters` or `body/parameters`: (Optional) Arguments to pass to the Lua function. Defaults to a list containing the process message, the request message, and an empty options map.
+ * **Response:** The results returned by the Lua function call, typically encoded. The device also updates the `priv/state` with the Lua state after execution.
+* **`snapshot`**
+ * **Action:** Captures the current state of the running Lua environment. `luerl` state is serializable.
+ * **Inputs:** `priv/state`.
+ * **Outputs:** A message containing the serialized Lua state, typically tagged with `[Prefix]/State`.
+* **`normalize` (Internal Helper)**
+ * **Action:** Ensures a consistent state representation by loading a Lua state from a snapshot (`[Prefix]/State`) if a live state (`priv/state`) isn't already present.
+* **`functions`**
+ * **Action:** Returns a list of all globally defined functions within the current Lua state.
+ * **Inputs:** `priv/state`.
+ * **Response:** A list of function names.
+
+## Sandboxing
+
+The `sandbox` option in the process definition restricts potentially harmful Lua functions (like file I/O, OS commands, loading arbitrary code). By default (`sandbox = true`), common dangerous functions are disabled. You can customize the sandbox rules.
+
+## AO Library (`dev_lua_lib`)
+
+The `init` function automatically installs a helper library ([`dev_lua_lib`](../resources/source-code/dev_lua_lib.md)) into the Lua state. This library typically provides functions for interacting with the AO environment from within the Lua script, such as:
+
+* `ao.send({ Target = ..., ... })`: To send messages from the process.
+* Access to message tags and data.
+
+## Usage within `dev_stack`
+
+Like [`~wasm64@1.0`](../resources/source-code/dev_wasm.md), the `~lua@5.3a` device is typically used within an execution stack.
+
+```text
+# Example Process Definition Snippet
+Execution-Device: stack@1.0
+Execution-Stack: scheduler@1.0, lua@5.3a
+Script: <lua-script-id>
+Sandbox: true
+```
+
+This device offers a lightweight, integrated scripting capability for AO processes, suitable for a wide range of tasks from simple logic to more complex state management and interactions.
+
+[lua module](../resources/source-code/dev_lua.md)
+
+--- END OF FILE: docs/devices/lua-at-5-3a.md ---
+
+--- START OF FILE: docs/devices/message-at-1-0.md ---
+# Device: ~message@1.0
+
+## Overview
+
+The [`~message@1.0`](../resources/source-code/dev_message.md) device is a fundamental built-in device in HyperBEAM. It serves as the identity device for standard AO-Core messages, which are represented as Erlang maps internally. Its primary function is to allow manipulation and inspection of these message maps directly via HyperPATH requests, without needing a persistent process state.
+
+This device is particularly useful for:
+
+* Creating and modifying transient messages on the fly using query parameters.
+* Retrieving specific values from a message map.
+* Inspecting the keys of a message.
+* Handling message commitments and verification (though often delegated to specialized commitment devices like [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
+
+## Core Functionality
+
+The `message@1.0` device treats the message itself as the state it operates on. Key operations are accessed via path segments in the HyperPATH.
+
+### Key Access (`/key`)
+
+To retrieve the value associated with a specific key in the message map, simply append the key name to the path. Key lookup is case-insensitive.
+
+**Example:**
+
+```
+GET /~message@1.0&hello=world&Key=Value/key
+```
+
+**Response:**
+
+```
+"Value"
+```
+
+### Reserved Keys
+
+The `message@1.0` device reserves several keys for specific operations:
+
+* **`get`**: (Default operation if path segment matches a key in the map) Retrieves the value of a specified key. Behaves identically to accessing `/key` directly.
+* **`set`**: Modifies the message by adding or updating key-value pairs. Requires additional parameters (usually in the request body or subsequent path segments/query params, depending on implementation specifics).
+ * Supports deep merging of maps.
+ * Setting a key to `unset` removes it.
+ * Overwriting keys that are part of existing commitments will typically remove those commitments unless the new value matches the old one.
+* **`set_path`**: A special case for setting the `path` key itself, which cannot be done via the standard `set` operation.
+* **`remove`**: Removes one or more specified keys from the message. Requires an `item` or `items` parameter.
+* **`keys`**: Returns a list of all public (non-private) keys present in the message map.
+* **`id`**: Calculates and returns the ID (hash) of the message. Considers active commitments based on specified `committers`. May delegate ID calculation to a device specified by the message\'s `id-device` key or the default ([`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
+* **`commit`**: Creates a commitment (e.g., a signature) for the message. Requires parameters like `commitment-device` and potentially committer information. Delegates the actual commitment generation to the specified device (default [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
+* **`committers`**: Returns a list of committers associated with the commitments in the message. Can be filtered by request parameters.
+* **`commitments`**: Used internally and in requests to filter or specify which commitments to operate on (e.g., for `id` or `verify`).
+* **`verify`**: Verifies the commitments attached to the message. Can be filtered by `committers` or specific `commitment` IDs in the request. Delegates verification to the device specified in each commitment (`commitment-device`).
+
+### Private Keys
+
+Keys prefixed with `priv` (e.g., `priv_key`, `private.data`) are considered private and cannot be accessed or listed via standard `get` or `keys` operations.
+
+## HyperPATH Example
+
+This example demonstrates creating a transient message and retrieving a value:
+
+```
+GET /~message@1.0&hello=world&k=v/k
+```
+
+**Breakdown:**
+
+1. `~message@1.0`: Sets the root device.
+2. `&hello=world&k=v`: Query parameters create the initial message: `#{ <<"hello">> => <<"world">>, <<"k">> => <<"v">> }`.
+3. `/k`: The path segment requests the value for the key `k`.
+
+**Response:**
+
+```
+"v"
+```
+--- END OF FILE: docs/devices/message-at-1-0.md ---
+
+--- START OF FILE: docs/devices/meta-at-1-0.md ---
+# Device: ~meta@1.0
+
+## Overview
+
+The [`~meta@1.0`](../resources/source-code/dev_meta.md) device provides access to metadata and configuration information about the local HyperBEAM node and the broader AO network.
+
+This device is essential for inspecting a node's configuration, initializing a node before it serves general requests, and updating node settings as the operator.
+
+## Core Functions (Keys)
+
+### `info`
+
+Retrieves or modifies the node's configuration message (often referred to as `NodeMsg` internally).
+
+* **`GET /~meta@1.0/info`**
+ * **Action:** Returns the current node configuration message.
+ * **Response:** A message map containing the node's settings. Sensitive keys (like private wallets) are filtered out. Dynamically generated keys like the node's public `address` are added if a wallet is configured.
+* **`POST /~meta@1.0/info`**
+ * **Action:** Updates the node's configuration message. Requires the request to be signed by the node's configured `operator` key/address.
+ * **Request Body:** A message map containing the configuration keys and values to update.
+ * **Response:** Confirmation message indicating success or failure.
+ * **Note:** Once a node's configuration is marked as `initialized = permanent`, it cannot be changed via this method.
+
+## Key Configuration Parameters Managed by `~meta`
+
+While the `info` key is the primary interaction point, the `NodeMsg` managed by `~meta` holds crucial configuration parameters affecting the entire node's behavior, including (but not limited to):
+
+* `port`: HTTP server port.
+* `priv_wallet` / `key_location`: Path to the node's Arweave key file.
+* `operator`: The address designated as the node operator (defaults to the address derived from `priv_wallet`).
+* `initialized`: Status indicating if the node setup is temporary or permanent.
+* `preprocessor` / `postprocessor`: Optional messages defining pre/post-processing logic for requests.
+* `routes`: Routing table used by [`dev_router`](../resources/source-code/dev_router.md).
+* `store`: Configuration for data storage.
+* `trace`: Debug tracing options.
+* `p4_*`: Payment configuration.
+* `faff_*`: Access control lists.
+
+*(Refer to `hb_opts.erl` for a comprehensive list of options.)*
+
+## Utility Functions (Internal/Module Level)
+
+The [`dev_meta.erl`](../resources/source-code/dev_meta.md) module also contains helper functions used internally or callable from other Erlang modules:
+
+* `is_operator(RequestMsg, NodeMsg) -> boolean()`: Checks if the signer of `RequestMsg` matches the configured `operator` in `NodeMsg`.
+
+## Pre/Post-Processing Hooks
+
+The `~meta` device applies the node's configured `preprocessor` message before resolving the main request and the `postprocessor` message after obtaining the result, allowing for global interception and modification of requests/responses.
+
+## Initialization
+
+Before a node can process general requests, it usually needs to be initialized. Attempts to access devices other than `~meta@1.0/info` before initialization typically result in an error. Initialization often involves setting essential parameters like the operator key via a `POST` to `info`.
+
+[meta module](../resources/source-code/dev_meta.md)
+--- END OF FILE: docs/devices/meta-at-1-0.md ---
+
+--- START OF FILE: docs/devices/overview.md ---
+# Devices
+
+Devices are the core functional units within HyperBEAM and AO-Core. They define how messages are processed and what actions can be performed.
+
+Each device listed here represents a specific capability available to AO processes and nodes. Understanding these devices is key to building complex applications and configuring your HyperBEAM node effectively.
+
+## Available Devices
+
+Below is a list of documented built-in devices. Each page details the device's purpose, available functions (keys), and usage examples where applicable.
+
+* **[`~message@1.0`](./message-at-1-0.md):** Base message handling and manipulation.
+* **[`~meta@1.0`](./meta-at-1-0.md):** Node configuration and metadata.
+* **[`~process@1.0`](./process-at-1-0.md):** Persistent, shared process execution environment.
+* **[`~scheduler@1.0`](./scheduler-at-1-0.md):** Message scheduling and execution ordering for processes.
+* **[`~wasm64@1.0`](./wasm64-at-1-0.md):** WebAssembly (WASM) execution engine.
+* **[`~lua@5.3a`](./lua-at-5-3a.md):** Lua script execution engine.
+* **[`~relay@1.0`](./relay-at-1-0.md):** Relaying messages to other nodes or HTTP endpoints.
+* **[`~json@1.0`](./json-at-1-0.md):** Provides access to JSON data structures using HyperPATHs.
+
+*(More devices will be documented here as specifications are finalized and reviewed.)*
+
+## Device Naming and Versioning
+
+Devices are typically referenced using a name and version, like `~<name>@<version>` (e.g., `~process@1.0`). The tilde (`~`) often indicates a primary, user-facing device, while internal or utility devices might use a `dev_` prefix in the source code (e.g., `dev_router`).
+
+Versioning indicates the specific interface and behavior of the device. Changes to a device that break backward compatibility usually result in a version increment.
+
+--- END OF FILE: docs/devices/overview.md ---
+
+--- START OF FILE: docs/devices/process-at-1-0.md ---
+# Device: ~process@1.0
+
+## Overview
+
+The [`~process@1.0`](../resources/source-code/dev_process.md) device represents a persistent, shared execution environment within HyperBEAM, analogous to a process or actor in other systems. It allows for stateful computation and interaction over time.
+
+## Core Concept: Orchestration
+
+A message tagged with `Device: process@1.0` (the "Process Definition Message") doesn't typically perform computation itself. Instead, it defines *which other devices* should be used for key aspects of its lifecycle:
+
+* **Scheduler Device:** Determines the order of incoming messages (assignments) to be processed. (Defaults to [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md)).
+* **Execution Device:** Executes the actual computation based on the current state and the scheduled message. Often configured as [`dev_stack`](../resources/source-code/dev_stack.md) to allow multiple computational steps (e.g., running WASM, applying cron jobs, handling proofs).
+* **Push Device:** Handles the injection of new messages into the process's schedule. (Defaults to [`~push@1.0`](../resources/source-code/dev_push.md)).
+
+The `~process@1.0` device acts as a router, intercepting requests and delegating them to the appropriate configured device (scheduler, executor, etc.) by temporarily swapping the device tag on the message before resolving.
+
+## Key Functions (Keys)
+
+These keys are accessed via HyperPATHs relative to the Process Definition Message ID (`<process-id>`).
+
+* **`GET /~process@1.0/schedule`**
+ * **Action:** Delegates to the configured Scheduler Device (via the process's `schedule/3` function) to retrieve the current schedule or state.
+ * **Response:** Depends on the Scheduler Device implementation (e.g., list of message IDs).
+* **`POST /~process@1.0/schedule`**
+ * **Action:** Delegates to the configured Push Device (via the process's `push/3` function) to add a new message to the process's schedule.
+ * **Request Body:** The message to be added.
+ * **Response:** Confirmation or result from the Push Device.
+* **`GET /~process@1.0/compute/<slot-or-message-id>`**
+ * **Action:** Computes the process state up to a specific point identified by `<slot-or-message-id>` (either a slot number or a message ID within the schedule). It retrieves assignments from the Scheduler Device and applies them sequentially using the configured Execution Device.
+ * **Response:** The process state message after executing up to the target slot/message.
+ * **Caching:** Results are cached aggressively (see [`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to avoid recomputation.
+* **`GET /~process@1.0/now`**
+ * **Action:** Computes and returns the `Results` key from the *latest* known state of the process. This typically involves computing all pending assignments.
+ * **Response:** The value of the `Results` key from the final state.
+* **`GET /~process@1.0/slot`**
+ * **Action:** Delegates to the configured Scheduler Device to query information about a specific slot or the current slot number.
+ * **Response:** Depends on the Scheduler Device implementation.
+* **`GET /~process@1.0/snapshot`**
+ * **Action:** Delegates to the configured Execution Device to generate a snapshot of the current process state. This often involves running the execution stack in a specific "map" mode to gather state from different components.
+ * **Response:** A message representing the process snapshot, often marked for caching.
+
+## Process Definition Example
+
+A typical process definition message might look like this (represented conceptually):
+
+```text
+Device: process@1.0
+Scheduler-Device: [`scheduler@1.0`](../resources/source-code/dev_scheduler.md)
+Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md)
+Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "[`cron@1.0`](../resources/source-code/dev_cron.md)", "[`wasm64@1.0`](../resources/source-code/dev_wasm.md)", "[`PoDA@1.0`](../resources/source-code/dev_poda.md)"
+Cron-Frequency: 10-Minutes
+WASM-Image: <wasm-image-tx-id>
+PoDA:
+ Device: [`PoDA/1.0`](../resources/source-code/dev_poda.md)
+ Authority: <authority-address-1>
+ Authority: <authority-address-2>
+ Quorum: 2
+```
+
+This defines a process that uses:
+* The standard scheduler.
+* A stack executor that runs scheduling logic, cron jobs, a WASM module, and a Proof-of-Data-Availability check.
+
+## State Management & Caching
+
+`~process@1.0` relies heavily on caching ([`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to optimize performance. Full state snapshots and intermediate results are cached periodically (configurable via `Cache-Frequency` and `Cache-Keys` options) to avoid recomputing the entire history for every request.
+
+## Initialization (`init`)
+
+Processes often require an initialization step before they can process messages. This is typically triggered by calling the `init` key on the configured Execution Device via the process path (`/~process@1.0/init`). This allows components within the execution stack (like WASM modules) to set up their initial state.
+
+[process module](../resources/source-code/dev_process.md)
+
+--- END OF FILE: docs/devices/process-at-1-0.md ---
+
+--- START OF FILE: docs/devices/relay-at-1-0.md ---
+# Device: ~relay@1.0
+
+## Overview
+
+The [`~relay@1.0`](../resources/source-code/dev_relay.md) device enables HyperBEAM nodes to send messages to external HTTP endpoints or other AO nodes.
+
+## Core Concept: Message Forwarding
+
+This device acts as an HTTP client within the AO ecosystem. It allows a node or process to make outbound HTTP requests.
+
+## Key Functions (Keys)
+
+* **`call`**
+ * **Action:** Sends an HTTP request to a specified target and waits synchronously for the response.
+ * **Inputs (from Request Message or Base Message M1):**
+ * `target`: (Optional) A message map defining the request to be sent. Defaults to the original incoming request (`Msg2` or `M1`).
+ * `relay-path` or `path`: The URL/path to send the request to.
+ * `relay-method` or `method`: The HTTP method (GET, POST, etc.).
+ * `relay-body` or `body`: The request body.
+ * `requires-sign`: (Optional, boolean) If true, the request message (`target`) will be signed using the node's key before sending. Defaults to `false`.
+ * `http-client`: (Optional) Specify a custom HTTP client module to use (defaults to node's configured `relay_http_client`).
+ * **Response:** `{ok, Result}` where `Result` is the full message received from the remote peer, or `{error, Reason}`.
+ * **Example HyperPATH:**
+ ```
+ GET /~relay@1.0/call?method=GET&path=https://example.com
+ ```
+* **`cast`**
+ * **Action:** Sends an HTTP request asynchronously. The device returns immediately after spawning a process to send the request; it does not wait for or return the response from the remote peer.
+ * **Inputs:** Same as `call`.
+ * **Response:** `{ok, <<"OK">>}`.
+* **`preprocess`**
+ * **Action:** This function is designed to be used as a node's global `preprocessor` (configured via [`~meta@1.0`](../resources/source-code/dev_meta.md)). When configured, it intercepts *all* incoming requests to the node and automatically rewrites them to be relayed via the `call` key. This effectively turns the node into a pure forwarding proxy, using its routing table ([`dev_router`](../resources/source-code/dev_router.md)) to determine the destination.
+ * **Response:** A message structure that invokes `/~relay@1.0/call` with the original request as the target body.
+
+## Use Cases
+
+* **Inter-Node Communication:** Sending messages between HyperBEAM nodes.
+* **External API Calls:** Allowing AO processes to interact with traditional web APIs.
+* **Routing Nodes:** Nodes configured with the `preprocess` key act as dedicated routers/proxies.
+* **Client-Side Relaying:** A local HyperBEAM instance can use `~relay@1.0` to forward requests to public compute nodes.
+
+## Interaction with Routing
+
+When `call` or `cast` is invoked, the actual HTTP request dispatch is handled by `hb_http:request/2`. This function often utilizes the node's routing configuration ([`dev_router`](../resources/source-code/dev_router.md)) to determine the specific peer/URL to send the request to, especially if the target path is an AO process ID or another internal identifier rather than a full external URL.
+
+[relay module](../resources/source-code/dev_relay.md)
+
+--- END OF FILE: docs/devices/relay-at-1-0.md ---
+
+--- START OF FILE: docs/devices/scheduler-at-1-0.md ---
+# Device: ~scheduler@1.0
+
+## Overview
+
+The [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md) device manages the queueing and ordering of messages targeted at a specific process ([`~process@1.0`](../resources/source-code/dev_process.md)). It ensures that messages are processed according to defined scheduling rules.
+
+## Core Concept: Message Ordering
+
+When messages are sent to an AO process (typically via the [`~push@1.0`](../resources/source-code/dev_push.md) device or a `POST` to the process's `/schedule` endpoint), they are added to a queue managed by the Scheduler Device associated with that process. The scheduler ensures that messages are processed one after another in a deterministic order, typically based on arrival time and potentially other factors like message nonces or timestamps (depending on the specific scheduler implementation details).
+
+The [`~process@1.0`](../resources/source-code/dev_process.md) device interacts with its configured Scheduler Device (which defaults to `~scheduler@1.0`) primarily through the `next` key to retrieve the next message to be executed.
+
+## Slot System
+
+Slots are a fundamental concept in the `~scheduler@1.0` device, providing a structured mechanism for organizing and sequencing computation.
+
+* **Sequential Ordering:** Slots act as numbered containers (starting at 0) that hold specific messages or tasks to be processed in a deterministic order.
+* **State Tracking:** The `at-slot` key in a process's state (or a similar internal field like `current-slot` within the scheduler itself) tracks execution progress, indicating which messages have been processed and which are pending. The `slot` function can be used to query this.
+* **Assignment Storage:** Each slot contains an "assignment" - the cryptographically verified message waiting to be executed. These assignments are retrieved using the `schedule` function or internally via `next`.
+* **Schedule Organization:** The collection of all slots for a process forms its "schedule".
+* **Application Scenarios:**
+ * **Scheduling Messages:** When a message is posted to a process (e.g., via `register`), it's assigned to the next available slot.
+ * **Status Monitoring:** Clients can query a process's current slot (via the `slot` function) to check progress.
+ * **Task Retrieval:** Processes find their next task by requesting the next assignment via the `next` function, which implicitly uses the next slot number based on the current state.
+ * **Distributed Consistency:** Slots ensure deterministic execution order across nodes, crucial for maintaining consistency in AO.
+
+This slotting mechanism is central to AO processes built on HyperBEAM, allowing for deterministic, verifiable computation.
+
+## Key Functions (Keys)
+
+These keys are typically accessed via the [`~process@1.0`](../resources/source-code/dev_process.md) device, which delegates the calls to its configured scheduler.
+
+* **`schedule` (Handler for `GET /~process@1.0/schedule`)**
+ * **Action:** Retrieves the list of pending assignments (messages) for the process. May support cursor-based traversal for long schedules.
+ * **Response:** A message map containing the assignments, often keyed by slot number or message ID.
+* **`register` (Handler for `POST /~process@1.0/schedule`)**
+ * **Action:** Adds/registers a new message to the process's schedule. If this is the first message for a process, it might initialize the scheduler state.
+ * **Request Body:** The message to schedule.
+ * **Response:** Confirmation, potentially including the assigned slot or message ID.
+* **`slot` (Handler for `GET /~process@1.0/slot`)**
+ * **Action:** Queries the current or a specific slot number within the process's schedule.
+ * **Response:** Information about the requested slot, such as the current highest slot number.
+* **`status` (Handler for `GET /~process@1.0/status`)**
+ * **Action:** Retrieves status information about the scheduler for the process.
+ * **Response:** A status message.
+* **`next` (Internal Key used by [`~process@1.0`](../resources/source-code/dev_process.md))**
+ * **Action:** Retrieves the next assignment message from the schedule based on the process's current `at-slot` state.
+ * **State Management:** Requires the current process state (`Msg1`) containing the `at-slot` key.
+ * **Response:** `{ok, #{ "body" => Assignment, "state" => State }}` (the next assignment and the updated process state) or `{error, Reason}` if no next assignment is found.
+ * **Caching & Lookahead:** The implementation uses internal caching (`dev_scheduler_cache`, `priv/assignments`) and potentially background lookahead workers to optimize fetching subsequent assignments.
+* **`init` (Internal Key)**
+ * **Action:** Initializes the scheduler state for a process, often called when the process itself is initialized.
+* **`checkpoint` (Internal Key)**
+ * **Action:** Triggers the scheduler to potentially persist its current state or perform other checkpointing operations.
+
+## Interaction with Other Components
+
+* **[`~process@1.0`](../resources/source-code/dev_process.md):** The primary user of the scheduler, calling `next` to drive process execution.
+* **[`~push@1.0`](../resources/source-code/dev_push.md):** Often used to add messages to the schedule via `POST /schedule`.
+* **`dev_scheduler_cache`:** Internal module used for caching assignments locally on the node to reduce latency.
+* **Scheduling Unit (SU):** Schedulers may interact with external entities (like Arweave gateways or dedicated SU nodes) to fetch or commit schedules, although `~scheduler@1.0` aims for a simpler, often node-local or SU-client model.
+
+`~scheduler@1.0` provides the fundamental mechanism for ordered, sequential execution within the potentially asynchronous and parallel environment of AO.
+
+[scheduler module](../resources/source-code/dev_scheduler.md)
+
+--- END OF FILE: docs/devices/scheduler-at-1-0.md ---
+
+--- START OF FILE: docs/devices/wasm64-at-1-0.md ---
+# Device: ~wasm64@1.0
+
+## Overview
+
+The [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device enables the execution of 64-bit WebAssembly (WASM) code within the HyperBEAM environment. It provides a sandboxed environment for running compiled code from various languages (like Rust, C++, Go) that target WASM.
+
+## Core Concept: WASM Execution
+
+This device allows AO processes to perform complex computations defined in WASM modules, which can be written in languages like Rust, C++, C, Go, etc., and compiled to WASM.
+
+The device manages the lifecycle of a WASM instance associated with the process state.
+
+## Key Functions (Keys)
+
+These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process.
+
+* **`init`**
+ * **Action:** Initializes the WASM environment for the process. It locates the WASM image (binary), starts a WAMR instance, and stores the instance handle and helper functions (for reading/writing WASM memory) in the process's private state (`priv/...`).
+ * **Inputs (Expected in Process Definition or `init` Message):**
+ * `[Prefix]/image`: The Arweave Transaction ID of the WASM binary, or the WASM binary itself, or a message containing the WASM binary in its body.
+ * `[Prefix]/Mode`: (Optional) Specifies execution mode (`WASM` (default) or `AOT` if allowed by node config).
+ * **Outputs (Stored in `priv/`):**
+ * `[Prefix]/instance`: The handle to the running WAMR instance.
+ * `[Prefix]/write`: A function to write data into the WASM instance's memory.
+ * `[Prefix]/read`: A function to read data from the WASM instance's memory.
+ * `[Prefix]/import-resolver`: A function used to handle calls *from* the WASM module back *to* the AO environment (imports).
+* **`compute`**
+ * **Action:** Executes a function within the initialized WASM instance. It retrieves the target function name and parameters from the incoming message or process definition and calls the WASM instance via `hb_beamr`.
+ * **Inputs (Expected in Process State or Incoming Message):**
+ * `priv/[Prefix]/instance`: The handle obtained during `init`.
+ * `function` or `body/function`: The name of the WASM function to call.
+ * `parameters` or `body/parameters`: A list of parameters to pass to the WASM function.
+ * **Outputs (Stored in `results/`):**
+ * `results/[Prefix]/type`: The result type returned by the WASM function.
+ * `results/[Prefix]/output`: The actual result value returned by the WASM function.
+* **`import`**
+ * **Action:** Handles calls originating *from* the WASM module (imports). The default implementation (`default_import_resolver`) resolves these calls by treating them as sub-calls within the AO environment, allowing WASM code to invoke other AO device functions or access process state via the `hb_ao:resolve` mechanism.
+ * **Inputs (Provided by `hb_beamr`):** Module name, function name, arguments, signature.
+ * **Response:** Returns the result of the resolved AO call back to the WASM instance.
+* **`snapshot`**
+ * **Action:** Captures the current memory state of the running WASM instance. This is used for checkpointing and restoring process state.
+ * **Inputs:** `priv/[Prefix]/instance`.
+ * **Outputs:** A message containing the raw binary snapshot of the WASM memory state, typically tagged with `[Prefix]/State`.
+* **`normalize` (Internal Helper)**
+ * **Action:** Ensures a consistent state representation for computation, primarily by loading a WASM instance from a snapshot (`[Prefix]/State`) if a live instance (`priv/[Prefix]/instance`) isn't already present. This allows resuming execution from a cached state.
+* **`terminate`**
+ * **Action:** Stops and cleans up the running WASM instance associated with the process.
+ * **Inputs:** `priv/[Prefix]/instance`.
+
+## Usage within `dev_stack`
+
+The `~wasm64@1.0` device is almost always used as part of an execution stack configured in the Process Definition Message and managed by [`dev_stack`](../resources/source-code/dev_stack.md). [`dev_stack`](../resources/source-code/dev_stack.md) ensures that `init` is called on the first pass, `compute` on subsequent passes, and potentially `snapshot` or `terminate` as needed.
+
+```text
+# Example Process Definition Snippet
+Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md)
+Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "wasm64@1.0"
+WASM-Image: <wasm-image-tx-id>
+```
+
+This setup allows AO processes to leverage the computational power and language flexibility offered by WebAssembly in a decentralized, verifiable manner.
+
+[wasm module](../resources/source-code/dev_wasm.md)
+
+--- END OF FILE: docs/devices/wasm64-at-1-0.md ---
+
+--- START OF FILE: docs/introduction/ao-devices.md ---
+# AO Devices
+
+In AO-Core and its implementation HyperBEAM, **Devices** are modular components responsible for processing and interpreting [Messages](./what-is-ao-core.md#core-concepts). They define the specific logic for how computations are performed, data is handled, or interactions occur within the AO ecosystem.
+
+Think of Devices as specialized engines or services that can be plugged into the AO framework. This modularity is key to AO's flexibility and extensibility.
+
+## Purpose of Devices
+
+* **Define Computation:** Devices dictate *how* a message's instructions are executed. One device might run WASM code, another might manage process state, and yet another might simply relay data.
+* **Enable Specialization:** Nodes running HyperBEAM can choose which Devices to support, allowing them to specialize in certain tasks (e.g., high-compute tasks, storage-focused tasks, secure TEE operations).
+* **Promote Modularity:** New functionalities can be added to AO by creating new Devices, without altering the core protocol.
+* **Distribute Workload:** Different Devices can handle different parts of a complex task, enabling parallel processing and efficient resource utilization across the network.
+
+## Familiar Examples
+
+HyperBEAM includes many preloaded devices that provide core functionality. Some key examples include:
+
+* **[`~meta@1.0`](../devices/meta-at-1-0.md):** Configures the node itself (hardware specs, supported devices, payment info).
+* **[`~process@1.0`](../devices/process-at-1-0.md):** Manages persistent, shared computational states (like traditional smart contracts, but more flexible).
+* **[`~scheduler@1.0`](../devices/scheduler-at-1-0.md):** Handles the ordering and execution of messages within a process.
+* **[`~wasm64@1.0`](../devices/wasm64-at-1-0.md):** Executes WebAssembly (WASM) code, allowing for complex computations written in languages like Rust, C++, etc.
+* **[`~lua@5.3a`](../devices/lua-at-5-3a.md):** Executes Lua scripts.
+* **[`~relay@1.0`](../devices/relay-at-1-0.md):** Forwards messages between AO nodes or to external HTTP endpoints.
+* **[`~json@1.0`](../devices/json-at-1-0.md):** Provides access to JSON data structures using HyperPATHs.
+* **[`~message@1.0`](../devices/message-at-1-0.md):** Manages message state and processing.
+* **[`~patch@1.0`](../guides/exposing-process-state.md):** Applies state updates directly to a process, often used for migrating or managing process data.
+
+## Beyond the Basics
+
+Devices aren't limited to just computation or state management. They can represent more abstract concepts:
+
+* **Security Devices ([`~snp@1.0`](../resources/source-code/dev_snp.md), [`dev_codec_httpsig`](../resources/source-code/dev_codec_httpsig.md)):** Handle tasks related to Trusted Execution Environments (TEEs) or message signing, adding layers of security and verification.
+* **Payment/Access Control Devices ([`~p4@1.0`](../resources/source-code/dev_p4.md), [`~faff@1.0`](../resources/source-code/dev_faff.md)):** Manage metering, billing, or access control for node services.
+* **Workflow/Utility Devices ([`dev_cron`](../resources/source-code/dev_cron.md), [`dev_stack`](../resources/source-code/dev_stack.md), [`dev_monitor`](../resources/source-code/dev_monitor.md)):** Coordinate complex execution flows, schedule tasks, or monitor process activity.
+
+## Using Devices
+
+Devices are typically invoked via [HyperPATHs](./pathing-in-ao-core.md). The path specifies which Device should interpret the subsequent parts of the path or the request body.
+
+```
+# Example: Execute the 'now' key on the process device for a specific process
+/~process@1.0/now
+
+# Example: Relay a GET request via the relay device
+/~relay@1.0/call?method=GET&path=https://example.com
+```
+
+The specific functions or 'keys' available for each Device are documented individually. See the [Devices section](../devices/index.md) for details on specific built-in devices.
+
+## The Potential of Devices
+
+The modular nature of AO Devices opens up vast possibilities for future expansion and innovation. The current set of preloaded and community devices is just the beginning. As the AO ecosystem evolves, we can anticipate the development of new devices catering to increasingly specialized needs:
+
+* **Specialized Hardware Integration:** Devices could be created to interface directly with specialized hardware accelerators like GPUs (for AI/ML tasks such as running large language models), TPUs, or FPGAs, allowing AO processes to leverage high-performance computing resources securely and verifiably.
+* **Advanced Cryptography:** New devices could implement cutting-edge cryptographic techniques, such as zero-knowledge proofs (ZKPs) or fully homomorphic encryption (FHE), enabling enhanced privacy and complex computations on encrypted data.
+* **Cross-Chain & Off-Chain Bridges:** Devices could act as secure bridges to other blockchain networks or traditional Web2 APIs, facilitating seamless interoperability and data exchange between AO and the wider digital world.
+* **AI/ML Specific Devices:** Beyond raw GPU access, specialized devices could offer higher-level AI/ML functionalities, like optimized model inference engines or distributed training frameworks.
+* **Domain-Specific Logic:** Communities or organizations could develop devices tailored to specific industries or use cases, such as decentralized finance (DeFi) primitives, scientific computing libraries, or decentralized identity management systems.
+
+The Device framework ensures that AO can adapt and grow, incorporating new technologies and computational paradigms without requiring fundamental changes to the core protocol. This extensibility is key to AO's long-term vision of becoming a truly global, decentralized computer.
+
+--- END OF FILE: docs/introduction/ao-devices.md ---
+
+--- START OF FILE: docs/introduction/pathing-in-ao-core.md ---
+# Pathing in AO-Core
+
+## Overview
+
+Understanding how to construct and interpret paths in AO-Core is fundamental to working with HyperBEAM. This guide explains the structure and components of AO-Core paths, enabling you to effectively interact with processes and access their data.
+
+## HyperPATH Structure
+
+Let's examine a typical HyperBEAM endpoint piece-by-piece:
+
+```
+https://router-1.forward.computer/~process@1.0/now
+```
+
+### Node URL (`router-1.forward.computer`)
+
+The HTTP response from this node includes a signature from the host's key. By accessing the [`~snp@1.0`](../resources/source-code/dev_snp.md) device, you can verify that the node is running in a genuine Trusted Execution Environment (TEE), ensuring computation integrity. You can replace `router-1.forward.computer` with any HyperBEAM TEE node operated by any party while maintaining trustless guarantees.
+
+### Process Path (`/~process@1.0`)
+
+Every path in AO-Core represents a program. Think of the URL bar as a Unix-style command-line interface, providing access to AO's trustless and verifiable compute. Each path component (between `/` characters) represents a step in the computation. In this example, we instruct the AO-Core node to:
+
+1. Load a specific message from its caches (local, another node, or Arweave)
+2. Interpret it with the [`~process@1.0`](../devices/process-at-1-0.md) device
+3. The process device implements a shared computing environment with consistent state between users
+
+### State Access (`/now` or `/compute`)
+
+Devices in AO-Core expose keys accessible via path components. Each key executes a function on the device:
+
+- `now`: Calculates real-time process state
+- `compute`: Serves the latest known state (faster than checking for new messages)
+
+Under the surface, these keys represent AO-Core messages. As we progress through the path, AO-Core applies each message to the existing state. You can access the full process state by visiting:
+```
+/~process@1.0/now
+```
+
+### State Navigation
+
+You can browse through sub-messages and data fields by accessing them as keys. For example, if a process stores its interaction count in a field named `cache`, you can access it like this:
+```
+/~process@1.0/compute/cache
+```
+This shows the 'cache' of your process. Each response is:
+
+- A message with a signature attesting to its correctness
+- A hashpath describing its generation
+- Transferable to other AO-Core nodes for uninterrupted execution
+
+### Query Parameters and Type Casting
+
+Beyond path segments, HyperBEAM URLs can include query parameters that utilize a special type casting syntax. This allows specifying the desired data type for a parameter directly within the URL using the format `key+type=value`.
+
+- **Syntax**: A `+` symbol separates the parameter key from its intended type (e.g., `count+integer=42`, `items+list="apple",7`).
+- **Mechanism**: The HyperBEAM node identifies the `+type` suffix (e.g., `+integer`, `+list`, `+map`, `+float`, `+atom`, `+resolve`). It then uses internal functions ([`hb_singleton:maybe_typed`](../resources/source-code/hb_singleton.md) and [`dev_codec_structured:decode_value`](../resources/source-code/dev_codec_structured.md)) to decode and cast the provided value string into the corresponding Erlang data type before incorporating it into the message.
+- **Supported Types**: Common types include `integer`, `float`, `list`, `map`, `atom`, `binary` (often implicit), and `resolve` (for path resolution). List values often follow the [HTTP Structured Fields format (RFC 8941)](https://www.rfc-editor.org/rfc/rfc8941.html).
+
+This powerful feature enables the expression of complex data structures directly in URLs.
+
+## Examples
+
+The following examples illustrate using HyperPATH with various AO-Core processes and devices. While these cover a few specific use cases, HyperBEAM's extensible nature allows interaction with any device or process via HyperPATH. For a deeper understanding, we encourage exploring the [source code](https://github.com/permaweb/hyperbeam) and experimenting with different paths.
+
+### Example 1: Accessing Full Process State
+
+To get the complete, real-time state of a process identified by its process ID, use the `/now` path component with the [`~process@1.0`](../devices/process-at-1-0.md) device:
+
+```
+GET /~process@1.0/now
+```
+
+This instructs the AO-Core node to load the process and execute the `now` function on the [`~process@1.0`](../devices/process-at-1-0.md) device.
+
+### Example 2: Navigating to Specific Process Data
+
+If a process maintains its state in a map and you want to access a specific field, like `cache`, using the faster `/compute` endpoint:
+
+```
+GET /~process@1.0/compute/cache
+```
+
+This accesses the `compute` key on the [`~process@1.0`](../devices/process-at-1-0.md) device and then navigates to the `cache` key within the resulting state map. Using this path, you will see the latest 'cache' of your process (the number of interactions it has received). Every piece of relevant information about your process can be accessed similarly, effectively providing a native API.
+
+(Note: This represents direct navigation within the process state structure. For accessing data specifically published via the `~patch@1.0` device, see the documentation on [Exposing Process State](../build/exposing-process-state.md), which typically uses the `/cache/` path.)
+
+### Example 3: Basic `~message@1.0` Usage
+
+Here's a simple example of using [`~message@1.0`](../devices/message-at-1-0.md) to create a message and retrieve a value:
+
+```
+GET /~message@1.0&greeting="Hello"&count+integer=42/count
+```
+
+1. **Base:** `/` - The base URL of the HyperBEAM node.
+2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md)
+3. **Query Params:** `greeting="Hello"` (binary) and `count+integer=42` (integer), forming the message `#{ <<"greeting">> => <<"Hello">>, <<"count">> => 42 }`.
+4. **Path:** `/count` tells `~message@1.0` to retrieve the value associated with the key `count`.
+
+**Response:** The integer `42`.
+
+### Example 4: Using the `~message@1.0` Device with Type Casting
+
+The [`~message@1.0`](../devices/message-at-1-0.md) device can be used to construct and query transient messages, utilizing type casting in query parameters.
+
+Consider the following URL:
+
+```
+GET /~message@1.0&name="Alice"&age+integer=30&items+list="apple",1,"banana"&config+map=key1="val1";key2=true/[PATH]
+```
+
+HyperBEAM processes this as follows:
+
+1. **Base:** `/` - The base URL of the HyperBEAM node.
+2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md)
+3. **Query Parameters (with type casting):**
+ * `name="Alice"` -> `#{ <<"name">> => <<"Alice">> }` (binary)
+ * `age+integer=30` -> `#{ <<"age">> => 30 }` (integer)
+ * `items+list="apple",1,"banana"` -> `#{ <<"items">> => [<<"apple">>, 1, <<"banana">>] }` (list)
+ * `config+map=key1="val1";key2=true` -> `#{ <<"config">> => #{<<"key1">> => <<"val1">>, <<"key2">> => true} }` (map)
+4. **Initial Message Map:** A combination of the above key-value pairs.
+5. **Path Evaluation:**
+ * If `[PATH]` is `/items/1`, the response is the integer `1`.
+ * If `[PATH]` is `/config/key1`, the response is the binary `<<"val1">>`.
+
+## Best Practices
+
+1. Always verify cryptographic signatures on responses
+2. Use appropriate caching strategies for frequently accessed data
+3. Implement proper error handling for network requests
+4. Consider rate limits and performance implications
+5. Keep sensitive data secure and use appropriate authentication methods
+--- END OF FILE: docs/introduction/pathing-in-ao-core.md ---
+
+--- START OF FILE: docs/introduction/what-is-ao-core.md ---
+# What is AO-Core?
+
+AO-Core is the foundational protocol underpinning the [AO Computer](https://ao.arweave.net). It defines a minimal, generalized model for decentralized computation built around standard web technologies like HTTP. Think of it as a way to interpret the Arweave permaweb not just as static storage, but as a dynamic, programmable, and infinitely scalable computing environment.
+
+## Core Concepts
+
+AO-Core revolves around three fundamental components:
+
+1. **Messages:** The smallest units of data and computation. Messages can be simple data blobs or maps of named functions. They are the primary means of communication and triggering execution within the system. Messages are cryptographically linked, forming a verifiable computation graph.
+2. **Devices:** Modules responsible for interpreting and processing messages. Each device defines specific logic for how messages are handled (e.g., executing WASM, storing data, relaying information). This modular design allows nodes to specialize and the system to be highly extensible.
+3. **Paths:** Structures that link messages over time, creating a verifiable history of computations. Paths allow users to navigate the computation graph and access specific states or results. They leverage `HashPaths`, cryptographic fingerprints representing the sequence of operations leading to a specific message state, ensuring traceability and integrity.
+
+## Key Principles
+
+* **Minimalism:** AO-Core provides the simplest possible representation of data and computation, avoiding prescriptive consensus mechanisms or specific VM requirements.
+* **HTTP Native:** Designed for compatibility with HTTP protocols, making it accessible via standard web tools and infrastructure.
+* **Scalability:** By allowing parallel message processing and modular device execution, AO-Core enables hyper-parallel computing, overcoming the limitations of traditional sequential blockchains.
+* **Permissionlessness & Trustlessness:** While AO-Core itself is minimal, it provides the framework upon which higher-level protocols like AO can build systems that allow anyone to participate (`permissionlessness`) without needing to trust intermediaries (`trustlessness`). Users can choose their desired security and performance trade-offs.
+
+AO-Core transforms the permanent data storage of Arweave into a global, shared computation space, enabling the creation of complex, autonomous, and scalable decentralized applications.
+
+
+--- END OF FILE: docs/introduction/what-is-ao-core.md ---
+
+--- START OF FILE: docs/introduction/what-is-hyperbeam.md ---
+# What is HyperBEAM?
+
+HyperBEAM is the primary, production-ready implementation of the [AO-Core protocol](./what-is-ao-core.md), built on the robust Erlang/OTP framework. It serves as a decentralized operating system, powering the AO Computer—a scalable, trust-minimized, distributed supercomputer built on permanent storage. HyperBEAM provides the runtime environment and essential services to execute AO-Core computations across a network of distributed nodes.
+
+## Why HyperBEAM Matters
+
+HyperBEAM transforms the abstract concepts of AO-Core—such as [Messages](./what-is-ao-core.md#core-concepts), [Devices](./what-is-ao-core.md#core-concepts), and [Paths](./what-is-ao-core.md#core-concepts)—into a concrete, operational system. Here's why it's pivotal to the AO ecosystem:
+
+- **Modularity via Devices:** HyperBEAM introduces a uniquely modular architecture centered around [Devices](./ao-devices.md). These pluggable components define specific computational logic (like running WASM, managing state, or relaying data), allowing for unprecedented flexibility, specialization, and extensibility in a decentralized system.
+- **Decentralized OS:** It equips nodes with the infrastructure to join the AO network, manage resources, execute computations, and communicate seamlessly.
+- **Erlang/OTP Powerhouse:** Leveraging the BEAM virtual machine, HyperBEAM inherits Erlang's concurrency, fault tolerance, and scalability—perfect for distributed systems with lightweight processes and message passing.
+- **Hardware Independence:** It abstracts underlying hardware, allowing diverse nodes to contribute resources without compatibility issues.
+- **Node Coordination:** It governs how nodes join the network, offer services through specific Devices, and interact with one another.
+- **Verifiable Computation:** Through hashpaths and the Converge Protocol, HyperBEAM ensures computation results are cryptographically verified and trustworthy.
+
+In essence, HyperBEAM is the engine that drives the AO Computer, enabling a vision of decentralized, verifiable computing at scale.
+
+## Core Components & Features
+
+- **Pluggable Devices:** The heart of HyperBEAM's extensibility. It includes essential built-in devices like [`~meta`](../devices/meta-at-1-0.md), [`~relay`](../devices/relay-at-1-0.md), [`~process`](../devices/process-at-1-0.md), [`~scheduler`](../devices/scheduler-at-1-0.md), and [`~wasm64`](../devices/wasm64-at-1-0.md) for core functionality, but the system is designed for easy addition of new custom devices.
+- **Message System:** Everything in HyperBEAM is a "Message"—a map of named functions or binary data that can be processed, transformed, and cryptographically verified.
+- **HTTP Interface:** Nodes expose an HTTP server for interaction via standard web requests and HyperPATHs, structured URLs that represent computation paths.
+- **Modularity:** Its design supports easy extension, allowing new devices and functionalities to be added effortlessly.
+
+## Architecture
+
+* **Initialization Flow:** When a HyperBEAM node starts, it initializes the name service, scheduler registry, timestamp server, and HTTP server, establishing core services for process management, timing, communication, and storage.
+* **Compute Model:** Computation follows the pattern `Message1(Message2) => Message3`, where messages are resolved through their devices and [paths](./pathing-in-ao-core.md). The integrity and history of these computations are ensured by **hashpaths**, which serve as a cryptographic audit trail.
+* **Scheduler System:** The scheduler component manages execution order using ["slots"](../devices/scheduler-at-1-0.md#slot-system) — sequential positions that guarantee deterministic computation.
+* **Process Slots:** Each process has numbered slots starting from 0 that track message execution order, ensuring consistent computation even across distributed nodes.
+
+## HTTP API and Pathing
+
+HyperBEAM exposes a powerful HTTP API that allows for interacting with processes and accessing data through structured URL patterns. We call URLs that represent computation paths "HyperPATHs". The URL bar effectively functions as a command-line interface for AO's trustless and verifiable compute.
+
+For a comprehensive guide on constructing and interpreting paths in AO-Core, including detailed examples and best practices, see [Pathing in AO-Core](./pathing-in-ao-core.md).
+
+In essence, HyperBEAM is the engine that powers the AO Computer, enabling the vision of a scalable, trust-minimized, decentralized supercomputer built on permanent storage.
+
+*See also: [HyperBEAM GitHub Repository](https://github.com/permaweb/HyperBEAM)*
+
+--- END OF FILE: docs/introduction/what-is-hyperbeam.md ---
+
+--- START OF FILE: docs/resources/llms.md ---
+# LLM Context Files
+
+This section provides access to specially formatted files intended for consumption by Large Language Models (LLMs) to provide context about the HyperBEAM documentation.
+
+1. **[LLM Summary (llms.txt)](../llms.txt)**
+ * **Content**: Contains a brief summary of the HyperBEAM documentation structure and a list of relative file paths for all markdown documents included in the build.
+ * **Usage**: Useful for providing an LLM with a high-level overview and the available navigation routes within the documentation.
+
+2. **[LLM Full Content (llms-full.txt)](../llms-full.txt)**
+ * **Content**: A single text file containing the complete, concatenated content of all markdown documents from the specified documentation directories (`begin`, `run`, `guides`, `devices`, `resources`). Each file's content is clearly demarcated.
+ * **Usage**: Ideal for feeding the entire documentation content into an LLM for comprehensive context, analysis, or question-answering based on the full documentation set.
+
+!!! note "Generation Process"
+ These files are automatically generated by the `docs/build-all.sh` script during the documentation build process. They consolidate information from the following directories:
+
+ * `docs/begin`
+ * `docs/run`
+ * `docs/guides`
+ * `docs/devices`
+ * `docs/resources`
+
+--- END OF FILE: docs/resources/llms.md ---
+
+--- START OF FILE: docs/resources/reference/faq.md ---
+# Frequently Asked Questions
+
+This page answers common questions about HyperBEAM, its components, and how to use them effectively.
+
+## General Questions
+
+### What is HyperBEAM?
+
+HyperBEAM is a client implementation of the AO-Core protocol written in Erlang. It serves as the node software for a decentralized operating system that allows operators to offer computational resources to users in the AO network.
+
+### How does HyperBEAM differ from other distributed systems?
+
+HyperBEAM focuses on true decentralization with asynchronous message passing between isolated processes. Unlike many distributed systems that rely on central coordination, HyperBEAM nodes can operate independently while still forming a cohesive network. Additionally, its Erlang foundation provides robust fault tolerance and concurrency capabilities.
+
+### What can I build with HyperBEAM?
+
+You can build a wide range of applications, including:
+
+- Decentralized applications (dApps)
+- Distributed computation systems
+- Peer-to-peer services
+- Resilient microservices
+- IoT device networks
+- Decentralized storage solutions
+
+### Is HyperBEAM open source?
+
+Yes, HyperBEAM is open-source software licensed under the Business Source License.
+
+### What is the current focus or phase of HyperBEAM development?
+
+The initial development phase focuses on integrating AO processes more deeply with HyperBEAM. A key part of this is phasing out the reliance on traditional "dryrun" simulations for reading process state. Instead, processes are encouraged to use the [~patch@1.0 device](../../resources/source-code/dev_patch.md) to expose specific parts of their state directly via HyperPATH GET requests. This allows for more efficient and direct state access, particularly for web interfaces and external integrations. You can learn more about this mechanism in the [Exposing Process State with the Patch Device](../../build/exposing-process-state.md) guide.
+
+## Installation and Setup
+
+### What are the system requirements for running HyperBEAM?
+
+Currently, HyperBEAM is primarily tested and documented for Ubuntu 22.04. Support for macOS and other platforms will be added in future updates. For detailed requirements, see the [System Requirements](../getting-started/requirements.md) page.
+
+### Can I run HyperBEAM in a container?
+
+While technically possible, running HyperBEAM in Docker containers or other containerization technologies is currently not recommended. The containerization approach may introduce additional complexity and potential performance issues. We recommend running HyperBEAM directly on the host system until container support is more thoroughly tested and optimized.
+
+### How do I update HyperBEAM to the latest version?
+
+To update HyperBEAM:
+
+1. Pull the latest code from the repository
+2. Rebuild the application
+3. Restart the HyperBEAM service
+
+Specific update instructions will vary depending on your installation method.
+
+### Can I run multiple HyperBEAM nodes on a single machine?
+
+Yes, you can run multiple HyperBEAM nodes on a single machine, but you'll need to configure them to use different ports and data directories to avoid conflicts. However, this is not recommended for production environments as each node should ideally have a unique IP address to properly participate in the network. Running multiple nodes on a single machine is primarily useful for development and testing purposes.
+
+## Architecture and Components
+
+### What is the difference between HyperBEAM and Compute Unit?
+
+- **HyperBEAM**: The Erlang-based node software that handles message routing, process management, and device coordination.
+- **Compute Unit (CU)**: A NodeJS implementation that executes WebAssembly modules and handles computational tasks.
+
+Together, these components form a complete execution environment for AO processes.
+
+## Development and Usage
+
+### What programming languages can I use with HyperBEAM?
+
+You can use any programming language that compiles to WebAssembly (WASM) for creating modules that run on the Compute Unit. This includes languages like:
+
+- Lua
+- Rust
+- C/C++
+- And many others with WebAssembly support
+
+### How do I debug processes running in HyperBEAM?
+
+Debugging processes in HyperBEAM can be done through:
+
+1. Logging messages to the system log
+2. Monitoring process state and message flow
+3. Inspecting memory usage and performance metrics
+
+### Is there a limit to how many processes can run on a node?
+
+The practical limit depends on your hardware resources. Erlang is designed to handle millions of lightweight processes efficiently, but the actual number will be determined by:
+
+- Available memory
+- CPU capacity
+- Network bandwidth
+- Storage speed
+- The complexity of your processes
+
+
+## Troubleshooting
+
+### What should I do if a node becomes unresponsive?
+
+If a node becomes unresponsive:
+
+1. Check the node's logs for error messages
+2. Verify network connectivity
+3. Ensure sufficient system resources
+4. Restart the node if necessary
+5. Check for configuration issues
+
+For persistent problems, consult the [Troubleshooting](troubleshooting.md) page.
+
+### Where can I get help if I encounter issues?
+
+If you encounter issues:
+
+- Check the [Troubleshooting](troubleshooting.md) guide
+- Search or ask questions on [GitHub Issues](https://github.com/permaweb/HyperBEAM/issues)
+- Join the community on [Discord](https://discord.gg/V3yjzrBxPM)
+--- END OF FILE: docs/resources/reference/faq.md ---
+
+--- START OF FILE: docs/resources/reference/glossary.md ---
+# Glossary
+
+This glossary provides definitions for terms and concepts used throughout the HyperBEAM documentation. For a comprehensive glossary of permaweb-specific terminology, check out the [permaweb glossary](#permaweb-glossary) section below.
+
+## AO-Core Protocol
+The underlying protocol that HyperBEAM implements, enabling decentralized computing and communication between nodes. AO-Core provides a framework into which any number of different computational models, encapsulated as primitive devices, can be attached.
+
+## Asynchronous Message Passing
+A communication paradigm where senders don't wait for receivers to be ready, allowing for non-blocking operations and better scalability.
+
+## Checkpoint
+A saved state of a process that can be used to resume execution from a known point, used for persistence and recovery.
+
+## Compute Unit (CU)
+The NodeJS component of HyperBEAM that executes WebAssembly modules and handles computational tasks.
+
+## Decentralized Execution
+The ability to run processes across a distributed network without centralized control or coordination.
+
+## Device
+A functional unit in HyperBEAM that provides specific capabilities to the system, such as storage, networking, or computational resources.
+
+## Erlang
+The programming language used to implement the HyperBEAM core, known for its robustness and support for building distributed, fault-tolerant applications.
+
+## ~flat@1.0
+A format used for encoding settings files in HyperBEAM configuration, using HTTP header styling.
+
+## Hashpaths
+A mechanism for referencing locations in a program's state-space prior to execution. These state-space links are represented as Merklized lists of a program's inputs and initial states.
+
+## HyperBEAM
+The Erlang-based node software that handles message routing, process management, and device coordination in the HyperBEAM ecosystem.
+
+## Message
+A data structure used for communication between processes in the HyperBEAM system. Messages can be interpreted as a binary term or as a collection of named functions (a Map of functions).
+
+## Module
+A unit of code that can be loaded and executed by the Compute Unit, typically in WebAssembly format.
+
+## Node
+An instance of HyperBEAM running on a physical or virtual machine that participates in the distributed network.
+
+## ~p4@1.0
+A device that runs as a pre-processor and post-processor in HyperBEAM, enabling a framework for node operators to sell usage of their machine's hardware to execute AO-Core devices.
+
+## Process
+An independent unit of computation in HyperBEAM with its own state and execution context.
+
+## Process ID
+A unique identifier assigned to a process within the HyperBEAM system.
+
+## ~scheduler@1.0
+A device used to assign a linear hashpath to an execution, such that all users may access it with a deterministic ordering.
+
+## ~compute-lite@1.0
+A lightweight device wrapping a local WASM executor, used for executing legacynet AO processes inside HyperBEAM.
+
+## ~json-iface@1.0
+A device that offers a translation layer between the JSON-encoded message format used by legacy versions and HyperBEAM's native HTTP message format.
+
+## ~meta@1.0
+A device used to configure the node's hardware, supported devices, metering and payments information, amongst other configuration options.
+
+## ~process@1.0
+A device that enables users to create persistent, shared executions that can be accessed by any number of users, each of whom may add additional inputs to its hashpath.
+
+## ~relay@1.0
+A device used to relay messages between nodes and the wider HTTP network. It offers an interface for sending and receiving messages using a variety of execution strategies.
+
+## ~simple-pay@1.0
+A simple, flexible pricing device that can be used in conjunction with ~p4@1.0 to offer flat-fees for the execution of AO-Core messages.
+
+## ~snp@1.0
+A device used to generate and validate proofs that a node is executing inside a Trusted Execution Environment (TEE).
+
+## ~wasm64@1.0
+A device used to execute WebAssembly code, using the Web Assembly Micro-Runtime (WAMR) under-the-hood.
+
+## ~stack@1.0
+A device used to execute an ordered set of devices over the same inputs, allowing users to create complex combinations of other devices.
+
+## Trusted Execution Environment (TEE)
+A secure area inside a processor that ensures the confidentiality and integrity of code and data loaded within it. Used in HyperBEAM for trust-minimized computation.
+
+## WebAssembly (WASM)
+A binary instruction format that serves as a portable compilation target for programming languages, enabling deployment on the web and other environments.
+
+## Permaweb Glossary
+
+For a more comprehensive glossary of terms used in the permaweb, try the [Permaweb Glossary](https://glossary.arweave.net). Or use it below:
+
+
+
+
+
+
+
+
+
+
+
+
+--- END OF FILE: docs/resources/reference/glossary.md ---
+
+--- START OF FILE: docs/resources/reference/troubleshooting.md ---
+# Troubleshooting Guide
+
+This guide addresses common issues you might encounter when working with HyperBEAM and the Compute Unit.
+
+## Installation Issues
+
+### Erlang Installation Fails
+
+**Symptoms**: Errors during Erlang compilation or installation
+
+**Solutions**:
+
+- Ensure all required dependencies are installed: `sudo apt-get install -y libssl-dev ncurses-dev make cmake gcc g++`
+- Try configuring with fewer options: `./configure --without-wx --without-debugger --without-observer --without-et`
+- Check disk space, as compilation requires several GB of free space
+
+### Rebar3 Bootstrap Fails
+
+**Symptoms**: Errors when running `./bootstrap` for Rebar3
+
+**Solutions**:
+
+- Verify Erlang is correctly installed: `erl -eval 'erlang:display(erlang:system_info(otp_release)), halt().'`
+- Ensure you have the latest version of the repository: `git fetch && git reset --hard origin/master`
+- Try manually downloading a precompiled Rebar3 binary
+
+## HyperBEAM Issues
+
+### HyperBEAM Won't Start
+
+**Symptoms**: Errors when running `rebar3 shell` or the HyperBEAM startup command
+
+**Solutions**:
+
+- Check for port conflicts: Another service might be using the configured port
+- Verify the wallet key file exists and is accessible
+- Examine Erlang crash dumps for detailed error information
+- Ensure all required dependencies are installed
+
+### HyperBEAM Crashes During Operation
+
+**Symptoms**: Unexpected termination of the HyperBEAM process
+
+**Solutions**:
+
+- Check system resources (memory, disk space)
+- Examine Erlang crash dumps for details
+- Reduce memory limits if the system is resource-constrained
+- Check for network connectivity issues if connecting to external services
+
+## Compute Unit Issues
+
+### Compute Unit Won't Start
+
+**Symptoms**: Errors when running `npm start` in the CU directory
+
+**Solutions**:
+
+- Verify Node.js is installed correctly: `node -v`
+- Ensure all dependencies are installed: `npm i`
+- Check that the wallet file exists and is correctly formatted
+- Verify the `.env` file has all required settings
+
+### Memory Errors in Compute Unit
+
+**Symptoms**: Out of memory errors or excessive memory usage
+
+**Solutions**:
+
+- Adjust the `PROCESS_WASM_MEMORY_MAX_LIMIT` environment variable
+- Enable garbage collection by setting an appropriate `GC_INTERVAL_MS`
+- Monitor memory usage and adjust limits as needed
+- If on a low-memory system, reduce concurrent process execution
+
+## Integration Issues
+
+### HyperBEAM Can't Connect to Compute Unit
+
+**Symptoms**: Connection errors in HyperBEAM logs when trying to reach the CU
+
+**Solutions**:
+
+- Verify the CU is running: `curl http://localhost:6363`
+- Ensure there are no firewall rules blocking the connection
+- Verify network configuration if components are on different machines
+
+### Process Execution Fails
+
+**Symptoms**: Errors when deploying or executing processes
+
+**Solutions**:
+
+- Check both HyperBEAM and CU logs for specific error messages
+- Verify that the WASM module is correctly compiled and valid
+- Test with a simple example process to isolate the issue
+- Adjust memory limits if the process requires more resources
+
+## Getting Help
+
+If you're still experiencing issues after trying these troubleshooting steps:
+
+1. Check the [GitHub repository](https://github.com/permaweb/HyperBEAM) for known issues
+2. Join the [Discord community](https://discord.gg/V3yjzrBxPM) for support
+3. Open an issue on GitHub with detailed information about your problem
+--- END OF FILE: docs/resources/reference/troubleshooting.md ---
+
+--- START OF FILE: docs/resources/source-code/ar_bundles.md ---
+# [Module ar_bundles.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_bundles.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
Check if the server is already running, and if not, start it.
+
+
+
+
+## Function Details ##
+
+
+
+### cache/1 * ###
+
+`cache(Current) -> any()`
+
+Cache the current timestamp from Arweave.
+
+
+
+### get/0 ###
+
+`get() -> any()`
+
+Get the current timestamp from the server, starting the server if it
+isn't already running.
+
+
+
+### refresher/1 * ###
+
+`refresher(TSServer) -> any()`
+
+Refresh the timestamp cache periodically.
+
+
+
+### spawn_server/0 * ###
+
+`spawn_server() -> any()`
+
+Spawn a new server and its refresher.
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+Check if the server is already running, and if not, start it.
+
+
+--- END OF FILE: docs/resources/source-code/ar_timestamp.md ---
+
+--- START OF FILE: docs/resources/source-code/ar_tx.md ---
+# [Module ar_tx.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_tx.erl)
+
+
+
+
+The module with utilities for transaction creation, signing, and verification.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### cache_write_binary_test/0 * ###
+
+`cache_write_binary_test() -> any()`
+
+Ensure that we can write direct binaries to the cache.
+
+
+
+### cache_write_message_test/0 * ###
+
+`cache_write_message_test() -> any()`
+
+Test that the cache can be written to and read from using the hb_cache
+API.
+
+
+
+### is_trusted_writer/2 * ###
+
+`is_trusted_writer(Req, Opts) -> any()`
+
+Verify that the request originates from a trusted writer.
+Checks that the single signer of the request is present in the list
+of trusted cache writer addresses specified in the options.
+
+
+
+### link/3 ###
+
+`link(Base, Req, Opts) -> any()`
+
+Link a source to a destination in the cache.
+
+
+
+### read/3 ###
+
+`read(M1, M2, Opts) -> any()`
+
+Read data from the cache.
+Retrieves data corresponding to a key from a local store.
+The key is extracted from the incoming message under <<"target">>.
+The options map may include store configuration.
+If the "accept" header is set to <<"application/aos-2">>, the result is
+converted to a JSON structure and encoded.
+
+
+
+### read_from_cache/2 * ###
+
+`read_from_cache(Node, Path) -> any()`
+
+Read data from the cache via HTTP.
+Constructs a GET request using the provided path, sends it to the node,
+and returns the response.
+
+
+
+### setup_test_env/0 * ###
+
+`setup_test_env() -> any()`
+
+Create a test environment with a local store and node.
+Ensures that the required application is started, configures a local
+file-system store, resets the store for a clean state, creates a wallet
+for signing requests, and starts a node with the store and trusted cache
+writer configuration.
+
+
+
+### write/3 ###
+
+`write(M1, M2, Opts) -> any()`
+
+Write data to the cache.
+Processes a write request by first verifying that the request comes from a
+trusted writer (as defined by the `cache_writers` configuration in the
+options). The write type is determined from the message ("single" or "batch")
+and the data is stored accordingly.
+
+
+
+### write_single/2 * ###
+
+`write_single(Msg, Opts) -> any()`
+
+Helper function to write a single data item to the cache.
+Extracts the body, location, and operation from the message.
+Depending on the type of data (map or binary) or if a link operation is
+requested, it writes the data to the store using the appropriate function.
+
+
+
+### write_to_cache/3 * ###
+
+`write_to_cache(Node, Data, Wallet) -> any()`
+
+Write data to the cache via HTTP.
+Constructs a write request message with the provided data, signs it with the
+given wallet, sends it to the node, and verifies that the response indicates
+a successful write.
+
+
+--- END OF FILE: docs/resources/source-code/dev_cache.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_cacheviz.md ---
+# [Module dev_cacheviz.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_cacheviz.erl)
+
+
+
+
+A device that generates renders (or renderable dot output) of a node's
+cache.
+
+
+
+## Function Index ##
+
+
+
+Output the SVG representation of the cache, or a specific path within
+the cache set by the target key in the request.
+
+
+
+
+## Function Details ##
+
+
+
+### dot/3 ###
+
+`dot(X1, Req, Opts) -> any()`
+
+Output the dot representation of the cache, or a specific path within
+the cache set by the `target` key in the request.
+
+
+
+### svg/3 ###
+
+`svg(Base, Req, Opts) -> any()`
+
+Output the SVG representation of the cache, or a specific path within
+the cache set by the `target` key in the request.
+
+
+--- END OF FILE: docs/resources/source-code/dev_cacheviz.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_codec_ans104.md ---
+# [Module dev_codec_ans104.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_ans104.erl)
+
+
+
+
+Codec for managing transformations from `ar_bundles`-style Arweave TX
+records to and from TABMs.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### commit/3 ###
+
+`commit(Msg, Req, Opts) -> any()`
+
+Sign a message using the `priv_wallet` key in the options.
+
+
+
+### committed/3 ###
+
+`committed(Msg, Req, Opts) -> any()`
+
+Return a list of committed keys from an ANS-104 message.
+
+
+
+### committed_from_trusted_keys/3 * ###
+
+`committed_from_trusted_keys(Msg, TrustedKeys, Opts) -> any()`
+
+
+
+### content_type/1 ###
+
+`content_type(X1) -> any()`
+
+Return the content type for the codec.
+
+
+
+### deduplicating_from_list/1 * ###
+
+`deduplicating_from_list(Tags) -> any()`
+
+Deduplicate a list of key-value pairs by key, generating a list of
+values for each normalized key if there are duplicates.
+
+
+
+### deserialize/1 ###
+
+`deserialize(Binary) -> any()`
+
+Deserialize a binary ans104 message to a TABM.
+
+
+
+### do_from/1 * ###
+
+`do_from(RawTX) -> any()`
+
+
+
+### duplicated_tag_name_test/0 * ###
+
+`duplicated_tag_name_test() -> any()`
+
+
+
+### encoded_tags_to_map/1 * ###
+
+`encoded_tags_to_map(Tags) -> any()`
+
+Convert an ANS-104 encoded tag list into a HyperBEAM-compatible map.
+
+
+
+### from/1 ###
+
+`from(Binary) -> any()`
+
+Convert a #tx record into a message map recursively.
+
+
+
+### from_maintains_tag_name_case_test/0 * ###
+
+`from_maintains_tag_name_case_test() -> any()`
+
+
+
+### id/1 ###
+
+`id(Msg) -> any()`
+
+Return the ID of a message.
+
+
+
+### normal_tags/1 * ###
+
+`normal_tags(Tags) -> any()`
+
+Check whether a list of key-value pairs contains only normalized keys.
+
+
+
+### normal_tags_test/0 * ###
+
+`normal_tags_test() -> any()`
+
+
+
+### only_committed_maintains_target_test/0 * ###
+
+`only_committed_maintains_target_test() -> any()`
+
+
+
+### quantity_field_is_ignored_in_from_test/0 * ###
+
+`quantity_field_is_ignored_in_from_test() -> any()`
+
+
+
+### quantity_key_encoded_as_tag_test/0 * ###
+
+`quantity_key_encoded_as_tag_test() -> any()`
+
+
+
+### restore_tag_name_case_from_cache_test/0 * ###
+
+`restore_tag_name_case_from_cache_test() -> any()`
+
+
+
+### serialize/1 ###
+
+`serialize(Msg) -> any()`
+
+Serialize a message or TX to a binary.
+
+
+
+### signed_duplicated_tag_name_test/0 * ###
+
+`signed_duplicated_tag_name_test() -> any()`
+
+
+
+### simple_to_conversion_test/0 * ###
+
+`simple_to_conversion_test() -> any()`
+
+
+
+### tag_map_to_encoded_tags/1 * ###
+
+`tag_map_to_encoded_tags(TagMap) -> any()`
+
+Convert a HyperBEAM-compatible map into an ANS-104 encoded tag list,
+recreating the original order of the tags.
+
+
+
+### to/1 ###
+
+`to(Binary) -> any()`
+
+Internal helper to translate a message to its #tx record representation,
+which can then be used by ar_bundles to serialize the message. We call the
+message's device in order to get the keys that we will be checkpointing. We
+do this recursively to handle nested messages. The base case is that we hit
+a binary, which we return as is.
+
+
+
+### verify/3 ###
+
+`verify(Msg, Req, Opts) -> any()`
+
+Verify an ANS-104 commitment.
+
+
+--- END OF FILE: docs/resources/source-code/dev_codec_ans104.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_codec_flat.md ---
+# [Module dev_codec_flat.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_flat.erl)
+
+
+
+
+A codec for turning TABMs into/from flat Erlang maps that have
+(potentially multi-layer) paths as their keys, and a normal TABM binary as
+their value.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### binary_passthrough_test/0 * ###
+
+`binary_passthrough_test() -> any()`
+
+
+
+### commit/3 ###
+
+`commit(Msg, Req, Opts) -> any()`
+
+
+
+### committed/3 ###
+
+`committed(Msg, Req, Opts) -> any()`
+
+
+
+### deep_nesting_test/0 * ###
+
+`deep_nesting_test() -> any()`
+
+
+
+### deserialize/1 ###
+
+`deserialize(Bin) -> any()`
+
+
+
+### empty_map_test/0 * ###
+
+`empty_map_test() -> any()`
+
+
+
+### from/1 ###
+
+`from(Bin) -> any()`
+
+Convert a flat map to a TABM.
+
+
+
+### inject_at_path/3 * ###
+
+`inject_at_path(Rest, Value, Map) -> any()`
+
+
+
+### multiple_paths_test/0 * ###
+
+`multiple_paths_test() -> any()`
+
+
+
+### nested_conversion_test/0 * ###
+
+`nested_conversion_test() -> any()`
+
+
+
+### path_list_test/0 * ###
+
+`path_list_test() -> any()`
+
+
+
+### serialize/1 ###
+
+`serialize(Map) -> any()`
+
+
+
+### simple_conversion_test/0 * ###
+
+`simple_conversion_test() -> any()`
+
+
+
+### to/1 ###
+
+`to(Bin) -> any()`
+
+Convert a TABM to a flat map.
+
+
+
+### verify/3 ###
+
+`verify(Msg, Req, Opts) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_codec_flat.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_codec_httpsig_conv.md ---
+# [Module dev_codec_httpsig_conv.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_httpsig_conv.erl)
+
+
+
+
+A codec that marshals TABM encoded messages to and from the
+"HTTP" message structure.
+
+
+
+## Description ##
+
+Every HTTP message is an HTTP multipart message.
+See https://datatracker.ietf.org/doc/html/rfc7578
+
+For each TABM Key:
+
+The Key/Value Pair will be encoded according to the following rules:
+"signatures" -> {SignatureInput, Signature} header Tuples, each encoded
+as a Structured Field Dictionary
+"body" ->
+- if a map, then recursively encode as its own HyperBEAM message
+- otherwise encode as a normal field
+_ -> encode as a normal field
+
+Each field will be mapped to the HTTP Message according to the following
+rules:
+"body" -> always encoded as part of the body, with a Content-Disposition
+type of "inline"
+_ ->
+- If the byte size of the value is less than the ?MAX_TAG_VALUE,
+then encode as a header, also attempting to encode as a
+structured field.
+- Otherwise encode the value as a part in the multipart response
+
+
+## Function Index ##
+
+
+
+Generate a unique, reproducible boundary for the
+multipart body, however we cannot use the id of the message as
+the boundary, as the id is not known until the message is
+encoded.
+
+
+
+
+## Function Details ##
+
+
+
+### boundary_from_parts/1 * ###
+
+`boundary_from_parts(PartList) -> any()`
+
+Generate a unique, reproducible boundary for the
+multipart body, however we cannot use the id of the message as
+the boundary, as the id is not known until the message is
+encoded. Subsequently, we generate each body part individually,
+concatenate them, and apply a SHA2-256 hash to the result.
+This ensures that the boundary is unique, reproducible, and
+secure.
+
+
+
+### commitments_from_signature/4 * ###
+
+`commitments_from_signature(Map, HPs, RawSig, RawSigInput) -> any()`
+
+Populate the `/commitments` key on the TABM with the dictionary of
+signatures and their corresponding inputs.
+
+
+
+### do_to/2 * ###
+
+`do_to(Binary, Opts) -> any()`
+
+
+
+### encode_body_keys/1 * ###
+
+`encode_body_keys(PartList) -> any()`
+
+Encode a list of body parts into a binary.
+
+
+
+### encode_body_part/3 * ###
+
+`encode_body_part(PartName, BodyPart, InlineKey) -> any()`
+
+Encode a multipart body part to a flat binary.
+
+
+
+### encode_http_msg/1 * ###
+
+`encode_http_msg(Httpsig) -> any()`
+
+Encode a HTTP message into a binary.
+
+
+
+### extract_hashpaths/1 * ###
+
+`extract_hashpaths(Map) -> any()`
+
+Extract all keys labelled `hashpath*` from the commitments, and add them
+to the HTTP message as `hashpath*` keys.
+
+
+
+### field_to_http/3 * ###
+
+`field_to_http(Httpsig, X2, Opts) -> any()`
+
+All maps are encoded into the body of the HTTP message
+to be further encoded later.
+
+
+
+### from/1 ###
+
+`from(Bin) -> any()`
+
+Convert a HTTP Message into a TABM.
+HTTP Structured Field is encoded into its equivalent TABM encoding.
+
+
+
+### from_body/4 * ###
+
+`from_body(TABM, InlinedKey, ContentType, Body) -> any()`
+
+
+
+### from_body_parts/3 * ###
+
+`from_body_parts(TABM, InlinedKey, Rest) -> any()`
+
+
+
+### group_ids/1 * ###
+
+`group_ids(Map) -> any()`
+
+Group all elements with:
+1. A key that ?IS_ID returns true for, and
+2. A value that is immediate
+into a combined SF dict-_like_ structure. If not encoded, these keys would
+be sent as headers and lower-cased, losing their comparability against the
+original keys. The structure follows all SF dict rules, except that it allows
+for keys to contain capitals. The HyperBEAM SF parser will accept these keys,
+but standard RFC 8941 parsers will not. Subsequently, the resulting `ao-cased`
+key is not added to the `ao-types` map.
+
+
+
+### group_maps/1 * ###
+
+`group_maps(Map) -> any()`
+
+Merge maps at the same level, if possible.
+
+
+
+### group_maps/3 * ###
+
+`group_maps(Map, Parent, Top) -> any()`
+
+
+
+### group_maps_flat_compatible_test/0 * ###
+
+`group_maps_flat_compatible_test() -> any()`
+
+The grouped maps encoding is a subset of the flat encoding,
+where only keys with map values are flattened.
+
+So despite needing a special encoder to produce it
+We can simply apply the flat encoder to it to get back
+the original message.
+
+The test asserts that is indeed the case.
+
+
+
+### group_maps_test/0 * ###
+
+`group_maps_test() -> any()`
+
+
+
+### hashpaths_from_message/1 * ###
+
+`hashpaths_from_message(Msg) -> any()`
+
+
+
+### inline_key/1 * ###
+
+`inline_key(Msg) -> any()`
+
+given a message, returns a binary tuple:
+- A list of pairs to add to the msg, if any
+- the field name for the inlined key
+
+In order to preserve the field name of the inlined
+part, an additional field may need to be added
+
+
+
+### to/1 ###
+
+`to(Bin) -> any()`
+
+Convert a TABM into an HTTP Message. The HTTP Message is a simple Erlang Map
+that can be translated to a given web server Response API
+
+
+
+### to/2 * ###
+
+`to(TABM, Opts) -> any()`
+
+
+
+### ungroup_ids/1 * ###
+
+`ungroup_ids(Msg) -> any()`
+
+Decode the `ao-ids` key into a map.
+
+
+--- END OF FILE: docs/resources/source-code/dev_codec_httpsig_conv.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_codec_httpsig.md ---
+# [Module dev_codec_httpsig.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_httpsig.erl)
+
+
+
+
+This module implements HTTP Message Signatures as described in RFC-9421
+(https://datatracker.ietf.org/doc/html/rfc9421), as an AO-Core device.
+
+
+
+## Description ##
+It implements the codec standard (from/1, to/1), as well as the optional
+commitment functions (id/3, sign/3, verify/3). The commitment functions
+are found in this module, while the codec functions are relayed to the
+`dev_codec_httpsig_conv` module.
+
+
+## Data Types ##
+
+
+
+
+### authority_state() ###
+
+
+
+Given a Component Identifier and a Request/Response Messages Context
+extract the value represented by the Component Identifier, from the Messages
+Context, specifically a "Derived" Component within the Messages Context,
+and return the normalized form of the identifier, along with the extracted
+encoded value.
+Given a Component Identifier and a Request/Response Messages Context
+extract the value represented by the Component Identifier, from the Messages
+Context, specifically a field on a Message within the Messages Context,
+and return the normalized form of the identifier, along with the extracted
+encoded value.
+Given a Component Identifier and a Request/Response Messages Context
+extract the value represented by the Component Identifier, from the Messages
+Context, and return the normalized form of the identifier, along with the
+extracted encoded value.
+Takes a list of keys that will be used in the signature inputs and
+ensures that they have deterministic sorting, as well as the correct
+component identifiers if applicable.
+Using the provided Authority and Request/Response Messages Context,
+create a Name, Signature and SignatureInput that can be used to add
+additional signatures to a corresponding HTTP Message.
+
+
+A helper to validate and produce an "Authority" State
+
+
+
+### bin/1 * ###
+
+`bin(Item) -> any()`
+
+
+
+### commit/3 ###
+
+`commit(MsgToSign, Req, Opts) -> any()`
+
+Main entrypoint for signing a HTTP Message, using the standardized format.
+
+
+
+### committed/3 ###
+
+`committed(RawMsg, Req, Opts) -> any()`
+
+Return the list of committed keys from a message. The message will have
+had the `commitments` key removed and the signature inputs added to the
+root. Subsequently, we can parse that to get the list of committed keys.
+
+
+
+### committed_from_body/1 * ###
+
+`committed_from_body(Msg) -> any()`
+
+Return the list of committed keys from a message that are derived from
+the body components.
+
+
+
+### committed_id_test/0 * ###
+
+`committed_id_test() -> any()`
+
+
+
+### derive_component/3 * ###
+
+`derive_component(Identifier, Req, Res) -> any()`
+
+Given a Component Identifier and a Request/Response Messages Context
+extract the value represented by the Component Identifier, from the Messages
+Context, specifically a "Derived" Component within the Messages Context,
+and return the normalized form of the identifier, along with the extracted
+encoded value.
+
+This implements a portion of RFC-9421
+See https://datatracker.ietf.org/doc/html/rfc9421#name-derived-components
+
+
+
+### derive_component/4 * ###
+
+`derive_component(X1, Req, Res, Subject) -> any()`
+
+
+
+### derive_component_error_query_param_no_name_test/0 * ###
+
+`derive_component_error_query_param_no_name_test() -> any()`
+
+
+
+### derive_component_error_req_param_on_request_target_test/0 * ###
+
+`derive_component_error_req_param_on_request_target_test() -> any()`
+
+
+
+### derive_component_error_status_req_target_test/0 * ###
+
+`derive_component_error_status_req_target_test() -> any()`
+
+
+
+### do_committed/4 * ###
+
+`do_committed(SigInputStr, Msg, Req, Opts) -> any()`
+
+
+
+### extract_dictionary_field_value/2 * ###
+
+`extract_dictionary_field_value(StructuredField, Key) -> any()`
+
+Extract a value from a Structured Field, and return the normalized field,
+along with the encoded value
+
+
+
+### extract_field/3 * ###
+
+`extract_field(X1, Req, Res) -> any()`
+
+Given a Component Identifier and a Request/Response Messages Context
+extract the value represented by the Component Identifier, from the Messages
+Context, specifically a field on a Message within the Messages Context,
+and return the normalized form of the identifier, along with the extracted
+encoded value.
+
+This implements a portion of RFC-9421
+See https://datatracker.ietf.org/doc/html/rfc9421#name-http-fields
+
+
+
+### extract_field_value/2 * ###
+
+`extract_field_value(RawFields, X2) -> any()`
+
+Extract values from the field and return the normalized field,
+along with encoded value
+
+
+
+### find_byte_sequence_param/1 * ###
+
+`find_byte_sequence_param(Params) -> any()`
+
+
+
+### find_id/1 * ###
+
+`find_id(Msg) -> any()`
+
+Find the ID of the message, which is the hmac of the fields referenced in
+the signature and signature input. If the message already has a signature-input,
+directly, it is treated differently: We relabel it as `x-signature-input` to
+avoid key collisions.
+
+
+
+### find_key_param/1 * ###
+
+`find_key_param(Params) -> any()`
+
+
+
+### find_name_param/1 * ###
+
+`find_name_param(Params) -> any()`
+
+
+
+### find_request_param/1 * ###
+
+`find_request_param(Params) -> any()`
+
+
+
+### find_sf_param/3 * ###
+
+`find_sf_param(Name, Params, Default) -> any()`
+
+Given a parameter Name, extract the Parameter value from the HTTP
+Structured Field data structure.
+
+If no value is found, then false is returned
+
+
+
+### find_strict_format_param/1 * ###
+
+`find_strict_format_param(Params) -> any()`
+
+
+
+### find_trailer_param/1 * ###
+
+`find_trailer_param(Params) -> any()`
+
+
+
+### from/1 ###
+
+`from(Msg) -> any()`
+
+
+
+### hmac/1 * ###
+
+`hmac(Msg) -> any()`
+
+Generate the ID of the message, with the current signature and signature
+input as the components for the hmac.
+
+
+
+### id/3 ###
+
+`id(Msg, Params, Opts) -> any()`
+
+
+
+### identifier_to_component/3 * ###
+
+`identifier_to_component(Identifier, Req, Res) -> any()`
+
+Given a Component Identifier and a Request/Response Messages Context
+extract the value represented by the Component Identifier, from the Messages
+Context, and return the normalized form of the identifier, along with the
+extracted encoded value.
+
+Generally speaking, a Component Identifier may reference a "Derived" Component,
+a Message Field, or a sub-component of a Message Field.
+
+Since a Component Identifier is itself a Structured Field, it may also specify
+parameters, which are used to describe behavior such as which Message to
+derive a field or sub-component of the field, and how to encode the value as
+part of the signature base.
+
+
+
+### join_signature_base/2 * ###
+
+`join_signature_base(ComponentsLine, ParamsLine) -> any()`
+
+
+
+### join_signature_base_test/0 * ###
+
+`join_signature_base_test() -> any()`
+
+
+
+### lower_bin/1 * ###
+
+`lower_bin(Item) -> any()`
+
+
+
+### multicommitted_id_test/0 * ###
+
+`multicommitted_id_test() -> any()`
+
+
+
+### normalize_component_identifiers/1 * ###
+
+`normalize_component_identifiers(ComponentIdentifiers) -> any()`
+
+Takes a list of keys that will be used in the signature inputs and
+ensures that they have deterministic sorting, as well as the correct
+component identifiers if applicable.
+
+
+
+### public_keys/1 ###
+
+`public_keys(Commitment) -> any()`
+
+
+
+### remove_derived_specifiers/1 ###
+
+`remove_derived_specifiers(ComponentIdentifiers) -> any()`
+
+Remove derived specifiers from a list of component identifiers.
+
+
+
+### reset_hmac/1 ###
+
+`reset_hmac(RawMsg) -> any()`
+
+Ensure that the commitments and hmac are properly encoded
+
+
+
+### sf_encode/1 * ###
+
+`sf_encode(StructuredField) -> any()`
+
+Attempt to encode the data structure into an HTTP Structured Field.
+This is the inverse of sf_parse.
+
+
+
+### sf_encode/2 * ###
+
+`sf_encode(Serializer, StructuredField) -> any()`
+
+
+
+### sf_item/1 * ###
+
+`sf_item(SfItem) -> any()`
+
+Attempt to parse the provided value into an HTTP Structured Field Item
+
+
+
+### sf_parse/1 * ###
+
+`sf_parse(Raw) -> any()`
+
+Attempt to parse the binary into a data structure that represents
+an HTTP Structured Field.
+
+Lacking some sort of "hint", there isn't a way to know which "kind" of
+Structured Field the binary is, a priori. So we simply try each parser,
+and return the first invocation that doesn't result in an error.
+
+If no parser is successful, then we return an error tuple
+
+
+
+### sf_parse/2 * ###
+
+`sf_parse(Rest, Raw) -> any()`
+
+
+
+### sf_signature_param/1 * ###
+
+`sf_signature_param(X1) -> any()`
+
+construct the structured field Parameter for the signature parameter,
+checking whether the parameter name is valid according RFC-9421
+
+See https://datatracker.ietf.org/doc/html/rfc9421#section-2.3-3
+
+
+
+### sf_signature_params/2 * ###
+
+`sf_signature_params(ComponentIdentifiers, SigParams) -> any()`
+
+construct the structured field List for the
+"signature-params-line" part of the signature base.
+
+Can be parsed into a binary by simply passing to hb_structured_fields:list/1
+
+See https://datatracker.ietf.org/doc/html/rfc9421#section-2.5-7.3.2.4
+
+
+
+### sig_name_from_dict/1 * ###
+
+`sig_name_from_dict(DictBin) -> any()`
+
+
+
+### sign_auth/3 * ###
+
+
+
+
+using the provided Authority and Request/Response Messages Context,
+create a Name, Signature and SignatureInput that can be used to add
+additional signatures to a corresponding HTTP Message
+
+
+
+### signature_base/3 * ###
+
+`signature_base(Authority, Req, Res) -> any()`
+
+create the signature base that will be signed in order to create the
+Signature and SignatureInput.
+
+This implements a portion of RFC-9421 see:
+https://datatracker.ietf.org/doc/html/rfc9421#name-creating-the-signature-base
+
+
+
+### signature_components_line/3 * ###
+
+`signature_components_line(ComponentIdentifiers, Req, Res) -> any()`
+
+Given a list of Component Identifiers and a Request/Response Message
+context, create the "signature-base-line" portion of the signature base
+
+
+
+### signature_params_line/2 * ###
+
+`signature_params_line(ComponentIdentifiers, SigParams) -> any()`
+
+construct the "signature-params-line" part of the signature base.
+
+See https://datatracker.ietf.org/doc/html/rfc9421#section-2.5-7.3.2.4
+
+
+
+### signature_params_line_test/0 * ###
+
+`signature_params_line_test() -> any()`
+
+
+
+### to/1 ###
+
+`to(Msg) -> any()`
+
+
+
+### trim_and_normalize/1 * ###
+
+`trim_and_normalize(Bin) -> any()`
+
+
+
+### trim_ws/1 * ###
+
+`trim_ws(Bin) -> any()`
+
+Recursively trim space characters from the beginning of the binary
+
+
+
+### trim_ws_end/2 * ###
+
+`trim_ws_end(Value, N) -> any()`
+
+
+
+### trim_ws_test/0 * ###
+
+`trim_ws_test() -> any()`
+
+
+
+### upper_bin/1 * ###
+
+`upper_bin(Item) -> any()`
+
+
+
+### validate_large_message_from_http_test/0 * ###
+
+`validate_large_message_from_http_test() -> any()`
+
+Ensure that we can validate a signature on an extremely large and complex
+message that is sent over HTTP, signed with the codec.
+
+
+
+### verify/3 ###
+
+`verify(MsgToVerify, Req, Opts) -> any()`
+
+Verify different forms of httpsig committed messages. `dev_message:verify`
+already places the keys from the commitment message into the root of the
+message.
+
+
+
+### verify_auth/2 * ###
+
+`verify_auth(Verifier, Msg) -> any()`
+
+Same as verify/3, but with an empty Request Message Context
+
+
+
+### verify_auth/3 * ###
+
+`verify_auth(X1, Req, Res) -> any()`
+
+Given the signature name, and the Request/Response Message Context
+verify the named signature by constructing the signature base and comparing
+
+
+--- END OF FILE: docs/resources/source-code/dev_codec_httpsig.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_codec_json.md ---
+# [Module dev_codec_json.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_json.erl)
+
+
+
+
+A simple JSON codec for HyperBEAM's message format.
+
+
+
+## Description ##
+Takes a
+message as TABM and returns an encoded JSON string representation.
+This codec utilizes the httpsig@1.0 codec for signing and verifying.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### commit/3 ###
+
+`commit(Msg, Req, Opts) -> any()`
+
+
+
+### committed/1 ###
+
+`committed(Msg) -> any()`
+
+
+
+### content_type/1 ###
+
+`content_type(X1) -> any()`
+
+Return the content type for the codec.
+
+
+
+### deserialize/3 ###
+
+`deserialize(Base, Req, Opts) -> any()`
+
+Deserialize the JSON string found at the given path.
+
+
+
+### from/1 ###
+
+`from(Map) -> any()`
+
+Decode a JSON string to a message.
+
+
+
+### serialize/3 ###
+
+`serialize(Base, Msg, Opts) -> any()`
+
+Serialize a message to a JSON string.
+
+
+
+### to/1 ###
+
+`to(Msg) -> any()`
+
+Encode a message to a JSON string.
+
+
+
+### verify/3 ###
+
+`verify(Msg, Req, Opts) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_codec_json.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_codec_structured.md ---
+# [Module dev_codec_structured.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_structured.erl)
+
+
+
+
+A device implementing the codec interface (to/1, from/1) for
+HyperBEAM's internal, richly typed message format.
+
+
+
+## Description ##
+
+This format mirrors HTTP Structured Fields, aside from its limitations of
+compound type depths, as well as limited floating point representations.
+
+As with all AO-Core codecs, its target format (the format it expects to
+receive in the `to/1` function, and give in `from/1`) is TABM.
+
+For more details, see the HTTP Structured Fields (RFC-9651) specification.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### execute/2 ###
+
+`execute(CarrierMsg, S) -> any()`
+
+
+
+### push/2 ###
+
+`push(Msg, S) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_cu.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_dedup.md ---
+# [Module dev_dedup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_dedup.erl)
+
+
+
+
+A device that deduplicates messages sent to a process.
+
+
+
+## Description ##
+Only runs on the first pass of the `compute` key call if executed
+in a stack. Currently the device stores its list of already seen
+items in memory, but at some point it will likely make sense to
+drop them in the cache.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### dedup_test/0 * ###
+
+`dedup_test() -> any()`
+
+
+
+### dedup_with_multipass_test/0 * ###
+
+`dedup_with_multipass_test() -> any()`
+
+
+
+### handle/4 * ###
+
+`handle(Key, M1, M2, Opts) -> any()`
+
+Forward the keys function to the message device, handle all others
+with deduplication. We only act on the first pass.
+
+
+
+### info/1 ###
+
+`info(M1) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_dedup.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_delegated_compute.md ---
+# [Module dev_delegated_compute.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_delegated_compute.erl)
+
+
+
+
+Simple wrapper module that enables compute on remote machines,
+implementing the JSON-Iface.
+
+
+
+## Description ##
+This can be used either as a standalone, to
+bring trusted results into the local node, or as the `Execution-Device` of
+an AO process.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### compute/3 ###
+
+`compute(Msg1, Msg2, Opts) -> any()`
+
+
+
+### do_compute/3 * ###
+
+`do_compute(ProcID, Msg2, Opts) -> any()`
+
+Execute computation on a remote machine via relay and the JSON-Iface.
+
+
+
+### init/3 ###
+
+`init(Msg1, Msg2, Opts) -> any()`
+
+Initialize or normalize the compute-lite device. For now, we don't
+need to do anything special here.
+
+
+
+### normalize/3 ###
+
+`normalize(Msg1, Msg2, Opts) -> any()`
+
+
+
+### snapshot/3 ###
+
+`snapshot(Msg1, Msg2, Opts) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_delegated_compute.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_faff.md ---
+# [Module dev_faff.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_faff.erl)
+
+
+
+
+A module that implements a 'friends and family' pricing policy.
+
+
+
+## Description ##
+
+It will allow users to process requests only if their addresses are
+in the allow-list for the node.
+
+Fundamentally against the spirit of permissionlessness, but it is useful if
+you are running a node for your own purposes and would not like to allow
+others to make use of it -- even for a fee. It also serves as a useful
+example of how to implement a custom pricing policy, as it implements stubs
+for both the pricing and ledger P4 APIs.
+
+## Function Index ##
+
+
+
+Check whether all of the signers of the request are in the allow-list.
+
+
+
+
+## Function Details ##
+
+
+
+### debit/3 ###
+
+`debit(X1, Req, NodeMsg) -> any()`
+
+Debit the user's account if the request is allowed.
+
+
+
+### estimate/3 ###
+
+`estimate(X1, Msg, NodeMsg) -> any()`
+
+Decide whether or not to service a request from a given address.
+
+
+
+### is_admissible/2 * ###
+
+`is_admissible(Msg, NodeMsg) -> any()`
+
+Check whether all of the signers of the request are in the allow-list.
+
+
+--- END OF FILE: docs/resources/source-code/dev_faff.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_genesis_wasm.md ---
+# [Module dev_genesis_wasm.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_genesis_wasm.erl)
+
+
+
+
+A device that mimics an environment suitable for `legacynet` AO
+processes, using HyperBEAM infrastructure.
+
+
+
+## Description ##
+This allows existing `legacynet`
+AO process definitions to be used in HyperBEAM.
+
+## Function Index ##
+
+
+
+Check if the genesis-wasm server is running by requesting its status
+endpoint.
+
+
+
+
+## Function Details ##
+
+
+
+### collect_events/1 * ###
+
+`collect_events(Port) -> any()`
+
+Collect events from the port and log them.
+
+
+
+### collect_events/2 * ###
+
+`collect_events(Port, Acc) -> any()`
+
+
+
+### compute/3 ###
+
+`compute(Msg, Msg2, Opts) -> any()`
+
+Call the `delegated-compute@1.0` device to execute the request. We then apply
+the `patch@1.0` device, applying any state patches that the AO process may have
+requested.
+
+
+
+### ensure_started/1 * ###
+
+`ensure_started(Opts) -> any()`
+
+Ensure the local `genesis-wasm@1.0` is live. If it is not, start it.
+
+
+
+### init/3 ###
+
+`init(Msg, Msg2, Opts) -> any()`
+
+Initialize the device.
+
+
+
+### is_genesis_wasm_server_running/1 * ###
+
+`is_genesis_wasm_server_running(Opts) -> any()`
+
+Check if the genesis-wasm server is running, using the cached process ID
+if available.
+
+
+
+### log_server_events/1 * ###
+
+`log_server_events(Bin) -> any()`
+
+Log lines of output from the genesis-wasm server.
+
+
+
+### normalize/3 ###
+
+`normalize(Msg, Msg2, Opts) -> any()`
+
+Normalize the device.
+
+
+
+### snapshot/3 ###
+
+`snapshot(Msg, Msg2, Opts) -> any()`
+
+Snapshot the device.
+
+
+
+### status/1 * ###
+
+`status(Opts) -> any()`
+
+Check if the genesis-wasm server is running by requesting its status
+endpoint.
+
+
+--- END OF FILE: docs/resources/source-code/dev_genesis_wasm.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_green_zone.md ---
+# [Module dev_green_zone.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_green_zone.erl)
+
+
+
+
+The green zone device, which provides secure communication and identity
+management between trusted nodes.
+
+
+
+## Description ##
+It handles node initialization, joining existing green zones, key exchange,
+and node identity cloning. All operations are protected by hardware
+commitment and encryption.
+
+## Function Index ##
+
+
+
+add_trusted_node(NodeAddr::binary(), Report::map(), RequesterPubKey::term(), Opts::map()) -> ok
+
+
+
+`NodeAddr`: The joining node's address `Report`: The commitment report provided by the joining node `RequesterPubKey`: The joining node's public key `Opts`: A map of configuration options
+
+returns: ok
+
+Adds a node to the trusted nodes list with its commitment report.
+
+This function updates the trusted nodes configuration:
+1. Retrieves the current trusted nodes map
+2. Adds the new node with its report and public key
+3. Updates the node configuration with the new trusted nodes list
+
+
+
+### become/3 ###
+
+
+
+
+`Opts`: A map of configuration options
+
+returns: `{ok, Map}` on success with confirmation details, or
+`{error, Binary}` if the node is not part of a green zone or
+identity adoption fails.
+
+Clones the identity of a target node in the green zone.
+
+This function performs the following operations:
+1. Retrieves target node location and ID from the configuration
+2. Verifies that the local node has a valid shared AES key
+3. Requests the target node's encrypted key via its key endpoint
+4. Verifies the response is from the expected peer
+5. Decrypts the target node's private key using the shared AES key
+6. Updates the local node's wallet with the target node's identity
+
+Required configuration in Opts map:
+- green_zone_peer_location: Target node's address
+- green_zone_peer_id: Target node's unique identifier
+- priv_green_zone_aes: The shared AES key for the green zone
+
+
+
+### calculate_node_message/3 * ###
+
+`calculate_node_message(RequiredOpts, Req, List) -> any()`
+
+Generate the node message that should be set prior to joining
+a green zone.
+
+This function takes a required opts message, a request message, and an
+`adopt-config` value. The `adopt-config` value can be a boolean, a list of
+fields that should be included in the node message from the request, or a
+binary string of fields to include, separated by commas.
+
+
+
+### decrypt_zone_key/2 * ###
+
+
+
+
+`EncZoneKey`: The encrypted zone AES key (Base64 encoded or binary) `Opts`: A map of configuration options
+
+returns: {ok, DecryptedKey} on success with the decrypted AES key
+
+Decrypts an AES key using the node's RSA private key.
+
+This function handles decryption of the zone key:
+1. Decodes the encrypted key if it's in Base64 format
+2. Extracts the RSA private key components from the wallet
+3. Creates an RSA private key record
+4. Performs private key decryption on the encrypted key
+
+
+
+### default_zone_required_opts/1 * ###
+
+
+
+
+`Opts`: A map of configuration options from which to derive defaults
+
+returns: A map of required configuration options for the green zone
+
+Provides the default required options for a green zone.
+
+This function defines the baseline security requirements for nodes in a green zone:
+1. Restricts loading of remote devices and only allows trusted signers
+2. Limits to preloaded devices from the initiating machine
+3. Enforces specific store configuration
+4. Prevents route changes from the defaults
+5. Requires matching hooks across all peers
+6. Disables message scheduling to prevent conflicts
+7. Enforces a permanent state to prevent further configuration changes
+
+
+
+### encrypt_payload/2 * ###
+
+
+
+
+`AESKey`: The shared AES key (256-bit binary) `RequesterPubKey`: The node's public RSA key
+
+returns: The encrypted AES key
+
+Encrypts an AES key with a node's RSA public key.
+
+This function securely encrypts the shared key for transmission:
+1. Extracts the RSA public key components
+2. Creates an RSA public key record
+3. Performs public key encryption on the AES key
+
+
+
+### finalize_become/5 * ###
+
+`finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> any()`
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Controls which functions are exposed via the device API.
+
+This function defines the security boundary for the green zone device by
+explicitly listing which functions are available through the API.
+
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+Provides information about the green zone device and its API.
+
+This function returns detailed documentation about the device, including:
+1. A high-level description of the device's purpose
+2. Version information
+3. Available API endpoints with their parameters and descriptions
+
+
+
+### init/3 ###
+
+
+
+
+`Opts`: A map of configuration options
+
+returns: `{ok, Binary}` on success with confirmation message, or
+`{error, Binary}` on failure with error message.
+
+Initialize the green zone for a node.
+
+This function performs the following operations:
+1. Validates the node's history to ensure this is a valid initialization
+2. Retrieves or creates a required configuration for the green zone
+3. Ensures a wallet (keypair) exists or creates a new one
+4. Generates a new 256-bit AES key for secure communication
+5. Updates the node's configuration with these cryptographic identities
+
+Config options in Opts map:
+- green_zone_required_config: (Optional) Custom configuration requirements
+- priv_wallet: (Optional) Existing wallet to use instead of creating a new one
+- priv_green_zone_aes: (Optional) Existing AES key, if already part of a zone
+
+
+
+### join/3 ###
+
+
+
+
+`M1`: The join request message with target peer information `M2`: Additional request details, may include adoption preferences `Opts`: A map of configuration options for join operations
+
+returns: `{ok, Map}` on success with join response details, or
+`{error, Binary}` on failure with error message.
+
+Initiates the join process for a node to enter an existing green zone.
+
+This function performs the following operations depending on the state:
+1. Validates the node's history to ensure proper initialization
+2. Checks for target peer information (location and ID)
+3. If target peer is specified:
+a. Generates a commitment report for the peer
+b. Prepares and sends a POST request to the target peer
+c. Verifies the response and decrypts the returned zone key
+d. Updates local configuration with the shared AES key
+4. If no peer is specified, processes the join request locally
+
+Config options in Opts map:
+- green_zone_peer_location: Target peer's address
+- green_zone_peer_id: Target peer's unique identifier
+- green_zone_adopt_config:
+(Optional) Whether to adopt peer's configuration (default: true)
+
+
+
+### join_peer/5 * ###
+
+
+
+
+`PeerLocation`: The target peer's address `PeerID`: The target peer's unique identifier `M2`: May contain ShouldMount flag to enable encrypted volume mounting
+
+returns: `{ok, Map}` on success with confirmation message, or
+`{error, Map|Binary}` on failure with error details
+
+Processes a join request to a specific peer node.
+
+This function handles the client-side join flow when connecting to a peer:
+1. Verifies the node is not already in a green zone
+2. Optionally adopts configuration from the target peer
+3. Generates a hardware-backed commitment report
+4. Sends a POST request to the peer's join endpoint
+5. Verifies the response signature
+6. Decrypts the returned AES key
+7. Updates local configuration with the shared key
+8. Optionally mounts an encrypted volume using the shared key
+
+
+
+### key/3 ###
+
+
+
+
+`Opts`: A map of configuration options
+
+returns: `{ok, Map}` containing the encrypted key and IV on success, or
+`{error, Binary}` if the node is not part of a green zone
+
+Encrypts and provides the node's private key for secure sharing.
+
+This function performs the following operations:
+1. Retrieves the shared AES key and the node's wallet
+2. Verifies that the node is part of a green zone (has a shared AES key)
+3. Generates a random initialization vector (IV) for encryption
+4. Encrypts the node's private key using AES-256-GCM with the shared key
+5. Returns the encrypted key and IV for secure transmission
+
+Required configuration in Opts map:
+- priv_green_zone_aes: The shared AES key for the green zone
+- priv_wallet: The node's wallet containing the private key to encrypt
+
+
+
+### maybe_set_zone_opts/4 * ###
+
+
+
+
+`PeerLocation`: The location of the peer node to join `PeerID`: The ID of the peer node to join `Req`: The request message with adoption preferences `InitOpts`: A map of initial configuration options
+
+returns: `{ok, Map}` with updated configuration on success, or
+`{error, Binary}` if configuration retrieval fails
+
+Adopts configuration from a peer when joining a green zone.
+
+This function handles the conditional adoption of peer configuration:
+1. Checks if adoption is enabled (default: true)
+2. Requests required configuration from the peer
+3. Verifies the authenticity of the configuration
+4. Creates a node message with appropriate settings
+5. Updates the local node configuration
+
+Config options:
+- green_zone_adopt_config: Controls configuration adoption (boolean, list, or binary)
+
+
+
+### rsa_wallet_integration_test/0 * ###
+
+`rsa_wallet_integration_test() -> any()`
+
+Test RSA operations with the existing wallet structure.
+
+This test function verifies that encryption and decryption using the RSA keys
+from the wallet work correctly. It creates a new wallet, encrypts a test
+message with the RSA public key, and then decrypts it with the RSA private
+key, asserting that the decrypted message matches the original.
+
+
+
+### try_mount_encrypted_volume/2 * ###
+
+`try_mount_encrypted_volume(AESKey, Opts) -> any()`
+
+Attempts to mount an encrypted volume using the green zone AES key.
+
+This function handles the complete process of secure storage setup by
+delegating to the dev_volume module, which provides a unified interface
+for volume management.
+
+The encryption key used for the volume is the same AES key used for green zone
+communication, ensuring that only nodes in the green zone can access the data.
+
+
+
+### validate_join/3 * ###
+
+
+
+
+`M1`: Ignored parameter `Req`: The join request containing commitment report and public key `Opts`: A map of configuration options
+
+returns: `{ok, Map}` on success with encrypted AES key, or
+`{error, Binary}` on failure with error message
+
+Validates an incoming join request from another node.
+
+This function handles the server-side join flow when receiving a connection
+request:
+1. Validates the peer's configuration meets required standards
+2. Extracts the commitment report and public key from the request
+3. Verifies the hardware-backed commitment report
+4. Adds the joining node to the trusted nodes list
+5. Encrypts the shared AES key with the peer's public key
+6. Returns the encrypted key to the requesting node
+
+
+
+### validate_peer_opts/2 * ###
+
+
+
+
+`Req`: The request message containing the peer's configuration `Opts`: A map of the local node's configuration options
+
+returns: true if the peer's configuration is valid, false otherwise
+
+Validates that a peer's configuration matches required options.
+
+This function ensures the peer node meets configuration requirements:
+1. Retrieves the local node's required configuration
+2. Gets the peer's options from its message
+3. Adds required configuration to peer's required options list
+4. Verifies the peer's node history is valid
+5. Checks that the peer's options match the required configuration
+
+
+--- END OF FILE: docs/resources/source-code/dev_green_zone.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_hook.md ---
+# [Module dev_hook.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hook.erl)
+
+
+
+
+A generalized interface for `hooking` into HyperBEAM nodes.
+
+
+
+## Description ##
+
+This module allows users to define `hooks` that are executed at various
+points in the lifecycle of nodes and message evaluations.
+
+Hooks are maintained in the `node message` options, under the `on`
+key. Each `hook` may have zero or many `handlers`, against which their
+request is executed. A new `handler` of a hook can be registered by simply
+adding a new key to that message. If multiple hooks need to be executed for
+a single event, the key's value can be set to a list of hooks.
+
+`hook`s themselves do not need to be added explicitly. Any device can add
+a hook by simply executing `dev_hook:on(HookName, Req, Opts)`. This
+function does not affect the hashpath of a message and is not exported on
+the device's API, such that it is not possible to call it directly with
+AO-Core resolution.
+
+All handlers are expressed in the form of a message, upon which the hook's
+request is evaluated:
+
+AO(HookMsg, Req, Opts) => {Status, Result}
+
+The `Status` and `Result` of the evaluation can be used at the `hook` caller's
+discretion. If multiple handlers are to be executed for a single `hook`, the
+result of each is used as the input to the next, on the assumption that the
+status of the previous is `ok`. If a non-`ok` status is encountered, the
+evaluation is halted and the result is returned to the caller. This means
+that in most cases, hooks take the form of chainable pipelines of functions,
+passing the most pertinent data in the `body` key of both the request and
+result. Hook definitions can also set the `hook/result` key to `ignore`, if
+the result of the execution should be discarded and the prior value (the
+input to the hook) should be used instead. The `hook/commit-request` key can
+also be set to `true` if the request should be committed by the node before
+execution of the hook.
+
+The default HyperBEAM node implements several useful hooks. They include:
+
+start: Executed when the node starts.
+Req/body: The node's initial configuration.
+Result/body: The node's possibly updated configuration.
+request: Executed when a request is received via the HTTP API.
+Req/body: The sequence of messages that the node will evaluate.
+Req/request: The raw, unparsed singleton request.
+Result/body: The sequence of messages that the node will evaluate.
+step: Executed after each message in a sequence has been evaluated.
+Req/body: The result of the evaluation.
+Result/body: The result of the evaluation.
+response: Executed when a response is sent via the HTTP API.
+Req/body: The result of the evaluation.
+Req/request: The raw, unparsed singleton request that was used to
+generate the response.
+Result/body: The message to be sent in response to the request.
+
+Additionally, this module implements a traditional device API, allowing the
+node operator to register hooks to the node and find those that are
+currently active.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### execute_handler/4 * ###
+
+`execute_handler(HookName, Handler, Req, Opts) -> any()`
+
+Execute a single handler
+Handlers are expressed as messages that can be resolved via AO.
+
+
+
+### execute_handlers/4 * ###
+
+`execute_handlers(HookName, Rest, Req, Opts) -> any()`
+
+Execute a list of handlers in sequence.
+The result of each handler is used as input to the next handler.
+If a handler returns a non-ok status, execution is halted.
+
+
+
+### find/2 ###
+
+`find(HookName, Opts) -> any()`
+
+Get all handlers for a specific hook from the node message options.
+Handlers are stored in the `on` key of this message. The `find/2` variant of
+this function only takes a hook name and node message, and is not called
+directly via the device API. Instead it is used by `on/3` and other internal
+functionality to find handlers when necessary. The `find/3` variant can,
+however, be called directly via the device API.
+
+
+
+### find/3 ###
+
+`find(Base, Req, Opts) -> any()`
+
+
+
+### halt_on_error_test/0 * ###
+
+`halt_on_error_test() -> any()`
+
+Test that pipeline execution halts on error
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Device API information
+
+
+
+### multiple_handlers_test/0 * ###
+
+`multiple_handlers_test() -> any()`
+
+Test that multiple handlers form a pipeline
+
+
+
+### no_handlers_test/0 * ###
+
+`no_handlers_test() -> any()`
+
+Test that hooks with no handlers return the original request
+
+
+
+### on/3 ###
+
+`on(HookName, Req, Opts) -> any()`
+
+Execute a named hook with the provided request and options
+This function finds all handlers for the hook and evaluates them in sequence.
+The result of each handler is used as input to the next handler.
+
+
+
+### single_handler_test/0 * ###
+
+`single_handler_test() -> any()`
+
+Test that a single handler is executed correctly
+
+
+--- END OF FILE: docs/resources/source-code/dev_hook.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_hyperbuddy.md ---
+# [Module dev_hyperbuddy.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hyperbuddy.erl)
+
+
+
+
+A device that renders a REPL-like interface for AO-Core via HTML.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### format/3 ###
+
+`format(Base, X2, X3) -> any()`
+
+Employ HyperBEAM's internal pretty printer to format a message.
+
+
+
+### info/0 ###
+
+`info() -> any()`
+
+Export an explicit list of files via http.
+
+
+
+### metrics/3 ###
+
+`metrics(X1, Req, Opts) -> any()`
+
+The main HTML page for the REPL device.
+
+
+
+### return_file/1 * ###
+
+`return_file(Name) -> any()`
+
+Read a file from disk and serve it as a static HTML page.
+
+
+
+### serve/4 * ###
+
+`serve(Key, M1, M2, Opts) -> any()`
+
+Serve a file from the priv directory. Only serves files that are explicitly
+listed in the `routes` field of the `info/0` return value.
+
+
+--- END OF FILE: docs/resources/source-code/dev_hyperbuddy.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_json_iface.md ---
+# [Module dev_json_iface.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_json_iface.erl)
+
+
+
+
+A device that provides a way for WASM execution to interact with
+the HyperBEAM (and AO) systems, using JSON as a shared data representation.
+
+
+
+## Description ##
+
+The interface is easy to use. It works as follows:
+
+1. The device is given a message that contains a process definition, WASM
+environment, and a message that contains the data to be processed,
+including the image to be used in part of `execute{pass=1}`.
+2. The device is called with `execute{pass=2}`, which reads the result of
+the process execution from the WASM environment and adds it to the
+message.
+
+The device has the following requirements and interface:
+
+```
+
+ M1/Computed when /Pass == 1 ->
+ Assumes:
+ M1/priv/wasm/instance
+ M1/Process
+ M2/Message
+ M2/Assignment/Block-Height
+ Generates:
+ /wasm/handler
+ /wasm/params
+ Side-effects:
+ Writes the process and message as JSON representations into the
+ WASM environment.
+ M1/Computed when M2/Pass == 2 ->
+ Assumes:
+ M1/priv/wasm/instance
+ M2/Results
+ M2/Process
+ Generates:
+ /Results/Outbox
+ /Results/Data
+```
+
+
+## Function Index ##
+
+
+
+After the process returns messages from an evaluation, the
+signing node needs to add some tags to each message and spawn such that
+the target process knows these messages are created by a process.
+Read the computed results out of the WASM environment, assuming that
+the environment has been set up by prep_call/3 and that the WASM executor
+has been called with computed{pass=1}.
+
+
+
+
+## Function Details ##
+
+
+
+### aos_stack_benchmark_test_/0 * ###
+
+`aos_stack_benchmark_test_() -> any()`
+
+
+
+### basic_aos_call_test_/0 * ###
+
+`basic_aos_call_test_() -> any()`
+
+
+
+### compute/3 ###
+
+`compute(M1, M2, Opts) -> any()`
+
+On first pass prepare the call, on second pass get the results.
+
+
+
+### denormalize_message/1 * ###
+
+`denormalize_message(Message) -> any()`
+
+Normalize a message for AOS-compatibility.
+
+
+
+### env_read/3 * ###
+
+`env_read(M1, M2, Opts) -> any()`
+
+Read the results out of the execution environment.
+
+
+
+### env_write/5 * ###
+
+`env_write(ProcessStr, MsgStr, Base, Req, Opts) -> any()`
+
+Write the message and process into the execution environment.
+
+
+
+### generate_aos_msg/2 ###
+
+`generate_aos_msg(ProcID, Code) -> any()`
+
+
+
+### generate_stack/1 ###
+
+`generate_stack(File) -> any()`
+
+
+
+### generate_stack/2 ###
+
+`generate_stack(File, Mode) -> any()`
+
+
+
+### header_case_string/1 * ###
+
+`header_case_string(Key) -> any()`
+
+
+
+### init/3 ###
+
+`init(M1, M2, Opts) -> any()`
+
+Initialize the device.
+
+
+
+### json_to_message/2 ###
+
+`json_to_message(JSON, Opts) -> any()`
+
+Translates a compute result -- either from a WASM execution using the
+JSON-Iface, or from a `Legacy` CU -- and transforms it into a result message.
+
+
+
+### maybe_list_to_binary/1 * ###
+
+`maybe_list_to_binary(List) -> any()`
+
+
+
+### message_to_json_struct/1 ###
+
+`message_to_json_struct(RawMsg) -> any()`
+
+
+
+### message_to_json_struct/2 * ###
+
+`message_to_json_struct(RawMsg, Features) -> any()`
+
+
+
+### normalize_results/1 * ###
+
+`normalize_results(Msg) -> any()`
+
+Normalize the results of an evaluation.
+
+
+
+### postprocess_outbox/3 * ###
+
+`postprocess_outbox(Msg, Proc, Opts) -> any()`
+
+Post-process messages in the outbox to add the correct `from-process`
+and `from-image` tags.
+
+
+
+### prep_call/3 * ###
+
+`prep_call(M1, M2, Opts) -> any()`
+
+Prepare the WASM environment for execution by writing the process string and
+the message as JSON representations into the WASM environment.
+
+
+
+### prepare_header_case_tags/1 * ###
+
+`prepare_header_case_tags(TABM) -> any()`
+
+Convert a message without an `original-tags` field into a list of
+key-value pairs, with the keys in HTTP header-case.
+
+
+
+### prepare_tags/1 * ###
+
+`prepare_tags(Msg) -> any()`
+
+Prepare the tags of a message as a key-value list, for use in the
+construction of the JSON-Struct message.
+
+
+
+### preprocess_results/2 * ###
+
+`preprocess_results(Msg, Opts) -> any()`
+
+After the process returns messages from an evaluation, the
+signing node needs to add some tags to each message and spawn such that
+the target process knows these messages are created by a process.
+
+
+
+### results/3 * ###
+
+`results(M1, M2, Opts) -> any()`
+
+Read the computed results out of the WASM environment, assuming that
+the environment has been set up by `prep_call/3` and that the WASM executor
+has been called with `computed{pass=1}`.
+
+
+
+### safe_to_id/1 * ###
+
+`safe_to_id(ID) -> any()`
+
+
+
+### tags_to_map/1 * ###
+
+`tags_to_map(Msg) -> any()`
+
+Convert a message with tags into a map of their key-value pairs.
+
+
+
+### test_init/0 * ###
+
+`test_init() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_json_iface.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_local_name.md ---
+# [Module dev_local_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_local_name.erl)
+
+
+
+
+A device for registering and looking up local names.
+
+
+
+## Description ##
+This device uses
+the node message to store a local cache of its known names, and the typical
+non-volatile storage of the node message to store the names long-term.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### default_lookup/4 * ###
+
+`default_lookup(Key, X2, Req, Opts) -> any()`
+
+Handle all other requests by delegating to the lookup function.
+
+
+
+### direct_register/2 ###
+
+`direct_register(Req, Opts) -> any()`
+
+Register a name without checking if the caller is an operator. Exported
+for use by other devices, but not publicly available.
+
+
+
+### find_names/1 * ###
+
+`find_names(Opts) -> any()`
+
+Returns a message containing all known names.
+
+
+
+### generate_test_opts/0 * ###
+
+`generate_test_opts() -> any()`
+
+
+
+### http_test/0 * ###
+
+`http_test() -> any()`
+
+
+
+### info/1 ###
+
+`info(Opts) -> any()`
+
+Export only the `lookup` and `register` functions.
+
+
+
+### load_names/1 * ###
+
+`load_names(Opts) -> any()`
+
+Loads all known names from the cache and returns the new `node message`
+with those names loaded into it.
+
+
+
+### lookup/3 ###
+
+`lookup(X1, Req, Opts) -> any()`
+
+Takes a `key` argument and returns the value of the name, if it exists.
+
+
+
+### lookup_opts_name_test/0 * ###
+
+`lookup_opts_name_test() -> any()`
+
+
+
+### no_names_test/0 * ###
+
+`no_names_test() -> any()`
+
+
+
+### register/3 ###
+
+`register(X1, Req, Opts) -> any()`
+
+Takes a `key` and `value` argument and registers the name. The caller
+must be the node operator in order to register a name.
+
+
+
+### register_test/0 * ###
+
+`register_test() -> any()`
+
+
+
+### unauthorized_test/0 * ###
+
+`unauthorized_test() -> any()`
+
+
+
+### update_names/2 * ###
+
+`update_names(LocalNames, Opts) -> any()`
+
+Updates the node message with the new names. Further HTTP requests will
+use this new message, removing the need to look up the names from non-volatile
+storage.
+
+
+--- END OF FILE: docs/resources/source-code/dev_local_name.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_lookup.md ---
+# [Module dev_lookup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lookup.erl)
+
+
+
+
+A device that looks up an ID from a local store and returns it, honoring
+the `accept` key to return the correct format.
+
+
+
+## Function Index ##
+
+
+
+Fetch a resource from the cache using the "target" ID extracted from the message.
+
+
+
+
+## Function Details ##
+
+
+
+### aos2_message_lookup_test/0 * ###
+
+`aos2_message_lookup_test() -> any()`
+
+
+
+### binary_lookup_test/0 * ###
+
+`binary_lookup_test() -> any()`
+
+
+
+### http_lookup_test/0 * ###
+
+`http_lookup_test() -> any()`
+
+
+
+### message_lookup_test/0 * ###
+
+`message_lookup_test() -> any()`
+
+
+
+### read/3 ###
+
+`read(M1, M2, Opts) -> any()`
+
+Fetch a resource from the cache using the "target" ID extracted from the message.
+
+
+--- END OF FILE: docs/resources/source-code/dev_lookup.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_lua_lib.md ---
+# [Module dev_lua_lib.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lua_lib.erl)
+
+
+
+
+A module for providing AO library functions to the Lua environment.
+
+
+
+## Description ##
+
+This module contains the implementation of the functions, each by the name
+that should be used in the `ao` table in the Lua environment. Every export
+is imported into the Lua environment.
+
+Each function adheres closely to the Luerl calling convention, adding the
+appropriate node message as a third argument:
+
+fun(Args, State, NodeMsg) -> {ResultTerms, NewState}
+
+As Lua allows for multiple return values, each function returns a list of
+terms to grant to the caller. Matching the tuple convention used by AO-Core,
+the first term is typically the status, and the second term is the result.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### exec_test/2 * ###
+
+`exec_test(State, Function) -> any()`
+
+Generate an EUnit test for a given function.
+
+
+
+### exec_test_/0 * ###
+
+`exec_test_() -> any()`
+
+Main entrypoint for Lua tests.
+
+
+
+### new_state/1 * ###
+
+`new_state(File) -> any()`
+
+Create a new Lua environment for a given script.
+
+
+
+### parse_spec/1 ###
+
+`parse_spec(Str) -> any()`
+
+Parse a string representation of test descriptions received from the
+command line via the `LUA_TESTS` environment variable.
+
+Supported syntax in loose BNF/RegEx:
+
+Definitions := (ModDef,)+
+ModDef := ModName(TestDefs)?
+ModName := ModuleInLUA_SCRIPTS|(FileName[.lua])?
+TestDefs := (:TestDef)+
+TestDef := TestName
+
+File names ending in `.lua` are assumed to be relative paths from the current
+working directory. Module names lacking the `.lua` extension are assumed to
+be modules found in the `LUA_SCRIPTS` environment variable (defaulting to
+`scripts/`).
+
+For example, to run a single test one could call the following:
+
+LUA_TESTS=~/src/LuaScripts/test.yourTest rebar3 lua-tests
+
+To specify that one would like to run all of the tests in the
+`scripts/test.lua` file and two tests from the `scripts/test2.lua` file, the
+user could provide the following test definition:
+
+LUA_TESTS="test,scripts/test2.userTest1|userTest2" rebar3 lua-tests
+
+
+
+### suite/2 * ###
+
+`suite(File, Funcs) -> any()`
+
+Generate an EUnit test suite for a given Lua script. If the `Funcs` is
+the atom `tests` we find all of the global functions in the script, then
+filter for those ending in `_test` in a similar fashion to Eunit.
+
+
+
+### terminates_with/2 * ###
+
+`terminates_with(String, Suffix) -> any()`
+
+Check if a string terminates with a given suffix.
+
+
+--- END OF FILE: docs/resources/source-code/dev_lua_test.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_lua.md ---
+# [Module dev_lua.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lua.erl)
+
+
+
+
+A device that calls a Lua module upon a request and returns the result.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### ao_core_resolution_from_lua_test/0 * ###
+
+`ao_core_resolution_from_lua_test() -> any()`
+
+Run an AO-Core resolution from the Lua environment.
+
+
+
+### ao_core_sandbox_test/0 * ###
+
+`ao_core_sandbox_test() -> any()`
+
+Run an AO-Core resolution from the Lua environment.
+
+
+
+### aos_authority_not_trusted_test/0 * ###
+
+`aos_authority_not_trusted_test() -> any()`
+
+
+
+### aos_process_benchmark_test_/0 * ###
+
+`aos_process_benchmark_test_() -> any()`
+
+Benchmark the performance of Lua executions.
+
+
+
+### compute/4 * ###
+
+`compute(Key, RawBase, Req, Opts) -> any()`
+
+Call the Lua script with the given arguments.
+
+
+
+### decode/1 ###
+
+`decode(EncMsg) -> any()`
+
+Decode a Lua result into a HyperBEAM `structured@1.0` message.
+
+
+
+### decode_params/2 * ###
+
+`decode_params(Rest, State) -> any()`
+
+Decode a list of Lua references, as found in a stack trace, into a
+list of Erlang terms.
+
+
+
+### decode_stacktrace/2 * ###
+
+`decode_stacktrace(StackTrace, State0) -> any()`
+
+Parse a Lua stack trace into a list of messages.
+
+
+
+### decode_stacktrace/3 * ###
+
+`decode_stacktrace(Rest, State, Acc) -> any()`
+
+
+
+### direct_benchmark_test/0 * ###
+
+`direct_benchmark_test() -> any()`
+
+Benchmark the performance of Lua executions.
+
+
+
+### encode/1 ###
+
+`encode(Map) -> any()`
+
+Encode a HyperBEAM `structured@1.0` message into a Lua term.
+
+
+
+### ensure_initialized/3 * ###
+
+`ensure_initialized(Base, Req, Opts) -> any()`
+
+Initialize the Lua VM if it is not already initialized. Optionally takes
+the script as a Binary string. If not provided, the module will be loaded
+from the base message.
+
+
+
+### error_response_test/0 * ###
+
+`error_response_test() -> any()`
+
+
+
+### find_modules/2 * ###
+
+`find_modules(Base, Opts) -> any()`
+
+Find the script in the base message, either by ID or by string.
+
+
+
+### functions/3 ###
+
+`functions(Base, Req, Opts) -> any()`
+
+Return a list of all functions in the Lua environment.
+
+
+
+### generate_lua_process/1 * ###
+
+`generate_lua_process(File) -> any()`
+
+Generate a Lua process message.
+
+
+
+### generate_stack/1 * ###
+
+`generate_stack(File) -> any()`
+
+Generate a stack message for the Lua process.
+
+
+
+### generate_test_message/1 * ###
+
+`generate_test_message(Process) -> any()`
+
+Generate a test message for a Lua process.
+
+
+
+### info/1 ###
+
+`info(Base) -> any()`
+
+All keys that are not directly available in the base message are
+resolved by calling the Lua function in the module of the same name.
+Additionally, we exclude the `keys`, `set`, `encode` and `decode` functions
+which are `message@1.0` core functions, and Lua public utility functions.
+
+
+
+### init/3 ###
+
+`init(Base, Req, Opts) -> any()`
+
+Initialize the device state, loading the script into memory if it is
+a reference.
+
+
+
+### initialize/3 * ###
+
+`initialize(Base, Modules, Opts) -> any()`
+
+Initialize a new Lua state with a given base message and module.
+
+
+
+### invoke_aos_test/0 * ###
+
+`invoke_aos_test() -> any()`
+
+
+
+### invoke_non_compute_key_test/0 * ###
+
+`invoke_non_compute_key_test() -> any()`
+
+Call a non-compute key on a Lua device message and ensure that the
+function of the same name in the script is called.
+
+
+
+### load_modules/2 * ###
+
+`load_modules(Modules, Opts) -> any()`
+
+Load a list of modules for installation into the Lua VM.
+
+
+
+### load_modules/3 * ###
+
+`load_modules(Rest, Opts, Acc) -> any()`
+
+
+
+### load_modules_by_id_test/0 * ###
+
+`load_modules_by_id_test() -> any()`
+
+
+
+### lua_http_hook_test/0 * ###
+
+`lua_http_hook_test() -> any()`
+
+Use a Lua module as a hook on the HTTP server via `~meta@1.0`.
+
+
+
+### multiple_modules_test/0 * ###
+
+`multiple_modules_test() -> any()`
+
+
+
+### normalize/3 ###
+
+`normalize(Base, Req, RawOpts) -> any()`
+
+Restore the Lua state from a snapshot, if it exists.
+
+
+
+### process_response/2 * ###
+
+`process_response(X1, Priv) -> any()`
+
+Process a response to a Luerl invocation. Returns the typical AO-Core
+HyperBEAM response format.
+
+
+
+### pure_lua_process_benchmark_test_/0 * ###
+
+`pure_lua_process_benchmark_test_() -> any()`
+
+
+
+### pure_lua_process_test/0 * ###
+
+`pure_lua_process_test() -> any()`
+
+Call a process whose `execution-device` is set to `lua@5.3a`.
+
+
+
+### sandbox/3 * ###
+
+`sandbox(State, Map, Opts) -> any()`
+
+Sandbox (render inoperable) a set of Lua functions. Each function is
+referred to as if it is a path in AO-Core, with its value being what to
+return to the caller. For example, 'os.exit' would be referred to as
+`os/exit`. If preferred, a list rather than a map may be
+provided, in which case the functions all return `sandboxed`.
+
+
+
+### sandboxed_failure_test/0 * ###
+
+`sandboxed_failure_test() -> any()`
+
+
+
+### simple_invocation_test/0 * ###
+
+`simple_invocation_test() -> any()`
+
+
+
+### snapshot/3 ###
+
+`snapshot(Base, Req, Opts) -> any()`
+
+Snapshot the Lua state from a live computation. Normalizes its `priv`
+state element, then serializes the state to a binary.
+
+
+--- END OF FILE: docs/resources/source-code/dev_lua.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_manifest.md ---
+# [Module dev_manifest.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_manifest.erl)
+
+
+
+
+An Arweave path manifest resolution device.
+
+
+
+## Description ##
+Follows the v1 schema:
+https://specs.ar.io/?tx=lXLd0OPwo-dJLB_Amz5jgIeDhiOkjXuM3-r0H_aiNj0
+
+## Function Index ##
+
+
+
+Route a request to the associated data via its manifest.
+
+
+
+
+## Function Details ##
+
+
+
+### info/0 ###
+
+`info() -> any()`
+
+Use the `route/4` function as the handler for all requests, aside
+from `keys` and `set`, which are handled by the default resolver.
+
+
+
+### manifest/3 * ###
+
+`manifest(Base, Req, Opts) -> any()`
+
+Find and deserialize a manifest from the given base.
+
+
+
+### route/4 * ###
+
+`route(Key, M1, M2, Opts) -> any()`
+
+Route a request to the associated data via its manifest.
+
+
+--- END OF FILE: docs/resources/source-code/dev_manifest.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_message.md ---
+# [Module dev_message.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_message.erl)
+
+
+
+
+The identity device: For non-reserved keys, it simply returns a key
+from the message as it is found in the message's underlying Erlang map.
+
+
+
+## Description ##
+Private keys (`priv[.*]`) are not included.
+Reserved keys are: `id`, `commitments`, `committers`, `keys`, `path`,
+`set`, `remove`, `get`, and `verify`. Their function comments describe the
+behaviour of the device when these keys are set.
+
+## Function Index ##
+
+
+
+Return a message with only the relevant commitments for a given request.
+
+
+
+
+## Function Details ##
+
+
+
+### calculate_ids/3 * ###
+
+`calculate_ids(Base, Req, NodeOpts) -> any()`
+
+
+
+### cannot_get_private_keys_test/0 * ###
+
+`cannot_get_private_keys_test() -> any()`
+
+
+
+### case_insensitive_get/2 * ###
+
+`case_insensitive_get(Key, Msg) -> any()`
+
+Key matching should be case insensitive, following RFC-9110, so we
+implement a case-insensitive key lookup rather than delegating to
+`maps:get/2`. Encode the key to a binary if it is not already.
+
+
+
+### case_insensitive_get_test/0 * ###
+
+`case_insensitive_get_test() -> any()`
+
+
+
+### commit/3 ###
+
+`commit(Self, Req, Opts) -> any()`
+
+Commit to a message, using the `commitment-device` key to specify the
+device that should be used to commit to the message. If the key is not set,
+the default device (`httpsig@1.0`) is used.
+
+
+
+### commitment_ids_from_committers/2 * ###
+
+`commitment_ids_from_committers(CommitterAddrs, Commitments) -> any()`
+
+Returns a list of commitment IDs in a commitments map that are relevant
+for a list of given committer addresses.
+
+
+
+### commitment_ids_from_request/3 * ###
+
+`commitment_ids_from_request(Base, Req, Opts) -> any()`
+
+Implements a standardized form of specifying commitment IDs for a
+message request. The caller may specify a list of committers (by address)
+or a list of commitment IDs directly. They may specify both, in which case
+the returned list will be the union of the two lists. In each case, they
+may specify `all` or `none` for each group. If no specifiers are provided,
+the default is `all` for commitments -- also implying `all` for committers.
+
+
+
+### committed/3 ###
+
+`committed(Self, Req, Opts) -> any()`
+
+Return the list of committed keys from a message.
+
+
+
+### committers/1 ###
+
+`committers(Base) -> any()`
+
+Return the committers of a message that are present in the given request.
+
+
+
+### committers/2 ###
+
+`committers(Base, Req) -> any()`
+
+
+
+### committers/3 ###
+
+`committers(X1, X2, NodeOpts) -> any()`
+
+
+
+### deep_unset_test/0 * ###
+
+`deep_unset_test() -> any()`
+
+
+
+### exec_for_commitment/5 * ###
+
+`exec_for_commitment(Func, Base, Commitment, Req, Opts) -> any()`
+
+Execute a function for a single commitment in the context of its
+parent message.
+Note: Assumes that the `commitments` key has already been removed from the
+message if applicable.
+
+
+
+### get/2 ###
+
+`get(Key, Msg) -> any()`
+
+Return the value associated with the key as it exists in the message's
+underlying Erlang map. First check the public keys, then check case-
+insensitively if the key is a binary.
+
+
+
+### get/3 ###
+
+`get(Key, Msg, Msg2) -> any()`
+
+
+
+### get_keys_mod_test/0 * ###
+
+`get_keys_mod_test() -> any()`
+
+
+
+### id/1 ###
+
+`id(Base) -> any()`
+
+Return the ID of a message, using the `committers` list if it exists.
+If the `committers` key is `all`, return the ID including all known
+commitments -- `none` yields the ID without any commitments. If the
+`committers` key is a list/map, return the ID including only the specified
+commitments.
+
+The `id-device` key in the message can be used to specify the device that
+should be used to calculate the ID. If it is not set, the default device
+(`httpsig@1.0`) is used.
+
+Note: This function _does not_ use AO-Core's `get/3` function, as it
+would require significant computation. We may want to change this
+if/when non-map message structures are created.
+
+
+
+### id/2 ###
+
+`id(Base, Req) -> any()`
+
+
+
+### id/3 ###
+
+`id(Base, Req, NodeOpts) -> any()`
+
+
+
+### id_device/1 * ###
+
+`id_device(X1) -> any()`
+
+Locate the ID device of a message. The ID device is determined by the
+`device` set in _all_ of the commitments. If no commitments are present,
+the default device (`httpsig@1.0`) is used.
+
+
+
+### info/0 ###
+
+`info() -> any()`
+
+Return the info for the identity device.
+
+
+
+### is_private_mod_test/0 * ###
+
+`is_private_mod_test() -> any()`
+
+
+
+### key_from_device_test/0 * ###
+
+`key_from_device_test() -> any()`
+
+
+
+### keys/1 ###
+
+`keys(Msg) -> any()`
+
+Get the public keys of a message.
+
+
+
+### keys_from_device_test/0 * ###
+
+`keys_from_device_test() -> any()`
+
+
+
+### private_keys_are_filtered_test/0 * ###
+
+`private_keys_are_filtered_test() -> any()`
+
+
+
+### remove/2 ###
+
+`remove(Message1, X2) -> any()`
+
+Remove a key or keys from a message.
+
+
+
+### remove_test/0 * ###
+
+`remove_test() -> any()`
+
+
+
+### run_test/0 * ###
+
+`run_test() -> any()`
+
+
+
+### set/3 ###
+
+`set(Message1, NewValuesMsg, Opts) -> any()`
+
+Deep merge keys in a message. Takes a map of key-value pairs and sets
+them in the message, overwriting any existing values.
+
+
+
+### set_conflicting_keys_test/0 * ###
+
+`set_conflicting_keys_test() -> any()`
+
+
+
+### set_ignore_undefined_test/0 * ###
+
+`set_ignore_undefined_test() -> any()`
+
+
+
+### set_path/3 ###
+
+`set_path(Message1, X2, Opts) -> any()`
+
+Special case of `set/3` for setting the `path` key. This cannot be set
+using the normal `set` function, as the `path` is a reserved key, necessary
+for AO-Core to know the key to evaluate in requests.
+
+
+
+### unset_with_set_test/0 * ###
+
+`unset_with_set_test() -> any()`
+
+
+
+### verify/3 ###
+
+`verify(Self, Req, Opts) -> any()`
+
+Verify a message. By default, all commitments are verified. The
+`committers` key in the request can be used to specify that only the
+commitments from specific committers should be verified. Similarly, specific
+commitments can be specified using the `commitments` key.
+
+
+
+### verify_test/0 * ###
+
+`verify_test() -> any()`
+
+
+
+### with_relevant_commitments/3 * ###
+
+`with_relevant_commitments(Base, Req, Opts) -> any()`
+
+Return a message with only the relevant commitments for a given request.
+See `commitment_ids_from_request/3` for more information on the request format.
+
+
+--- END OF FILE: docs/resources/source-code/dev_message.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_meta.md ---
+# [Module dev_meta.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_meta.erl)
+
+
+
+
+The hyperbeam meta device, which is the default entry point
+for all messages processed by the machine.
+
+
+
+## Description ##
+This device executes an
+AO-Core singleton request, after first applying the node's
+pre-processor, if set. The pre-processor can halt the request by
+returning an error, or return a modified version if it deems necessary --
+the result of the pre-processor is used as the request for the AO-Core
+resolver. Additionally, a post-processor can be set, which is executed after
+the AO-Core resolver has returned a result.
+
+## Function Index ##
+
+
+
+Validate that the request is signed by the operator of the node, then
+allow them to update the node message.
+
+
+
+
+## Function Details ##
+
+
+
+### add_dynamic_keys/1 * ###
+
+`add_dynamic_keys(NodeMsg) -> any()`
+
+Add dynamic keys to the node message.
+
+
+
+### adopt_node_message/2 ###
+
+`adopt_node_message(Request, NodeMsg) -> any()`
+
+Attempt to adopt changes to a node message.
+
+
+
+### authorized_set_node_msg_succeeds_test/0 * ###
+
+`authorized_set_node_msg_succeeds_test() -> any()`
+
+Test that we can set the node message if the request is signed by the
+owner of the node.
+
+
+
+### build/3 ###
+
+`build(X1, X2, NodeMsg) -> any()`
+
+Emits the version number and commit hash of the HyperBEAM node source,
+if available.
+
+We include the short hash separately, as the length of this hash may change in
+the future, depending on the git version/config used to build the node. Rather
+than embedding a `git-short-hash-length` key, for the avoidance of doubt we
+emit both the short hash and the full-length hash.
+
+
+
+### buildinfo_test/0 * ###
+
+`buildinfo_test() -> any()`
+
+Test that version information is available and returned correctly.
+
+
+
+### claim_node_test/0 * ###
+
+`claim_node_test() -> any()`
+
+Test that we can claim the node correctly and set the node message after.
+
+
+
+### config_test/0 * ###
+
+`config_test() -> any()`
+
+Test that we can get the node message.
+
+
+
+### embed_status/1 * ###
+
+`embed_status(X1) -> any()`
+
+Wrap the result of a device call in a status.
+
+
+
+### filter_node_msg/1 * ###
+
+`filter_node_msg(Msg) -> any()`
+
+Remove items from the node message that are not encodable into a
+message.
+
+
+
+### halt_request_test/0 * ###
+
+`halt_request_test() -> any()`
+
+Test that we can halt a request if the hook returns an error.
+
+
+
+### handle/2 ###
+
+`handle(NodeMsg, RawRequest) -> any()`
+
+Normalize and route messages downstream based on their path. Messages
+with a `Meta` key are routed to the `handle_meta/2` function, while all
+other messages are routed to the `handle_resolve/2` function.
+
+
+
+### handle_initialize/2 * ###
+
+`handle_initialize(Rest, NodeMsg) -> any()`
+
+
+
+### handle_resolve/3 * ###
+
+`handle_resolve(Req, Msgs, NodeMsg) -> any()`
+
+Handle an AO-Core request, which is a list of messages. We apply
+the node's pre-processor to the request first, and then resolve the request
+using the node's AO-Core implementation if its response was `ok`.
+After execution, we run the node's `response` hook on the result of
+the request before returning the result it grants back to the user.
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Ensure that the helper function `adopt_node_message/2` is not exported.
+The naming of this method carefully avoids a clash with the exported `info/3`
+function. We would like the node information to be easily accessible via the
+`info` endpoint, but AO-Core also uses `info` as the name of the function
+that grants device information. The device call takes two or fewer arguments,
+so we are safe to use the name for both purposes in this case, as the user
+info call will match the three-argument version of the function. If in the
+future the `request` is added as an argument to AO-Core's internal `info`
+function, we will need to find a different approach.
+
+
+
+### info/3 ###
+
+`info(X1, Request, NodeMsg) -> any()`
+
+Get/set the node message. If the request is a `POST`, we check that the
+request is signed by the owner of the node. If not, we return the node message
+as-is, aside from all keys that are private (according to `hb_private`).
+
+
+
+### is/2 ###
+
+`is(Request, NodeMsg) -> any()`
+
+Check if the request in question is signed by a given `role` on the node.
+The `role` can be one of `operator` or `initiator`.
+
+
+
+### is/3 ###
+
+`is(X1, Request, NodeMsg) -> any()`
+
+
+
+### maybe_sign/2 * ###
+
+`maybe_sign(Res, NodeMsg) -> any()`
+
+Sign the result of a device call if the node is configured to do so.
+
+
+
+### message_to_status/1 * ###
+
+`message_to_status(Item) -> any()`
+
+Get the HTTP status code from a transaction (if it exists).
+
+
+
+### modify_request_test/0 * ###
+
+`modify_request_test() -> any()`
+
+Test that a hook can modify a request.
+
+
+
+### permanent_node_message_test/0 * ###
+
+`permanent_node_message_test() -> any()`
+
+Test that a permanent node message cannot be changed.
+
+
+
+### priv_inaccessible_test/0 * ###
+
+`priv_inaccessible_test() -> any()`
+
+Test that we can't get the node message if the requested key is private.
+
+
+
+### request_response_hooks_test/0 * ###
+
+`request_response_hooks_test() -> any()`
+
+
+
+### resolve_hook/4 * ###
+
+`resolve_hook(HookName, InitiatingRequest, Body, NodeMsg) -> any()`
+
+Execute a hook from the node message upon the user's request. The
+invocation of the hook provides a request of the following form:
+
+```
+
+ /path => request | response
+ /request => the original request singleton
+ /body => parsed sequence of messages to process | the execution result
+```
+
+
+
+### status_code/1 * ###
+
+`status_code(X1) -> any()`
+
+Calculate the appropriate HTTP status code for an AO-Core result.
+The order of precedence is:
+1. The status code from the message.
+2. The HTTP representation of the status code.
+3. The default status code.
+
+
+
+### unauthorized_set_node_msg_fails_test/0 * ###
+
+`unauthorized_set_node_msg_fails_test() -> any()`
+
+Test that we can't set the node message if the request is not signed by
+the owner of the node.
+
+
+
+### uninitialized_node_test/0 * ###
+
+`uninitialized_node_test() -> any()`
+
+Test that an uninitialized node will not run computation.
+
+
+
+### update_node_message/2 * ###
+
+`update_node_message(Request, NodeMsg) -> any()`
+
+Validate that the request is signed by the operator of the node, then
+allow them to update the node message.
+
+
+--- END OF FILE: docs/resources/source-code/dev_meta.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_multipass.md ---
+# [Module dev_multipass.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_multipass.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### basic_multipass_test/0 * ###
+
+`basic_multipass_test() -> any()`
+
+
+
+### handle/4 * ###
+
+`handle(Key, M1, M2, Opts) -> any()`
+
+Forward the keys function to the message device, handle all others
+with deduplication. We only act on the first pass.
+
+
+
+### info/1 ###
+
+`info(M1) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_multipass.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_name.md ---
+# [Module dev_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_name.erl)
+
+
+
+
+A device for resolving names to their corresponding values, through the
+use of a `resolver` interface.
+
+
+
+## Description ##
+Each `resolver` is a message that can be
+given a `key` and returns an associated value. The device will attempt to
+match the key against each resolver in turn, and return the value of the
+first resolver that matches.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### execute_resolver/3 * ###
+
+`execute_resolver(Key, Resolver, Opts) -> any()`
+
+Execute a resolver with the given key and return its value.
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Configure the `default` key to proxy to the `resolve/4` function.
+Exclude the `keys` and `set` keys from being processed by this device, as
+these are needed to modify the base message itself.
+
+
+
+### load_and_execute_test/0 * ###
+
+`load_and_execute_test() -> any()`
+
+Test that we can resolve messages from a name loaded with the device.
+
+
+
+### match_resolver/3 * ###
+
+`match_resolver(Key, Resolvers, Opts) -> any()`
+
+Find the first resolver that matches the key and return its value.
+
+
+
+### message_lookup_device_resolver/1 * ###
+
+`message_lookup_device_resolver(Msg) -> any()`
+
+
+
+### multiple_resolvers_test/0 * ###
+
+`multiple_resolvers_test() -> any()`
+
+
+
+### no_resolvers_test/0 * ###
+
+`no_resolvers_test() -> any()`
+
+
+
+### resolve/4 * ###
+
+`resolve(Key, X2, Req, Opts) -> any()`
+
+Resolve a name to its corresponding value. The name is given by the key
+called. For example, `GET /~name@1.0/hello&load=false` grants the value of
+`hello`. If the `load` key is set to `true`, the value is treated as a
+pointer and its contents is loaded from the cache. For example,
+`GET /~name@1.0/reference` yields the message at the path specified by the
+`reference` key.
+
+
+
+### single_resolver_test/0 * ###
+
+`single_resolver_test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_name.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_node_process.md ---
+# [Module dev_node_process.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_node_process.erl)
+
+
+
+
+A device that implements the singleton pattern for processes specific
+to an individual node.
+
+
+
+## Description ##
+
+This device uses the `local-name@1.0` device to
+register processes with names locally, persisting them across reboots.
+
+Definitions of singleton processes are expected to be found with their
+names in the `node_processes` section of the node message.
+
+## Function Index ##
+
+
+
+Spawn a new process according to the process definition found in the
+node message, and register it with the given name.
+
+
+
+
+## Function Details ##
+
+
+
+### augment_definition/2 * ###
+
+`augment_definition(BaseDef, Opts) -> any()`
+
+Augment the given process definition with the node's address.
+
+
+
+### generate_test_opts/0 * ###
+
+`generate_test_opts() -> any()`
+
+Helper function to generate a test environment and its options.
+
+
+
+### generate_test_opts/1 * ###
+
+`generate_test_opts(Defs) -> any()`
+
+
+
+### info/1 ###
+
+`info(Opts) -> any()`
+
+Register a default handler for the device. Inherits `keys` and `set`
+from the default device.
+
+
+
+### lookup/4 * ###
+
+`lookup(Name, Base, Req, Opts) -> any()`
+
+Lookup a process by name.
+
+
+
+### lookup_execute_test/0 * ###
+
+`lookup_execute_test() -> any()`
+
+Test that a process can be spawned, executed upon, and its result retrieved.
+
+
+
+### lookup_no_spawn_test/0 * ###
+
+`lookup_no_spawn_test() -> any()`
+
+
+
+### lookup_spawn_test/0 * ###
+
+`lookup_spawn_test() -> any()`
+
+
+
+### spawn_register/2 * ###
+
+`spawn_register(Name, Opts) -> any()`
+
+Spawn a new process according to the process definition found in the
+node message, and register it with the given name.
+
+
+--- END OF FILE: docs/resources/source-code/dev_node_process.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_p4.md ---
+# [Module dev_p4.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_p4.erl)
+
+
+
+
+The HyperBEAM core payment ledger.
+
+
+
+## Description ##
+
+This module allows the operator to
+specify another device that can act as a pricing mechanism for transactions
+on the node, as well as orchestrating a payment ledger to calculate whether
+the node should fulfil services for users.
+
+The device requires the following node message settings in order to function:
+
+- `p4_pricing-device`: The device that will estimate the cost of a request.
+- `p4_ledger-device`: The device that will act as a payment ledger.
+
+The pricing device should implement the following keys:
+
+```
+GET /estimate?type=pre|post&body=[...]&request=RequestMessage
+GET /price?type=pre|post&body=[...]&request=RequestMessage
+```
+
+The `body` key is used to pass either the request or response messages to the
+device. The `type` key is used to specify whether the inquiry is for a request
+(pre) or a response (post) object. Requests carry lists of messages that will
+be executed, while responses carry the results of the execution. The `price`
+key may return `infinity` if the node will not serve a user under any
+circumstances. Else, the value returned by the `price` key will be passed to
+the ledger device as the `amount` key.
+
+A ledger device should implement the following keys:
+
+```
+POST /credit?message=PaymentMessage&request=RequestMessage
+POST /debit?amount=PriceMessage&request=RequestMessage
+GET /balance?request=RequestMessage
+```
+
+The `type` key is optional and defaults to `pre`. If `type` is set to `post`,
+the debit must be applied to the ledger, whereas the `pre` type is used to
+check whether the debit would succeed before execution.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### balance/3 ###
+
+`balance(X1, Req, NodeMsg) -> any()`
+
+Get the balance of a user in the ledger.
+
+
+
+### faff_test/0 * ###
+
+`faff_test() -> any()`
+
+Simple test of p4's capabilities with the `faff@1.0` device.
+
+
+
+### is_chargable_req/2 * ###
+
+`is_chargable_req(Req, NodeMsg) -> any()`
+
+The node operator may elect to make certain routes non-chargable, using
+the `routes` syntax also used to declare routes in `router@1.0`.
+
+
+
+### lua_pricing_test/0 * ###
+
+`lua_pricing_test() -> any()`
+
+Ensure that Lua modules can be used as pricing and ledger devices. Our
+modules come in two parts:
+- A `process` module which is executed as a persistent `local-process` on the
+node, and which maintains the state of the ledger.
+- A `client` module, which is executed as a `p4@1.0` device, marshalling
+requests to the `process` module.
+
+
+
+### non_chargable_route_test/0 * ###
+
+`non_chargable_route_test() -> any()`
+
+Test that a non-chargable route is not charged for.
+
+
+
+### request/3 ###
+
+`request(State, Raw, NodeMsg) -> any()`
+
+Estimate the cost of a transaction and decide whether to proceed with
+a request. The default behavior if `pricing-device` or `p4_balances` are
+not set is to proceed, so it is important that a user initialize them.
+
+
+
+### response/3 ###
+
+`response(State, RawResponse, NodeMsg) -> any()`
+
+Postprocess the request after it has been fulfilled.
+
+
+
+### test_opts/1 * ###
+
+`test_opts(Opts) -> any()`
+
+
+
+### test_opts/2 * ###
+
+`test_opts(Opts, PricingDev) -> any()`
+
+
+
+### test_opts/3 * ###
+
+`test_opts(Opts, PricingDev, LedgerDev) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_p4.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_patch.md ---
+# [Module dev_patch.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_patch.erl)
+
+
+
+
+A device that can be used to reorganize a message: Moving data from
+one path inside it to another.
+
+
+
+## Description ##
+
+This device's function runs in two modes:
+
+1. When using `all` to move all data at the path given in `from` to the
+path given in `to`.
+2. When using `patches` to move all submessages in the source to the target,
+_if_ they have a `method` key of `PATCH` or a `device` key of `patch@1.0`.
+
+Source and destination paths may be prepended by `base:` or `req:` keys to
+indicate that they are relative to either of the messages that the
+computation is being performed on.
+
+The search order for finding the source and destination keys is as follows,
+where `X` is either `from` or `to`:
+
+1. The `patch-X` key of the execution message.
+2. The `X` key of the execution message.
+3. The `patch-X` key of the request message.
+4. The `X` key of the request message.
+
+Additionally, this device implements the standard computation device keys,
+allowing it to be used as an element of an execution stack pipeline, etc.
+
+## Function Index ##
+
+
+
+Find relevant PATCH messages in the given source key of the execution
+and request messages, and apply them to the given destination key of the
+request.
+
+
+
+
+## Function Details ##
+
+
+
+### all/3 ###
+
+`all(Msg1, Msg2, Opts) -> any()`
+
+Get the value found at the `patch-from` key of the message, or the
+`from` key if the former is not present. Remove it from the message and set
+the new source to the value found.
+
+
+
+### all_mode_test/0 * ###
+
+`all_mode_test() -> any()`
+
+
+
+### compute/3 ###
+
+`compute(Msg1, Msg2, Opts) -> any()`
+
+
+
+### init/3 ###
+
+`init(Msg1, Msg2, Opts) -> any()`
+
+Necessary hooks for compliance with the `execution-device` standard.
+
+
+
+### move/4 * ###
+
+`move(Mode, Msg1, Msg2, Opts) -> any()`
+
+Unified executor for the `all` and `patches` modes.
+
+
+
+### normalize/3 ###
+
+`normalize(Msg1, Msg2, Opts) -> any()`
+
+
+
+### patch_to_submessage_test/0 * ###
+
+`patch_to_submessage_test() -> any()`
+
+
+
+### patches/3 ###
+
+`patches(Msg1, Msg2, Opts) -> any()`
+
+Find relevant `PATCH` messages in the given source key of the execution
+and request messages, and apply them to the given destination key of the
+request.
+
+
+
+### req_prefix_test/0 * ###
+
+`req_prefix_test() -> any()`
+
+
+
+### snapshot/3 ###
+
+`snapshot(Msg1, Msg2, Opts) -> any()`
+
+
+
+### uninitialized_patch_test/0 * ###
+
+`uninitialized_patch_test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_patch.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_poda.md ---
+# [Module dev_poda.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_poda.erl)
+
+
+
+
+A simple exemplar decentralized proof of authority consensus algorithm
+for AO processes.
+
+
+
+## Description ##
+
+This device is split into two flows, spanning three
+actions.
+
+Execution flow:
+1. Initialization.
+2. Validation of incoming messages before execution.
+Commitment flow:
+1. Adding commitments to results, either on a CU or MU.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### find_latest_outputs/1 * ###
+
+`find_latest_outputs(Opts) -> any()`
+
+Test for retrieving the latest computed output for a process.
+
+
+
+### first_with_path/4 * ###
+
+`first_with_path(ProcID, RequiredPath, Slots, Opts) -> any()`
+
+Find the latest assignment with the requested path suffix.
+
+
+
+### first_with_path/5 * ###
+
+`first_with_path(ProcID, Required, Rest, Opts, Store) -> any()`
+
+
+
+### latest/2 ###
+
+`latest(ProcID, Opts) -> any()`
+
+Retrieve the latest slot for a given process. Optionally state a limit
+on the slot number to search for, as well as a required path that the slot
+must have.
+
+
+
+### latest/3 ###
+
+`latest(ProcID, RequiredPath, Opts) -> any()`
+
+
+
+### latest/4 ###
+
+`latest(ProcID, RawRequiredPath, Limit, Opts) -> any()`
+
+
+
+### path/3 * ###
+
+`path(ProcID, Ref, Opts) -> any()`
+
+Calculate the path of a result, given a process ID and a slot.
+
+
+
+### path/4 * ###
+
+`path(ProcID, Ref, PathSuffix, Opts) -> any()`
+
+
+
+### process_cache_suite_test_/0 * ###
+
+`process_cache_suite_test_() -> any()`
+
+
+
+### read/2 ###
+
+`read(ProcID, Opts) -> any()`
+
+Read the result of a process at a given slot.
+
+
+
+### read/3 ###
+
+`read(ProcID, SlotRef, Opts) -> any()`
+
+
+
+### test_write_and_read_output/1 * ###
+
+`test_write_and_read_output(Opts) -> any()`
+
+Test for writing multiple computed outputs, then getting them by
+their slot number and by their signed and unsigned IDs.
+
+
+
+### write/4 ###
+
+`write(ProcID, Slot, Msg, Opts) -> any()`
+
+Write a process computation result to the cache.
+
+
+--- END OF FILE: docs/resources/source-code/dev_process_cache.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_process_worker.md ---
+# [Module dev_process_worker.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process_worker.erl)
+
+
+
+
+A long-lived process worker that keeps state in memory between
+calls.
+
+
+
+## Description ##
+Implements the interface of `hb_ao` to receive and respond
+to computation requests regarding a process as a singleton.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### await/5 ###
+
+`await(Worker, GroupName, Msg1, Msg2, Opts) -> any()`
+
+Await a resolution from a worker executing the `process@1.0` device.
+
+
+
+### group/3 ###
+
+`group(Msg1, Msg2, Opts) -> any()`
+
+Returns a group name for a request. The worker is responsible for all
+computation work on the same process on a single node, so we use the
+process ID as the group name.
+
+
+
+### grouper_test/0 * ###
+
+`grouper_test() -> any()`
+
+
+
+### info_test/0 * ###
+
+`info_test() -> any()`
+
+
+
+### notify_compute/4 ###
+
+`notify_compute(GroupName, SlotToNotify, Msg3, Opts) -> any()`
+
+Notify any waiters for a specific slot of the computed results.
+
+
+
+### notify_compute/5 * ###
+
+`notify_compute(GroupName, SlotToNotify, Msg3, Opts, Count) -> any()`
+
+
+
+### process_to_group_name/2 * ###
+
+`process_to_group_name(Msg1, Opts) -> any()`
+
+
+
+### send_notification/4 * ###
+
+`send_notification(Listener, GroupName, SlotToNotify, Msg3) -> any()`
+
+
+
+### server/3 ###
+
+`server(GroupName, Msg1, Opts) -> any()`
+
+Spawn a new worker process. This is called after the end of the first
+execution of `hb_ao:resolve/3`, so the state we are given is
+already current.
+
+
+
+### stop/1 ###
+
+`stop(Worker) -> any()`
+
+Stop a worker process.
+
+
+
+### test_init/0 * ###
+
+`test_init() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_process_worker.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_process.md ---
+# [Module dev_process.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process.erl)
+
+
+
+
+This module contains the device implementation of AO processes
+in AO-Core.
+
+
+
+## Description ##
+
+The core functionality of the module is in 'routing' requests
+for different functionality (scheduling, computing, and pushing messages)
+to the appropriate device. This is achieved by swapping out the device
+of the process message with the necessary component in order to run the
+execution, then swapping it back before returning. Computation is supported
+as a stack of devices, customizable by the user, while the scheduling
+device is (by default) a single device.
+
+This allows the devices to share state as needed. Additionally, after each
+computation step the device caches the result at a path relative to the
+process definition itself, such that the process message's ID can act as an
+immutable reference to the process's growing list of interactions. See
+`dev_process_cache` for details.
+
+The external API of the device is as follows:
+
+```
+
+ GET /ID/Schedule: Returns the messages in the schedule
+ POST /ID/Schedule: Adds a message to the schedule
+ GET /ID/Compute/[IDorSlotNum]: Returns the state of the process after
+ applying a message
+ GET /ID/Now: Returns the /Results key of the latest
+ computed message
+```
+
+An example process definition will look like this:
+
+```
+
+ Device: Process/1.0
+ Scheduler-Device: Scheduler/1.0
+ Execution-Device: Stack/1.0
+ Execution-Stack: "Scheduler/1.0", "Cron/1.0", "WASM/1.0", "PoDA/1.0"
+ Cron-Frequency: 10-Minutes
+ WASM-Image: WASMImageID
+ PoDA:
+ Device: PoDA/1.0
+ Authority: A
+ Authority: B
+ Authority: C
+ Quorum: 2
+```
+
+Runtime options:
+Cache-Frequency: The number of assignments that will be computed
+before the full (restorable) state should be cached.
+Cache-Keys: A list of the keys that should be cached for all
+assignments, in addition to `/Results`.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### aos_browsable_state_test_/0 * ###
+
+`aos_browsable_state_test_() -> any()`
+
+
+
+### aos_compute_test_/0 * ###
+
+`aos_compute_test_() -> any()`
+
+
+
+### aos_persistent_worker_benchmark_test_/0 * ###
+
+`aos_persistent_worker_benchmark_test_() -> any()`
+
+
+
+### aos_state_access_via_http_test_/0 * ###
+
+`aos_state_access_via_http_test_() -> any()`
+
+
+
+### aos_state_patch_test_/0 * ###
+
+`aos_state_patch_test_() -> any()`
+
+
+
+### as_process/2 ###
+
+`as_process(Msg1, Opts) -> any()`
+
+Change the message so that it has the device set as this module.
+In situations where the key that is `run_as` returns a message with a
+transformed device, this is useful.
+
+
+
+### compute/3 ###
+
+`compute(Msg1, Msg2, Opts) -> any()`
+
+Compute the result of an assignment applied to the process state, if it
+is the next message.
+
+
+
+### compute_slot/5 * ###
+
+`compute_slot(ProcID, State, RawInputMsg, ReqMsg, Opts) -> any()`
+
+Compute a single slot for a process, given an initialized state.
+
+
+
+### compute_to_slot/5 * ###
+
+`compute_to_slot(ProcID, Msg1, Msg2, TargetSlot, Opts) -> any()`
+
+Continually get and apply the next assignment from the scheduler until
+we reach the target slot that the user has requested.
+
+
+
+### default_device/3 * ###
+
+`default_device(Msg1, Key, Opts) -> any()`
+
+Returns the default device for a given piece of functionality. Expects
+the `process/variant` key to be set in the message. The `execution-device`
+_must_ be set in all processes aside from those marked with the `ao.TN.1` variant.
+This is in order to ensure that post-mainnet processes do not default to
+using infrastructure that should not be present on nodes in the future.
+
+
+
+### default_device_index/1 * ###
+
+`default_device_index(X1) -> any()`
+
+
+
+### dev_test_process/0 ###
+
+`dev_test_process() -> any()`
+
+Generate a device that has a stack of two `dev_test`s for
+execution. This should generate a message state that has doubled
+`Already-Seen` elements for each assigned slot.
+
+
+
+### do_test_restore/0 ###
+
+`do_test_restore() -> any()`
+
+
+
+### ensure_loaded/3 * ###
+
+`ensure_loaded(Msg1, Msg2, Opts) -> any()`
+
+Ensure that the process message we have in memory is live and
+up-to-date.
+
+
+
+### ensure_process_key/2 ###
+
+`ensure_process_key(Msg1, Opts) -> any()`
+
+Helper function to store a copy of the `process` key in the message.
+
+
+
+### get_scheduler_slot_test/0 * ###
+
+`get_scheduler_slot_test() -> any()`
+
+
+
+### http_wasm_process_by_id_test/0 * ###
+
+`http_wasm_process_by_id_test() -> any()`
+
+
+
+### info/1 ###
+
+`info(Msg1) -> any()`
+
+When the info key is called, we should return the process exports.
+
+
+
+### init/0 ###
+
+`init() -> any()`
+
+
+
+### init/3 * ###
+
+`init(Msg1, Msg2, Opts) -> any()`
+
+Before computation begins, a boot phase is required. This phase
+allows devices on the execution stack to initialize themselves. We set the
+`Initialized` key to `True` to indicate that the process has been
+initialized.
+
+
+
+### next/3 * ###
+
+`next(Msg1, Msg2, Opts) -> any()`
+
+
+
+### now/3 ###
+
+`now(RawMsg1, Msg2, Opts) -> any()`
+
+Returns the known state of the process at either the current slot, or
+the latest slot in the cache depending on the `process_now_from_cache` option.
+
+
+
+### now_results_test_/0 * ###
+
+`now_results_test_() -> any()`
+
+
+
+### persistent_process_test/0 * ###
+
+`persistent_process_test() -> any()`
+
+
+
+### prior_results_accessible_test_/0 * ###
+
+`prior_results_accessible_test_() -> any()`
+
+
+
+### process_id/3 ###
+
+`process_id(Msg1, Msg2, Opts) -> any()`
+
+Returns the process ID of the current process.
+
+
+
+### push/3 ###
+
+`push(Msg1, Msg2, Opts) -> any()`
+
+Recursively push messages to the scheduler until we find a message
+that does not lead to any further messages being scheduled.
+
+
+
+### recursive_path_resolution_test/0 * ###
+
+`recursive_path_resolution_test() -> any()`
+
+
+
+### restore_test_/0 * ###
+
+`restore_test_() -> any()`
+
+Manually test state restoration without using the cache.
+
+
+
+### run_as/4 * ###
+
+`run_as(Key, Msg1, Msg2, Opts) -> any()`
+
+Run a message against Msg1, with the device being swapped out for
+the device found at `Key`. After execution, the device is swapped back
+to the original device if the device is the same as we left it.
+
+
+
+### schedule/3 ###
+
+`schedule(Msg1, Msg2, Opts) -> any()`
+
+Wraps functions in the Scheduler device.
+
+
+
+### schedule_aos_call/2 ###
+
+`schedule_aos_call(Msg1, Code) -> any()`
+
+
+
+### schedule_aos_call/3 ###
+
+`schedule_aos_call(Msg1, Code, Opts) -> any()`
+
+
+
+### schedule_on_process_test/0 * ###
+
+`schedule_on_process_test() -> any()`
+
+
+
+### schedule_test_message/2 * ###
+
+`schedule_test_message(Msg1, Text) -> any()`
+
+
+
+### schedule_test_message/3 * ###
+
+`schedule_test_message(Msg1, Text, MsgBase) -> any()`
+
+
+
+### schedule_wasm_call/3 * ###
+
+`schedule_wasm_call(Msg1, FuncName, Params) -> any()`
+
+
+
+### schedule_wasm_call/4 * ###
+
+`schedule_wasm_call(Msg1, FuncName, Params, Opts) -> any()`
+
+
+
+### simple_wasm_persistent_worker_benchmark_test/0 * ###
+
+`simple_wasm_persistent_worker_benchmark_test() -> any()`
+
+
+
+### slot/3 ###
+
+`slot(Msg1, Msg2, Opts) -> any()`
+
+
+
+### snapshot/3 ###
+
+`snapshot(RawMsg1, Msg2, Opts) -> any()`
+
+
+
+### store_result/5 * ###
+
+`store_result(ProcID, Slot, Msg3, Msg2, Opts) -> any()`
+
+Store the resulting state in the cache, potentially with the snapshot
+key.
+
+
+
+### test_aos_process/0 ###
+
+`test_aos_process() -> any()`
+
+Generate a process message with a random number, and the
+`dev_wasm` device for execution.
+
+
+
+### test_aos_process/1 ###
+
+`test_aos_process(Opts) -> any()`
+
+
+
+### test_aos_process/2 * ###
+
+`test_aos_process(Opts, Stack) -> any()`
+
+
+
+### test_base_process/0 * ###
+
+`test_base_process() -> any()`
+
+Generate a process message with a random number, and no
+executor.
+
+
+
+### test_base_process/1 * ###
+
+`test_base_process(Opts) -> any()`
+
+
+
+### test_device_compute_test/0 * ###
+
+`test_device_compute_test() -> any()`
+
+
+
+### test_wasm_process/1 ###
+
+`test_wasm_process(WASMImage) -> any()`
+
+
+
+### test_wasm_process/2 * ###
+
+`test_wasm_process(WASMImage, Opts) -> any()`
+
+
+
+### wasm_compute_from_id_test/0 * ###
+
+`wasm_compute_from_id_test() -> any()`
+
+
+
+### wasm_compute_test/0 * ###
+
+`wasm_compute_test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_process.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_push.md ---
+# [Module dev_push.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_push.erl)
+
+
+
+
+`push@1.0` takes a message or slot number, evaluates it, and recursively
+pushes the resulting messages to other processes.
+
+
+
+## Description ##
+The `push`ing mechanism
+continues until there are no remaining messages to push.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### additional_keys/3 * ###
+
+`additional_keys(Origin, ToSched, Opts) -> any()`
+
+Set the necessary keys in order for the recipient to know where the
+message came from.
+
+
+
+### do_push/3 * ###
+
+`do_push(Process, Assignment, Opts) -> any()`
+
+Push a message or slot number, including its downstream results.
+
+
+
+### extract/2 * ###
+
+`extract(X1, Raw) -> any()`
+
+Return either the `target` or the `hint`.
+
+
+
+### find_type/2 * ###
+
+`find_type(Req, Opts) -> any()`
+
+
+
+### full_push_test_/0 * ###
+
+`full_push_test_() -> any()`
+
+
+
+### is_async/3 * ###
+
+`is_async(Process, Req, Opts) -> any()`
+
+Determine if the push is asynchronous.
+
+
+
+### multi_process_push_test_/0 * ###
+
+`multi_process_push_test_() -> any()`
+
+
+
+### normalize_message/2 * ###
+
+`normalize_message(MsgToPush, Opts) -> any()`
+
+Augment the message with from-* keys, if it doesn't already have them.
+
+
+
+### parse_redirect/1 * ###
+
+`parse_redirect(Location) -> any()`
+
+
+
+### ping_pong_script/1 * ###
+
+`ping_pong_script(Limit) -> any()`
+
+
+
+### push/3 ###
+
+`push(Base, Req, Opts) -> any()`
+
+Push either a message or an assigned slot number. If a `Process` is
+provided in the `body` of the request, it will be scheduled (initializing
+it if it does not exist). Otherwise, the message specified by the given
+`slot` key will be pushed.
+
+Optional parameters:
+`/result-depth`: The depth to which the full contents of the result
+will be included in the response. Default: 1, returning
+the full result of the first message, but only the 'tree'
+of downstream messages.
+`/push-mode`: Whether or not the push should be done asynchronously.
+Default: `sync`, pushing synchronously.
+
+
+
+### push_prompts_encoding_change_test/0 * ###
+
+`push_prompts_encoding_change_test() -> any()`
+
+
+
+### push_result_message/4 * ###
+
+`push_result_message(TargetProcess, MsgToPush, Origin, Opts) -> any()`
+
+Push a downstream message result. The `Origin` map contains information
+about the origin of the message: The process that originated the message,
+the slot number from which it was sent, and the outbox key of the message,
+and the depth to which downstream results should be included in the message.
+
+
+
+### push_with_mode/3 * ###
+
+`push_with_mode(Process, Req, Opts) -> any()`
+
+
+
+### push_with_redirect_hint_test_disabled/0 * ###
+
+`push_with_redirect_hint_test_disabled() -> any()`
+
+
+
+### remote_schedule_result/3 * ###
+
+`remote_schedule_result(Location, SignedReq, Opts) -> any()`
+
+
+
+### reply_script/0 * ###
+
+`reply_script() -> any()`
+
+
+
+### schedule_initial_message/3 * ###
+
+`schedule_initial_message(Base, Req, Opts) -> any()`
+
+Push a message or a process, prior to pushing the resulting slot number.
+
+
+
+### schedule_result/4 * ###
+
+`schedule_result(TargetProcess, MsgToPush, Origin, Opts) -> any()`
+
+Add the necessary keys to the message to be scheduled, then schedule it.
+If the remote scheduler does not support the given codec, it will be
+downgraded and re-signed.
+
+
+
+### schedule_result/5 * ###
+
+`schedule_result(TargetProcess, MsgToPush, Codec, Origin, Opts) -> any()`
+
+
+
+### split_target/1 * ###
+
+`split_target(RawTarget) -> any()`
+
+Split the target into the process ID and the optional query string.
+
+
+
+### target_process/2 * ###
+
+`target_process(MsgToPush, Opts) -> any()`
+
+Find the target process ID for a message to push.
+
+
+--- END OF FILE: docs/resources/source-code/dev_push.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_relay.md ---
+# [Module dev_relay.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_relay.erl)
+
+
+
+
+This module implements the relay device, which is responsible for
+relaying messages between nodes and other HTTP(S) endpoints.
+
+
+
+## Description ##
+
+It can be called in either `call` or `cast` mode. In `call` mode, it
+returns a `{ok, Result}` tuple, where `Result` is the response from the
+remote peer to the message sent. In `cast` mode, the invocation returns
+immediately, and the message is relayed asynchronously. No response is given
+and the device returns `{ok, <<"OK">>}`.
+
+Example usage:
+
+```
+
+ curl /~relay@1.0/call?method=GET&0.path=https://www.arweave.net/
+```
+
+
+## Function Index ##
+
+
+
+Test that the preprocess/3 function re-routes a request to remote
+peers, according to the node's routing table.
+
+
+
+
+## Function Details ##
+
+
+
+### call/3 ###
+
+`call(M1, RawM2, Opts) -> any()`
+
+Execute a `call` request using a node's routes.
+
+Supports the following options:
+- `target`: The target message to relay. Defaults to the original message.
+- `relay-path`: The path to relay the message to. Defaults to the original path.
+- `method`: The method to use for the request. Defaults to the original method.
+- `requires-sign`: Whether the request requires signing before dispatching.
+Defaults to `false`.
+
+
+
+### call_get_test/0 * ###
+
+`call_get_test() -> any()`
+
+
+
+### cast/3 ###
+
+`cast(M1, M2, Opts) -> any()`
+
+Execute a request in the same way as `call/3`, but asynchronously. Always
+returns `<<"OK">>`.
+
+
+
+### request/3 ###
+
+`request(Msg1, Msg2, Opts) -> any()`
+
+Preprocess a request to check if it should be relayed to a different node.
+
+
+
+### request_hook_reroute_to_nearest_test/0 * ###
+
+`request_hook_reroute_to_nearest_test() -> any()`
+
+Test that the `preprocess/3` function re-routes a request to remote
+peers, according to the node's routing table.
+
+
+--- END OF FILE: docs/resources/source-code/dev_relay.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_router.md ---
+# [Module dev_router.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_router.erl)
+
+
+
+
+A device that routes outbound messages from the node to their
+appropriate network recipients via HTTP.
+
+
+
+## Description ##
+
+All messages are initially
+routed to a single process per node, which then load-balances them
+between downstream workers that perform the actual requests.
+
+The routes for the router are defined in the `routes` key of the `Opts`,
+as a precedence-ordered list of maps. The first map that matches the
+message will be used to determine the route.
+
+Multiple nodes can be specified as viable for a single route, with the
+`Choose` key determining how many nodes to choose from the list (defaulting
+to 1). The `Strategy` key determines the load distribution strategy,
+which can be one of `Random`, `By-Base`, or `Nearest`. The route may also
+define additional parallel execution parameters, which are used by the
+`hb_http` module to manage control of requests.
+
+The structure of the routes should be as follows:
+
+```
+
+ Node?: The node to route the message to.
+ Nodes?: A list of nodes to route the message to.
+ Strategy?: The load distribution strategy to use.
+ Choose?: The number of nodes to choose from the list.
+ Template?: A message template to match the message against, either as a
+ map or a path regex.
+```
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### add_route_test/0 * ###
+
+`add_route_test() -> any()`
+
+
+
+### apply_route/2 * ###
+
+`apply_route(Msg, Route) -> any()`
+
+Apply a node map's rules for transforming the path of the message.
+Supports the following keys:
+- `opts`: A map of options to pass to the request.
+- `prefix`: The prefix to add to the path.
+- `suffix`: The suffix to add to the path.
+- `replace`: A regex to replace in the path.
+
+
+
+### apply_routes/3 * ###
+
+`apply_routes(Msg, R, Opts) -> any()`
+
+Generate a `uri` key for each node in a route.
+
+
+
+### binary_to_bignum/1 * ###
+
+`binary_to_bignum(Bin) -> any()`
+
+Cast a human-readable or native-encoded ID to a big integer.
+
+
+
+### by_base_determinism_test/0 * ###
+
+`by_base_determinism_test() -> any()`
+
+Ensure that `By-Base` always chooses the same node for the same
+hashpath.
+
+
+
+### choose/5 * ###
+
+`choose(N, X2, Hashpath, Nodes, Opts) -> any()`
+
+Implements the load distribution strategies if given a cluster.
+
+
+
+### choose_1_test/1 * ###
+
+`choose_1_test(Strategy) -> any()`
+
+
+
+### choose_n_test/1 * ###
+
+`choose_n_test(Strategy) -> any()`
+
+
+
+### device_call_from_singleton_test/0 * ###
+
+`device_call_from_singleton_test() -> any()`
+
+
+
+### dynamic_route_provider_test/0 * ###
+
+`dynamic_route_provider_test() -> any()`
+
+
+
+### dynamic_router_test/0 * ###
+
+`dynamic_router_test() -> any()`
+
+Example of a Lua module being used as the `route_provider` for a
+HyperBEAM node. The module utilized in this example dynamically adjusts the
+likelihood of routing to a given node, depending upon price and performance.
+It also includes preprocessing support for routing.
+
+
+
+### dynamic_routing_by_performance/0 * ###
+
+`dynamic_routing_by_performance() -> any()`
+
+
+
+### dynamic_routing_by_performance_test_/0 * ###
+
+`dynamic_routing_by_performance_test_() -> any()`
+
+Demonstrates routing tables being dynamically created and adjusted
+according to the real-time performance of nodes. This test utilizes the
+`dynamic-router` script to manage routes and recalculate weights based on the
+reported performance.
+
+
+
+### explicit_route_test/0 * ###
+
+`explicit_route_test() -> any()`
+
+
+
+### extract_base/2 * ###
+
+`extract_base(RawPath, Opts) -> any()`
+
+Extract the base message ID from a request message. Produces a single
+binary ID that can be used for routing decisions.
+
+
+
+### field_distance/2 * ###
+
+`field_distance(A, B) -> any()`
+
+Calculate the minimum distance between two numbers
+(either progressing backwards or forwards), assuming a
+256-bit field.
+
+
+
+### find_target_path/2 * ###
+
+`find_target_path(Msg, Opts) -> any()`
+
+Find the target path to route for a request message.
+
+
+
+### generate_hashpaths/1 * ###
+
+`generate_hashpaths(Runs) -> any()`
+
+
+
+### generate_nodes/1 * ###
+
+`generate_nodes(N) -> any()`
+
+
+
+### get_routes_test/0 * ###
+
+`get_routes_test() -> any()`
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Exported function for getting device info, controls which functions are
+exposed via the device API.
+
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+HTTP info response providing information about this device
+
+
+
+### load_routes/1 * ###
+
+`load_routes(Opts) -> any()`
+
+Load the current routes for the node. Allows either explicit routes from
+the node message's `routes` key, or dynamic routes generated by resolving the
+`route_provider` message.
+
+
+
+### local_dynamic_router_test/0 * ###
+
+`local_dynamic_router_test() -> any()`
+
+Example of a Lua module being used as the `route_provider` for a
+HyperBEAM node. The module utilized in this example dynamically adjusts the
+likelihood of routing to a given node, depending upon price and performance.
+
+
+
+### local_process_route_provider_test/0 * ###
+
+`local_process_route_provider_test() -> any()`
+
+
+
+### lowest_distance/1 * ###
+
+`lowest_distance(Nodes) -> any()`
+
+Find the node with the lowest distance to the given hashpath.
+
+
+
+### lowest_distance/2 * ###
+
+`lowest_distance(Nodes, X) -> any()`
+
+
+
+### match/3 ###
+
+`match(Base, Req, Opts) -> any()`
+
+Find the first matching template in a list of known routes. Allows the
+path to be specified by either the explicit `path` (for internal use by this
+module), or `route-path` for use by external devices and users.
+
+
+
+### match_routes/3 * ###
+
+`match_routes(ToMatch, Routes, Opts) -> any()`
+
+
+
+### match_routes/4 * ###
+
+`match_routes(ToMatch, Routes, Keys, Opts) -> any()`
+
+
+
+### preprocess/3 ###
+
+`preprocess(Msg1, Msg2, Opts) -> any()`
+
+Preprocess a request to check if it should be relayed to a different node.
+
+
+
+### register/3 ###
+
+`register(M1, M2, Opts) -> any()`
+
+
+
+### relay_nearest_test/0 * ###
+
+`relay_nearest_test() -> any()`
+
+
+
+### route/2 ###
+
+`route(Msg, Opts) -> any()`
+
+Find the appropriate route for the given message. If we are able to
+resolve to a single host+path, we return that directly. Otherwise, we return
+the matching route (including a list of nodes under `nodes`) from the list of
+routes.
+
+If we have a route that has multiple resolving nodes, check
+the load distribution strategy and choose a node. Supported strategies:
+
+```
+
+ All: Return all nodes (default).
+ Random: Distribute load evenly across all nodes, non-deterministically.
+ By-Base: According to the base message's hashpath.
+ By-Weight: According to the node's weight key.
+ Nearest: According to the distance of the node's wallet address to the
+ base message's hashpath.
+```
+
+`By-Base` will ensure that all traffic for the same hashpath is routed to the
+same node, minimizing work duplication, while `Random` ensures a more even
+distribution of the requests.
+
+Can operate as a `~router@1.0` device, which will ignore the base message,
+routing based on the Opts and request message provided, or as a standalone
+function, taking only the request message and the `Opts` map.
+
+
+
+### route/3 ###
+
+`route(X1, Msg, Opts) -> any()`
+
+
+
+### route_provider_test/0 * ###
+
+`route_provider_test() -> any()`
+
+
+
+### route_regex_matches_test/0 * ###
+
+`route_regex_matches_test() -> any()`
+
+
+
+### route_template_message_matches_test/0 * ###
+
+`route_template_message_matches_test() -> any()`
+
+
+
+### routes/3 ###
+
+`routes(M1, M2, Opts) -> any()`
+
+Device function that returns all known routes.
+
+
+
+### simulate/4 * ###
+
+`simulate(Runs, ChooseN, Nodes, Strategy) -> any()`
+
+
+
+### simulation_distribution/2 * ###
+
+`simulation_distribution(SimRes, Nodes) -> any()`
+
+
+
+### simulation_occurences/2 * ###
+
+`simulation_occurences(SimRes, Nodes) -> any()`
+
+
+
+### strategy_suite_test_/0 * ###
+
+`strategy_suite_test_() -> any()`
+
+
+
+### template_matches/3 * ###
+
+`template_matches(ToMatch, Template, Opts) -> any()`
+
+Check if a message matches a message template or path regex.
+
+
+
+### unique_nodes/1 * ###
+
+`unique_nodes(Simulation) -> any()`
+
+
+
+### unique_test/1 * ###
+
+`unique_test(Strategy) -> any()`
+
+
+
+### weighted_random_strategy_test/0 * ###
+
+`weighted_random_strategy_test() -> any()`
+
+
+
+### within_norms/3 * ###
+
+`within_norms(SimRes, Nodes, TestSize) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_router.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_scheduler_cache.md ---
+# [Module dev_scheduler_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_cache.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+Write the latest known scheduler location for an address.
+
+
+
+
+## Function Details ##
+
+
+
+### latest/2 ###
+
+`latest(ProcID, Opts) -> any()`
+
+Get the latest assignment from the cache.
+
+
+
+### list/2 ###
+
+`list(ProcID, Opts) -> any()`
+
+Get the assignments for a process.
+
+
+
+### read/3 ###
+
+`read(ProcID, Slot, Opts) -> any()`
+
+Get an assignment message from the cache.
+
+
+
+### read_location/2 ###
+
+`read_location(Address, Opts) -> any()`
+
+Read the latest known scheduler location for an address.
+
+
+
+### write/2 ###
+
+`write(Assignment, Opts) -> any()`
+
+Write an assignment message into the cache.
+
+
+
+### write_location/2 ###
+
+`write_location(LocMsg, Opts) -> any()`
+
+Write the latest known scheduler location for an address.
+
+
+--- END OF FILE: docs/resources/source-code/dev_scheduler_cache.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_scheduler_formats.md ---
+# [Module dev_scheduler_formats.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_formats.erl)
+
+
+
+
+This module is used by dev_scheduler in order to produce outputs that
+are compatible with various forms of AO clients.
+
+
+
+## Description ##
+
+It features two main formats:
+
+- `application/json`
+- `application/http`
+
+The `application/json` format is a legacy format that is not recommended for
+new integrations of the AO protocol.
+
+## Function Index ##
+
+
+
+For all scheduler format operations, we do not calculate hashpaths,
+perform cache lookups, or await in-progress results.
+
+
+
+
+## Function Details ##
+
+
+
+### aos2_normalize_data/1 * ###
+
+`aos2_normalize_data(JSONStruct) -> any()`
+
+The `hb_gateway_client` module expects all JSON structures to at least
+have a `data` field. This function ensures that.
+
+
+
+### aos2_normalize_types/1 ###
+
+`aos2_normalize_types(Msg) -> any()`
+
+Normalize an AOS2 formatted message to ensure that all field NAMES and
+types are correct. This involves converting field names to integers and
+specific field names to their canonical form.
+NOTE: This will result in a message that is not verifiable! It is, however,
+necessary for gaining compatibility with the AOS2-style scheduling API.
+
+
+
+### aos2_to_assignment/2 ###
+
+`aos2_to_assignment(A, RawOpts) -> any()`
+
+Create and normalize an assignment from an AOS2-style JSON structure.
+NOTE: This method is destructive to the verifiability of the assignment.
+
+
+
+### aos2_to_assignments/3 ###
+
+`aos2_to_assignments(ProcID, Body, RawOpts) -> any()`
+
+Convert an AOS2-style JSON structure to a normalized HyperBEAM
+assignments response.
+
+
+
+### assignment_to_aos2/2 * ###
+
+`assignment_to_aos2(Assignment, RawOpts) -> any()`
+
+Convert an assignment to an AOS2-compatible JSON structure.
+
+
+
+### assignments_to_aos2/4 ###
+
+`assignments_to_aos2(ProcID, Assignments, More, RawOpts) -> any()`
+
+
+
+### assignments_to_bundle/4 ###
+
+`assignments_to_bundle(ProcID, Assignments, More, Opts) -> any()`
+
+Generate a `GET /schedule` response for a process as HTTP-sig bundles.
+
+
+
+### assignments_to_bundle/5 * ###
+
+`assignments_to_bundle(ProcID, Assignments, More, TimeInfo, RawOpts) -> any()`
+
+
+
+### cursor/2 * ###
+
+`cursor(Assignment, RawOpts) -> any()`
+
+Generate a cursor for an assignment. This should be the slot number, at
+least in the case of mainnet `ao.N.1` assignments. In the case of legacynet
+(`ao.TN.1`) assignments, we may want to use the assignment ID.
+
+
+
+### format_opts/1 * ###
+
+`format_opts(Opts) -> any()`
+
+For all scheduler format operations, we do not calculate hashpaths,
+perform cache lookups, or await in-progress results.
+
+
+--- END OF FILE: docs/resources/source-code/dev_scheduler_formats.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_scheduler_registry.md ---
+# [Module dev_scheduler_registry.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_registry.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+Find a process associated with the processor ID in the local registry
+If the process is not found and GenIfNotHosted is true, it attempts to create a new one.
+
+
+
+
+## Function Details ##
+
+
+
+### create_and_find_process_test/0 * ###
+
+`create_and_find_process_test() -> any()`
+
+
+
+### create_multiple_processes_test/0 * ###
+
+`create_multiple_processes_test() -> any()`
+
+
+
+### find/1 ###
+
+`find(ProcID) -> any()`
+
+Find a process associated with the processor ID in the local registry
+If the process is not found, it will not create a new one
+
+
+
+### find/2 ###
+
+`find(ProcID, GenIfNotHosted) -> any()`
+
+Find a process associated with the processor ID in the local registry
+If the process is not found and `GenIfNotHosted` is true, it attempts to create a new one
+
+
+
+### find/3 ###
+
+`find(ProcID, GenIfNotHosted, Opts) -> any()`
+
+Same as `find/2` but with additional options passed when spawning a new process (if needed)
+
+
+
+### find_non_existent_process_test/0 * ###
+
+`find_non_existent_process_test() -> any()`
+
+
+
+### get_all_processes_test/0 * ###
+
+`get_all_processes_test() -> any()`
+
+
+
+### get_processes/0 ###
+
+`get_processes() -> any()`
+
+Return a list of all currently registered ProcID.
+
+
+
+### get_wallet/0 ###
+
+`get_wallet() -> any()`
+
+
+
+### maybe_new_proc/3 * ###
+
+`maybe_new_proc(ProcID, GenIfNotHosted, Opts) -> any()`
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_scheduler_registry.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_scheduler_server.md ---
+# [Module dev_scheduler_server.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_server.erl)
+
+
+
+
+A long-lived server that schedules messages for a process.
+
+
+
+## Description ##
+It acts as a deliberate 'bottleneck' to prevent the server accidentally
+assigning multiple messages to the same slot.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### assign/3 * ###
+
+`assign(State, Message, ReplyPID) -> any()`
+
+Assign a message to the next slot.
+
+
+
+### do_assign/3 * ###
+
+`do_assign(State, Message, ReplyPID) -> any()`
+
+Generate and store the actual assignment message.
+
+
+
+### info/1 ###
+
+`info(ProcID) -> any()`
+
+Get the current slot from the scheduling server.
+
+
+
+### maybe_inform_recipient/5 * ###
+
+`maybe_inform_recipient(Mode, ReplyPID, Message, Assignment, State) -> any()`
+
+
+
+### new_proc_test_/0 * ###
+
+`new_proc_test_() -> any()`
+
+Test the basic functionality of the server.
+
+
+
+### next_hashchain/2 * ###
+
+`next_hashchain(HashChain, Message) -> any()`
+
+Create the next element in a chain of hashes that links this and prior
+assignments.
+
+
+
+### schedule/2 ###
+
+`schedule(AOProcID, Message) -> any()`
+
+Call the appropriate scheduling server to assign a message.
+
+
+
+### server/1 * ###
+
+`server(State) -> any()`
+
+The main loop of the server. Simply waits for messages to assign and
+returns the current slot.
+
+
+
+### start/2 ###
+
+`start(ProcID, Opts) -> any()`
+
+Start a scheduling server for a given computation.
+
+
+
+### stop/1 ###
+
+`stop(ProcID) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/dev_scheduler_server.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_scheduler.md ---
+# [Module dev_scheduler.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler.erl)
+
+
+
+
+A simple scheduler scheme for AO.
+
+
+
+## Description ##
+This device expects a message of the form:
+Process: `#{ id, Scheduler: #{ Authority } }`
+
+```
+
+ It exposes the following keys for scheduling:#{ method: GET, path: <<"/info">> } ->
+ Returns information about the scheduler.#{ method: GET, path: <<"/slot">> } -> slot(Msg1, Msg2, Opts)
+ Returns the current slot for a process.#{ method: GET, path: <<"/schedule">> } -> get_schedule(Msg1, Msg2, Opts)
+ Returns the schedule for a process in a cursor-traversable format.#{ method: POST, path: <<"/schedule">> } -> post_schedule(Msg1, Msg2, Opts)
+ Schedules a new message for a process, or starts a new scheduler
+ for the given message.
+```
+
+
+## Function Index ##
+
+
+
+Take a process ID or target with a potential hint and return just the
+process ID.
+
+
+
+
+## Function Details ##
+
+
+
+### benchmark_suite/2 * ###
+
+`benchmark_suite(Port, Base) -> any()`
+
+
+
+### benchmark_suite_test_/0 * ###
+
+`benchmark_suite_test_() -> any()`
+
+
+
+### cache_remote_schedule/2 * ###
+
+`cache_remote_schedule(Schedule, Opts) -> any()`
+
+Cache a schedule received from a remote scheduler.
+
+
+
+### check_lookahead_and_local_cache/4 * ###
+
+`check_lookahead_and_local_cache(Msg1, ProcID, TargetSlot, Opts) -> any()`
+
+Check if we have a result from a lookahead worker or from our local
+cache. If we have a result in the local cache, we may also start a new
+lookahead worker to fetch the next assignments if we have them locally,
+ahead of time. This can be enabled/disabled with the `scheduler_lookahead`
+option.
+
+
+
+### checkpoint/1 ###
+
+`checkpoint(State) -> any()`
+
+Returns the current state of the scheduler.
+
+
+
+### do_get_remote_schedule/6 * ###
+
+`do_get_remote_schedule(ProcID, LocalAssignments, From, To, Redirect, Opts) -> any()`
+
+Get a schedule from a remote scheduler, unless we have already
+read all of the assignments from the local cache.
+
+
+
+### do_post_schedule/4 * ###
+
+`do_post_schedule(ProcID, PID, Msg2, Opts) -> any()`
+
+Post schedule the message. `Msg2` by this point has been refined to only
+committed keys, and to only include the `target` message that is to be
+scheduled.
+
+
+
+### filter_json_assignments/3 * ###
+
+`filter_json_assignments(JSONRes, To, From) -> any()`
+
+Filter JSON assignment results from a remote legacy scheduler.
+
+
+
+### find_message_to_schedule/3 * ###
+
+`find_message_to_schedule(Msg1, Msg2, Opts) -> any()`
+
+Search the given base and request message pair to find the message to
+schedule. The precedence order for search is as follows:
+1. `Msg2/body`
+2. `Msg2`
+
+
+
+### find_remote_scheduler/3 * ###
+
+`find_remote_scheduler(ProcID, Scheduler, Opts) -> any()`
+
+Use the SchedulerLocation to determine the remote path and return a redirect.
+
+
+
+### find_server/3 * ###
+
+`find_server(ProcID, Msg1, Opts) -> any()`
+
+Locate the correct scheduling server for a given process.
+
+
+
+### find_server/4 * ###
+
+`find_server(ProcID, Msg1, ToSched, Opts) -> any()`
+
+
+
+### find_target_id/3 * ###
+
+`find_target_id(Msg1, Msg2, Opts) -> any()`
+
+Find the schedule ID from a given request. The precedence order for
+search is as follows:
+[1. `ToSched/id` -- in the case of `POST schedule`, handled locally]
+2. `Msg2/target`
+3. `Msg2/id` when `Msg2` has `type: Process`
+4. `Msg1/process/id`
+5. `Msg1/id` when `Msg1` has `type: Process`
+6. `Msg2/id`
+
+
+
+### generate_local_schedule/5 * ###
+
+`generate_local_schedule(Format, ProcID, From, To, Opts) -> any()`
+
+Generate a `GET /schedule` response for a process.
+
+
+
+### generate_redirect/3 * ###
+
+`generate_redirect(ProcID, SchedulerLocation, Opts) -> any()`
+
+Generate a redirect message to a scheduler.
+
+
+
+### get_hint/2 * ###
+
+`get_hint(Str, Opts) -> any()`
+
+If a hint is present in the string, return it. Else, return not_found.
+
+
+
+### get_local_assignments/4 * ###
+
+`get_local_assignments(ProcID, From, RequestedTo, Opts) -> any()`
+
+Get the assignments for a process, and whether the request was truncated.
+
+
+
+### get_local_schedule_test/0 * ###
+
+`get_local_schedule_test() -> any()`
+
+
+
+### get_location/3 * ###
+
+`get_location(Msg1, Req, Opts) -> any()`
+
+Search for the location of the scheduler in the scheduler-location
+cache. If an address is provided, we search for the location of that
+specific scheduler. Otherwise, we return the location record for the current
+node's scheduler, if it has been established.
+
+
+
+### get_remote_schedule/5 * ###
+
+`get_remote_schedule(RawProcID, From, To, Redirect, Opts) -> any()`
+
+Get a schedule from a remote scheduler, but first read all of the
+assignments from the local cache that we already know about.
+
+
+
+### get_schedule/3 * ###
+
+`get_schedule(Msg1, Msg2, Opts) -> any()`
+
+Generate and return a schedule for a process, optionally between
+two slots -- labelled as `from` and `to`. If the schedule is not local,
+we redirect to the remote scheduler or proxy based on the node opts.
+
+
+
+### http_get_json_schedule_test_/0 * ###
+
+`http_get_json_schedule_test_() -> any()`
+
+
+
+### http_get_legacy_schedule_as_aos2_test_/0 * ###
+
+`http_get_legacy_schedule_as_aos2_test_() -> any()`
+
+
+
+### http_get_legacy_schedule_slot_range_test_/0 * ###
+
+`http_get_legacy_schedule_slot_range_test_() -> any()`
+
+
+
+### http_get_legacy_schedule_test_/0 * ###
+
+`http_get_legacy_schedule_test_() -> any()`
+
+
+
+### http_get_legacy_slot_test_/0 * ###
+
+`http_get_legacy_slot_test_() -> any()`
+
+
+
+### http_get_schedule/4 * ###
+
+`http_get_schedule(N, PMsg, From, To) -> any()`
+
+
+
+### http_get_schedule/5 * ###
+
+`http_get_schedule(N, PMsg, From, To, Format) -> any()`
+
+
+
+### http_get_schedule_redirect_test/0 * ###
+
+`http_get_schedule_redirect_test() -> any()`
+
+
+
+### http_get_schedule_test_/0 * ###
+
+`http_get_schedule_test_() -> any()`
+
+
+
+### http_get_slot/2 * ###
+
+`http_get_slot(N, PMsg) -> any()`
+
+
+
+### http_init/0 * ###
+
+`http_init() -> any()`
+
+
+
+### http_init/1 * ###
+
+`http_init(Opts) -> any()`
+
+
+
+### http_post_legacy_schedule_test_/0 * ###
+
+`http_post_legacy_schedule_test_() -> any()`
+
+
+
+### http_post_schedule_sign/4 * ###
+
+`http_post_schedule_sign(Node, Msg, ProcessMsg, Wallet) -> any()`
+
+
+
+### http_post_schedule_test/0 * ###
+
+`http_post_schedule_test() -> any()`
+
+
+
+### info/0 ###
+
+`info() -> any()`
+
+This device uses a default_handler to route requests to the correct
+function.
+
+
+
+### location/3 ###
+
+`location(Msg1, Msg2, Opts) -> any()`
+
+Router for `record` requests. Expects either a `POST` or `GET` request.
+
+
+
+### many_clients/1 * ###
+
+`many_clients(Opts) -> any()`
+
+
+
+### message_cached_assignments/2 * ###
+
+`message_cached_assignments(Msg, Opts) -> any()`
+
+Non-device exported helper to get the cached assignments held in a
+process.
+
+
+
+### next/3 ###
+
+`next(Msg1, Msg2, Opts) -> any()`
+
+Load the schedule for a process into the cache, then return the next
+assignment. Assumes that Msg1 is a `dev_process` or similar message, having
+a `Current-Slot` key. It stores a local cache of the schedule in the
+`priv/To-Process` key.
+
+
+
+### node_from_redirect/2 * ###
+
+`node_from_redirect(Redirect, Opts) -> any()`
+
+Get the node URL from a redirect.
+
+
+
+### post_legacy_schedule/4 * ###
+
+`post_legacy_schedule(ProcID, OnlyCommitted, Node, Opts) -> any()`
+
+
+
+### post_location/3 * ###
+
+`post_location(Msg1, RawReq, Opts) -> any()`
+
+Generate a new scheduler location record and register it. We both send
+the new scheduler-location to the given registry, and return it to the caller.
+
+
+
+### post_remote_schedule/4 * ###
+
+`post_remote_schedule(RawProcID, Redirect, OnlyCommitted, Opts) -> any()`
+
+
+
+### post_schedule/3 * ###
+
+`post_schedule(Msg1, Msg2, Opts) -> any()`
+
+Schedules a new message on the SU. Searches Msg1 for the appropriate ID,
+then uses the wallet address of the scheduler to determine if the message is
+for this scheduler. If so, it schedules the message and returns the assignment.
+
+
+
+### read_local_assignments/4 * ###
+
+`read_local_assignments(ProcID, From, To, Opts) -> any()`
+
+Get the assignments for a process.
+
+
+
+### redirect_from_graphql_test/0 * ###
+
+`redirect_from_graphql_test() -> any()`
+
+
+
+### redirect_to_hint_test/0 * ###
+
+`redirect_to_hint_test() -> any()`
+
+
+
+### register_location_on_boot_test/0 * ###
+
+`register_location_on_boot_test() -> any()`
+
+Test that a scheduler location is registered on boot.
+
+
+
+### register_new_process_test/0 * ###
+
+`register_new_process_test() -> any()`
+
+
+
+### register_scheduler_test/0 * ###
+
+`register_scheduler_test() -> any()`
+
+
+
+### remote_slot/3 * ###
+
+`remote_slot(ProcID, Redirect, Opts) -> any()`
+
+Get the current slot from a remote scheduler.
+
+
+
+### remote_slot/4 * ###
+
+`remote_slot(X1, ProcID, Node, Opts) -> any()`
+
+Get the current slot from a remote scheduler, based on the variant of
+the process's scheduler.
+
+
+
+### router/4 ###
+
+`router(X1, Msg1, Msg2, Opts) -> any()`
+
+The default handler for the scheduler device.
+
+
+
+### schedule/3 ###
+
+`schedule(Msg1, Msg2, Opts) -> any()`
+
+A router for choosing between getting the existing schedule, or
+scheduling a new message.
+
+
+
+### schedule_message_and_get_slot_test/0 * ###
+
+`schedule_message_and_get_slot_test() -> any()`
+
+
+
+### single_resolution/1 * ###
+
+`single_resolution(Opts) -> any()`
+
+
+
+### slot/3 ###
+
+`slot(M1, M2, Opts) -> any()`
+
+Returns information about the current slot for a process.
+
+
+
+### spawn_lookahead_worker/3 * ###
+
+`spawn_lookahead_worker(ProcID, Slot, Opts) -> any()`
+
+Spawn a new Erlang process to fetch the next assignments from the local
+cache, if we have them available.
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+Helper to ensure that the environment is started.
+
+
+
+### status/3 ###
+
+`status(M1, M2, Opts) -> any()`
+
+Returns information about the entire scheduler.
+
+
+
+### status_test/0 * ###
+
+`status_test() -> any()`
+
+
+
+### test_process/0 ###
+
+`test_process() -> any()`
+
+Generate a _transformed_ process message, not as they are generated
+by users. See `dev_process` for examples of AO process messages.
+
+
+
+### test_process/1 * ###
+
+`test_process(Wallet) -> any()`
+
+
+
+### without_hint/1 * ###
+
+`without_hint(Target) -> any()`
+
+Take a process ID or target with a potential hint and return just the
+process ID.
+
+
+--- END OF FILE: docs/resources/source-code/dev_scheduler.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_simple_pay.md ---
+# [Module dev_simple_pay.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_simple_pay.erl)
+
+
+
+
+A simple device that allows the operator to specify a price for a
+request and then charge the user for it, on a per message basis.
+
+
+
+## Description ##
+The device's ledger is stored in the node message at `simple_pay_ledger`,
+and can be topped-up by either the operator, or an external device. The
+price is specified in the node message at `simple_pay_price`.
+This device acts as both a pricing device and a ledger device, by p4's
+definition.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### balance/3 ###
+
+`balance(X1, RawReq, NodeMsg) -> any()`
+
+Get the balance of a user in the ledger.
+
+
+
+### debit/3 ###
+
+`debit(X1, RawReq, NodeMsg) -> any()`
+
+Preprocess a request by checking the ledger and charging the user. We
+can charge the user at this stage because we know statically what the price
+will be.
+
+
+
+### estimate/3 ###
+
+`estimate(X1, EstimateReq, NodeMsg) -> any()`
+
+Estimate the cost of a request by counting the number of messages in
+the request, then multiplying by the per-message price. The operator does
+not pay for their own requests.
+
+
+
+### get_balance/2 * ###
+
+`get_balance(Signer, NodeMsg) -> any()`
+
+Get the balance of a user in the ledger.
+
+
+
+### get_balance_and_top_up_test/0 * ###
+
+`get_balance_and_top_up_test() -> any()`
+
+
+
+### is_operator/2 * ###
+
+`is_operator(Req, NodeMsg) -> any()`
+
+Check if the request is from the operator.
+
+
+
+### set_balance/3 * ###
+
+`set_balance(Signer, Amount, NodeMsg) -> any()`
+
+Adjust a user's balance, normalizing their wallet ID first.
+
+
+
+### test_opts/1 * ###
+
+`test_opts(Ledger) -> any()`
+
+
+
+### topup/3 ###
+
+`topup(X1, Req, NodeMsg) -> any()`
+
+Top up the user's balance in the ledger.
+
+
+--- END OF FILE: docs/resources/source-code/dev_simple_pay.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_snp_nif.md ---
+# [Module dev_snp_nif.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_snp_nif.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
Generate a commitment report and emit it as a message, including all of
+the necessary data to generate the nonce (ephemeral node address + node
+message ID), as well as the expected measurement (firmware, kernel, and VMSAs
+hashes).
Validates if a given message parameter matches a trusted value from the SNP trusted list.
+Returns {ok, true} if the message is trusted, {ok, false} otherwise.
Verify a commitment report message, validating the identity of a
+remote node, its ephemeral private address, and the integrity of the report.
+
+
+
+
+## Function Details ##
+
+
+
+### execute_is_trusted/3 * ###
+
+`execute_is_trusted(M1, Msg, NodeOpts) -> any()`
+
+Ensure that all of the software hashes are trusted. The caller may set
+a specific device to use for the `is-trusted` key. The device must then
+implement the `trusted` resolver.
+
+
+
+### generate/3 ###
+
+`generate(M1, M2, Opts) -> any()`
+
+Generate a commitment report and emit it as a message, including all of
+the necessary data to generate the nonce (ephemeral node address + node
+message ID), as well as the expected measurement (firmware, kernel, and VMSAs
+hashes).
+
+
+
+### generate_nonce/2 * ###
+
+`generate_nonce(RawAddress, RawNodeMsgID) -> any()`
+
+Generate the nonce to use in the commitment report.
+
+
+
+### is_debug/1 * ###
+
+`is_debug(Report) -> any()`
+
+Ensure that the node's debug policy is disabled.
+
+
+
+### real_node_test/0 * ###
+
+`real_node_test() -> any()`
+
+
+
+### report_data_matches/3 * ###
+
+`report_data_matches(Address, NodeMsgID, ReportData) -> any()`
+
+Ensure that the report data matches the expected report data.
+
+
+
+### trusted/3 ###
+
+`trusted(Msg1, Msg2, NodeOpts) -> any()`
+
+Validates if a given message parameter matches a trusted value from the SNP trusted list
+Returns {ok, true} if the message is trusted, {ok, false} otherwise.
+
+
+
+### verify/3 ###
+
+`verify(M1, M2, NodeOpts) -> any()`
+
+Verify a commitment report message, validating the identity of a
+remote node, its ephemeral private address, and the integrity of the report.
+The checks that must be performed to validate the report are:
+1. Verify the address and the node message ID are the same as the ones
+used to generate the nonce.
+2. Verify the address that signed the message is the same as the one used
+to generate the nonce.
+3. Verify that the debug flag is disabled.
+4. Verify that the firmware, kernel, and OS (VMSAs) hashes, part of the
+measurement, are trusted.
+5. Verify the measurement is valid.
+6. Verify the report's certificate chain to hardware root of trust.
+
+
+--- END OF FILE: docs/resources/source-code/dev_snp_nif.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_stack.md ---
+# [Module dev_stack.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_stack.erl)
+
+
+
+
+A device that contains a stack of other devices, and manages their
+execution.
+
+
+
+## Description ##
+
+It can run in two modes: fold (the default), and map.
+
+In fold mode, it runs upon input messages in the order of their keys. A
+stack maintains and passes forward a state (expressed as a message) as it
+progresses through devices.
+
+For example, a stack of devices as follows:
+
+```
+
+ Device -> Stack
+ Device-Stack/1/Name -> Add-One-Device
+ Device-Stack/2/Name -> Add-Two-Device
+```
+
+When called with the message:
+
+```
+
+ #{ Path = "FuncName", binary => <<"0">> }
+```
+
+Will produce the output:
+
+```
+
+ #{ Path = "FuncName", binary => <<"3">> }
+ {ok, #{ bin => <<"3">> }}
+```
+
+In map mode, the stack will run over all the devices in the stack, and
+combine their results into a single message. Each of the devices'
+output values have a key that is the device's name in the `Device-Stack`
+(its number if the stack is a list).
+
+You can switch between fold and map modes by setting the `Mode` key in the
+`Msg2` to either `Fold` or `Map`, or set it globally for the stack by
+setting the `Mode` key in the `Msg1` message. The key in `Msg2` takes
+precedence over the key in `Msg1`.
+
+The key that is called upon the device stack is the same key that is used
+upon the devices that are contained within it. For example, in the above
+scenario we resolve FuncName on the stack, leading FuncName to be called on
+Add-One-Device and Add-Two-Device.
+
+A device stack responds to special statuses upon responses as follows:
+
+`skip`: Skips the rest of the device stack for the current pass.
+
+`pass`: Causes the stack to increment its pass number and re-execute
+the stack from the first device, maintaining the state
+accumulated so far. Only available in fold mode.
+
+In all cases, the device stack will return the accumulated state to the
+caller as the result of the call to the stack.
+
+The dev_stack adds additional metadata to the message in order to track
+the state of its execution as it progresses through devices. These keys
+are as follows:
+
+`Stack-Pass`: The number of times the stack has reset and re-executed
+from the first device for the current message.
+
+`Input-Prefix`: The prefix that the device should use for its outputs
+and inputs.
+
+`Output-Prefix`: The device that was previously executed.
+
+All counters used by the stack are initialized to 1.
+
+Additionally, as implemented in HyperBEAM, the device stack will honor a
+number of options that are passed to it as keys in the message. Each of
+these options is also passed through to the devices contained within the
+stack during execution. These options include:
+
+`Error-Strategy`: Determines how the stack handles errors from devices.
+See `maybe_error/5` for more information.
+
+`Allow-Multipass`: Determines whether the stack is allowed to automatically
+re-execute from the first device when the `pass` tag is returned. See
+`maybe_pass/3` for more information.
+
+Under-the-hood, dev_stack uses a `default` handler to resolve all calls to
+devices, aside from `set/2` which it calls itself to mutate the message's `device`
+key in order to change which device is currently being executed. This method
+allows dev_stack to ensure that the message's HashPath is always correct,
+even as it delegates calls to other devices. An example flow for a `dev_stack`
+execution is as follows:
+
+```
+
+ /Msg1/AlicesExcitingKey ->
+ dev_stack:execute ->
+ /Msg1/Set?device=/Device-Stack/1 ->
+ /Msg2/AlicesExcitingKey ->
+ /Msg3/Set?device=/Device-Stack/2 ->
+ /Msg4/AlicesExcitingKey
+ ... ->
+ /MsgN/Set?device=[This-Device] ->
+ returns {ok, /MsgN+1} ->
+ /MsgN+1
+```
+
+In this example, the `device` key is mutated a number of times, but the
+resulting HashPath remains correct and verifiable.
+
+## Function Index ##
+
+
+
Map over the devices in the stack, accumulating the output in a single
+message of keys and values, where keys are the same as the keys in the
+original message (typically a number).
Return a message which, when given a key, will transform the message
+such that the device named Key from the Device-Stack key in the message
+takes the place of the original Device key.
+
+
+
+
+## Function Details ##
+
+
+
+### benchmark_test/0 * ###
+
+`benchmark_test() -> any()`
+
+
+
+### example_device_for_stack_test/0 * ###
+
+`example_device_for_stack_test() -> any()`
+
+
+
+### generate_append_device/1 ###
+
+`generate_append_device(Separator) -> any()`
+
+
+
+### generate_append_device/2 * ###
+
+`generate_append_device(Separator, Status) -> any()`
+
+
+
+### increment_pass/2 * ###
+
+`increment_pass(Message, Opts) -> any()`
+
+Helper to increment the pass number.
+
+
+
+### info/1 ###
+
+`info(Msg) -> any()`
+
+
+
+### input_and_output_prefixes_test/0 * ###
+
+`input_and_output_prefixes_test() -> any()`
+
+
+
+### input_output_prefixes_passthrough_test/0 * ###
+
+`input_output_prefixes_passthrough_test() -> any()`
+
+
+
+### input_prefix/3 ###
+
+`input_prefix(Msg1, Msg2, Opts) -> any()`
+
+Return the input prefix for the stack.
+
+
+
+### many_devices_test/0 * ###
+
+`many_devices_test() -> any()`
+
+
+
+### maybe_error/5 * ###
+
+`maybe_error(Message1, Message2, DevNum, Info, Opts) -> any()`
+
+
+
+### no_prefix_test/0 * ###
+
+`no_prefix_test() -> any()`
+
+
+
+### not_found_test/0 * ###
+
+`not_found_test() -> any()`
+
+
+
+### output_prefix/3 ###
+
+`output_prefix(Msg1, Msg2, Opts) -> any()`
+
+Return the output prefix for the stack.
+
+
+
+### output_prefix_test/0 * ###
+
+`output_prefix_test() -> any()`
+
+
+
+### pass_test/0 * ###
+
+`pass_test() -> any()`
+
+
+
+### prefix/3 ###
+
+`prefix(Msg1, Msg2, Opts) -> any()`
+
+Return the default prefix for the stack.
+
+
+
+### reinvocation_test/0 * ###
+
+`reinvocation_test() -> any()`
+
+
+
+### resolve_fold/3 * ###
+
+`resolve_fold(Message1, Message2, Opts) -> any()`
+
+The main device stack execution engine. See the moduledoc for more
+information.
+
+
+
+### resolve_fold/4 * ###
+
+`resolve_fold(Message1, Message2, DevNum, Opts) -> any()`
+
+
+
+### resolve_map/3 * ###
+
+`resolve_map(Message1, Message2, Opts) -> any()`
+
+Map over the devices in the stack, accumulating the output in a single
+message of keys and values, where keys are the same as the keys in the
+original message (typically a number).
+
+
+
+### router/3 * ###
+
+`router(Message1, Message2, Opts) -> any()`
+
+
+
+### router/4 ###
+
+`router(Key, Message1, Message2, Opts) -> any()`
+
+The device stack key router. Sends the request to `resolve_stack`,
+except for `set/2` which is handled by the default implementation in
+`dev_message`.
+
+
+
+### simple_map_test/0 * ###
+
+`simple_map_test() -> any()`
+
+
+
+### simple_stack_execute_test/0 * ###
+
+`simple_stack_execute_test() -> any()`
+
+
+
+### skip_test/0 * ###
+
+`skip_test() -> any()`
+
+
+
+### test_prefix_msg/0 * ###
+
+`test_prefix_msg() -> any()`
+
+
+
+### transform/3 * ###
+
+`transform(Msg1, Key, Opts) -> any()`
+
+Return Message1, transformed such that the device named `Key` from the
+`Device-Stack` key in the message takes the place of the original `Device`
+key. This transformation allows dev_stack to correctly track the HashPath
+of the message as it delegates execution to devices contained within it.
+
+
+
+### transform_external_call_device_test/0 * ###
+
+`transform_external_call_device_test() -> any()`
+
+Ensure we can generate a transformer message that can be called to
+return a version of msg1 with only that device attached.
+
+
+
+### transform_internal_call_device_test/0 * ###
+
+`transform_internal_call_device_test() -> any()`
+
+Test that the transform function can be called correctly internally
+by other functions in the module.
+
+
+
+### transformer_message/2 * ###
+
+`transformer_message(Msg1, Opts) -> any()`
+
+Return a message which, when given a key, will transform the message
+such that the device named `Key` from the `Device-Stack` key in the message
+takes the place of the original `Device` key. This allows users to call
+a single device from the stack:
+
+/Msg1/Transform/DeviceName/keyInDevice ->
+keyInDevice executed on DeviceName against Msg1.
+
+
+--- END OF FILE: docs/resources/source-code/dev_stack.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_test.md ---
+# [Module dev_test.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_test.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
Find a test worker's PID and send it an update message.
+
+
+
+
+## Function Details ##
+
+
+
+### compute/3 ###
+
+`compute(Msg1, Msg2, Opts) -> any()`
+
+Example implementation of a `compute` handler. Makes a running list of
+the slots that have been computed in the state message and places the new
+slot number in the results key.
+
+
+
+### compute_test/0 * ###
+
+`compute_test() -> any()`
+
+
+
+### delay/3 ###
+
+`delay(Msg1, Req, Opts) -> any()`
+
+Does nothing, just sleeps `Req/duration or 750` ms and returns the
+appropriate form in order to be used as a hook.
+
+
+
+### device_with_function_key_module_test/0 * ###
+
+`device_with_function_key_module_test() -> any()`
+
+Tests the resolution of a default function.
+
+
+
+### increment_counter/3 ###
+
+`increment_counter(Msg1, Msg2, Opts) -> any()`
+
+Find a test worker's PID and send it an increment message.
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Exports a default_handler function that can be used to test the
+handler resolution mechanism.
+
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+Exports a default_handler function that can be used to test the
+handler resolution mechanism.
+
+
+
+### init/3 ###
+
+`init(Msg, Msg2, Opts) -> any()`
+
+Example `init/3` handler. Sets the `Already-Seen` key to an empty list.
+
+
+
+### mul/2 ###
+
+`mul(Msg1, Msg2) -> any()`
+
+Example implementation of an `imported` function for a WASM
+executor.
+
+
+
+### restore/3 ###
+
+`restore(Msg, Msg2, Opts) -> any()`
+
+Example `restore/3` handler. Sets the hidden key `Test/Started` to the
+value of `Current-Slot` and checks whether the `Already-Seen` key is valid.
+
+
+
+### restore_test/0 * ###
+
+`restore_test() -> any()`
+
+
+
+### snapshot/3 ###
+
+`snapshot(Msg1, Msg2, Opts) -> any()`
+
+Do nothing when asked to snapshot.
+
+
+
+### test_func/1 ###
+
+`test_func(X1) -> any()`
+
+
+
+### update_state/3 ###
+
+`update_state(Msg, Msg2, Opts) -> any()`
+
+Find a test worker's PID and send it an update message.
+
+
+--- END OF FILE: docs/resources/source-code/dev_test.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_volume.md ---
+# [Module dev_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_volume.erl)
+
+
+
+
+Secure Volume Management for HyperBEAM Nodes.
+
+
+
+## Description ##
+
+This module handles encrypted storage operations for HyperBEAM, providing
+a robust and secure approach to data persistence. It manages the complete
+lifecycle of encrypted volumes from detection to creation, formatting, and
+mounting.
+
+Key responsibilities:
+- Volume detection and initialization
+- Encrypted partition creation and formatting
+- Secure mounting using cryptographic keys
+- Store path reconfiguration to use mounted volumes
+- Automatic handling of various system states
+(new device, existing partition, etc.)
+
+The primary entry point is the `mount/3` function, which orchestrates the
+entire process based on the provided configuration parameters. This module
+works alongside `hb_volume` which provides the low-level operations for
+device manipulation.
+
+Security considerations:
+- Ensures data at rest is protected through LUKS encryption
+- Provides proper volume sanitization and secure mounting
+- IMPORTANT: This module only applies configuration set in node options and
+does NOT accept disk operations via HTTP requests. It cannot format arbitrary
+disks as all operations are safeguarded by host operating system permissions
+enforced upon the HyperBEAM environment.
+
+## Function Index ##
+
+
+
+
+
+`Device`: The base device to check. `Partition`: The partition to check. `PartitionType`: The type of partition to check. `VolumeName`: The name of the volume to check. `MountPoint`: The mount point to check. `StorePath`: The store path to check. `Key`: The key to check. `Opts`: The options to check.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Check if the base device exists and if it does, check if the partition exists.
+
+
+
+### check_partition/8 * ###
+
+
+
+
+`Device`: The base device to check. `Partition`: The partition to check. `PartitionType`: The type of partition to check. `VolumeName`: The name of the volume to check. `MountPoint`: The mount point to check. `StorePath`: The store path to check. `Key`: The key to check. `Opts`: The options to check.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Check if the partition exists. If it does, attempt to mount it.
+If it doesn't exist, create it, format it with encryption and mount it.
+
+
+
+### create_and_mount_partition/8 * ###
+
+
+
+
+`Device`: The device to create the partition on. `Partition`: The partition to create. `PartitionType`: The type of partition to create. `Key`: The key to create the partition with. `MountPoint`: The mount point to mount the partition to. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Create, format and mount a new partition.
+
+
+
+### decrypt_volume_key/2 * ###
+
+
+
+
+`Opts`: A map of configuration options.
+
+returns: `{ok, DecryptedKey}` on successful decryption, or
+`{error, Binary}` if decryption fails.
+
+Decrypts an encrypted volume key using the node's private key.
+
+This function takes an encrypted key (typically sent by a client who encrypted
+it with the node's public key) and decrypts it using the node's private RSA key.
+
+
+
+### format_and_mount/6 * ###
+
+
+
+
+`Partition`: The partition to format and mount. `Key`: The key to format and mount the partition with. `MountPoint`: The mount point to mount the partition to. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Format and mount a newly created partition.
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Exported function for getting device info, controls which functions are
+exposed via the device API.
+
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+HTTP info response providing information about this device
+
+
+
+### mount/3 ###
+
+
+
+
+`M1`: Base message for context. `M2`: Request message with operation details. `Opts`: A map of configuration options for volume operations.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Handles the complete process of secure encrypted volume mounting.
+
+This function performs the following operations depending on the state:
+1. Validates the encryption key is present
+2. Checks if the base device exists
+3. Checks if the partition exists on the device
+4. If the partition exists, attempts to mount it
+5. If the partition doesn't exist, creates it, formats it with encryption
+and mounts it
+6. Updates the node's store configuration to use the mounted volume
+
+Config options in Opts map:
+- volume_key: (Required) The encryption key
+- volume_device: Base device path
+- volume_partition: Partition path
+- volume_partition_type: Filesystem type
+- volume_name: Name for encrypted volume
+- volume_mount_point: Where to mount
+- volume_store_path: Store path on volume
+
+
+
+### mount_existing_partition/6 * ###
+
+
+
+
+`Partition`: The partition to mount. `Key`: The key to mount. `MountPoint`: The mount point to mount. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Mount an existing partition.
+
+
+
+### mount_formatted_partition/6 * ###
+
+
+
+
+`Partition`: The partition to mount. `Key`: The key to mount the partition with. `MountPoint`: The mount point to mount the partition to. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Mount a newly formatted partition.
+
+
+
+### public_key/3 ###
+
+
+
+
+`Opts`: A map of configuration options.
+
+returns: `{ok, Map}` containing the node's public key on success, or
+`{error, Binary}` if the node's wallet is not available.
+
+Returns the node's public key for secure key exchange.
+
+This function retrieves the node's wallet and extracts the public key
+for encryption purposes. It allows users to securely exchange encryption keys
+by first encrypting their volume key with the node's public key.
+
+The process ensures that sensitive keys are never transmitted in plaintext.
+The encrypted key can then be securely sent to the node, which will decrypt it
+using its private key before using it for volume encryption.
+
+
+
+### update_node_config/2 * ###
+
+
+
+
+`NewStore`: The new store to update the node's configuration with. `Opts`: The options to update the node's configuration with.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Update the node's configuration with the new store.
+
+
+
+### update_store_path/2 * ###
+
+
+
+
+`StorePath`: The store path to update. `Opts`: The options to update.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Update the store path to use the mounted volume.
+
+
+--- END OF FILE: docs/resources/source-code/dev_volume.md ---
+
+--- START OF FILE: docs/resources/source-code/dev_wasi.md ---
+# [Module dev_wasi.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_wasi.erl)
+
+
+
+
+A virtual filesystem device.
+
+
+
+## Description ##
+Implements a file-system-as-map structure, which is traversible externally.
+Each file is a binary and each directory is an AO-Core message.
+Additionally, this module adds a series of WASI-preview-1 compatible
+functions for accessing the filesystem as imported functions by WASM
+modules.
+
+## Function Index ##
+
+
+
On-boot, initialize the virtual file system with:
+- Empty stdio files
+- WASI-preview-1 compatible functions for accessing the filesystem
+- File descriptors for those files.
Log the call to the standard library as an event, and write the
+call details into the message.
+
+
+
+
+## Function Details ##
+
+
+
+### basic_execution_64_test/0 * ###
+
+`basic_execution_64_test() -> any()`
+
+
+
+### basic_execution_test/0 * ###
+
+`basic_execution_test() -> any()`
+
+
+
+### benchmark_test/0 * ###
+
+`benchmark_test() -> any()`
+
+
+
+### cache_wasm_image/1 ###
+
+`cache_wasm_image(Image) -> any()`
+
+
+
+### cache_wasm_image/2 ###
+
+`cache_wasm_image(Image, Opts) -> any()`
+
+
+
+### compute/3 ###
+
+`compute(RawM1, M2, Opts) -> any()`
+
+Call the WASM executor with a message that has been prepared by a prior
+pass.
+
+
+
+### default_import_resolver/3 * ###
+
+`default_import_resolver(Msg1, Msg2, Opts) -> any()`
+
+Take a BEAMR import call and resolve it using `hb_ao`.
+
+
+
+### import/3 ###
+
+`import(Msg1, Msg2, Opts) -> any()`
+
+Handle standard library calls by:
+1. Adding the right prefix to the path from BEAMR.
+2. Adding the state to the message at the stdlib path.
+3. Resolving the adjusted-path-Msg2 against the added-state-Msg1.
+4. If it succeeds, return the new state from the message.
+5. If it fails with `not_found`, call the stub handler.
+
+
+
+### imported_function_test/0 * ###
+
+`imported_function_test() -> any()`
+
+
+
+### info/2 ###
+
+`info(Msg1, Opts) -> any()`
+
+Export all functions aside the `instance/3` function.
+
+
+
+### init/0 * ###
+
+`init() -> any()`
+
+
+
+### init/3 ###
+
+`init(M1, M2, Opts) -> any()`
+
+Boot a WASM image on the image stated in the `process/image` field of
+the message.
+
+
+
+### init_test/0 * ###
+
+`init_test() -> any()`
+
+
+
+### input_prefix_test/0 * ###
+
+`input_prefix_test() -> any()`
+
+
+
+### instance/3 ###
+
+`instance(M1, M2, Opts) -> any()`
+
+Get the WASM instance from the message. Note that this function is exported
+such that other devices can use it, but it is excluded from calls from AO-Core
+resolution directly.
+
+
+
+### normalize/3 ###
+
+`normalize(RawM1, M2, Opts) -> any()`
+
+Normalize the message to have an open WASM instance, but no literal
+`State` key. Ensure that we do not change the hashpath during this process.
+
+
+
+### process_prefixes_test/0 * ###
+
+`process_prefixes_test() -> any()`
+
+Test that realistic prefixing for a `dev_process` works --
+including both inputs (from `Process/`) and outputs (to the
+Device-Key).
+
+
+
+### snapshot/3 ###
+
+`snapshot(M1, M2, Opts) -> any()`
+
+Serialize the WASM state to a binary.
+
+
+
+### state_export_and_restore_test/0 * ###
+
+`state_export_and_restore_test() -> any()`
+
+
+
+### terminate/3 ###
+
+`terminate(M1, M2, Opts) -> any()`
+
+Tear down the WASM executor.
+
+
+
+### test_run_wasm/4 * ###
+
+`test_run_wasm(File, Func, Params, AdditionalMsg) -> any()`
+
+
+
+### undefined_import_stub/3 * ###
+
+`undefined_import_stub(Msg1, Msg2, Opts) -> any()`
+
+Log the call to the standard library as an event, and write the
+call details into the message.
+
+
+--- END OF FILE: docs/resources/source-code/dev_wasi.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_ao_test_vectors.md ---
+# [Module hb_ao_test_vectors.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_ao_test_vectors.erl)
+
+
+
+
+Uses a series of different `Opts` values to test the resolution engine's
+execution under different circumstances.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### as_path_test/1 * ###
+
+`as_path_test(Opts) -> any()`
+
+
+
+### basic_get_test/1 * ###
+
+`basic_get_test(Opts) -> any()`
+
+
+
+### basic_set_test/1 * ###
+
+`basic_set_test(Opts) -> any()`
+
+
+
+### continue_as_test/1 * ###
+
+`continue_as_test(Opts) -> any()`
+
+
+
+### deep_recursive_get_test/1 * ###
+
+`deep_recursive_get_test(Opts) -> any()`
+
+
+
+### deep_set_new_messages_test/0 * ###
+
+`deep_set_new_messages_test() -> any()`
+
+
+
+### deep_set_test/1 * ###
+
+`deep_set_test(Opts) -> any()`
+
+
+
+### deep_set_with_device_test/1 * ###
+
+`deep_set_with_device_test(Opts) -> any()`
+
+
+
+### denormalized_device_key_test/1 * ###
+
+`denormalized_device_key_test(Opts) -> any()`
+
+
+
+### device_excludes_test/1 * ###
+
+`device_excludes_test(Opts) -> any()`
+
+
+
+### device_exports_test/1 * ###
+
+`device_exports_test(Opts) -> any()`
+
+
+
+### device_with_default_handler_function_test/1 * ###
+
+`device_with_default_handler_function_test(Opts) -> any()`
+
+
+
+### device_with_handler_function_test/1 * ###
+
+`device_with_handler_function_test(Opts) -> any()`
+
+
+
+### exec_dummy_device/2 * ###
+
+`exec_dummy_device(SigningWallet, Opts) -> any()`
+
+Ensure that we can read a device from the cache then execute it. By
+extension, this will also allow us to load a device from Arweave due to the
+remote store implementations.
+
+
+
+### gen_default_device/0 * ###
+
+`gen_default_device() -> any()`
+
+Create a simple test device that implements the default handler.
+
+
+
+### gen_handler_device/0 * ###
+
+`gen_handler_device() -> any()`
+
+Create a simple test device that implements the handler key.
+
+
+
+### generate_device_with_keys_using_args/0 * ###
+
+`generate_device_with_keys_using_args() -> any()`
+
+Generates a test device with three keys, each of which uses
+progressively more of the arguments that can be passed to a device key.
+
+
+
+### get_as_with_device_test/1 * ###
+
+`get_as_with_device_test(Opts) -> any()`
+
+
+
+### get_with_device_test/1 * ###
+
+`get_with_device_test(Opts) -> any()`
+
+
+
+### key_from_id_device_with_args_test/1 * ###
+
+`key_from_id_device_with_args_test(Opts) -> any()`
+
+Test that arguments are passed to a device key as expected.
+Particularly, we need to ensure that the key function in the device can
+specify any arity (1 through 3) and the call is handled correctly.
+
+
+
+### key_to_binary_test/1 * ###
+
+`key_to_binary_test(Opts) -> any()`
+
+
+
+### list_transform_test/1 * ###
+
+`list_transform_test(Opts) -> any()`
+
+
+
+### load_as_test/1 * ###
+
+`load_as_test(Opts) -> any()`
+
+
+
+### load_device_test/0 * ###
+
+`load_device_test() -> any()`
+
+
+
+### recursive_get_test/1 * ###
+
+`recursive_get_test(Opts) -> any()`
+
+
+
+### resolve_binary_key_test/1 * ###
+
+`resolve_binary_key_test(Opts) -> any()`
+
+
+
+### resolve_from_multiple_keys_test/1 * ###
+
+`resolve_from_multiple_keys_test(Opts) -> any()`
+
+
+
+### resolve_id_test/1 * ###
+
+`resolve_id_test(Opts) -> any()`
+
+
+
+### resolve_key_twice_test/1 * ###
+
+`resolve_key_twice_test(Opts) -> any()`
+
+
+
+### resolve_path_element_test/1 * ###
+
+`resolve_path_element_test(Opts) -> any()`
+
+
+
+### resolve_simple_test/1 * ###
+
+`resolve_simple_test(Opts) -> any()`
+
+
+
+### run_all_test_/0 * ###
+
+`run_all_test_() -> any()`
+
+Run each test in the file with each set of options. Start and reset
+the store for each test.
+
+
+
+### run_test/0 * ###
+
+`run_test() -> any()`
+
+
+
+### set_with_device_test/1 * ###
+
+`set_with_device_test(Opts) -> any()`
+
+
+
+### start_as_test/1 * ###
+
+`start_as_test(Opts) -> any()`
+
+
+
+### start_as_with_parameters_test/1 * ###
+
+`start_as_with_parameters_test(Opts) -> any()`
+
+
+
+### step_hook_test/1 * ###
+
+`step_hook_test(InitOpts) -> any()`
+
+
+
+### test_opts/0 * ###
+
+`test_opts() -> any()`
+
+
+
+### test_suite/0 * ###
+
+`test_suite() -> any()`
+
+
+
+### untrusted_load_device_test/0 * ###
+
+`untrusted_load_device_test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_ao_test_vectors.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_ao.md ---
+# [Module hb_ao.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_ao.erl)
+
+
+
+
+This module is the root of the device call logic of the
+AO-Core protocol in HyperBEAM.
+
+
+
+## Description ##
+
+At the implementation level, every message is simply a collection of keys,
+dictated by its `Device`, that can be resolved in order to yield their
+values. Each key may return another message or a raw value:
+
+`ao(Message1, Message2) -> {Status, Message3}`
+
+Under-the-hood, `AO-Core(Message1, Message2)` leads to the evaluation of
+`DeviceMod:PathPart(Message1, Message2)`, which defines the user compute
+to be performed. If `Message1` does not specify a device, `dev_message` is
+assumed. The key to resolve is specified by the `Path` field of the message.
+
+After each output, the `HashPath` is updated to include the `Message2`
+that was executed upon it.
+
+Because each message implies a device that can resolve its keys, as well
+as generating a merkle tree of the computation that led to the result,
+you can see AO-Core protocol as a system for cryptographically chaining
+the execution of `combinators`. See `docs/ao-core-protocol.md` for more
+information about AO-Core.
+
+The `Fun(Message1, Message2)` pattern is repeated throughout the HyperBEAM
+codebase, sometimes with `MessageX` replaced with `MX` or `MsgX` for brevity.
+
+Message3 can be either a new message or a raw output value (a binary, integer,
+float, atom, or list of such values).
+
+Devices can be expressed as either modules or maps. They can also be
+referenced by an Arweave ID, which can be used to load a device from
+the network (depending on the value of the `load_remote_devices` and
+`trusted_device_signers` environment settings).
+
+HyperBEAM device implementations are defined as follows:
+
+```
+
+ DevMod:ExportedFunc : Key resolution functions. All are assumed to be
+ device keys (thus, present in every message that
+ uses it) unless specified by DevMod:info().
+ Each function takes a set of parameters
+ of the form DevMod:KeyHandler(Msg1, Msg2, Opts).
+ Each of these arguments can be omitted if not
+ needed. Non-exported functions are not assumed
+ to be device keys.
+ DevMod:info : Optional. Returns a map of options for the device. All
+ options are optional and assumed to be the defaults if
+ not specified. This function can accept a Message1 as
+ an argument, allowing it to specify its functionality
+ based on a specific message if appropriate.
+ info/exports : Overrides the export list of the Erlang module, such that
+ only the functions in this list are assumed to be device
+ keys. Defaults to all of the functions that DevMod
+ exports in the Erlang environment.
+ info/excludes : A list of keys that should not be resolved by the device,
+ despite being present in the Erlang module exports list.
+ info/handler : A function that should be used to handle _all_ keys for
+ messages using the device.
+ info/default : A function that should be used to handle all keys that
+ are not explicitly implemented by the device. Defaults to
+ the dev_message device, which contains general keys for
+ interacting with messages.
+ info/default_mod : A different device module that should be used to
+ handle all keys that are not explicitly implemented
+ by the device. Defaults to the dev_message device.
+ info/grouper : A function that returns the concurrency 'group' name for
+ an execution. Executions with the same group name will
+ be executed by sending a message to the associated process
+ and waiting for a response. This allows you to control
+ concurrency of execution and to allow executions to share
+ in-memory state as applicable. Default: A derivation of
+ Msg1+Msg2. This means that concurrent calls for the same
+ output will lead to only a single execution.
+ info/worker : A function that should be run as the 'server' loop of
+ the executor for interactions using the device.
+ The HyperBEAM resolver also takes a number of runtime options that change
+ the way that the environment operates:update_hashpath: Whether to add the Msg2 to HashPath for the Msg3.
+ Default: true.add_key: Whether to add the key to the start of the arguments.
+ Default: .
+```
+
+
+## Function Index ##
+
+
+
Verify that a device is compatible with the current machine.
+
+
+
+
+## Function Details ##
+
+
+
+### deep_set/4 ###
+
+`deep_set(Msg, Rest, Value, Opts) -> any()`
+
+Recursively search a map, resolving keys, and set the value of the key
+at the given path. This function has special cases for handling `set` calls
+where the path is an empty list (`/`). In this case, if the value is an
+immediate, non-complex term, we can set it directly. Otherwise, we use the
+device's `set` function to set the value.
+
+
+
+### default_module/0 * ###
+
+`default_module() -> any()`
+
+The default device is the identity device, which simply returns the
+value associated with any key as it exists in its Erlang map. It should also
+implement the `set` key, which returns a `Message3` with the values changed
+according to the `Message2` passed to it.
+
+
+
+### device_set/4 * ###
+
+`device_set(Msg, Key, Value, Opts) -> any()`
+
+Call the device's `set` function.
+
+
+
+### device_set/5 * ###
+
+`device_set(Msg, Key, Value, Mode, Opts) -> any()`
+
+
+
+### do_resolve_many/2 * ###
+
+`do_resolve_many(MsgList, Opts) -> any()`
+
+
+
+### ensure_loaded/2 * ###
+
+`ensure_loaded(MsgID, Opts) -> any()`
+
+Ensure that the message is loaded from the cache if it is an ID. If it is
+not loadable or already present, we raise an error.
+
+
+
+### error_execution/5 * ###
+
+`error_execution(ExecGroup, Msg2, Whence, X4, Opts) -> any()`
+
+Handle an error in a device call.
+
+
+
+### error_infinite/3 * ###
+
+`error_infinite(Msg1, Msg2, Opts) -> any()`
+
+Catch all return if we are in an infinite loop.
+
+
+
+### error_invalid_intermediate_status/5 * ###
+
+`error_invalid_intermediate_status(Msg1, Msg2, Msg3, RemainingPath, Opts) -> any()`
+
+
+
+### error_invalid_message/3 * ###
+
+`error_invalid_message(Msg1, Msg2, Opts) -> any()`
+
+Catch all return if the message is invalid.
+
+
+
+### find_exported_function/5 ###
+
+`find_exported_function(Msg, Dev, Key, MaxArity, Opts) -> any()`
+
+Find the function with the highest arity that has the given name, if it
+exists.
+
+If the device is a module, we look for a function with the given name.
+
+If the device is a map, we look for a key in the map. First we try to find
+the key using its literal value. If that fails, we cast the key to an atom
+and try again.
+
+
+
+### force_message/2 ###
+
+`force_message(X1, Opts) -> any()`
+
+
+
+### get/2 ###
+
+`get(Path, Msg) -> any()`
+
+Shortcut for resolving a key in a message without its status if it is
+`ok`. This makes it easier to write complex logic on top of messages while
+maintaining a functional style.
+
+Additionally, this function supports the `{as, Device, Msg}` syntax, which
+allows the key to be resolved using another device to resolve the key,
+while maintaining the traceability of the `HashPath` of the output message.
+
+Returns the value of the key if it is found, otherwise returns the default
+provided by the user, or `not_found` if no default is provided.
+
+
+
+### get/3 ###
+
+`get(Path, Msg, Opts) -> any()`
+
+
+
+### get/4 ###
+
+`get(Path, Msg, Default, Opts) -> any()`
+
+
+
+### get_first/2 ###
+
+`get_first(Paths, Opts) -> any()`
+
+Take a sequence of base messages and paths, then return the value of the
+first message that can be resolved using a path.
+
+
+
+### get_first/3 ###
+
+`get_first(Msgs, Default, Opts) -> any()`
+
+
+
+### info/2 ###
+
+`info(Msg, Opts) -> any()`
+
+Get the info map for a device, optionally giving it a message if the
+device's info function is parameterized by one.
+
+
+
+### info/3 * ###
+
+`info(DevMod, Msg, Opts) -> any()`
+
+
+
+### info_handler_to_fun/4 * ###
+
+`info_handler_to_fun(Handler, Msg, Key, Opts) -> any()`
+
+Parse a handler key given by a device's `info`.
+
+
+
+### internal_opts/1 * ###
+
+`internal_opts(Opts) -> any()`
+
+The execution options that are used internally by this module
+when calling itself.
+
+
+
+### is_exported/2 * ###
+
+`is_exported(Info, Key) -> any()`
+
+
+
+### is_exported/4 ###
+
+`is_exported(Msg, Dev, Key, Opts) -> any()`
+
+Check if a device is guarding a key via its `exports` list. Defaults to
+true if the device does not specify an `exports` list. The `info` function is
+always exported, if it exists. Elements of the `excludes` list are not
+exported. Note that we check for info _twice_ -- once when the device is
+given but the info result is not, and once when the info result is given.
+The reason for this is that `info/3` calls other functions that may need to
+check if a key is exported, so we must avoid infinite loops. We must, however,
+also return a consistent result in the case that only the info result is
+given, so we check for it in both cases.
+
+
+
+### keys/1 ###
+
+`keys(Msg) -> any()`
+
+Shortcut to get the list of keys from a message.
+
+
+
+### keys/2 ###
+
+`keys(Msg, Opts) -> any()`
+
+
+
+### keys/3 ###
+
+`keys(Msg, Opts, X3) -> any()`
+
+
+
+### load_device/2 ###
+
+`load_device(Map, Opts) -> any()`
+
+Load a device module from its name or a message ID.
+Returns {ok, Executable} where Executable is the device module. On error,
+a tuple of the form {error, Reason} is returned.
+
+
+
+### maybe_force_message/2 * ###
+
+`maybe_force_message(X1, Opts) -> any()`
+
+Force the result of a device call into a message if the result is not
+requested by the `Opts`. If the result is a literal, we wrap it in a message
+and signal the location of the result inside. We also similarly handle ao-result
+when the result is a single value and an explicit status code.
+
+
+
+### message_to_device/2 ###
+
+`message_to_device(Msg, Opts) -> any()`
+
+Extract the device module from a message.
+
+
+
+### message_to_fun/3 ###
+
+`message_to_fun(Msg, Key, Opts) -> any()`
+
+Calculate the Erlang function that should be called to get a value for
+a given key from a device.
+
+This comes in 7 forms:
+1. The message does not specify a device, so we use the default device.
+2. The device has a `handler` key in its `Dev:info()` map, which is a
+function that takes a key and returns a function to handle that key. We pass
+the key as an additional argument to this function.
+3. The device has a function of the name `Key`, which should be called
+directly.
+4. The device does not implement the key, but does have a default handler
+for us to call. We pass it the key as an additional argument.
+5. The device does not implement the key, and has no default handler. We use
+the default device to handle the key.
+Error: If the device is specified, but not loadable, we raise an error.
+
+Returns {ok | add_key, Fun} where Fun is the function to call, and add_key
+indicates that the key should be added to the start of the call's arguments.
+
+
+
+### normalize_key/1 ###
+
+`normalize_key(Key) -> any()`
+
+Convert a key to a binary in normalized form.
+
+
+
+### normalize_key/2 ###
+
+`normalize_key(Key, Opts) -> any()`
+
+
+
+### normalize_keys/1 ###
+
+`normalize_keys(Msg1) -> any()`
+
+Ensure that a message is processable by the AO-Core resolver: No lists.
+
+
+
+### remove/2 ###
+
+`remove(Msg, Key) -> any()`
+
+Remove a key from a message, using its underlying device.
+
+
+
+### remove/3 ###
+
+`remove(Msg, Key, Opts) -> any()`
+
+
+
+### resolve/2 ###
+
+`resolve(SingletonMsg, Opts) -> any()`
+
+Get the value of a message's key by running its associated device
+function. Optionally, takes options that control the runtime environment.
+This function returns the raw result of the device function call:
+`{ok | error, NewMessage}.`
+The resolver is composed of a series of discrete phases:
+1: Normalization.
+2: Cache lookup.
+3: Validation check.
+4: Persistent-resolver lookup.
+5: Device lookup.
+6: Execution.
+7: Execution of the `step` hook.
+8: Subresolution.
+9: Cryptographic linking.
+10: Result caching.
+11: Notify waiters.
+12: Fork worker.
+13: Recurse or terminate.
+
+
+
+### resolve/3 ###
+
+`resolve(Msg1, Path, Opts) -> any()`
+
+
+
+### resolve_many/2 ###
+
+`resolve_many(ListMsg, Opts) -> any()`
+
+Resolve a list of messages in sequence. Take the output of the first
+message as the input for the next message. Once the last message is resolved,
+return the result.
+A `resolve_many` call with only a single ID will attempt to read the message
+directly from the store. No execution is performed.
+
+
+
+### resolve_stage/4 * ###
+
+`resolve_stage(X1, Raw, Msg2, Opts) -> any()`
+
+
+
+### resolve_stage/5 * ###
+
+`resolve_stage(X1, Msg1, Msg2, ExecName, Opts) -> any()`
+
+
+
+### resolve_stage/6 * ###
+
+`resolve_stage(X1, Func, Msg1, Msg2, ExecName, Opts) -> any()`
+
+
+
+### set/2 ###
+
+`set(Msg1, Msg2) -> any()`
+
+Shortcut for setting a key in the message using its underlying device.
+Like the `get/3` function, this function honors the `error_strategy` option.
+`set` works with maps and recursive paths while maintaining the appropriate
+`HashPath` for each step.
+
+
+
+### set/3 ###
+
+`set(RawMsg1, RawMsg2, Opts) -> any()`
+
+
+
+### set/4 ###
+
+`set(Msg1, Key, Value, Opts) -> any()`
+
+
+
+### subresolve/4 * ###
+
+`subresolve(RawMsg1, DevID, ReqPath, Opts) -> any()`
+
+Execute a sub-resolution.
+
+
+
+### truncate_args/2 ###
+
+`truncate_args(Fun, Args) -> any()`
+
+Truncate the arguments of a function to the number of arguments it
+actually takes.
+
+
+
+### verify_device_compatibility/2 * ###
+
+`verify_device_compatibility(Msg, Opts) -> any()`
+
+Verify that a device is compatible with the current machine.
+
+
+--- END OF FILE: docs/resources/source-code/hb_ao.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_app.md ---
+# [Module hb_app.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_app.erl)
+
+
+
+
+The main HyperBEAM application module.
+
+__Behaviours:__ [`application`](application.md).
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### start/2 ###
+
+`start(StartType, StartArgs) -> any()`
+
+
+
+### stop/1 ###
+
+`stop(State) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_app.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_beamr_io.md ---
+# [Module hb_beamr_io.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_beamr_io.erl)
+
+
+
+
+Simple interface for memory management for Beamr instances.
+
+
+
+## Description ##
+
+It allows for reading and writing to memory, as well as allocating and
+freeing memory by calling the WASM module's exported malloc and free
+functions.
+
+Unlike the majority of HyperBEAM modules, this module takes a defensive
+approach to type checking, breaking from the conventional Erlang style,
+such that failures are caught in the Erlang-side of functions rather than
+in the C/WASM-side.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### do_read_string/3 * ###
+
+`do_read_string(WASM, Offset, ChunkSize) -> any()`
+
+
+
+### free/2 ###
+
+`free(WASM, Ptr) -> any()`
+
+Free space allocated in the Beamr instance's native memory via a
+call to the exported free function from the WASM.
+
+
+
+### malloc/2 ###
+
+`malloc(WASM, Size) -> any()`
+
+Allocate space (via an exported malloc function from the WASM) in
+the Beamr instance's native memory.
+
+
+
+### malloc_test/0 * ###
+
+`malloc_test() -> any()`
+
+Test allocating and freeing memory.
+
+
+
+### read/3 ###
+
+`read(WASM, Offset, Size) -> any()`
+
+Read a binary from the Beamr instance's native memory at a given offset
+and of a given size.
+
+
+
+### read_string/2 ###
+
+`read_string(Port, Offset) -> any()`
+
+Simple helper function to read a string from the Beamr instance's native
+memory at a given offset. Memory is read by default in chunks of 8 bytes,
+but this can be overridden by passing a different chunk size. Strings are
+assumed to be null-terminated.
+
+
+
+### read_string/3 * ###
+
+`read_string(WASM, Offset, ChunkSize) -> any()`
+
+
+
+### read_test/0 * ###
+
+`read_test() -> any()`
+
+Test reading memory in and out of bounds.
+
+
+
+### size/1 ###
+
+`size(WASM) -> any()`
+
+Get the size (in bytes) of the native memory allocated in the Beamr
+instance. Note that WASM memory can never be reduced once granted to an
+instance (although it can, of course, be reallocated _inside_ the
+environment).
+
+
+
+### size_test/0 * ###
+
+`size_test() -> any()`
+
+
+
+### string_write_and_read_test/0 * ###
+
+`string_write_and_read_test() -> any()`
+
+Write and read strings to memory.
+
+
+
+### write/3 ###
+
+`write(WASM, Offset, Data) -> any()`
+
+Write a binary to the Beamr instance's native memory at a given offset.
+
+
+
+### write_string/2 ###
+
+`write_string(WASM, Data) -> any()`
+
+Simple helper function to allocate space for (via malloc) and write a
+string to the Beamr instance's native memory. This can be helpful for easily
+pushing a string into the instance, such that the resulting pointer can be
+passed to exported functions from the instance.
+Assumes that the input is either an iolist or a binary, adding a null byte
+to the end of the string.
+
+
+
+### write_test/0 * ###
+
+`write_test() -> any()`
+
+Test writing memory in and out of bounds.
+
+
+--- END OF FILE: docs/resources/source-code/hb_beamr_io.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_beamr.md ---
+# [Module hb_beamr.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_beamr.erl)
+
+
+
+
+BEAMR: A WAMR wrapper for BEAM.
+
+
+
+## Description ##
+
+Beamr is a library that allows you to run WASM modules in BEAM, using the
+Webassembly Micro Runtime (WAMR) as its engine. Each WASM module is
+executed using a Linked-In Driver (LID) that is loaded into BEAM. It is
+designed with a focus on supporting long-running WASM executions that
+interact with Erlang functions and processes easily.
+
+Because each WASM module runs as an independent async worker, if you plan
+to run many instances in parallel, you should be sure to configure the
+BEAM to have enough async worker threads enabled (see `erl +A N` in the
+Erlang manuals).
+
+The core API is simple:
+
+```
+
+ start(WasmBinary) -> {ok, Port, Imports, Exports}
+ Where:
+ WasmBinary is the WASM binary to load.
+ Port is the port to the LID.
+ Imports is a list of tuples of the form {Module, Function,
+ Args, Signature}.
+ Exports is a list of tuples of the form {Function, Args,
+ Signature}.
+ stop(Port) -> ok
+ call(Port, FunctionName, Args) -> {ok, Result}
+ Where:
+ FunctionName is the name of the function to call.
+ Args is a list of Erlang terms (converted to WASM values by
+ BEAMR) that match the signature of the function.
+ Result is a list of Erlang terms (converted from WASM values).
+ call(Port, FunName, Args[, Import, State, Opts]) -> {ok, Res, NewState}
+ Where:
+ ImportFun is a function that will be called upon each import.
+ ImportFun must have an arity of 2: Taking an arbitrary state
+ term, and a map containing the port, module, func, args, signature, and the options map of the import.
+ It must return a tuple of the form {ok, Response, NewState}.
+ serialize(Port) -> {ok, Mem}
+ Where:
+ Port is the port to the LID.
+ Mem is a binary representing the full WASM state.
+ deserialize(Port, Mem) -> ok
+ Where:
+ Port is the port to the LID.
+ Mem is a binary output of a previous serialize/1 call.
+```
+
+BEAMR was designed for use in the HyperBEAM project, but is suitable for
+deployment in other Erlang applications that need to run WASM modules. PRs
+are welcome.
+
+## Function Index ##
+
+
+
A worker process that is responsible for handling a WASM instance.
+
+
+
+
+## Function Details ##
+
+
+
+### benchmark_test/0 * ###
+
+`benchmark_test() -> any()`
+
+
+
+### call/3 ###
+
+`call(PID, FuncRef, Args) -> any()`
+
+Call a function in the WASM executor (see moduledoc for more details).
+
+
+
+### call/4 ###
+
+`call(PID, FuncRef, Args, ImportFun) -> any()`
+
+
+
+### call/5 ###
+
+`call(PID, FuncRef, Args, ImportFun, StateMsg) -> any()`
+
+
+
+### call/6 ###
+
+`call(PID, FuncRef, Args, ImportFun, StateMsg, Opts) -> any()`
+
+
+
+### deserialize/2 ###
+
+`deserialize(WASM, Bin) -> any()`
+
+Deserialize a WASM state from a binary.
+
+
+
+### dispatch_response/2 * ###
+
+`dispatch_response(WASM, Term) -> any()`
+
+Check the type of an import response and dispatch it to a Beamr port.
+
+
+
+### driver_loads_test/0 * ###
+
+`driver_loads_test() -> any()`
+
+
+
+### imported_function_test/0 * ###
+
+`imported_function_test() -> any()`
+
+Test that imported functions can be called from the WASM module.
+
+
+
+### is_valid_arg_list/1 * ###
+
+`is_valid_arg_list(Args) -> any()`
+
+Check that a list of arguments is valid for a WASM function call.
+
+
+
+### load_driver/0 * ###
+
+`load_driver() -> any()`
+
+Load the driver for the WASM executor.
+
+
+
+### monitor_call/4 * ###
+
+`monitor_call(WASM, ImportFun, StateMsg, Opts) -> any()`
+
+Synchronously monitor the WASM executor for a call result and any
+imports that need to be handled.
+
+
+
+### multiclient_test/0 * ###
+
+`multiclient_test() -> any()`
+
+Ensure that processes outside of the initial one can interact with
+the WASM executor.
+
+
+
+### serialize/1 ###
+
+`serialize(WASM) -> any()`
+
+Serialize the WASM state to a binary.
+
+
+
+### simple_wasm_test/0 * ###
+
+`simple_wasm_test() -> any()`
+
+Test that standalone `hb_beamr` works correctly after loading a WASM module.
+
+
+
+### start/1 ###
+
+`start(WasmBinary) -> any()`
+
+Start a WASM executor context. Yields a port to the LID, and the
+imports and exports of the WASM module. Optionally, specify a mode
+(wasm or aot) to indicate the type of WASM module being loaded.
+
+
+
+### start/2 ###
+
+`start(WasmBinary, Mode) -> any()`
+
+
+
+### stop/1 ###
+
+`stop(WASM) -> any()`
+
+Stop a WASM executor context.
+
+
+
+### stub/3 ###
+
+`stub(Msg1, Msg2, Opts) -> any()`
+
+Stub import function for the WASM executor.
+
+
+
+### wasm64_test/0 * ###
+
+`wasm64_test() -> any()`
+
+Test that WASM Memory64 modules load and execute correctly.
+
+
+
+### wasm_send/2 ###
+
+`wasm_send(WASM, Message) -> any()`
+
+
+
+### worker/2 * ###
+
+`worker(Port, Listener) -> any()`
+
+A worker process that is responsible for handling a WASM instance.
+It wraps the WASM port, handling inputs and outputs from the WASM module.
+The last sender to the port is always the recipient of its messages, so
+be careful to ensure that there is only one active sender to the port at
+any time.
+
+
+--- END OF FILE: docs/resources/source-code/hb_beamr.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_cache_control.md ---
+# [Module hb_cache_control.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_cache_control.erl)
+
+
+
+
+Cache control logic for the AO-Core resolver.
+
+
+
+## Description ##
+It derives cache settings
+from request, response, execution-local node Opts, as well as the global
+node Opts. It applies these settings when asked to maybe store/lookup in
+response to a request.
+
+## Function Index ##
+
+
+
Convert a cache control list as received via HTTP headers into a
+normalized map of simply whether we should store and/or lookup the result.
+
+
+
+
+## Function Details ##
+
+
+
+### cache_binary_result_test/0 * ###
+
+`cache_binary_result_test() -> any()`
+
+
+
+### cache_message_result_test/0 * ###
+
+`cache_message_result_test() -> any()`
+
+
+
+### cache_source_to_cache_settings/1 * ###
+
+`cache_source_to_cache_settings(Msg) -> any()`
+
+Convert a cache source to a cache setting. The setting _must_ always be
+directly in the source, not an AO-Core-derivable value. The
+`to_cache_control_map` function is used as the source of settings in all
+cases, except where an `Opts` specifies that hashpaths should not be updated,
+which leads to the result not being cached (as it may be stored with an
+incorrect hashpath).
+
+
+
+### derive_cache_settings/2 * ###
+
+`derive_cache_settings(SourceList, Opts) -> any()`
+
+Derive cache settings from a series of option sources and the opts,
+honoring precedence order. The Opts is used as the first source. Returns a
+map with `store` and `lookup` keys, each of which is a boolean.
+
+For example, if the last source has a `no_store`, the first expresses no
+preference, but the Opts has `cache_control => [always]`, then the result
+will contain a `store => true` entry.
+
+
+
+### dispatch_cache_write/4 * ###
+
+`dispatch_cache_write(Msg1, Msg2, Msg3, Opts) -> any()`
+
+Dispatch the cache write to a worker process if requested.
+Invoke the appropriate cache write function based on the type of the message.
+
+
+
+### empty_message_list_test/0 * ###
+
+`empty_message_list_test() -> any()`
+
+
+
+### exec_likely_faster_heuristic/3 * ###
+
+`exec_likely_faster_heuristic(Msg1, Msg2, Opts) -> any()`
+
+Determine whether we are likely to be faster looking up the result in
+our cache (hoping we have it), or executing it directly.
+
+
+
+### hashpath_ignore_prevents_storage_test/0 * ###
+
+`hashpath_ignore_prevents_storage_test() -> any()`
+
+
+
+### is_explicit_lookup/3 * ###
+
+`is_explicit_lookup(Msg1, X2, Opts) -> any()`
+
+
+
+### lookup/3 * ###
+
+`lookup(Msg1, Msg2, Opts) -> any()`
+
+
+
+### maybe_lookup/3 ###
+
+`maybe_lookup(Msg1, Msg2, Opts) -> any()`
+
+Handles cache lookup, modulated by the caching options requested by
+the user. Honors the following `Opts` cache keys:
+`only_if_cached`: If set and we do not find a result in the cache,
+return an error with a `Cache-Status` of `miss` and
+a 504 `Status`.
+`no_cache`: If set, the cached values are never used. Returns
+`continue` to the caller.
+
+
+
+### maybe_set/2 * ###
+
+`maybe_set(Map1, Map2) -> any()`
+
+Takes a key and two maps, returning the first map with the key set to
+the value of the second map _if_ the value is not undefined.
+
+
+
+### maybe_store/4 ###
+
+`maybe_store(Msg1, Msg2, Msg3, Opts) -> any()`
+
+Write a resulting M3 message to the cache if requested. The precedence
+order of cache control sources is as follows:
+1. The `Opts` map (letting the node operator have the final say).
+2. The `Msg3` results message (granted by Msg1's device).
+3. The `Msg2` message (the user's request).
+Msg1 is not used, such that it can specify cache control information about
+itself, without affecting its outputs.
+
+
+
+### message_source_cache_control_test/0 * ###
+
+`message_source_cache_control_test() -> any()`
+
+
+
+### message_without_cache_control_test/0 * ###
+
+`message_without_cache_control_test() -> any()`
+
+
+
+### msg_precidence_overrides_test/0 * ###
+
+`msg_precidence_overrides_test() -> any()`
+
+
+
+### msg_with_cc/1 * ###
+
+`msg_with_cc(CC) -> any()`
+
+
+
+### multiple_directives_test/0 * ###
+
+`multiple_directives_test() -> any()`
+
+
+
+### necessary_messages_not_found_error/3 * ###
+
+`necessary_messages_not_found_error(Msg1, Msg2, Opts) -> any()`
+
+Generate a message to return when the necessary messages to execute a
+cache lookup are not found in the cache.
+
+
+
+### no_cache_directive_test/0 * ###
+
+`no_cache_directive_test() -> any()`
+
+
+
+### no_store_directive_test/0 * ###
+
+`no_store_directive_test() -> any()`
+
+
+
+### only_if_cached_directive_test/0 * ###
+
+`only_if_cached_directive_test() -> any()`
+
+
+
+### only_if_cached_not_found_error/3 * ###
+
+`only_if_cached_not_found_error(Msg1, Msg2, Opts) -> any()`
+
+Generate a message to return when `only_if_cached` was specified, and
+we don't have a cached result.
+
+
+
+### opts_override_message_settings_test/0 * ###
+
+`opts_override_message_settings_test() -> any()`
+
+
+
+### opts_source_cache_control_test/0 * ###
+
+`opts_source_cache_control_test() -> any()`
+
+
+
+### opts_with_cc/1 * ###
+
+`opts_with_cc(CC) -> any()`
+
+
+
+### specifiers_to_cache_settings/1 * ###
+
+`specifiers_to_cache_settings(CCSpecifier) -> any()`
+
+Convert a cache control list as received via HTTP headers into a
+normalized map of simply whether we should store and/or lookup the result.
+
+
+--- END OF FILE: docs/resources/source-code/hb_cache_control.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_cache_render.md ---
+# [Module hb_cache_render.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_cache_render.erl)
+
+
+
+
+A module that helps to render given Key graphs into the .dot files.
+
+
+
+## Function Index ##
+
+
+
+Traverse the store recursively to build the graph.
+
+
+
+
+## Function Details ##
+
+
+
+### add_arc/4 * ###
+
+`add_arc(Graph, From, To, Label) -> any()`
+
+Add an arc to the graph
+
+
+
+### add_node/3 * ###
+
+`add_node(Graph, ID, Color) -> any()`
+
+Add a node to the graph
+
+
+
+### cache_path_to_dot/2 ###
+
+`cache_path_to_dot(ToRender, StoreOrOpts) -> any()`
+
+Generate a dot file from a cache path and options/store
+
+
+
+### cache_path_to_dot/3 ###
+
+`cache_path_to_dot(ToRender, RenderOpts, StoreOrOpts) -> any()`
+
+
+
+### cache_path_to_graph/3 ###
+
+`cache_path_to_graph(ToRender, GraphOpts, StoreOrOpts) -> any()`
+
+Main function to collect graph elements
+
+
+
+### collect_output/2 * ###
+
+`collect_output(Port, Acc) -> any()`
+
+Helper function to collect output from port
+
+
+
+### dot_to_svg/1 ###
+
+`dot_to_svg(DotInput) -> any()`
+
+Convert a dot graph to SVG format
+
+
+
+### extract_label/1 * ###
+
+`extract_label(Path) -> any()`
+
+Extract a label from a path
+
+
+
+### get_graph_data/1 ###
+
+`get_graph_data(Opts) -> any()`
+
+Get graph data for the Three.js visualization
+
+
+
+### get_label/1 * ###
+
+`get_label(Path) -> any()`
+
+Extract a readable label from a path
+
+
+
+### get_node_type/1 * ###
+
+`get_node_type(Color) -> any()`
+
+Convert node color from hb_cache_render to node type for visualization
+
+
+
+### graph_to_dot/1 * ###
+
+`graph_to_dot(Graph) -> any()`
+
+Generate the DOT file from the graph
+
+
+
+### prepare_deeply_nested_complex_message/0 ###
+
+`prepare_deeply_nested_complex_message() -> any()`
+
+
+
+### prepare_signed_data/0 ###
+
+`prepare_signed_data() -> any()`
+
+
+
+### prepare_unsigned_data/0 ###
+
+`prepare_unsigned_data() -> any()`
+
+
+
+### process_composite_node/6 * ###
+
+`process_composite_node(Store, Key, Parent, ResolvedPath, JoinedPath, Graph) -> any()`
+
+Process a composite (directory) node
+
+
+
+### process_simple_node/6 * ###
+
+`process_simple_node(Store, Key, Parent, ResolvedPath, JoinedPath, Graph) -> any()`
+
+Process a simple (leaf) node
+
+
+
+### render/1 ###
+
+`render(StoreOrOpts) -> any()`
+
+Render the given Key into svg
+
+
+
+### render/2 ###
+
+`render(ToRender, StoreOrOpts) -> any()`
+
+
+
+### test_signed/2 * ###
+
+`test_signed(Data, Wallet) -> any()`
+
+
+
+### test_unsigned/1 * ###
+
+`test_unsigned(Data) -> any()`
+
+
+
+### traverse_store/4 * ###
+
+`traverse_store(Store, Key, Parent, Graph) -> any()`
+
+Traverse the store recursively to build the graph
+
+
+--- END OF FILE: docs/resources/source-code/hb_cache_render.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_cache.md ---
+# [Module hb_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_cache.erl)
+
+
+
+
+A cache of AO-Core protocol messages and compute results.
+
+
+
+## Description ##
+
+HyperBEAM stores all paths in key value stores, abstracted by the `hb_store`
+module. Each store has its own storage backend, but each works with simple
+key-value pairs. Each store can write binary keys at paths, and link between
+paths.
+
+There are three layers to HyperBEAM's internal data representation on-disk:
+
+1. The raw binary data, written to the store at the hash of the content.
+Storing binary paths in this way effectively deduplicates the data.
+2. The hashpath-graph of all content, stored as a set of links between
+hashpaths, their keys, and the data that underlies them. This allows
+all messages to share the same hashpath space, such that all requests
+from users additively fill-in the hashpath space, minimizing duplicated
+compute.
+3. Messages, referrable by their IDs (committed or uncommitted). These are
+stored as a set of links between commitment IDs and the uncommitted message.
+
+Before writing a message to the store, we convert it to Type-Annotated
+Binary Messages (TABMs), such that each of the keys in the message is
+either a map or a direct binary.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### cache_suite_test_/0 * ###
+
+`cache_suite_test_() -> any()`
+
+
+
+### calculate_all_ids/2 * ###
+
+`calculate_all_ids(Bin, Opts) -> any()`
+
+Calculate the IDs for a message.
+
+
+
+### do_read/4 * ###
+
+`do_read(Path, Store, Opts, AlreadyRead) -> any()`
+
+Read a path from the store. Unsafe: May recurse indefinitely if circular
+links are present.
+
+
+
+### do_write_message/4 * ###
+
+`do_write_message(Bin, AllIDs, Store, Opts) -> any()`
+
+
+
+### link/3 ###
+
+`link(Existing, New, Opts) -> any()`
+
+Make a link from one path to another in the store.
+Note: Argument order is `link(Src, Dst, Opts)`.
+
+
+
+### list/2 ###
+
+`list(Path, Opts) -> any()`
+
+List all items under a given path.
+
+
+
+### list_numbered/2 ###
+
+`list_numbered(Path, Opts) -> any()`
+
+List all items in a directory, assuming they are numbered.
+
+
+
+### read/2 ###
+
+`read(Path, Opts) -> any()`
+
+Read the message at a path. Returns in `structured@1.0` format: Either a
+richly typed map or a direct binary.
+
+
+
+### read_resolved/3 ###
+
+`read_resolved(MsgID1, MsgID2, Opts) -> any()`
+
+Read the output of a prior computation, given Msg1, Msg2, and some
+options.
+
+
+
+### run_test/0 * ###
+
+`run_test() -> any()`
+
+
+
+### store_read/3 * ###
+
+`store_read(Path, Store, Opts) -> any()`
+
+List all of the subpaths of a given path, read each in turn, returning a
+flat map. We track the paths that we have already read to avoid circular
+links.
+
+
+
+### store_read/4 * ###
+
+`store_read(Path, Store, Opts, AlreadyRead) -> any()`
+
+
+
+### test_deeply_nested_complex_message/1 * ###
+
+`test_deeply_nested_complex_message(Opts) -> any()`
+
+Test deeply nested item storage and retrieval
+
+
+
+### test_device_map_cannot_be_written_test/0 * ###
+
+`test_device_map_cannot_be_written_test() -> any()`
+
+Test that a message whose device is `#{}` cannot be written. If it were to
+be written, it would cause an infinite loop.
+
+
+
+### test_message_with_message/1 * ###
+
+`test_message_with_message(Opts) -> any()`
+
+
+
+### test_signed/1 ###
+
+`test_signed(Data) -> any()`
+
+
+
+### test_signed/2 * ###
+
+`test_signed(Data, Wallet) -> any()`
+
+
+
+### test_store_ans104_message/1 * ###
+
+`test_store_ans104_message(Opts) -> any()`
+
+
+
+### test_store_binary/1 * ###
+
+`test_store_binary(Opts) -> any()`
+
+
+
+### test_store_simple_signed_message/1 * ###
+
+`test_store_simple_signed_message(Opts) -> any()`
+
+Test storing and retrieving a simple unsigned item
+
+
+
+### test_store_simple_unsigned_message/1 * ###
+
+`test_store_simple_unsigned_message(Opts) -> any()`
+
+Test storing and retrieving a simple unsigned item
+
+
+
+### test_store_unsigned_empty_message/1 * ###
+
+`test_store_unsigned_empty_message(Opts) -> any()`
+
+
+
+### test_unsigned/1 ###
+
+`test_unsigned(Data) -> any()`
+
+
+
+### to_integer/1 * ###
+
+`to_integer(Value) -> any()`
+
+
+
+### write/2 ###
+
+`write(RawMsg, Opts) -> any()`
+
+Write a message to the cache. For raw binaries, we write the data at
+the hashpath of the data (by default the SHA2-256 hash of the data). We link
+the uncommitted ID's hashpath for the keys (including `/commitments`) on the
+message to the underlying data and recurse. We then link each commitment ID
+to the uncommitted message, such that any of the committed or uncommitted IDs
+can be read, and once in memory all of the commitments are available. For
+deep messages, the commitments will also be read, such that the ID of the
+outer message (which does not include its commitments) will be built upon
+the commitments of the inner messages. We do not, however, store the IDs from
+commitments on signed _inner_ messages. We may wish to revisit this.
+
+
+
+### write_binary/3 ###
+
+`write_binary(Hashpath, Bin, Opts) -> any()`
+
+Write a raw binary into the store and link it at a given hashpath.
+
+
+
+### write_binary/4 * ###
+
+`write_binary(Hashpath, Bin, Store, Opts) -> any()`
+
+
+
+### write_hashpath/2 ###
+
+`write_hashpath(Msg, Opts) -> any()`
+
+Write a hashpath and its message to the store and link it.
+
+
+
+### write_hashpath/3 * ###
+
+`write_hashpath(HP, Msg, Opts) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_cache.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_client.md ---
+# [Module hb_client.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_client.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### add_route/3 ###
+
+`add_route(Node, Route, Opts) -> any()`
+
+
+
+### arweave_timestamp/0 ###
+
+`arweave_timestamp() -> any()`
+
+Grab the latest block information from the Arweave gateway node.
+
+
+
+### prefix_keys/3 * ###
+
+`prefix_keys(Prefix, Message, Opts) -> any()`
+
+
+
+### resolve/4 ###
+
+`resolve(Node, Msg1, Msg2, Opts) -> any()`
+
+Resolve a message pair on a remote node.
+The message pair is first transformed into a singleton request, by
+prefixing the keys in both messages for the path segment that they relate to,
+and then adjusting the "Path" field from the second message.
+
+
+
+### routes/2 ###
+
+`routes(Node, Opts) -> any()`
+
+
+
+### upload/2 ###
+
+`upload(Msg, Opts) -> any()`
+
+Upload a data item to the bundler node.
+
+
+
+### upload/3 * ###
+
+`upload(Msg, Opts, X3) -> any()`
+
+
+
+### upload_empty_message_test/0 * ###
+
+`upload_empty_message_test() -> any()`
+
+
+
+### upload_empty_raw_ans104_test/0 * ###
+
+`upload_empty_raw_ans104_test() -> any()`
+
+
+
+### upload_raw_ans104_test/0 * ###
+
+`upload_raw_ans104_test() -> any()`
+
+
+
+### upload_raw_ans104_with_anchor_test/0 * ###
+
+`upload_raw_ans104_with_anchor_test() -> any()`
+
+
+
+### upload_single_layer_message_test/0 * ###
+
+`upload_single_layer_message_test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_client.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_crypto.md ---
+# [Module hb_crypto.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_crypto.erl)
+
+
+
+
+Implements the cryptographic functions and wraps the primitives
+used in HyperBEAM.
+
+
+
+## Description ##
+
+Abstracted such that this (extremely!) dangerous code
+can be carefully managed.
+
+HyperBEAM currently implements two hashpath algorithms:
+
+* `sha-256-chain`: A simple chained SHA-256 hash.
+
+* `accumulate-256`: A SHA-256 hash that chains the given IDs and accumulates
+their values into a single commitment.
+
+The accumulate algorithm is experimental and at this point only exists to
+allow us to test multiple HashPath algorithms in HyperBEAM.
+
+## Function Index ##
+
+
+
+Check that sha-256-chain correctly produces a hash matching
+the machine's OpenSSL lib's output.
+
+
+
+
+## Function Details ##
+
+
+
+### accumulate/2 ###
+
+`accumulate(ID1, ID2) -> any()`
+
+Accumulate two IDs into a single commitment.
+Experimental! This is not necessarily a cryptographically-secure operation.
+
+
+
+### count_zeroes/1 * ###
+
+`count_zeroes(X1) -> any()`
+
+Count the number of leading zeroes in a bitstring.
+
+
+
+### sha256/1 ###
+
+`sha256(Data) -> any()`
+
+Wrap Erlang's `crypto:hash/2` to provide a standard interface.
+Under-the-hood, this uses OpenSSL.
+
+
+
+### sha256_chain/2 ###
+
+`sha256_chain(ID1, ID2) -> any()`
+
+Add a new ID to the end of a SHA-256 hash chain.
+
+
+
+### sha256_chain_test/0 * ###
+
+`sha256_chain_test() -> any()`
+
+Check that `sha-256-chain` correctly produces a hash matching
+the machine's OpenSSL lib's output. Further (in case of a bug in our
+or Erlang's usage of OpenSSL), check that the output has at least
+a high level of entropy.
+
+
+--- END OF FILE: docs/resources/source-code/hb_crypto.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_debugger.md ---
+# [Module hb_debugger.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_debugger.erl)
+
+
+
+
+A module that provides bootstrapping interfaces for external debuggers
+to connect to HyperBEAM.
+
+
+
+## Description ##
+
+The simplest way to utilize an external graphical debugger is to use the
+`erlang-ls` extension for VS Code, Emacs, or other Language Server Protocol
+(LSP) compatible editors. This repository contains a `launch.json`
+configuration file for VS Code that can be used to spawn a new HyperBEAM,
+attach the debugger to it, and execute the specified `Module:Function(Args)`.
+Additionally, the node can be started with `rebar3 debugging` in order to
+allow access to the console while also allowing the debugger to attach.
+
+Boot time is approximately 10 seconds.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### await_breakpoint/0 ###
+
+`await_breakpoint() -> any()`
+
+Await a new breakpoint being set by the debugger.
+
+
+
+### await_breakpoint/1 * ###
+
+`await_breakpoint(N) -> any()`
+
+
+
+### await_debugger/0 * ###
+
+`await_debugger() -> any()`
+
+Await a debugger to be attached to the node.
+
+
+
+### await_debugger/1 * ###
+
+`await_debugger(N) -> any()`
+
+
+
+### interpret/1 * ###
+
+`interpret(Module) -> any()`
+
+Attempt to interpret a specified module to load it into the debugger.
+`int:i/1` seems to have an issue that will cause it to fail sporadically
+with `error:undef` on some modules. This error appears not to be catchable
+through the normal means. Subsequently, we attempt the load in a separate
+process and wait for it to complete. If we do not receive a response in a
+reasonable amount of time, we assume that the module failed to load and
+return `false`.
+
+
+
+### is_debugging_node_connected/0 * ###
+
+`is_debugging_node_connected() -> any()`
+
+Is another Distributed Erlang node connected to us?
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+
+
+### start_and_break/2 ###
+
+`start_and_break(Module, Function) -> any()`
+
+A bootstrapping function to wait for an external debugger to be attached,
+then add a breakpoint on the specified `Module:Function(Args)`, then call it.
+
+
+
+### start_and_break/3 ###
+
+`start_and_break(Module, Function, Args) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_debugger.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_escape.md ---
+# [Module hb_escape.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_escape.erl)
+
+
+
+
+Escape and unescape mixed case values for use in HTTP headers.
+
+
+
+## Description ##
+This is necessary for encodings of AO-Core messages for transmission in
+HTTP/2 and HTTP/3, because uppercase header keys are explicitly disallowed.
+While most map keys in HyperBEAM are normalized to lowercase, IDs are not.
+Subsequently, we encode all header keys to lowercase %-encoded URI-style
+strings for transmission.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### create_schedule_aos2_test_disabled/0 * ###
+
+`create_schedule_aos2_test_disabled() -> any()`
+
+
+
+### paid_wasm_test/0 * ###
+
+`paid_wasm_test() -> any()`
+
+Gain signed WASM responses from a node and verify them.
+1. Start the client with a small balance.
+2. Execute a simple WASM function on the host node.
+3. Verify the response is correct and signed by the host node.
+4. Get the balance of the client and verify it has been deducted.
+
+
+
+### relay_with_payments_test/0 * ###
+
+`relay_with_payments_test() -> any()`
+
+Start a node running the simple pay meta device, and use it to relay
+a message for a client. We must ensure:
+1. When the client has no balance, the relay fails.
+2. The operator is able to topup for the client.
+3. The client has the correct balance after the topup.
+4. The relay succeeds when the client has enough balance.
+5. The received message is signed by the host using http-sig and validates
+correctly.
+
+
+
+### schedule/2 * ###
+
+`schedule(ProcMsg, Target) -> any()`
+
+
+
+### schedule/3 * ###
+
+`schedule(ProcMsg, Target, Wallet) -> any()`
+
+
+
+### schedule/4 * ###
+
+`schedule(ProcMsg, Target, Wallet, Node) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_examples.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_features.md ---
+# [Module hb_features.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_features.erl)
+
+
+
+
+A module that exports a list of feature flags that the node supports
+using the `-ifdef` macro.
+
+
+
+## Description ##
+As a consequence, this module acts as a proxy of information between the
+build system and the runtime execution environment.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### all/0 ###
+
+`all() -> any()`
+
+Returns a list of all feature flags that the node supports.
+
+
+
+### enabled/1 ###
+
+`enabled(Feature) -> any()`
+
+Returns true if the feature flag is enabled.
+
+
+
+### genesis_wasm/0 ###
+
+`genesis_wasm() -> any()`
+
+
+
+### http3/0 ###
+
+`http3() -> any()`
+
+
+
+### rocksdb/0 ###
+
+`rocksdb() -> any()`
+
+
+
+### test/0 ###
+
+`test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_features.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_gateway_client.md ---
+# [Module hb_gateway_client.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_gateway_client.erl)
+
+
+
+
+Implementation of Arweave's GraphQL API to gain access to specific
+items of data stored on the network.
+
+
+
+## Description ##
+This module must be used to get full HyperBEAM `structured@1.0` form messages
+from data items stored on the network, as Arweave gateways do not presently
+expose all necessary fields to retrieve this information outside of the
+GraphQL API. When gateways integrate serving in `httpsig@1.0` form, this
+module will be deprecated.
+
+## Function Index ##
+
+
+
+Takes a list of messages with name and value fields, and formats
+them as a GraphQL tags argument.
+
+
+
+
+## Function Details ##
+
+
+
+### ans104_no_data_item_test/0 * ###
+
+`ans104_no_data_item_test() -> any()`
+
+
+
+### ao_dataitem_test/0 * ###
+
+`ao_dataitem_test() -> any()`
+
+Test optimistic index
+
+
+
+### data/2 ###
+
+`data(ID, Opts) -> any()`
+
+Get the data associated with a transaction by its ID, using the node's
+Arweave `gateway` peers. The item is expected to be available in its
+unmodified (by caches or other proxies) form at the following location:
+`https://<gateway>/raw/<id>`
+where `<id>` is the base64-url-encoded transaction ID.
+
+
+
+### decode_id_or_null/1 * ###
+
+`decode_id_or_null(Bin) -> any()`
+
+
+
+### decode_or_null/1 * ###
+
+`decode_or_null(Bin) -> any()`
+
+
+
+### item_spec/0 * ###
+
+`item_spec() -> any()`
+
+Gives the fields of a transaction that are needed to construct an
+ANS-104 message.
+
+
+
+### l1_transaction_test/0 * ###
+
+`l1_transaction_test() -> any()`
+
+Test l1 message from graphql
+
+
+
+### l2_dataitem_test/0 * ###
+
+`l2_dataitem_test() -> any()`
+
+Test l2 message from graphql
+
+
+
+### normalize_null/1 * ###
+
+`normalize_null(Bin) -> any()`
+
+
+
+### query/2 * ###
+
+`query(Query, Opts) -> any()`
+
+Run a GraphQL request encoded as a binary. The node message may contain
+a list of URLs to use, optionally as a tuple with an additional map of options
+to use for the request.
+
+
+
+### read/2 ###
+
+`read(ID, Opts) -> any()`
+
+Get a data item (including data and tags) by its ID, using the node's
+GraphQL peers.
+It uses the following GraphQL schema:
+type Transaction {
+id: ID!
+anchor: String!
+signature: String!
+recipient: String!
+owner: Owner { address: String! key: String! }!
+fee: Amount!
+quantity: Amount!
+data: MetaData!
+tags: [Tag { name: String! value: String! }!]!
+}
+type Amount {
+winston: String!
+ar: String!
+}
+
+
+
+### result_to_message/2 ###
+
+`result_to_message(Item, Opts) -> any()`
+
+Takes a GraphQL item node, matches it with the appropriate data from a
+gateway, then returns `{ok, ParsedMsg}`.
+
+
+
+### result_to_message/3 * ###
+
+`result_to_message(ExpectedID, Item, Opts) -> any()`
+
+
+
+### scheduler_location/2 ###
+
+`scheduler_location(Address, Opts) -> any()`
+
+Find the location of the scheduler based on its ID, through GraphQL.
+
+
+
+### scheduler_location_test/0 * ###
+
+`scheduler_location_test() -> any()`
+
+Test that we can get the scheduler location.
+
+
+
+### subindex_to_tags/1 * ###
+
+`subindex_to_tags(Subindex) -> any()`
+
+Takes a list of messages with `name` and `value` fields, and formats
+them as a GraphQL `tags` argument.
+
+
+--- END OF FILE: docs/resources/source-code/hb_gateway_client.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_http_benchmark_tests.md ---
+# [Module hb_http_benchmark_tests.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_benchmark_tests.erl)
+
+
+
+
+
+--- END OF FILE: docs/resources/source-code/hb_http_benchmark_tests.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_http_client_sup.md ---
+# [Module hb_http_client_sup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_client_sup.erl)
+
+
+
+
+The supervisor for the gun HTTP client wrapper.
+
+__Behaviours:__ [`supervisor`](supervisor.md).
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### allowed_methods/2 ###
+
+`allowed_methods(Req, State) -> any()`
+
+Return the list of allowed methods for the HTTP server.
+
+
+
+### cors_reply/2 * ###
+
+`cors_reply(Req, ServerID) -> any()`
+
+Reply to CORS preflight requests.
+
+
+
+### get_opts/1 ###
+
+`get_opts(NodeMsg) -> any()`
+
+
+
+### handle_request/3 * ###
+
+`handle_request(RawReq, Body, ServerID) -> any()`
+
+Handle all non-CORS preflight requests as AO-Core requests. Execution
+starts by parsing the HTTP request into HyperBEAM's message format, then
+passing the message directly to `meta@1.0` which handles calling AO-Core in
+the appropriate way.
+
+
+
+### http3_conn_sup_loop/0 * ###
+
+`http3_conn_sup_loop() -> any()`
+
+
+
+### init/2 ###
+
+`init(Req, ServerID) -> any()`
+
+Entrypoint for all HTTP requests. Receives the Cowboy request option and
+the server ID, which can be used to lookup the node message.
+
+
+
+### new_server/1 * ###
+
+`new_server(RawNodeMsg) -> any()`
+
+Trigger the creation of a new HTTP server node. Accepts a `NodeMsg`
+message, which is used to configure the server. This function executes the
+`start` hook on the node, giving it the opportunity to modify the `NodeMsg`
+before it is used to configure the server. The `start` hook gives and
+expects the node message to be in the `body` key.
+
+
+
+### read_body/1 * ###
+
+`read_body(Req) -> any()`
+
+Helper to grab the full body of a HTTP request, even if it's chunked.
+
+
+
+### read_body/2 * ###
+
+`read_body(Req0, Acc) -> any()`
+
+
+
+### set_default_opts/1 ###
+
+`set_default_opts(Opts) -> any()`
+
+
+
+### set_node_opts_test/0 * ###
+
+`set_node_opts_test() -> any()`
+
+Ensure that the `start` hook can be used to modify the node options. We
+do this by creating a message with a device that has a `start` key. This
+key takes the message's body (the anticipated node options) and returns a
+modified version of that body, which will be used to configure the node. We
+then check that the node options were modified as we expected.
+
+
+
+### set_opts/1 ###
+
+`set_opts(Opts) -> any()`
+
+Merges the provided `Opts` with uncommitted values from `Request`,
+preserves the http_server value, and updates node_history by prepending
+the `Request`. If a server reference exists, updates the Cowboy environment
+variable 'node_msg' with the resulting options map.
+
+
+
+### set_opts/2 ###
+
+`set_opts(Request, Opts) -> any()`
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+Starts the HTTP server. Optionally accepts an `Opts` message, which
+is used as the source for server configuration settings, as well as the
+`Opts` argument to use for all AO-Core resolution requests downstream.
+
+
+
+### start/1 ###
+
+`start(Opts) -> any()`
+
+
+
+### start_http2/3 * ###
+
+`start_http2(ServerID, ProtoOpts, NodeMsg) -> any()`
+
+
+
+### start_http3/3 * ###
+
+`start_http3(ServerID, ProtoOpts, NodeMsg) -> any()`
+
+
+
+### start_node/0 ###
+
+`start_node() -> any()`
+
+Test that we can start the server, send a message, and get a response.
+
+
+
+### start_node/1 ###
+
+`start_node(Opts) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_http_server.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_http.md ---
+# [Module hb_http.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### accept_to_codec/2 ###
+
+`accept_to_codec(TABMReq, Opts) -> any()`
+
+Calculate the codec name to use for a reply given its initiating Cowboy
+request, the parsed TABM request, and the response message. The precidence
+order for finding the codec is:
+1. The `accept-codec` field in the message
+2. The `accept` field in the request headers
+3. The default codec
+Options can be specified in mime-type format (`application/*`) or in
+AO device format (`device@1.0`).
+
+
+
+### add_cors_headers/2 * ###
+
+`add_cors_headers(Msg, ReqHdr) -> any()`
+
+Add permissive CORS headers to a message, if the message has not already
+specified CORS headers.
+
+
+
+### allowed_status/2 * ###
+
+`allowed_status(ResponseMsg, Statuses) -> any()`
+
+Check if a status is allowed, according to the configuration.
+
+
+
+### ans104_wasm_test/0 * ###
+
+`ans104_wasm_test() -> any()`
+
+
+
+### codec_to_content_type/2 * ###
+
+`codec_to_content_type(Codec, Opts) -> any()`
+
+Call the `content-type` key on a message with the given codec, using
+a fast-path for options that are not needed for this one-time lookup.
+
+
+
+### cors_get_test/0 * ###
+
+`cors_get_test() -> any()`
+
+
+
+### default_codec/1 * ###
+
+`default_codec(Opts) -> any()`
+
+Return the default codec for the given options.
+
+
+
+### empty_inbox/1 * ###
+
+`empty_inbox(Ref) -> any()`
+
+Empty the inbox of the current process for all messages with the given
+reference.
+
+
+
+### encode_reply/3 * ###
+
+`encode_reply(TABMReq, Message, Opts) -> any()`
+
+Generate the headers and body for a HTTP response message.
+
+
+
+### get/2 ###
+
+`get(Node, Opts) -> any()`
+
+Gets a URL via HTTP and returns the resulting message in deserialized
+form.
+
+
+
+### get/3 ###
+
+`get(Node, PathBin, Opts) -> any()`
+
+
+
+### get_deep_signed_wasm_state_test/0 * ###
+
+`get_deep_signed_wasm_state_test() -> any()`
+
+
+
+### get_deep_unsigned_wasm_state_test/0 * ###
+
+`get_deep_unsigned_wasm_state_test() -> any()`
+
+
+
+### http_response_to_httpsig/4 * ###
+
+`http_response_to_httpsig(Status, HeaderMap, Body, Opts) -> any()`
+
+Convert a HTTP response to a httpsig message.
+
+
+
+### httpsig_to_tabm_singleton/3 * ###
+
+`httpsig_to_tabm_singleton(Req, Body, Opts) -> any()`
+
+HTTPSig messages are inherently mixed into the transport layer, so they
+require special handling in order to be converted to a normalized message.
+In particular, the signatures are verified if present and required by the
+node configuration. Additionally, non-committed fields are removed from the
+message if it is signed, with the exception of the `path` and `method` fields.
+
+
+
+### maybe_add_unsigned/3 * ###
+
+`maybe_add_unsigned(Req, Msg, Opts) -> any()`
+
+Add the method and path to a message, if they are not already present.
+The precedence order for finding the path is:
+1. The path in the message
+2. The path in the request URI
+
+
+
+### message_to_request/2 * ###
+
+`message_to_request(M, Opts) -> any()`
+
+Given a message, return the information needed to make the request.
+
+
+
+### mime_to_codec/2 * ###
+
+`mime_to_codec(X1, Opts) -> any()`
+
+Find a codec name from a mime-type.
+
+
+
+### multirequest/5 * ###
+
+`multirequest(Config, Method, Path, Message, Opts) -> any()`
+
+Dispatch the same HTTP request to many nodes. Can be configured to
+await responses from all nodes or just one, and to halt all requests after
+after it has received the required number of responses, or to leave all
+requests running until they have all completed. Default: Race for first
+response.
+
+Expects a config message of the following form:
+/Nodes/1..n: Hostname | #{ hostname => Hostname, address => Address }
+/Responses: Number of responses to gather
+/Stop-After: Should we stop after the required number of responses?
+/Parallel: Should we run the requests in parallel?
+
+
+
+### multirequest_opt/5 * ###
+
+`multirequest_opt(Key, Config, Message, Default, Opts) -> any()`
+
+Get a value for a multirequest option from the config or message.
+
+
+
+### multirequest_opts/3 * ###
+
+`multirequest_opts(Config, Message, Opts) -> any()`
+
+Get the multirequest options from the config or message. The options in
+the message take precedence over the options in the config.
+
+
+
+### nested_ao_resolve_test/0 * ###
+
+`nested_ao_resolve_test() -> any()`
+
+
+
+### parallel_multirequest/8 * ###
+
+`parallel_multirequest(Nodes, Responses, StopAfter, Method, Path, Message, Statuses, Opts) -> any()`
+
+Dispatch the same HTTP request to many nodes in parallel.
+
+
+
+### parallel_responses/7 * ###
+
+`parallel_responses(Res, Procs, Ref, Awaiting, StopAfter, Statuses, Opts) -> any()`
+
+Collect the necessary number of responses, and stop workers if
+configured to do so.
+
+
+
+### post/3 ###
+
+`post(Node, Message, Opts) -> any()`
+
+Posts a message to a URL on a remote peer via HTTP. Returns the
+resulting message in deserialized form.
+
+
+
+### post/4 ###
+
+`post(Node, Path, Message, Opts) -> any()`
+
+
+
+### prepare_request/6 * ###
+
+`prepare_request(Format, Method, Peer, Path, RawMessage, Opts) -> any()`
+
+Turn a set of request arguments into a request message, formatted in the
+preferred format.
+
+
+
+### remove_unsigned_fields/2 * ###
+
+`remove_unsigned_fields(Msg, Opts) -> any()`
+
+
+
+### reply/4 ###
+
+`reply(Req, TABMReq, Message, Opts) -> any()`
+
+Reply to the client's HTTP request with a message.
+
+
+
+### reply/5 * ###
+
+`reply(Req, TABMReq, BinStatus, RawMessage, Opts) -> any()`
+
+
+
+### req_to_tabm_singleton/3 ###
+
+`req_to_tabm_singleton(Req, Body, Opts) -> any()`
+
+Convert a cowboy request to a normalized message.
+
+
+
+### request/2 ###
+
+`request(Message, Opts) -> any()`
+
+Posts a binary to a URL on a remote peer via HTTP, returning the raw
+binary body.
+
+
+
+### request/4 ###
+
+`request(Method, Peer, Path, Opts) -> any()`
+
+
+
+### request/5 ###
+
+`request(Method, Config, Path, Message, Opts) -> any()`
+
+
+
+### route_to_request/3 * ###
+
+`route_to_request(M, X2, Opts) -> any()`
+
+Parse a `dev_router:route` response and return a tuple of request
+parameters.
+
+
+
+### run_wasm_signed_test/0 * ###
+
+`run_wasm_signed_test() -> any()`
+
+
+
+### run_wasm_unsigned_test/0 * ###
+
+`run_wasm_unsigned_test() -> any()`
+
+
+
+### send_encoded_node_message_test/2 * ###
+
+`send_encoded_node_message_test(Config, Codec) -> any()`
+
+
+
+### send_flat_encoded_node_message_test/0 * ###
+
+`send_flat_encoded_node_message_test() -> any()`
+
+
+
+### send_json_encoded_node_message_test/0 * ###
+
+`send_json_encoded_node_message_test() -> any()`
+
+
+
+### send_large_signed_request_test/0 * ###
+
+`send_large_signed_request_test() -> any()`
+
+
+
+### serial_multirequest/7 * ###
+
+`serial_multirequest(Nodes, Remaining, Method, Path, Message, Statuses, Opts) -> any()`
+
+Serially request a message, collecting responses until the required
+number of responses have been gathered. Ensure that the statuses are
+allowed, according to the configuration.
+
+
+
+### simple_ao_resolve_signed_test/0 * ###
+
+`simple_ao_resolve_signed_test() -> any()`
+
+
+
+### simple_ao_resolve_unsigned_test/0 * ###
+
+`simple_ao_resolve_unsigned_test() -> any()`
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+
+
+### wasm_compute_request/3 * ###
+
+`wasm_compute_request(ImageFile, Func, Params) -> any()`
+
+
+
+### wasm_compute_request/4 * ###
+
+`wasm_compute_request(ImageFile, Func, Params, ResultPath) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_http.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_json.md ---
+# [Module hb_json.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_json.erl)
+
+
+
+
+Wrapper for encoding and decoding JSON.
+
+
+
+## Description ##
+Supports maps and Jiffy's old
+`ejson` format. This module abstracts the underlying JSON library, allowing
+us to switch between libraries as needed in the future.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### console/2 * ###
+
+`console(State, Act) -> any()`
+
+
+
+### log/2 ###
+
+`log(Monitor, Data) -> any()`
+
+
+
+### loop/1 * ###
+
+`loop(State) -> any()`
+
+
+
+### register/1 ###
+
+`register(Monitor) -> any()`
+
+
+
+### report/1 ###
+
+`report(Monitor) -> any()`
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+
+
+### start/1 ###
+
+`start(Client) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_logger.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_message.md ---
+# [Module hb_message.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_message.erl)
+
+
+
+
+This module acts as an adapter between messages, as modeled in the
+AO-Core protocol, and their underlying binary representations and formats.
+
+
+
+## Description ##
+
+Unless you are implementing a new message serialization codec, you should
+not need to interact with this module directly. Instead, use the
+`hb_ao` interfaces to interact with all messages. The `dev_message`
+module implements a device interface for abstracting over the different
+message formats.
+
+`hb_message` and the HyperBEAM caches can interact with multiple different
+types of message formats:
+
+- Richly typed AO-Core structured messages.
+- Arweave transactions.
+- ANS-104 data items.
+- HTTP Signed Messages.
+- Flat Maps.
+
+This module is responsible for converting between these formats. It does so
+by normalizing messages to a common format: `Type Annotated Binary Messages`
+(TABM). TABMs are deep Erlang maps with keys that only contain either other
+TABMs or binary values. By marshalling all messages into this format, they
+can easily be coerced into other output formats. For example, generating a
+`HTTP Signed Message` format output from an Arweave transaction. TABM is
+also a simple format from a computational perspective (only binary literals
+and O(1) access maps), such that operations upon them are efficient.
+
+The structure of the conversions is as follows:
+
+
+
+Additionally, this module provides a number of utility functions for
+manipulating messages. For example, `hb_message:sign/2` to sign a message of
+arbitrary type, or `hb_message:format/1` to print an AO-Core/TABM message in
+a human-readable format.
+
+The `hb_cache` module is responsible for storing and retrieving messages in
+the HyperBEAM stores configured on the node. Each store has its own storage
+backend, but each works with simple key-value pairs. Subsequently, the
+`hb_cache` module uses TABMs as the internal format for storing and
+retrieving messages.
+
+## Function Index ##
+
+
+
+Test that the filter_default_keys/1 function removes TX fields
+that have the default values found in the tx record, but not those that
+have been set by the user.
+
+
+
+
+## Function Details ##
+
+
+
+### basic_map_codec_test/1 * ###
+
+`basic_map_codec_test(Codec) -> any()`
+
+
+
+### binary_to_binary_test/1 * ###
+
+`binary_to_binary_test(Codec) -> any()`
+
+
+
+### commit/2 ###
+
+`commit(Msg, WalletOrOpts) -> any()`
+
+Sign a message with the given wallet.
+
+
+
+### commit/3 ###
+
+`commit(Msg, Wallet, Format) -> any()`
+
+
+
+### commitment/2 ###
+
+`commitment(Committer, Msg) -> any()`
+
+Extract a commitment from a message given a `committer` ID, or a spec
+message to match against. Returns only the first matching commitment, or
+`not_found`.
+
+
+
+### commitment/3 ###
+
+`commitment(CommitterID, Msg, Opts) -> any()`
+
+
+
+### committed/1 ###
+
+`committed(Msg) -> any()`
+
+Return the list of committed keys from a message.
+
+
+
+### committed/2 ###
+
+`committed(Msg, Committers) -> any()`
+
+
+
+### committed/3 ###
+
+`committed(Msg, List, Opts) -> any()`
+
+
+
+### committed_empty_keys_test/1 * ###
+
+`committed_empty_keys_test(Codec) -> any()`
+
+
+
+### committed_keys_test/1 * ###
+
+`committed_keys_test(Codec) -> any()`
+
+
+
+### complex_signed_message_test/1 * ###
+
+`complex_signed_message_test(Codec) -> any()`
+
+
+
+### convert/3 ###
+
+`convert(Msg, TargetFormat, Opts) -> any()`
+
+Convert a message from one format to another. Taking a message in the
+source format, a target format, and a set of opts. If not given, the source
+is assumed to be `structured@1.0`. Additional codecs can be added by ensuring they
+are part of the `Opts` map -- either globally, or locally for a computation.
+
+The encoding happens in two phases:
+1. Convert the message to a TABM.
+2. Convert the TABM to the target format.
+
+The conversion to a TABM is done by the `structured@1.0` codec, which is always
+available. The conversion from a TABM is done by the target codec.
+
+
+
+### convert/4 ###
+
+`convert(Msg, TargetFormat, SourceFormat, Opts) -> any()`
+
+
+
+### deep_multisignature_test/0 * ###
+
+`deep_multisignature_test() -> any()`
+
+
+
+### deeply_nested_committed_keys_test/0 * ###
+
+`deeply_nested_committed_keys_test() -> any()`
+
+
+
+### deeply_nested_message_with_content_test/1 * ###
+
+`deeply_nested_message_with_content_test(Codec) -> any()`
+
+Test that we can convert a 3 layer nested message into a tx record and back.
+
+
+
+### deeply_nested_message_with_only_content/1 * ###
+
+`deeply_nested_message_with_only_content(Codec) -> any()`
+
+
+
+### default_keys_removed_test/0 * ###
+
+`default_keys_removed_test() -> any()`
+
+Test that the filter_default_keys/1 function removes TX fields
+that have the default values found in the tx record, but not those that
+have been set by the user.
+
+
+
+### default_tx_list/0 ###
+
+`default_tx_list() -> any()`
+
+Get the ordered list of fields as AO-Core keys and default values of
+the tx record.
+
+
+
+### default_tx_message/0 * ###
+
+`default_tx_message() -> any()`
+
+Get the normalized fields and default values of the tx record.
+
+
+
+### empty_string_in_tag_test/1 * ###
+
+`empty_string_in_tag_test(Codec) -> any()`
+
+
+
+### encode_balance_table/2 * ###
+
+`encode_balance_table(Size, Codec) -> any()`
+
+
+
+### encode_large_balance_table_test/1 * ###
+
+`encode_large_balance_table_test(Codec) -> any()`
+
+
+
+### encode_small_balance_table_test/1 * ###
+
+`encode_small_balance_table_test(Codec) -> any()`
+
+
+
+### filter_default_keys/1 ###
+
+`filter_default_keys(Map) -> any()`
+
+Remove keys from a map that have the default values found in the tx
+record.
+
+
+
+### find_target/3 ###
+
+`find_target(Self, Req, Opts) -> any()`
+
+Implements a standard pattern in which the target for an operation is
+found by looking for a `target` key in the request. If the target is `self`,
+or not present, the operation is performed on the original message. Otherwise,
+the target is expected to be a key in the message, and the operation is
+performed on the value of that key.
+
+
+
+### format/1 ###
+
+`format(Item) -> any()`
+
+Format a message for printing, optionally taking an indentation level
+to start from.
+
+
+
+### format/2 ###
+
+`format(Bin, Indent) -> any()`
+
+
+
+### from_tabm/4 * ###
+
+`from_tabm(Msg, TargetFormat, OldPriv, Opts) -> any()`
+
+
+
+### generate_test_suite/1 * ###
+
+`generate_test_suite(Suite) -> any()`
+
+
+
+### get_codec/2 * ###
+
+`get_codec(TargetFormat, Opts) -> any()`
+
+Get a codec from the options.
+
+
+
+### hashpath_sign_verify_test/1 * ###
+
+`hashpath_sign_verify_test(Codec) -> any()`
+
+
+
+### id/1 ###
+
+`id(Msg) -> any()`
+
+Return the ID of a message.
+
+
+
+### id/2 ###
+
+`id(Msg, Committers) -> any()`
+
+
+
+### id/3 ###
+
+`id(Msg, RawCommitters, Opts) -> any()`
+
+
+
+### large_body_committed_keys_test/1 * ###
+
+`large_body_committed_keys_test(Codec) -> any()`
+
+
+
+### match/2 ###
+
+`match(Map1, Map2) -> any()`
+
+Check if two maps match, including recursively checking nested maps.
+Takes an optional mode argument to control the matching behavior:
+`strict`: All keys in both maps must be present and match.
+`only_present`: Only present keys in both maps must match.
+`primary`: Only the primary map's keys must be present.
+
+
+
+### match/3 ###
+
+`match(Map1, Map2, Mode) -> any()`
+
+
+
+### match_modes_test/0 * ###
+
+`match_modes_test() -> any()`
+
+
+
+### match_test/1 * ###
+
+`match_test(Codec) -> any()`
+
+Test that the message matching function works.
+
+
+
+### matchable_keys/1 * ###
+
+`matchable_keys(Map) -> any()`
+
+
+
+### message_suite_test_/0 * ###
+
+`message_suite_test_() -> any()`
+
+
+
+### message_with_large_keys_test/1 * ###
+
+`message_with_large_keys_test(Codec) -> any()`
+
+Test that the data field is correctly managed when we have multiple
+uses for it (the 'data' key itself, as well as keys that cannot fit in
+tags).
+
+
+
+### message_with_simple_embedded_list_test/1 * ###
+
+`message_with_simple_embedded_list_test(Codec) -> any()`
+
+
+
+### minimization_test/0 * ###
+
+`minimization_test() -> any()`
+
+
+
+### minimize/1 ###
+
+`minimize(Msg) -> any()`
+
+Remove keys from the map that can be regenerated. Optionally takes an
+additional list of keys to include in the minimization.
+
+
+
+### minimize/2 * ###
+
+`minimize(RawVal, ExtraKeys) -> any()`
+
+
+
+### nested_body_list_test/1 * ###
+
+`nested_body_list_test(Codec) -> any()`
+
+
+
+### nested_empty_map_test/1 * ###
+
+`nested_empty_map_test(Codec) -> any()`
+
+
+
+### nested_message_with_large_content_test/1 * ###
+
+`nested_message_with_large_content_test(Codec) -> any()`
+
+Test that the data field is correctly managed when we have multiple
+uses for it (the 'data' key itself, as well as keys that cannot fit in
+tags).
+
+
+
+### nested_message_with_large_keys_and_content_test/1 * ###
+
+`nested_message_with_large_keys_and_content_test(Codec) -> any()`
+
+Check that large keys and data fields are correctly handled together.
+
+
+
+### nested_message_with_large_keys_test/1 * ###
+
+`nested_message_with_large_keys_test(Codec) -> any()`
+
+
+
+### nested_structured_fields_test/1 * ###
+
+`nested_structured_fields_test(Codec) -> any()`
+
+
+
+### normalize/1 * ###
+
+`normalize(Map) -> any()`
+
+Return a map with only the keys that are necessary, without those that can
+be regenerated.
+
+
+
+### print/1 ###
+
+`print(Msg) -> any()`
+
+Pretty-print a message.
+
+
+
+### print/2 * ###
+
+`print(Msg, Indent) -> any()`
+
+
+
+### priv_survives_conversion_test/1 * ###
+
+`priv_survives_conversion_test(Codec) -> any()`
+
+
+
+### recursive_nested_list_test/1 * ###
+
+`recursive_nested_list_test(Codec) -> any()`
+
+
+
+### restore_priv/2 * ###
+
+`restore_priv(Msg, EmptyPriv) -> any()`
+
+Add the existing `priv` sub-map back to a converted message, honoring
+any existing `priv` sub-map that may already be present.
+
+
+
+### run_test/0 * ###
+
+`run_test() -> any()`
+
+
+
+### set_body_codec_test/1 * ###
+
+`set_body_codec_test(Codec) -> any()`
+
+
+
+### sign_node_message_test/1 * ###
+
+`sign_node_message_test(Codec) -> any()`
+
+
+
+### signed_deep_message_test/1 * ###
+
+`signed_deep_message_test(Codec) -> any()`
+
+
+
+### signed_list_test/1 * ###
+
+`signed_list_test(Codec) -> any()`
+
+
+
+### signed_message_encode_decode_verify_test/1 * ###
+
+`signed_message_encode_decode_verify_test(Codec) -> any()`
+
+
+
+### signed_message_with_derived_components_test/1 * ###
+
+`signed_message_with_derived_components_test(Codec) -> any()`
+
+
+
+### signed_nested_data_key_test/1 * ###
+
+`signed_nested_data_key_test(Codec) -> any()`
+
+
+
+### signed_only_committed_data_field_test/1 * ###
+
+`signed_only_committed_data_field_test(Codec) -> any()`
+
+
+
+### signed_with_inner_signed_message_test/1 * ###
+
+`signed_with_inner_signed_message_test(Codec) -> any()`
+
+
+
+### signers/1 ###
+
+`signers(Msg) -> any()`
+
+Return all of the committers on a message that have 'normal', 256 bit,
+addresses.
+
+
+
+### simple_nested_message_test/1 * ###
+
+`simple_nested_message_test(Codec) -> any()`
+
+
+
+### single_layer_message_to_encoding_test/1 * ###
+
+`single_layer_message_to_encoding_test(Codec) -> any()`
+
+Test that we can convert a message into a tx record and back.
+
+
+
+### structured_field_atom_parsing_test/1 * ###
+
+`structured_field_atom_parsing_test(Codec) -> any()`
+
+Structured field parsing tests.
+
+
+
+### structured_field_decimal_parsing_test/1 * ###
+
+`structured_field_decimal_parsing_test(Codec) -> any()`
+
+
+
+### tabm_ao_ids_equal_test/1 * ###
+
+`tabm_ao_ids_equal_test(Codec) -> any()`
+
+
+
+### test_codecs/0 * ###
+
+`test_codecs() -> any()`
+
+
+
+### to_tabm/3 * ###
+
+`to_tabm(Msg, SourceFormat, Opts) -> any()`
+
+
+
+### type/1 ###
+
+`type(TX) -> any()`
+
+Return the type of an encoded message.
+
+
+
+### uncommitted/1 ###
+
+`uncommitted(Bin) -> any()`
+
+Return the unsigned version of a message in AO-Core format.
+
+
+
+### unsigned_id_test/1 * ###
+
+`unsigned_id_test(Codec) -> any()`
+
+
+
+### verify/1 ###
+
+`verify(Msg) -> any()`
+
+Wrapper function to verify a message.
+
+
+
+### verify/2 ###
+
+`verify(Msg, Committers) -> any()`
+
+
+
+### with_commitments/2 ###
+
+`with_commitments(Spec, Msg) -> any()`
+
+Filter messages that do not match the 'spec' given. The underlying match
+is performed in the `only_present` mode, such that match specifications only
+need to specify the keys that must be present.
+
+
+
+### with_commitments/3 * ###
+
+`with_commitments(Spec, Msg, Opts) -> any()`
+
+
+
+### with_only_committed/1 ###
+
+`with_only_committed(Msg) -> any()`
+
+Return a message with only the committed keys. If no commitments are
+present, the message is returned unchanged. This means that you need to
+check if the message is:
+- Committed
+- Verifies
+...before using the output of this function as the 'canonical' message. This
+is such that expensive operations like signature verification are not
+performed unless necessary.
+
+
+
+### with_only_committed/2 ###
+
+`with_only_committed(Msg, Opts) -> any()`
+
+
+
+### with_only_committers/2 ###
+
+`with_only_committers(Msg, Committers) -> any()`
+
+Return the message with only the specified committers attached.
+
+
+
+### without_commitments/2 ###
+
+`without_commitments(Spec, Msg) -> any()`
+
+Filter messages that match the 'spec' given. Inverts the `with_commitments/2`
+function, such that only messages that do _not_ match the spec are returned.
+
+
+
+### without_commitments/3 * ###
+
+`without_commitments(Spec, Msg, Opts) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_message.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_metrics_collector.md ---
+# [Module hb_metrics_collector.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_metrics_collector.erl)
+
+
+
+
+__Behaviours:__ [`prometheus_collector`](prometheus_collector.md).
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### collect_metrics/2 ###
+
+`collect_metrics(X1, SystemLoad) -> any()`
+
+
+
+### collect_mf/2 ###
+
+`collect_mf(Registry, Callback) -> any()`
+
+
+
+### create_gauge/3 * ###
+
+`create_gauge(Name, Help, Data) -> any()`
+
+
+
+### deregister_cleanup/1 ###
+
+`deregister_cleanup(X1) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_metrics_collector.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_name.md ---
+# [Module hb_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_name.erl)
+
+
+
+
+An abstraction for name registration/deregistration in Hyperbeam.
+
+
+
+## Description ##
+Its motivation is to provide a way to register names that are not necessarily
+atoms, but can be any term (for example: hashpaths or `process@1.0` IDs).
+An important characteristic of these functions is that they are atomic:
+There can only ever be one registrant for a given name at a time.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### all/0 ###
+
+`all() -> any()`
+
+List the names in the registry.
+
+
+
+### all_test/0 * ###
+
+`all_test() -> any()`
+
+
+
+### atom_test/0 * ###
+
+`atom_test() -> any()`
+
+
+
+### basic_test/1 * ###
+
+`basic_test(Term) -> any()`
+
+
+
+### cleanup_test/0 * ###
+
+`cleanup_test() -> any()`
+
+
+
+### concurrency_test/0 * ###
+
+`concurrency_test() -> any()`
+
+
+
+### dead_process_test/0 * ###
+
+`dead_process_test() -> any()`
+
+
+
+### ets_lookup/1 * ###
+
+`ets_lookup(Name) -> any()`
+
+
+
+### lookup/1 ###
+
+`lookup(Name) -> any()`
+
+Lookup a name -> PID.
+
+
+
+### register/1 ###
+
+`register(Name) -> any()`
+
+Register a name. If the name is already registered, the registration
+will fail. The name can be any Erlang term.
+
+
+
+### register/2 ###
+
+`register(Name, Pid) -> any()`
+
+
+
+### spawn_test_workers/1 * ###
+
+`spawn_test_workers(Name) -> any()`
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
+
+
+### start_ets/0 * ###
+
+`start_ets() -> any()`
+
+
+
+### term_test/0 * ###
+
+`term_test() -> any()`
+
+
+
+### unregister/1 ###
+
+`unregister(Name) -> any()`
+
+Unregister a name.
+
+
+
+### wait_for_cleanup/2 * ###
+
+`wait_for_cleanup(Name, Retries) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_name.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_opts.md ---
+# [Module hb_opts.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_opts.erl)
+
+
+
+
+A module for interacting with local and global options inside
+HyperBEAM.
+
+
+
+## Description ##
+
+Options are set globally, but can also be overridden using an
+optional local `Opts` map argument. Many functions across the HyperBEAM
+environment accept an `Opts` argument, which can be used to customize
+behavior.
+
+Options set in an `Opts` map must _never_ change the behavior of a function
+that should otherwise be deterministic. Doing so may lead to loss of funds
+by the HyperBEAM node operator, as the results of their executions will be
+different than those of other node operators. If they are economically
+staked on the correctness of these results, they may experience punishments
+for non-verifiable behavior. Instead, if a local node setting makes
+deterministic behavior impossible, the caller should fail the execution
+with a refusal to execute.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### cached_os_env/2 * ###
+
+`cached_os_env(Key, DefaultValue) -> any()`
+
+Cache the result of os:getenv/1 in the process dictionary, as it never
+changes during the lifetime of a node.
+
+
+
+### check_required_opts/2 ###
+
+
+
+
+`KeyValuePairs`: A list of {Name, Value} pairs to check. `Opts`: The original options map to return if validation succeeds.
+
+returns: `{ok, Opts}` if all required options are present, or
+`{error, <<"Missing required parameters: ", MissingOptsStr/binary>>}`
+where `MissingOptsStr` is a comma-separated list of missing option names.
+
+Utility function to check for required options in a list.
+Takes a list of {Name, Value} pairs and returns:
+- {ok, Opts} when all required options are present (Value =/= not_found)
+- {error, ErrorMsg} with a message listing all missing options when any are not_found
+
+
+
+### config_lookup/2 * ###
+
+`config_lookup(Key, Default) -> any()`
+
+An abstraction for looking up configuration variables. In the future,
+this is the function that we will want to change to support a more dynamic
+configuration system.
+
+
+
+### default_message/0 ###
+
+`default_message() -> any()`
+
+The default configuration options of the hyperbeam node.
+
+
+
+### get/1 ###
+
+`get(Key) -> any()`
+
+Get an option from the global options, optionally overriding with a
+local `Opts` map if `prefer` or `only` is set to `local`. If the `only`
+option is provided in the `local` map, only keys found in the corresponding
+(`local` or `global`) map will be returned. This function also offers users
+a way to specify a default value to return if the option is not set.
+
+`prefer` defaults to `local`.
+
+
+
+### get/2 ###
+
+`get(Key, Default) -> any()`
+
+
+
+### get/3 ###
+
+`get(Key, Default, Opts) -> any()`
+
+
+
+### global_get/2 * ###
+
+`global_get(Key, Default) -> any()`
+
+Get an environment variable or configuration key.
+
+
+
+### load/1 ###
+
+`load(Path) -> any()`
+
+Parse a `flat@1.0` encoded file into a map, matching the types of the
+keys to those in the default message.
+
+
+
+### load_bin/1 ###
+
+`load_bin(Bin) -> any()`
+
+
+
+### mimic_default_types/2 ###
+
+`mimic_default_types(Map, Mode) -> any()`
+
+Mimic the types of the default message for a given map.
+
+
+
+### normalize_default/1 * ###
+
+`normalize_default(Default) -> any()`
+
+Get an option from environment variables, optionally consulting the
+`hb_features` of the node if a conditional default tuple is provided.
+
+
+
+### validate_node_history/1 ###
+
+`validate_node_history(Opts) -> any()`
+
+Validate that the node_history length is within an acceptable range.
+
+
+
+### validate_node_history/3 ###
+
+`validate_node_history(Opts, MinLength, MaxLength) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_opts.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_path.md ---
+# [Module hb_path.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_path.erl)
+
+
+
+
+This module provides utilities for manipulating the paths of a
+message: Its request path (referred to in messages as just the `Path`), and
+its HashPath.
+
+
+
+## Description ##
+
+A HashPath is a rolling Merkle list of the messages that have been applied
+in order to generate a given message. Because applied messages can
+themselves be the result of message applications with the AO-Core protocol,
+the HashPath can be thought of as the tree of messages that represent the
+history of a given message. The initial message on a HashPath is referred to
+by its ID and serves as its user-generated 'root'.
+
+Specifically, the HashPath can be generated by hashing the previous HashPath
+and the current message. This means that each message in the HashPath is
+dependent on all previous messages.
+
+```
+
+ Msg1.HashPath = Msg1.ID
+ Msg3.HashPath = Msg1.Hash(Msg1.HashPath, Msg2.ID)
+ Msg3.{...} = AO-Core.apply(Msg1, Msg2)
+ ...
+```
+
+A message's ID itself includes its HashPath, leading to the mixing of
+a Msg2's merkle list into the resulting Msg3's HashPath. This allows a single
+message to represent a history _tree_ of all of the messages that were
+applied to generate it -- rather than just a linear history.
+
+A message may also specify its own algorithm for generating its HashPath,
+which allows for custom logic to be used for representing the history of a
+message. When Msg2's are applied to a Msg1, the resulting Msg3's HashPath
+will be generated according to Msg1's algorithm choice.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### do_to_binary/1 * ###
+
+`do_to_binary(Path) -> any()`
+
+
+
+### from_message/2 ###
+
+`from_message(X1, Msg) -> any()`
+
+Extract the request path or hashpath from a message. We do not use
+AO-Core for this resolution because this function is called from inside AO-Core
+itself. This imparts a requirement: the message's device must store a
+viable hashpath and path in its Erlang map at all times, unless the message
+is directly from a user (in which case paths and hashpaths will not have
+been assigned yet).
+
+
+
+### hashpath/2 ###
+
+`hashpath(Bin, Opts) -> any()`
+
+Add an ID of a Msg2 to the HashPath of another message.
+
+
+
+### hashpath/3 ###
+
+`hashpath(Msg1, Msg2, Opts) -> any()`
+
+
+
+### hashpath/4 ###
+
+`hashpath(Msg1, Msg2, HashpathAlg, Opts) -> any()`
+
+
+
+### hashpath_alg/1 ###
+
+`hashpath_alg(Msg) -> any()`
+
+Get the hashpath function for a message from its HashPath-Alg.
+If no hashpath algorithm is specified, the protocol defaults to
+`sha-256-chain`.
+
+
+
+### hashpath_direct_msg2_test/0 * ###
+
+`hashpath_direct_msg2_test() -> any()`
+
+
+
+### hashpath_test/0 * ###
+
+`hashpath_test() -> any()`
+
+
+
+### hd/2 ###
+
+`hd(Msg2, Opts) -> any()`
+
+Extract the first key from a `Message2`'s `Path` field.
+Note: This function uses the `dev_message:get/2` function, rather than
+a generic call as the path should always be an explicit key in the message.
+
+
+
+### hd_test/0 * ###
+
+`hd_test() -> any()`
+
+
+
+### matches/2 ###
+
+`matches(Key1, Key2) -> any()`
+
+Check if two keys match.
+
+
+
+### multiple_hashpaths_test/0 * ###
+
+`multiple_hashpaths_test() -> any()`
+
+
+
+### normalize/1 ###
+
+`normalize(Path) -> any()`
+
+Normalize a path to a binary, removing the leading slash if present.
+
+
+
+### pop_from_message_test/0 * ###
+
+`pop_from_message_test() -> any()`
+
+
+
+### pop_from_path_list_test/0 * ###
+
+`pop_from_path_list_test() -> any()`
+
+
+
+### pop_request/2 ###
+
+`pop_request(Msg, Opts) -> any()`
+
+Pop the next element from a request path or path list.
+
+
+
+### priv_remaining/2 ###
+
+`priv_remaining(Msg, Opts) -> any()`
+
+Return the `Remaining-Path` of a message, from its hidden `AO-Core`
+key. Does not use the `get` or set `hb_private` functions, such that it
+can be safely used inside the main AO-Core resolve function.
+
+
+
+### priv_store_remaining/2 ###
+
+`priv_store_remaining(Msg, RemainingPath) -> any()`
+
+Store the remaining path of a message in its hidden `AO-Core` key.
+
+
+
+### push_request/2 ###
+
+`push_request(Msg, Path) -> any()`
+
+Add a message to the head (next to execute) of a request path.
+
+
+
+### queue_request/2 ###
+
+`queue_request(Msg, Path) -> any()`
+
+Queue a message at the back of a request path. `path` is the only
+key that we cannot use dev_message's `set/3` function for (as it expects
+the compute path to be there), so we use `maps:put/3` instead.
+
+
+
+### regex_matches/2 ###
+
+`regex_matches(Path1, Path2) -> any()`
+
+Check if two keys match using regex.
+
+
+
+### regex_matches_test/0 * ###
+
+`regex_matches_test() -> any()`
+
+
+
+### term_to_path_parts/1 ###
+
+`term_to_path_parts(Path) -> any()`
+
+Convert a term into an executable path. Supports binaries, lists, and
+atoms. Notably, it does not support strings as lists of characters.
+
+
+
+### term_to_path_parts/2 ###
+
+`term_to_path_parts(Binary, Opts) -> any()`
+
+
+
+### term_to_path_parts_test/0 * ###
+
+`term_to_path_parts_test() -> any()`
+
+
+
+### tl/2 ###
+
+`tl(Msg2, Opts) -> any()`
+
+Return the message without its first path element. Note that this
+is the only transformation in AO-Core that does _not_ make a log of its
+transformation. Subsequently, the message's IDs will not be verifiable
+after executing this transformation.
+This may or may not be the mainnet behavior we want.
+
+
+
+### tl_test/0 * ###
+
+`tl_test() -> any()`
+
+
+
+### to_binary/1 ###
+
+`to_binary(Path) -> any()`
+
+Convert a path of any form to a binary.
+
+
+
+### to_binary_test/0 * ###
+
+`to_binary_test() -> any()`
+
+
+
+### validate_path_transitions/2 * ###
+
+`validate_path_transitions(X, Opts) -> any()`
+
+
+
+### verify_hashpath/2 ###
+
+`verify_hashpath(Rest, Opts) -> any()`
+
+Verify the HashPath of a message, given a list of messages that
+represent its history.
+
+
+
+### verify_hashpath_test/0 * ###
+
+`verify_hashpath_test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_path.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_persistent.md ---
+# [Module hb_persistent.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_persistent.erl)
+
+
+
+
+Creates and manages long-lived AO-Core resolution processes.
+
+
+
+## Description ##
+
+These can be useful for situations where a message is large and expensive
+to serialize and deserialize, or when executions should be deliberately
+serialized to avoid parallel executions of the same computation. This
+module is called during the core `hb_ao` execution process, so care
+must be taken to avoid recursive spawns/loops.
+
+Built using the `pg` module, which is a distributed Erlang process group
+manager.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### await/4 ###
+
+`await(Worker, Msg1, Msg2, Opts) -> any()`
+
+If there was already an Erlang process handling this execution,
+we should register with them and wait for them to notify us of
+completion.
+
+
+
+### deduplicated_execution_test/0 * ###
+
+`deduplicated_execution_test() -> any()`
+
+Test merging and returning a value with a persistent worker.
+
+
+
+### default_await/5 ###
+
+`default_await(Worker, GroupName, Msg1, Msg2, Opts) -> any()`
+
+Default await function that waits for a resolution from a worker.
+
+
+
+### default_grouper/3 ###
+
+`default_grouper(Msg1, Msg2, Opts) -> any()`
+
+Create a group name from a Msg1 and Msg2 pair as a tuple.
+
+
+
+### default_worker/3 ###
+
+`default_worker(GroupName, Msg1, Opts) -> any()`
+
+A server function for handling persistent executions.
+
+
+
+### do_monitor/1 * ###
+
+`do_monitor(Group) -> any()`
+
+
+
+### do_monitor/2 * ###
+
+`do_monitor(Group, Last) -> any()`
+
+
+
+### find_execution/2 * ###
+
+`find_execution(Groupname, Opts) -> any()`
+
+Find a group with the given name.
+
+
+
+### find_or_register/3 ###
+
+`find_or_register(Msg1, Msg2, Opts) -> any()`
+
+Register the process to lead an execution if none is found, otherwise
+signal that we should await resolution.
+
+
+
+### find_or_register/4 * ###
+
+`find_or_register(GroupName, Msg1, Msg2, Opts) -> any()`
+
+
+
+### forward_work/2 ###
+
+`forward_work(NewPID, Opts) -> any()`
+
+Forward requests to a newly delegated execution process.
+
+
+
+### group/3 ###
+
+`group(Msg1, Msg2, Opts) -> any()`
+
+Calculate the group name for a Msg1 and Msg2 pair. Uses the Msg1's
+`group` function if it is found in the `info`, otherwise uses the default.
+
+
+
+### notify/4 ###
+
+`notify(GroupName, Msg2, Msg3, Opts) -> any()`
+
+Check our inbox for processes that are waiting for the resolution
+of this execution. Comes in two forms:
+1. Notify on group name alone.
+2. Notify on group name and Msg2.
+
+
+
+### persistent_worker_test/0 * ###
+
+`persistent_worker_test() -> any()`
+
+Test spawning a default persistent worker.
+
+
+
+### register_groupname/2 * ###
+
+`register_groupname(Groupname, Opts) -> any()`
+
+Register for performing an AO-Core resolution.
+
+
+
+### send_response/4 * ###
+
+`send_response(Listener, GroupName, Msg2, Msg3) -> any()`
+
+Helper function that wraps responding with a new Msg3.
+
+
+
+### spawn_after_execution_test/0 * ###
+
+`spawn_after_execution_test() -> any()`
+
+
+
+### spawn_test_client/2 * ###
+
+`spawn_test_client(Msg1, Msg2) -> any()`
+
+
+
+### spawn_test_client/3 * ###
+
+`spawn_test_client(Msg1, Msg2, Opts) -> any()`
+
+
+
+### start/0 * ###
+
+`start() -> any()`
+
+Ensure that the `pg` module is started.
+
+
+
+### start_monitor/0 ###
+
+`start_monitor() -> any()`
+
+Start a monitor that prints the current members of the group every
+n seconds.
+
+
+
+### start_monitor/1 ###
+
+`start_monitor(Group) -> any()`
+
+
+
+### start_worker/2 ###
+
+`start_worker(Msg, Opts) -> any()`
+
+Start a worker process that will hold a message in memory for
+future executions.
+
+
+
+### start_worker/3 ###
+
+`start_worker(GroupName, NotMsg, Opts) -> any()`
+
+
+
+### stop_monitor/1 ###
+
+`stop_monitor(PID) -> any()`
+
+
+
+### test_device/0 * ###
+
+`test_device() -> any()`
+
+
+
+### test_device/1 * ###
+
+`test_device(Base) -> any()`
+
+
+
+### unregister/3 * ###
+
+`unregister(Msg1, Msg2, Opts) -> any()`
+
+Unregister for being the leader on an AO-Core resolution.
+
+
+
+### unregister_groupname/2 * ###
+
+`unregister_groupname(Groupname, Opts) -> any()`
+
+
+
+### unregister_notify/4 ###
+
+`unregister_notify(GroupName, Msg2, Msg3, Opts) -> any()`
+
+Unregister as the leader for an execution and notify waiting processes.
+
+
+
+### wait_for_test_result/1 * ###
+
+`wait_for_test_result(Ref) -> any()`
+
+
+
+### worker_event/5 * ###
+
+`worker_event(Group, Data, Msg1, Msg2, Opts) -> any()`
+
+Log an event with the worker process. If we used the default grouper
+function, we should also include the Msg1 and Msg2 in the event. If we did not,
+we assume that the group name expresses enough information to identify the
+request.
+
+
+--- END OF FILE: docs/resources/source-code/hb_persistent.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_private.md ---
+# [Module hb_private.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_private.erl)
+
+
+
+
+This module provides basic helper utilities for managing the
+private element of a message, which can be used to store state that is
+not included in serialized messages, or those granted to users via the
+APIs.
+
+
+
+## Description ##
+
+Private elements of a message can be useful for storing state that
+is only relevant temporarily. For example, a device might use the private
+element to store a cache of values that are expensive to recompute. They
+should _not_ be used for encoding state that makes the execution of a
+device non-deterministic (unless you are sure you know what you are doing).
+
+The `set` and `get` functions of this module allow you to run those keys
+as AO-Core paths if you would like to have private `devices` in the
+messages non-public zone.
+
+See `hb_ao` for more information about the AO-Core protocol
+and private elements of messages.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### from_message/1 ###
+
+`from_message(Msg) -> any()`
+
+Return the `private` key from a message. If the key does not exist, an
+empty map is returned.
+
+
+
+### get/3 ###
+
+`get(Key, Msg, Opts) -> any()`
+
+Helper for getting a value from the private element of a message. Uses
+AO-Core resolve under-the-hood, removing the private specifier from the
+path if it exists.
+
+
+
+### get/4 ###
+
+`get(InputPath, Msg, Default, Opts) -> any()`
+
+
+
+### get_private_key_test/0 * ###
+
+`get_private_key_test() -> any()`
+
+
+
+### is_private/1 ###
+
+`is_private(Key) -> any()`
+
+Check if a key is private.
+
+
+
+### priv_ao_opts/1 * ###
+
+`priv_ao_opts(Opts) -> any()`
+
+The opts map that should be used when resolving paths against the
+private element of a message.
+
+
+
+### remove_private_specifier/1 * ###
+
+`remove_private_specifier(InputPath) -> any()`
+
+Remove the first key from the path if it is a private specifier.
+
+
+
+### reset/1 ###
+
+`reset(Msg) -> any()`
+
+Unset all of the private keys in a message.
+
+
+
+### set/3 ###
+
+`set(Msg, PrivMap, Opts) -> any()`
+
+
+
+### set/4 ###
+
+`set(Msg, InputPath, Value, Opts) -> any()`
+
+Helper function for setting a key in the private element of a message.
+
+
+
+### set_priv/2 ###
+
+`set_priv(Msg, PrivMap) -> any()`
+
+Helper function for setting the complete private element of a message.
+
+
+
+### set_private_test/0 * ###
+
+`set_private_test() -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb_private.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_process_monitor.md ---
+# [Module hb_process_monitor.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_process_monitor.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### add_prefix/2 * ###
+
+`add_prefix(X1, Path) -> any()`
+
+Add the directory prefix to a path.
+
+
+
+### list/2 ###
+
+`list(Opts, Path) -> any()`
+
+List contents of a directory in the store.
+
+
+
+### make_group/2 ###
+
+`make_group(Opts, Path) -> any()`
+
+Create a directory (group) in the store.
+
+
+
+### make_link/3 ###
+
+`make_link(Opts, Link, New) -> any()`
+
+Create a symlink, handling the case where the link would point to itself.
+
+
+
+### read/1 * ###
+
+`read(Path) -> any()`
+
+
+
+### read/2 ###
+
+`read(Opts, Key) -> any()`
+
+Read a key from the store, following symlinks as needed.
+
+
+
+### remove_prefix/2 * ###
+
+`remove_prefix(X1, Path) -> any()`
+
+Remove the directory prefix from a path.
+
+
+
+### reset/1 ###
+
+`reset(X1) -> any()`
+
+Reset the store by completely removing its directory and recreating it.
+
+
+
+### resolve/2 ###
+
+`resolve(Opts, RawPath) -> any()`
+
+Replace links in a path successively, returning the final path.
+Each element of the path is resolved in turn, with the result of each
+resolution becoming the prefix for the next resolution. This allows
+paths to resolve across many links. For example, a structure as follows:
+
+/a/b/c: "Not the right data"
+/a/b -> /a/alt-b
+/a/alt-b/c: "Correct data"
+
+will resolve "a/b/c" to "Correct data".
+
+
+
+### resolve/3 * ###
+
+`resolve(Opts, CurrPath, Rest) -> any()`
+
+
+
+### scope/1 ###
+
+`scope(X1) -> any()`
+
+The file-based store is always local, for now. In the future, we may
+want to allow that an FS store is shared across a cluster and thus remote.
+
+
+
+### start/1 ###
+
+`start(X1) -> any()`
+
+Initialize the file system store with the given data directory.
+
+
+
+### stop/1 ###
+
+`stop(X1) -> any()`
+
+Stop the file system store. Currently a no-op.
+
+
+
+### type/1 * ###
+
+`type(Path) -> any()`
+
+
+
+### type/2 ###
+
+`type(Opts, Key) -> any()`
+
+Determine the type of a key in the store.
+
+
+
+### write/3 ###
+
+`write(Opts, PathComponents, Value) -> any()`
+
+Write a value to the specified path in the store.
+
+
+--- END OF FILE: docs/resources/source-code/hb_store_fs.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_store_gateway.md ---
+# [Module hb_store_gateway.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_gateway.erl)
+
+
+
+
+A store module that reads data from the node's Arweave gateway and
+GraphQL routes, additionally including store-specific routes.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### cache_read_message_test/0 * ###
+
+`cache_read_message_test() -> any()`
+
+Ensure that saving to the gateway store works.
+
+
+
+### external_http_access_test/0 * ###
+
+`external_http_access_test() -> any()`
+
+Test that the default node config allows for data to be accessed.
+
+
+
+### graphql_as_store_test_/0 * ###
+
+`graphql_as_store_test_() -> any()`
+
+Store is accessible via the default options.
+
+
+
+### graphql_from_cache_test/0 * ###
+
+`graphql_from_cache_test() -> any()`
+
+Stored messages are accessible via `hb_cache` accesses.
+
+
+
+### list/2 ###
+
+`list(StoreOpts, Key) -> any()`
+
+
+
+### manual_local_cache_test/0 * ###
+
+`manual_local_cache_test() -> any()`
+
+
+
+### maybe_cache/2 * ###
+
+`maybe_cache(StoreOpts, Data) -> any()`
+
+Cache the data if the cache is enabled. The `store` option may either
+be `false` to disable local caching, or a store definition to use as the
+cache.
+
+
+
+### read/2 ###
+
+`read(StoreOpts, Key) -> any()`
+
+Read the data at the given key from the GraphQL route. Will only attempt
+to read the data if the key is an ID.
+
+
+
+### resolve/2 ###
+
+`resolve(X1, Key) -> any()`
+
+
+
+### resolve_on_gateway_test_/0 * ###
+
+`resolve_on_gateway_test_() -> any()`
+
+
+
+### scope/1 ###
+
+`scope(X1) -> any()`
+
+The scope of a GraphQL store is always remote, due to performance.
+
+
+
+### specific_route_test/0 * ###
+
+`specific_route_test() -> any()`
+
+Routes can be specified in the options, overriding the default routes.
+We test this by inversion: If the above cache read test works, then we know
+that the default routes allow access to the item. If the test below were to
+produce the same result, despite an empty 'only' route list, then we would
+know that the module is not respecting the route list.
+
+
+
+### store_opts_test/0 * ###
+
+`store_opts_test() -> any()`
+
+Test to verify store opts are being set for Data-Protocol ao.
+
+
+
+### type/2 ###
+
+`type(StoreOpts, Key) -> any()`
+
+Get the type of the data at the given key. We potentially cache the
+result, so that we don't have to read the data from the GraphQL route
+multiple times.
+
+
+--- END OF FILE: docs/resources/source-code/hb_store_gateway.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_store_remote_node.md ---
+# [Module hb_store_remote_node.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_remote_node.erl)
+
+
+
+
+A store module that reads data from another AO node.
+
+
+
+## Description ##
+Notably, this store only provides the _read_ side of the store interface.
+The write side could be added, returning a commitment that the data has
+been written to the remote node. In that case, the node would probably want
+to upload it to an Arweave bundler to ensure persistence, too.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### make_link/3 ###
+
+`make_link(Opts, Source, Destination) -> any()`
+
+Link a source to a destination in the remote node.
+
+Constructs an HTTP POST link request. If a wallet is provided,
+the message is signed. Returns {ok, Path} on HTTP 200, or
+{error, Reason} on failure.
+
+
+
+### read/2 ###
+
+`read(Opts, Key) -> any()`
+
+Read a key from the remote node.
+
+Makes an HTTP GET request to the remote node and returns the
+committed message.
+
+
+
+### read_test/0 * ###
+
+`read_test() -> any()`
+
+Test that we can create a store, write a random message to it, then
+start a remote node with that store, and read the message from it.
+
+
+
+### resolve/2 ###
+
+`resolve(X1, Key) -> any()`
+
+Resolve a key path in the remote store.
+
+For the remote node store, the key is returned as-is.
+
+
+
+### scope/1 ###
+
+`scope(Arg) -> any()`
+
+Return the scope of this store.
+
+For the remote store, the scope is always `remote`.
+
+
+
+### type/2 ###
+
+`type(Opts, Key) -> any()`
+
+Determine the type of value at a given key.
+
+Remote nodes support only the `simple` type or `not_found`.
+
+
+
+### write/3 ###
+
+`write(Opts, Key, Value) -> any()`
+
+Write a key to the remote node.
+
+Constructs an HTTP POST write request. If a wallet is provided,
+the message is signed. Returns {ok, Path} on HTTP 200, or
+{error, Reason} on failure.
+
+
+--- END OF FILE: docs/resources/source-code/hb_store_remote_node.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_store_rocksdb.md ---
+# [Module hb_store_rocksdb.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_rocksdb.erl)
+
+
+
+
+A process wrapper over rocksdb storage.
+
+__Behaviours:__ [`gen_server`](gen_server.md), [`hb_store`](hb_store.md).
+
+
+
+## Description ##
+
+Replicates functionality of the
+hb_fs_store module.
+
+Encodes the item types with the help of prefixes, see `encode_value/2`
+and `decode_value/1`
+
+
+## Data Types ##
+
+
+
+
+### key() ###
+
+
+
+
+Write given Key and Value to the database
+
+
+
+### enabled/0 ###
+
+`enabled() -> any()`
+
+Returns whether the RocksDB store is enabled.
+
+
+
+### encode_value/2 * ###
+
+
+
+
+
+
+### ensure_dir/2 * ###
+
+`ensure_dir(DBHandle, BaseDir) -> any()`
+
+
+
+### ensure_dir/3 * ###
+
+`ensure_dir(DBHandle, CurrentPath, Rest) -> any()`
+
+
+
+### ensure_list/1 * ###
+
+`ensure_list(Value) -> any()`
+
+Ensure that the given filename is a list, not a binary.
+
+
+
+### handle_call/3 ###
+
+`handle_call(Request, From, State) -> any()`
+
+
+
+### handle_cast/2 ###
+
+`handle_cast(Request, State) -> any()`
+
+
+
+### handle_info/2 ###
+
+`handle_info(Info, State) -> any()`
+
+
+
+### init/1 ###
+
+`init(Dir) -> any()`
+
+
+
+### join/1 * ###
+
+`join(Key) -> any()`
+
+
+
+### list/0 ###
+
+`list() -> any()`
+
+List all items registered in rocksdb store. Should be used only
+for testing/debugging, as the underlying operation is doing full traversal
+on the KV storage, and is slow.
+
+
+
+### list/2 ###
+
+
+list(Opts, Path) -> Result
+
+
+
Opts = any()
Path = any()
Result = {ok, [string()]} | {error, term()}
+
+Returns the full list of items stored under the given path, where the paths
+of child items are relative to the path of the parent item. (Same as in `hb_store_fs`.)
+
+
+
+### make_group/2 ###
+
+
+make_group(Opts, Key) -> Result
+
+
+
Opts = any()
Key = binary()
Result = ok | {error, already_added}
+
+Creates group under the given path.
+
+
+
+### make_link/3 ###
+
+
+make_link(Opts::any(), Key1::key(), New::key()) -> ok
+
+
+Write given Key and Value to the database
+
+
+--- END OF FILE: docs/resources/source-code/hb_store_rocksdb.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_store.md ---
+# [Module hb_store.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### add_path/2 ###
+
+`add_path(Path1, Path2) -> any()`
+
+Add two path components together. If no store implements the add_path
+function, we concatenate the paths.
+
+
+
+### add_path/3 ###
+
+`add_path(Store, Path1, Path2) -> any()`
+
+
+
+### behavior_info/1 ###
+
+`behavior_info(X1) -> any()`
+
+
+
+### call_all/3 * ###
+
+`call_all(X, Function, Args) -> any()`
+
+Call a function on all modules in the store.
+
+
+
+### call_function/3 * ###
+
+`call_function(X, Function, Args) -> any()`
+
+Call a function on the first store module that succeeds. Returns its
+result, or no_viable_store if none of the stores succeed.
+
+
+
+### filter/2 ###
+
+`filter(Module, Filter) -> any()`
+
+Takes a store object and a filter function or match spec, returning a
+new store object with only the modules that match the filter. The filter
+function takes 2 arguments: the scope and the options. It calls the store's
+scope function to get the scope of the module.
+
+
+
+### generate_test_suite/1 ###
+
+`generate_test_suite(Suite) -> any()`
+
+
+
+### generate_test_suite/2 ###
+
+`generate_test_suite(Suite, Stores) -> any()`
+
+
+
+### get_store_scope/1 * ###
+
+`get_store_scope(Store) -> any()`
+
+Ask a store for its own scope. If it doesn't have one, return the
+default scope (local).
+
+
+
+### hierarchical_path_resolution_test/1 * ###
+
+`hierarchical_path_resolution_test(Opts) -> any()`
+
+Ensure that we can resolve links through a directory.
+
+
+
+### join/1 ###
+
+`join(Path) -> any()`
+
+Join a list of path components together.
+
+
+
+### list/2 ###
+
+`list(Modules, Path) -> any()`
+
+List the keys in a group in the store. Use only in debugging.
+The hyperbeam model assumes that stores are built as efficient hash-based
+structures, so this is likely to be very slow for most stores.
+
+
+
+### make_group/2 ###
+
+`make_group(Modules, Path) -> any()`
+
+Make a group in the store. A group can be seen as a namespace or
+'directory' in a filesystem.
+
+
+
+### make_link/3 ###
+
+`make_link(Modules, Existing, New) -> any()`
+
+Make a link from one path to another in the store.
+
+
+
+### path/1 ###
+
+`path(Path) -> any()`
+
+Create a path from a list of path components. If no store implements
+the path function, we return the path with the 'default' transformation (id).
+
+
+
+### path/2 ###
+
+`path(X1, Path) -> any()`
+
+
+
+### read/2 ###
+
+`read(Modules, Key) -> any()`
+
+Read a key from the store.
+
+
+
+### reset/1 ###
+
+`reset(Modules) -> any()`
+
+Delete all of the keys in a store. Should be used with extreme
+caution. Lost data can lose money in many/most of hyperbeam's use cases.
+
+
+
+### resolve/2 ###
+
+`resolve(Modules, Path) -> any()`
+
+Follow links through the store to resolve a path to its ultimate target.
+
+
+
+### resursive_path_resolution_test/1 * ###
+
+`resursive_path_resolution_test(Opts) -> any()`
+
+Ensure that we can resolve links recursively.
+
+
+
+### scope/2 ###
+
+`scope(Scope, Opts) -> any()`
+
+Limit the store scope to only a specific (set of) option(s).
+Takes either an Opts message or store, and either a single scope or a list
+of scopes.
+
+
+
+### simple_path_resolution_test/1 * ###
+
+`simple_path_resolution_test(Opts) -> any()`
+
+Test path resolution dynamics.
+
+
+
+### sort/2 ###
+
+`sort(Stores, PreferenceOrder) -> any()`
+
+Order a store by a preference of its scopes. This is useful for making
+sure that faster (or perhaps cheaper) stores are used first. If a list is
+provided, it will be used as a preference order. If a map is provided,
+scopes will be ordered by the scores in the map. Any unknown scopes will
+default to a score of 0.
+
+
+
+### start/1 ###
+
+`start(Modules) -> any()`
+
+
+
+### stop/1 ###
+
+`stop(Modules) -> any()`
+
+
+
+### store_suite_test_/0 * ###
+
+`store_suite_test_() -> any()`
+
+
+
+### test_stores/0 ###
+
+`test_stores() -> any()`
+
+
+
+### type/2 ###
+
+`type(Modules, Path) -> any()`
+
+Get the type of element of a given path in the store. This can be
+a performance killer if the store is remote etc. Use only when necessary.
+
+
+
+### write/3 ###
+
+`write(Modules, Key, Value) -> any()`
+
+Write a key with a value to the store.
+
+
+--- END OF FILE: docs/resources/source-code/hb_store.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_structured_fields.md ---
+# [Module hb_structured_fields.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_structured_fields.erl)
+
+
+
+
+A module for parsing and converting between Erlang and HTTP Structured
+Fields, as described in RFC-9651.
+
+
+
+## Description ##
+
+The mapping between Erlang and structured header types is as follows:
+
+List: list()
+Inner list: {list, [item()], params()}
+Dictionary: [{binary(), item()}]
+There is no distinction between empty list and empty dictionary.
+Item with parameters: {item, bare_item(), params()}
+Parameters: [{binary(), bare_item()}]
+Bare item: one bare_item() that can be of type:
+Integer: integer()
+Decimal: {decimal, {integer(), integer()}}
+String: {string, binary()}
+Token: {token, binary()}
+Byte sequence: {binary, binary()}
+Boolean: boolean()
+
+
+## Data Types ##
+
+
+
+
+### sh_bare_item() ###
+
+
+
Run each test in a suite with each set of options.
+
+
+
+
+## Function Details ##
+
+
+
+### run/4 ###
+
+`run(Name, OptsName, Suite, OptsList) -> any()`
+
+
+
+### satisfies_requirements/1 * ###
+
+`satisfies_requirements(Requirements) -> any()`
+
+Determine if the environment satisfies the given test requirements.
+Requirements is a list of atoms, each corresponding to a module that must
+return true if it exposes an `enabled/0` function.
+
+
+
+### suite_with_opts/2 ###
+
+`suite_with_opts(Suite, OptsList) -> any()`
+
+Run each test in a suite with each set of options. Start and reset
+the store(s) for each test. Expects suites to be a list of tuples with
+the test name, description, and test function.
+The list of `Opts` should contain maps with the `name` and `opts` keys.
+Each element may also contain a `skip` key with a list of test names to skip.
+They can also contain a `desc` key with a description of the options.
+
+
+--- END OF FILE: docs/resources/source-code/hb_test_utils.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_tracer.md ---
+# [Module hb_tracer.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_tracer.erl)
+
+
+
+
+A module for tracing the flow of requests through the system.
+
+
+
+## Description ##
+This allows for tracking the lifecycle of a request from HTTP receipt through processing and response.
+
+## Function Index ##
+
+
+
Return a random element from a list, weighted by the values in the list.
+
+
+
+
+## Function Details ##
+
+
+
+### add_commas/1 * ###
+
+`add_commas(Rest) -> any()`
+
+
+
+### all_hb_modules/0 ###
+
+`all_hb_modules() -> any()`
+
+Get all loaded modules that are loaded and are part of HyperBEAM.
+
+
+
+### atom/1 ###
+
+`atom(Str) -> any()`
+
+Coerce a string to an atom.
+
+
+
+### bin/1 ###
+
+`bin(Value) -> any()`
+
+Coerce a value to a binary.
+
+
+
+### count/2 ###
+
+`count(Item, List) -> any()`
+
+
+
+### debug_fmt/1 ###
+
+`debug_fmt(X) -> any()`
+
+Convert a term to a string for debugging print purposes.
+
+
+
+### debug_fmt/2 ###
+
+`debug_fmt(X, Indent) -> any()`
+
+
+
+### debug_print/4 ###
+
+`debug_print(X, Mod, Func, LineNum) -> any()`
+
+Print a message to the standard error stream, prefixed by the amount
+of time that has elapsed since the last call to this function.
+
+
+
+### decode/1 ###
+
+`decode(Input) -> any()`
+
+Try to decode a URL safe base64 into a binary or throw an error when
+invalid.
+
+
+
+### deep_merge/2 ###
+
+`deep_merge(Map1, Map2) -> any()`
+
+Deep merge two maps, recursively merging nested maps.
+
+
+
+### do_debug_fmt/2 * ###
+
+`do_debug_fmt(Wallet, Indent) -> any()`
+
+
+
+### do_to_lines/1 * ###
+
+`do_to_lines(In) -> any()`
+
+
+
+### encode/1 ###
+
+`encode(Bin) -> any()`
+
+Encode a binary to URL safe base64 binary string.
+
+
+
+### eunit_print/2 ###
+
+`eunit_print(FmtStr, FmtArgs) -> any()`
+
+Format and print an indented string to standard error.
+
+
+
+### find_value/2 ###
+
+`find_value(Key, List) -> any()`
+
+Find the value associated with a key in a parsed JSON structure list.
+
+
+
+### find_value/3 ###
+
+`find_value(Key, Map, Default) -> any()`
+
+
+
+### float/1 ###
+
+`float(Str) -> any()`
+
+Coerce a string to a float.
+
+
+
+### format_address/2 * ###
+
+`format_address(Wallet, Indent) -> any()`
+
+If the user attempts to print a wallet, format it as an address.
+
+
+
+### format_binary/1 ###
+
+`format_binary(Bin) -> any()`
+
+Format a binary as a short string suitable for printing.
+
+
+
+### format_debug_trace/3 * ###
+
+`format_debug_trace(Mod, Func, Line) -> any()`
+
+Generate the appropriate level of trace for a given call.
+
+
+
+### format_indented/2 ###
+
+`format_indented(Str, Indent) -> any()`
+
+Format a string with an indentation level.
+
+
+
+### format_indented/3 ###
+
+`format_indented(RawStr, Fmt, Ind) -> any()`
+
+
+
+### format_maybe_multiline/2 ###
+
+`format_maybe_multiline(X, Indent) -> any()`
+
+Format a map as either a single line or a multi-line string depending
+on the value of the `debug_print_map_line_threshold` runtime option.
+
+
+
+### format_trace/1 ###
+
+`format_trace(Stack) -> any()`
+
+Format a stack trace as a list of strings, one for each stack frame.
+Each stack frame is formatted if it matches the `stack_print_prefixes`
+option. At the first frame that does not match a prefix in the
+`stack_print_prefixes` option, the rest of the stack is not formatted.
+
+
+
+### format_trace/2 * ###
+
+`format_trace(Rest, Prefixes) -> any()`
+
+
+
+### format_trace_short/1 ###
+
+`format_trace_short(Trace) -> any()`
+
+Format a trace to a short string.
+
+
+
+### format_trace_short/4 * ###
+
+`format_trace_short(Max, Latch, Trace, Prefixes) -> any()`
+
+
+
+### format_tuple/2 * ###
+
+`format_tuple(Tuple, Indent) -> any()`
+
+Helper function to format tuples with arity greater than 2.
+
+
+
+### get_trace/0 * ###
+
+`get_trace() -> any()`
+
+Get the trace of the current process.
+
+
+
+### hd/1 ###
+
+`hd(Message) -> any()`
+
+Get the first element (the lowest integer key >= 1) of a numbered map.
+Optionally, it takes a specifier of whether to return the key or the value,
+as well as a standard map of HyperBEAM runtime options.
+
+
+
+### hd/2 ###
+
+`hd(Message, ReturnType) -> any()`
+
+
+
+### hd/3 ###
+
+`hd(Message, ReturnType, Opts) -> any()`
+
+
+
+### hd/5 * ###
+
+`hd(Map, Rest, Index, ReturnType, Opts) -> any()`
+
+
+
+### human_id/1 ###
+
+`human_id(Bin) -> any()`
+
+Convert a native binary ID to a human readable ID. If the ID is already
+a human readable ID, it is returned as is. If it is an Ethereum address, it
+is returned as is.
+
+
+
+### human_int/1 ###
+
+`human_int(Int) -> any()`
+
+Add `,` characters to a number every 3 digits to make it human readable.
+
+
+
+### id/1 ###
+
+`id(Item) -> any()`
+
+Return the human-readable form of an ID of a message when given either
+a message explicitly, raw encoded ID, or an Erlang Arweave `tx` record.
+
+
+
+### id/2 ###
+
+`id(TX, Type) -> any()`
+
+
+
+### int/1 ###
+
+`int(Str) -> any()`
+
+Coerce a string to an integer.
+
+
+
+### is_hb_module/1 ###
+
+`is_hb_module(Atom) -> any()`
+
+Is the given module part of HyperBEAM?
+
+
+
+### is_hb_module/2 ###
+
+`is_hb_module(Atom, Prefixes) -> any()`
+
+
+
+### is_human_binary/1 * ###
+
+`is_human_binary(Bin) -> any()`
+
+Determine whether a binary is human-readable.
+
+
+
+### is_ordered_list/1 ###
+
+`is_ordered_list(Msg) -> any()`
+
+Determine if the message given is an ordered list, starting from 1.
+
+
+
+### is_ordered_list/2 * ###
+
+`is_ordered_list(N, Msg) -> any()`
+
+
+
+### is_string_list/1 ###
+
+`is_string_list(MaybeString) -> any()`
+
+Is the given term a string list?
+
+
+
+### key_to_atom/2 ###
+
+`key_to_atom(Key, Mode) -> any()`
+
+Convert keys in a map to atoms, lowering `-` to `_`.
+
+
+
+### list/1 ###
+
+`list(Value) -> any()`
+
+Coerce a value to a list.
+
+
+
+### list_to_numbered_map/1 ###
+
+`list_to_numbered_map(List) -> any()`
+
+Convert a list of elements to a map with numbered keys.
+
+
+
+### maybe_throw/2 ###
+
+`maybe_throw(Val, Opts) -> any()`
+
+Throw an exception if the Opts map has an `error_strategy` key with the
+value `throw`. Otherwise, return the value.
+
+
+
+### mean/1 ###
+
+`mean(List) -> any()`
+
+
+
+### message_to_ordered_list/1 ###
+
+`message_to_ordered_list(Message) -> any()`
+
+Take a message with numbered keys and convert it to a list of tuples
+with the associated key as an integer and a value. Optionally, it takes a
+standard map of HyperBEAM runtime options.
+
+
+
+### message_to_ordered_list/2 ###
+
+`message_to_ordered_list(Message, Opts) -> any()`
+
+
+
+### message_to_ordered_list/4 * ###
+
+`message_to_ordered_list(Message, Keys, Key, Opts) -> any()`
+
+
+
+### native_id/1 ###
+
+`native_id(Bin) -> any()`
+
+Convert a human readable ID to a native binary ID. If the ID is already
+a native binary ID, it is returned as is.
+
+
+
+### normalize_trace/1 * ###
+
+`normalize_trace(Rest) -> any()`
+
+Remove all calls from this module from the top of a trace.
+
+
+
+### number/1 ###
+
+`number(List) -> any()`
+
+Label a list of elements with a number.
+
+
+
+### ok/1 ###
+
+`ok(Value) -> any()`
+
+Unwrap a tuple of the form `{ok, Value}`, or throw/return, depending on
+the value of the `error_strategy` option.
+
+
+
+### ok/2 ###
+
+`ok(Other, Opts) -> any()`
+
+
+
+### pick_weighted/2 * ###
+
+`pick_weighted(Rest, Remaining) -> any()`
+
+
+
+### print_trace/3 * ###
+
+`print_trace(Stack, Label, CallerInfo) -> any()`
+
+
+
+### print_trace/4 ###
+
+`print_trace(Stack, CallMod, CallFunc, CallLine) -> any()`
+
+Print the trace of the current stack, up to the first non-hyperbeam
+module. Prints each stack frame on a new line, until it finds a frame that
+does not start with a prefix in the `stack_print_prefixes` hb_opts.
+Optionally, you may call this function with a custom label and caller info,
+which will be used instead of the default.
+
+
+
+### print_trace_short/4 ###
+
+`print_trace_short(Trace, Mod, Func, Line) -> any()`
+
+Print a trace to the standard error stream.
+
+
+
+### remove_common/2 ###
+
+`remove_common(MainStr, SubStr) -> any()`
+
+Remove the common prefix from two strings, returning the remainder of the
+first string. This function also coerces lists to binaries where appropriate,
+returning the type of the first argument.
+
+
+
+### remove_trailing_noise/1 * ###
+
+`remove_trailing_noise(Str) -> any()`
+
+
+
+### remove_trailing_noise/2 ###
+
+`remove_trailing_noise(Str, Noise) -> any()`
+
+
+
+### safe_decode/1 ###
+
+`safe_decode(E) -> any()`
+
+Safely decode a URL safe base64 into a binary returning an ok or error
+tuple.
+
+
+
+### safe_encode/1 ###
+
+`safe_encode(Bin) -> any()`
+
+Safely encode a binary to URL safe base64.
+
+
+
+### short_id/1 ###
+
+`short_id(Bin) -> any()`
+
+Return a short ID for the different types of IDs used in AO-Core.
+
+
+
+### shuffle/1 * ###
+
+`shuffle(List) -> any()`
+
+Shuffle a list.
+
+
+
+### stddev/1 ###
+
+`stddev(List) -> any()`
+
+
+
+### to_hex/1 ###
+
+`to_hex(Bin) -> any()`
+
+Convert a binary to a hex string. Do not use this for anything other than
+generating a lower-case, non-special character id. It should not become part of
+the core protocol. We use b64u for efficient encoding.
+
+
+
+### to_lines/1 * ###
+
+`to_lines(Elems) -> any()`
+
+
+
+### to_lower/1 ###
+
+`to_lower(Str) -> any()`
+
+Convert a binary to a lowercase.
+
+
+
+### to_sorted_keys/1 ###
+
+`to_sorted_keys(Msg) -> any()`
+
+Given a map or KVList, return a deterministically ordered list of its keys.
+
+
+
+### to_sorted_list/1 ###
+
+`to_sorted_list(Msg) -> any()`
+
+Given a map or KVList, return a deterministically sorted list of its
+key-value pairs.
+
+
+
+### trace_macro_helper/5 ###
+
+`trace_macro_helper(Fun, X2, Mod, Func, Line) -> any()`
+
+Utility function to help macro `?trace/0` remove the first frame of the
+stack trace.
+
+
+
+### until/1 ###
+
+`until(Condition) -> any()`
+
+Utility function to wait for a condition to be true. Optionally,
+you can pass a function that will be called with the current count of
+iterations, returning an integer that will be added to the count. Once the
+condition is true, the function will return the count.
+
+
+
+### until/2 ###
+
+`until(Condition, Count) -> any()`
+
+
+
+### until/3 ###
+
+`until(Condition, Fun, Count) -> any()`
+
+
+
+### variance/1 ###
+
+`variance(List) -> any()`
+
+
+
+### weighted_random/1 ###
+
+`weighted_random(List) -> any()`
+
+Return a random element from a list, weighted by the values in the list.
+
+
+--- END OF FILE: docs/resources/source-code/hb_util.md ---
+
+--- START OF FILE: docs/resources/source-code/hb_volume.md ---
+# [Module hb_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_volume.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
+
+--- END OF FILE: docs/resources/source-code/hb_volume.md ---
+
+--- START OF FILE: docs/resources/source-code/hb.md ---
+# [Module hb.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb.erl)
+
+
+
+
+Hyperbeam is a decentralized node implementing the AO-Core protocol
+on top of Arweave.
+
+
+
+## Description ##
+
+This protocol offers a computation layer for executing arbitrary logic on
+top of the network's data.
+
+Arweave is built to offer a robust, permanent storage layer for static data
+over time. It can be seen as a globally distributed key-value store that
+allows users to lookup IDs to retrieve data at any point in time:
+
+`Arweave(ID) => Message`
+
+Hyperbeam adds another layer of functionality on top of Arweave's protocol:
+Allowing users to store and retrieve not only arbitrary bytes, but also to
+perform execution of computation upon that data:
+
+`Hyperbeam(Message1, Message2) => Message3`
+
+When Hyperbeam executes a message, it will return a new message containing
+the result of that execution, as well as signed commitments of its
+correctness. If the computation that is executed is deterministic, recipients
+of the new message are able to verify that the computation was performed
+correctly. The new message may be stored back to Arweave if desired,
+forming a permanent, verifiable, and decentralized log of computation.
+
+The mechanisms described above form the basis of a decentralized and
+verifiable compute engine without any relevant protocol-enforced
+scalability limits. It is an implementation of a global, shared
+supercomputer.
+
+Hyperbeam can be used for an extremely large variety of applications, from
+serving static Arweave data with signed commitments of correctness, to
+executing smart contracts that have _built-in_ HTTP APIs. The Hyperbeam
+node implementation implements AO, an Actor-Oriented process-based
+environment for orchestrating computation over Arweave messages in order to
+facilitate the execution of more traditional, consensus-based smart
+contracts.
+
+The core abstractions of the Hyperbeam node are broadly as follows:
+
+1. The `hb` and `hb_opts` modules manage the node's configuration,
+environment variables, and debugging tools.
+
+2. The `hb_http` and `hb_http_server` modules manage all HTTP-related
+functionality. `hb_http_server` handles turning received HTTP requests
+into messages and applying those messages with the appropriate devices.
+`hb_http` handles making requests and responding with messages. `cowboy`
+is used to implement the underlying HTTP server.
+
+3. `hb_ao` implements the computation logic of the node: A mechanism
+for resolving messages to other messages, via the application of logic
+implemented in `devices`. `hb_ao` also manages the loading of Erlang
+modules for each device into the node's environment. There are many
+different default devices implemented in the hyperbeam node, using the
+namespace `dev_*`. Some of the critical components are:
+
+- `dev_message`: The default handler for all messages that do not
+specify their own device. The message device is also used to resolve
+keys that are not implemented by the device specified in a message,
+unless otherwise signalled.
+
+- `dev_stack`: The device responsible for creating and executing stacks
+of other devices on messages that request it. There are many uses for
+this device, one of which is the resolution of AO processes.
+
+- `dev_p4`: The device responsible for managing payments for the services
+provided by the node.
+
+4. `hb_store`, `hb_cache` and the store implementations forms a layered
+system for managing the node's access to persistent storage. `hb_cache`
+is used as a resolution mechanism for reading and writing messages, while
+`hb_store` provides an abstraction over the underlying persistent key-value
+byte storage mechanisms. Example `hb_store` mechanisms can be found in
+`hb_store_fs` and `hb_store_remote_node`.
+
+5. `ar_*` modules implement functionality related to the base-layer Arweave
+protocol and are largely unchanged from their counterparts in the Arweave
+node codebase presently maintained by the Digital History Association
+(@dha-team/Arweave).
+
+You can find documentation of a similar form to this note in each of the core
+modules of the hyperbeam node.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### address/0 ###
+
+`address() -> any()`
+
+Get the address of a wallet. Defaults to the address of the wallet
+specified by the `priv_key_location` configuration key. It can also take a
+wallet tuple as an argument.
+
+
+
+### address/1 * ###
+
+`address(Wallet) -> any()`
+
+
+
+### benchmark/2 ###
+
+`benchmark(Fun, TLen) -> any()`
+
+Run a function as many times as possible in a given amount of time.
+
+
+
+### benchmark/3 ###
+
+`benchmark(Fun, TLen, Procs) -> any()`
+
+Run multiple instances of a function in parallel for a given amount of time.
+
+
+
+### build/0 ###
+
+`build() -> any()`
+
+Utility function to hot-recompile and load the hyperbeam environment.
+
+
+
+### debug_wait/4 ###
+
+`debug_wait(T, Mod, Func, Line) -> any()`
+
+Utility function to wait for a given amount of time, printing a debug
+message to the console first.
+
+
+
+### do_start_simple_pay/1 * ###
+
+`do_start_simple_pay(Opts) -> any()`
+
+
+
+### init/0 ###
+
+`init() -> any()`
+
+Initialize system-wide settings for the hyperbeam node.
+
+
+
+### no_prod/3 ###
+
+`no_prod(X, Mod, Line) -> any()`
+
+Utility function to throw an error if the current mode is prod and
+non-prod ready code is being executed. You can find these in the codebase
+by looking for ?NO_PROD calls.
+
+
+
+### now/0 ###
+
+`now() -> any()`
+
+Utility function to get the current time in milliseconds.
+
+
+
+### profile/1 ###
+
+`profile(Fun) -> any()`
+
+Utility function to start a profiling session and run a function,
+then analyze the results. Obviously -- do not use in production.
+
+
+
+### read/1 ###
+
+`read(ID) -> any()`
+
+Debugging function to read a message from the cache.
+Specify either a scope atom (local or remote) or a store tuple
+as the second argument.
+
+
+
+### read/2 ###
+
+`read(ID, ScopeAtom) -> any()`
+
+
+
+### start_mainnet/0 ###
+
+`start_mainnet() -> any()`
+
+Start a mainnet server without payments.
+
+
+
+### start_mainnet/1 ###
+
+`start_mainnet(Port) -> any()`
+
+
+
+### start_simple_pay/0 ###
+
+`start_simple_pay() -> any()`
+
+Start a server with a `simple-pay@1.0` pre-processor.
+
+
+
+### start_simple_pay/1 ###
+
+`start_simple_pay(Addr) -> any()`
+
+
+
+### start_simple_pay/2 ###
+
+`start_simple_pay(Addr, Port) -> any()`
+
+
+
+### topup/3 ###
+
+`topup(Node, Amount, Recipient) -> any()`
+
+Helper for topping up a user's balance on a simple-pay node.
+
+
+
+### topup/4 ###
+
+`topup(Node, Amount, Recipient, Wallet) -> any()`
+
+
+
+### wallet/0 ###
+
+`wallet() -> any()`
+
+
+
+### wallet/1 ###
+
+`wallet(Location) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/hb.md ---
+
+--- START OF FILE: docs/resources/source-code/index.md ---
+# Source Code Documentation
+
+Welcome to the source code documentation for HyperBEAM. This section provides detailed insights into the codebase, helping developers understand the structure, functionality, and implementation details of HyperBEAM and its components.
+
+## Overview
+
+HyperBEAM is built with a modular architecture to ensure scalability, maintainability, and extensibility. The source code is organized into distinct components, each serving a specific purpose within the ecosystem.
+
+## Sections
+
+- **HyperBEAM Core**: The main framework that orchestrates data processing, storage, and routing.
+- **Compute Unit**: Handles computational tasks and integrates with the HyperBEAM core for distributed processing.
+- **Trusted Execution Environment (TEE)**: Ensures secure execution of sensitive operations.
+- **Client Libraries**: Tools and SDKs for interacting with HyperBEAM, including the JavaScript client.
+
+## Getting Started
+
+To explore the source code, you can clone the repository from [GitHub](https://github.com/permaweb/HyperBEAM).
+
+## Navigation
+
+Use the navigation menu to dive into specific parts of the codebase. Each module includes detailed documentation, code comments, and examples to assist in understanding and contributing to the project.
+
+
+--- END OF FILE: docs/resources/source-code/index.md ---
+
+--- START OF FILE: docs/resources/source-code/README.md ---
+
+
+# The hb application #
+
+
+## Modules ##
+
+
+
+
+
+
+### verify_legacy/4 ###
+
+`verify_legacy(Message, DigestType, Signature, PublicKey) -> any()`
+
+
+--- END OF FILE: docs/resources/source-code/rsa_pss.md ---
+
+--- START OF FILE: docs/run/configuring-your-machine.md ---
+# Configuring Your HyperBEAM Node
+
+This guide details the various ways to configure your HyperBEAM node's behavior, including ports, storage, keys, and logging.
+
+## Configuration (`config.flat`)
+
+The primary way to configure your HyperBEAM node is through a `config.flat` file located in the node's working directory or specified by the `HB_CONFIG_LOCATION` environment variable.
+
+This file uses a simple `Key = Value.` format (note the period at the end of each line).
+
+**Example `config.flat`:**
+
+```erlang
+% Set the HTTP port
+port = 8080.
+
+% Specify the Arweave key file
+priv_key_location = "/path/to/your/wallet.json".
+
+% Set the data store directory
+% Note: Storage configuration can be complex. See below.
+% store = [{local, [{root, <<"./node_data_mainnet">>}]}]. % Example of complex config, not for config.flat
+
+% Enable verbose logging for specific modules
+% debug_print = [hb_http, dev_router]. % Example of complex config, not for config.flat
+```
+
+Below is a reference of commonly used configuration keys. Remember that `config.flat` only supports simple key-value pairs (Atoms, Strings, Integers, Booleans). For complex configurations (Lists, Maps), you must use environment variables or `hb:start_mainnet/1`.
+
+### Core Configuration
+
+These options control fundamental HyperBEAM behavior.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `port` | Integer | 8734 | HTTP API port |
+| `hb_config_location` | String | "config.flat" | Path to configuration file |
+| `priv_key_location` | String | "hyperbeam-key.json" | Path to operator wallet key file |
+| `mode` | Atom | debug | Execution mode (debug, prod) |
+
+### Server & Network Configuration
+
+These options control networking behavior and HTTP settings.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `host` | String | "localhost" | Choice of remote node for non-local tasks |
+| `gateway` | String | "https://arweave.net" | Default gateway |
+| `bundler_ans104` | String | "https://up.arweave.net:443" | Location of ANS-104 bundler |
+| `protocol` | Atom | http2 | Protocol for HTTP requests (http1, http2, http3) |
+| `http_client` | Atom | gun | HTTP client to use (gun, httpc) |
+| `http_connect_timeout` | Integer | 5000 | HTTP connection timeout in milliseconds |
+| `http_keepalive` | Integer | 120000 | HTTP keepalive time in milliseconds |
+| `http_request_send_timeout` | Integer | 60000 | HTTP request send timeout in milliseconds |
+| `relay_http_client` | Atom | httpc | HTTP client for the relay device |
+
+
+### Security & Identity
+
+These options control identity and security settings.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `scheduler_location_ttl` | Integer | 604800000 | TTL for scheduler registration (7 days in ms) |
+
+
+### Caching & Storage
+
+These options control caching behavior. **Note:** Detailed storage configuration (`store` option) involves complex data structures and cannot be set via `config.flat`.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `cache_lookup_hueristics` | Boolean | false | Whether to use caching heuristics or always consult the local data store |
+| `access_remote_cache_for_client` | Boolean | false | Whether to access data from remote caches for client requests |
+| `store_all_signed` | Boolean | true | Whether the node should store all signed messages |
+| `await_inprogress` | Atom/Boolean | named | Whether to await in-progress executions (false, named, true) |
+
+
+### Execution & Processing
+
+These options control how HyperBEAM executes messages and processes.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `scheduling_mode` | Atom | local_confirmation | When to inform recipients about scheduled assignments (aggressive, local_confirmation, remote_confirmation) |
+| `compute_mode` | Atom | lazy | Whether to execute more messages after returning a result (aggressive, lazy) |
+| `process_workers` | Boolean | true | Whether the node should use persistent processes |
+| `client_error_strategy` | Atom | throw | What to do if a client error occurs |
+| `wasm_allow_aot` | Boolean | false | Allow ahead-of-time compilation for WASM |
+
+### Device Management
+
+These options control how HyperBEAM manages devices.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `load_remote_devices` | Boolean | false | Whether to load devices from remote signers |
+
+
+### Debug & Development
+
+These options control debugging and development features.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `debug_stack_depth` | Integer | 40 | Maximum stack depth for debug printing |
+| `debug_print_map_line_threshold` | Integer | 30 | Maximum lines for map printing |
+| `debug_print_binary_max` | Integer | 60 | Maximum binary size for debug printing |
+| `debug_print_indent` | Integer | 2 | Indentation for debug printing |
+| `debug_print_trace` | Atom | short | Trace mode (short, false) |
+| `short_trace_len` | Integer | 5 | Length of short traces |
+| `debug_hide_metadata` | Boolean | true | Whether to hide metadata in debug output |
+| `debug_ids` | Boolean | false | Whether to print IDs in debug output |
+| `debug_hide_priv` | Boolean | true | Whether to hide private data in debug output |
+
+
+**Note:** For the *absolute complete* and most up-to-date list, including complex options not suitable for `config.flat`, refer to the `default_message/0` function in the `hb_opts` module source code.
+
+## Overrides (Environment Variables & Args)
+
+You can override settings from `config.flat` or provide values if the file is missing using environment variables or command-line arguments.
+
+**Using Environment Variables:**
+
+Environment variables typically use an `HB_` prefix followed by the configuration key in uppercase.
+
+* **`HB_PORT=<port>`:** Overrides `hb_port`.
+ * Example: `HB_PORT=8080 rebar3 shell`
+* **`HB_KEY=<path>`:** Overrides `hb_key`.
+ * Example: `HB_KEY=~/.keys/arweave_key.json rebar3 shell`
+* **`HB_STORE=<path>`:** Overrides `hb_store`.
+ * Example: `HB_STORE=./node_data_1 rebar3 shell`
+* **`HB_PRINT=<setting>`:** Overrides `hb_print`. `<setting>` can be `true` (or `1`), or a comma-separated list of modules/topics (e.g., `hb_path,hb_ao,ao_result`).
+ * Example: `HB_PRINT=hb_http,dev_router rebar3 shell`
+* **`HB_CONFIG_LOCATION=<path>`:** Specifies a custom location for the configuration file.
+
+**Using `erl_opts` (Direct Erlang VM Arguments):**
+
+You can also pass arguments directly to the Erlang VM using the `-<key> <value>` format within `erl_opts`. This is generally less common for application configuration than `config.flat` or environment variables.
+
+```bash
+rebar3 shell --erl_opts "-hb_port 8080 -hb_key path/to/key.json"
+```
+
+**Order of Precedence:**
+
+1. Command-line arguments (`erl_opts`).
+2. Environment variables (`HB_*`).
+3. Settings in `config.flat`.
+4. Default values from `hb_opts.erl`.
+
+## Configuration in Releases
+
+When running a release build (see [Running a HyperBEAM Node](./running-a-hyperbeam-node.md)), configuration works similarly:
+
+1. A `config.flat` file will be present in the release directory (e.g., `_build/default/rel/hb/config.flat`). Edit this file to set your desired parameters for the release environment.
+2. Environment variables (`HB_*`) can still be used to override the settings in the release's `config.flat` when starting the node using the `bin/hb` script.
+
+--- END OF FILE: docs/run/configuring-your-machine.md ---
+
+--- START OF FILE: docs/run/joining-running-a-router.md ---
+# Joining or Running a Router Node
+
+Router nodes play a crucial role in the HyperBEAM network by directing incoming HTTP requests to appropriate worker nodes capable of handling the requested computation or data retrieval. They act as intelligent load balancers and entry points into the AO ecosystem.
+
+!!! info "Advanced Topic"
+ Configuring and running a production-grade router involves considerations beyond the scope of this introductory guide, including network topology, security, high availability, and performance tuning.
+
+## What is a Router?
+
+In HyperBEAM, the `dev_router` module (and associated logic) implements routing functionality. A node configured as a router typically:
+
+1. Receives external HTTP requests (HyperPATH calls).
+2. Parses the request path to determine the target process, device, and desired operation.
+3. Consults its routing table or logic to select an appropriate downstream worker node (which might be itself or another node).
+4. Forwards the request to the selected worker.
+5. Receives the response from the worker.
+6. Returns the response to the original client.
+
+Routers often maintain information about the capabilities and load of worker nodes they know about.
+
+## Configuring Routing Behavior
+
+Routing logic is primarily configured through node options, often managed via `hb_opts` or environment variables when starting the node. Key aspects include:
+
+* **Route Definitions:** Defining patterns (templates) and corresponding downstream targets (worker node URLs or internal handlers). Routes are typically ordered by precedence.
+* **Load Balancing Strategy:** How the router chooses among multiple potential workers for a given route (e.g., round-robin, least connections, latency-based).
+* **Worker Discovery/Management:** How the router learns about available worker nodes and their status.
+
+**Example Configuration Snippet (Conceptual - from `hb_opts` or config file):**
+
+```erlang
+{
+ routes,
+ [
+ #{ template => "/~meta@1.0/.*", target => self }, % Handle meta locally
+ #{ template => "/PROCESS_ID1~process@1.0/.*", target => "http://worker1.example.com" },
+ #{ template => "/PROCESS_ID2~process@1.0/.*", target => "http://worker2.example.com" },
+ #{ template => "/.*~wasm64@1.0/.*", target => ["http://wasm_worker1", "http://wasm_worker2"], strategy => round_robin }, % Route WASM requests
+ #{ template => "/.*", target => "http://default_worker.example.com" } % Default fallback
+ ]
+},
+{ router_load_balancing_strategy, latency_aware }
+```
+
+*(Note: The actual configuration format and options should be verified in the `hb_opts.erl` and `dev_router.erl` source code.)*
+
+## Running a Simple Router
+
+While a dedicated router setup is complex, any HyperBEAM node implicitly performs some level of routing, especially if it needs to interact with other nodes (e.g., via the `~relay@1.0` device). The default configuration might route certain requests internally or have basic forwarding capabilities.
+
+To run a node that explicitly acts *more* like a router, you would typically configure it with specific `routes` pointing to other worker nodes, potentially disabling local execution for certain devices it intends to forward.
+
+## Joining an Existing Router Network
+
+As a user or developer, you typically don't *run* the main public routers (like `router-1.forward.computer`). Instead, you configure your client applications (or your own local node if it needs to relay requests) to *use* these public routers as entry points.
+
+When making HyperPATH calls, you simply target the public router's URL:
+
+```
+https://<router-url>/~<device>@<version>/...
+```
+The router handles directing your request to an appropriate compute node.
+
+## Further Exploration
+
+* Examine the `dev_router.erl` source code for detailed implementation.
+* Review the available configuration options in `hb_opts.erl` related to routing (`routes`, strategies, etc.).
+* Consult community channels or advanced documentation for best practices on deploying production routers.
+
+--- END OF FILE: docs/run/joining-running-a-router.md ---
+
+--- START OF FILE: docs/run/running-a-hyperbeam-node.md ---
+# Running a HyperBEAM Node
+
+This guide provides the basics for running your own HyperBEAM node, installing dependencies, and connecting to the AO network.
+
+## System Dependencies
+
+To successfully build and run a HyperBEAM node, your system needs several software dependencies installed.
+
+=== "macOS"
+ Install core dependencies using [Homebrew](https://brew.sh/):
+
+ ```bash
+ brew install cmake git pkg-config openssl ncurses
+ ```
+
+=== "Linux (Debian/Ubuntu)"
+ Install core dependencies using `apt`:
+ ```bash
+ sudo apt-get update && sudo apt-get install -y --no-install-recommends \
+ build-essential \
+ cmake \
+ git \
+ pkg-config \
+ ncurses-dev \
+ libssl-dev \
+ sudo \
+ curl \
+ ca-certificates
+ ```
+
+=== "Windows (WSL)"
+ Using the Windows Subsystem for Linux (WSL) with a distribution like Ubuntu is recommended. Follow the Linux (Debian/Ubuntu) instructions within your WSL environment.
+
+
+
+### Erlang/OTP
+
+HyperBEAM is built on Erlang/OTP. You need a compatible version installed (check the `rebar.config` or project documentation for specific version requirements, **typically OTP 27**).
+
+Installation methods:
+
+=== "macOS (brew)"
+ ```bash
+ brew install erlang
+ ```
+
+=== "Linux (apt)"
+ ```bash
+ sudo apt install erlang
+ ```
+
+
+=== "Source Build"
+ Download from [erlang.org](https://www.erlang.org/downloads) and follow the build instructions for your platform.
+
+### Rebar3
+
+Rebar3 is the build tool for Erlang projects.
+
+Installation methods:
+
+=== "macOS (brew)"
+ ```bash
+ brew install rebar3
+ ```
+
+=== "Linux / macOS (Direct Download)"
+ Get the `rebar3` binary from the [official website](https://rebar3.org/). Place the downloaded `rebar3` file in your system's `PATH` (e.g., `/usr/local/bin`) and make it executable (`chmod +x rebar3`).
+
+
+
+### Node.js
+
+Node.js might be required for certain JavaScript-related tools or dependencies.
+
+Installation methods:
+
+=== "macOS (brew)"
+ ```bash
+ brew install node
+ ```
+
+=== "Linux (apt)"
+ ```bash
+ # Check your distribution's recommended method, might need nodesource repo
+ sudo apt install nodejs npm
+ ```
+
+=== "asdf (Recommended)"
+ `asdf-vm` with the `asdf-nodejs` plugin is recommended.
+ ```bash
+ asdf plugin add nodejs https://github.com/asdf-vm/asdf-nodejs.git
+ asdf install nodejs # e.g., lts
+ asdf global nodejs
+ ```
+
+### Rust
+
+Rust is needed if you intend to work with or build components involving WebAssembly (WASM) or certain Native Implemented Functions (NIFs) used by some devices (like `~snp@1.0`).
+
+The recommended way to install Rust on **all platforms** is via `rustup`:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+source "$HOME/.cargo/env" # Or follow the instructions provided by rustup
+```
+
+## Prerequisites for Running
+
+Before starting a node, ensure you have:
+
+* Installed the [system dependencies](#system-dependencies) mentioned above.
+* Cloned the [HyperBEAM repository](https://github.com/permaweb/HyperBEAM) (`git clone ...`).
+* Compiled the source code (`rebar3 compile` in the repo directory).
+* An Arweave **wallet keyfile** (e.g., generated via [Wander](https://www.wander.app)). The path to this file is typically set via the `hb_key` configuration option (see [Configuring Your HyperBEAM Node](./configuring-your-machine.md)).
+
+## Starting a Basic Node
+
+The simplest way to start a HyperBEAM node for development or testing is using `rebar3` from the repository's root directory:
+
+```bash
+rebar3 shell
+```
+
+This command:
+
+1. Starts the Erlang Virtual Machine (BEAM) with all HyperBEAM modules loaded.
+2. Initializes the node with default settings (from `hb_opts.erl`).
+3. Starts the default HTTP server (typically on **port 10000**), making the node accessible via HyperPATHs.
+4. Drops you into an interactive Erlang shell where you can interact with the running node.
+
+This basic setup is suitable for local development and exploring HyperBEAM's functionalities.
+
+## Optional Build Profiles
+
+HyperBEAM uses build profiles to enable optional features, often requiring extra dependencies. To run a node with specific profiles enabled, use `rebar3 as ... shell`:
+
+**Available Profiles (Examples):**
+
+* `genesis_wasm`: Enables Genesis WebAssembly support.
+* `rocksdb`: Enables the RocksDB storage backend.
+* `http3`: Enables HTTP/3 support.
+
+**Example Usage:**
+
+```bash
+# Start with RocksDB profile
+rebar3 as rocksdb shell
+
+# Start with RocksDB and Genesis WASM profiles
+rebar3 as rocksdb,genesis_wasm shell
+```
+
+*Note: Choose profiles **before** starting the shell, as they affect compile-time options.*
+
+## Node Configuration
+
+HyperBEAM offers various configuration options (port, key file, data storage, logging, etc.). These are primarily set using a `config.flat` file and can be overridden by environment variables or command-line arguments.
+
+See the dedicated **[Configuring Your HyperBEAM Node](./configuring-your-machine.md)** guide for detailed information on all configuration methods and options.
+
+## Verify Installation
+
+To quickly check if your node is running and accessible, you can send a request to its `~meta@1.0` device (assuming default port 10000):
+
+```bash
+curl http://localhost:10000/~meta@1.0/info
+```
+
+A JSON response containing node information indicates success.
+
+## Running for Production (Mainnet)
+
+While you can connect to the main AO network using the `rebar3 shell` for testing purposes (potentially using specific configurations or helper functions like `hb:start_mainnet/1` if available and applicable), the standard and recommended method for a stable production deployment (like running on the mainnet) is to build and run a **release**.
+
+**1. Build the Release:**
+
+From the root of the HyperBEAM repository, build the release package. You might include specific profiles needed for your mainnet setup (e.g., `rocksdb` if you intend to use it):
+
+```bash
+# Build release with default profile
+rebar3 release
+
+# Or, build with specific profiles (example)
+# rebar3 as rocksdb release
+```
+
+This command compiles the project and packages it along with the Erlang Runtime System (ERTS) and all dependencies into a directory, typically `_build/default/rel/hb`.
+
+**2. Configure the Release:**
+
+Navigate into the release directory (e.g., `cd _build/default/rel/hb`). Ensure you have a correctly configured `config.flat` file here. See the [configuration guide](./configuring-your-machine.md) for details on setting mainnet parameters (port, key file location, store path, specific peers, etc.). Environment variables can also be used to override settings in the release's `config.flat` when starting the node.
+
+**3. Start the Node:**
+
+Use the generated start script (`bin/hb`) to run the node:
+
+```bash
+# Start the node in the foreground (logs to console)
+./bin/hb console
+
+# Start the node as a background daemon
+./bin/hb start
+
+# Check the status
+./bin/hb ping
+./bin/hb status
+
+# Stop the node
+./bin/hb stop
+```
+
+Consult the generated `bin/hb` script or Erlang/OTP documentation for more advanced start-up options (e.g., attaching a remote shell).
+
+Running as a release provides a more robust, isolated, and manageable way to operate a node compared to running directly from the `rebar3 shell`.
+
+## Stopping the Node (rebar3 shell)
+
+To stop the node running *within the `rebar3 shell`*, press `Ctrl+C` twice or use the Erlang command `q().`.
+
+## Next Steps
+
+* **Configure Your Node:** Deep dive into [configuration options](./configuring-your-machine.md).
+* **TEE Nodes:** Learn about running nodes in [Trusted Execution Environments](./tee-nodes.md) for enhanced security.
+* **Routers:** Understand how to configure and run a [router node](./joining-running-a-router.md).
+
+--- END OF FILE: docs/run/running-a-hyperbeam-node.md ---
+
+--- START OF FILE: docs/run/tee-nodes.md ---
+# Trusted Execution Environment (TEE)
+
+!!! info "Documentation Coming Soon"
+ Detailed documentation about Trusted Execution Environment support in HyperBEAM is currently being developed and will be available soon.
+
+## Overview
+
+HyperBEAM supports Trusted Execution Environments (TEEs) through the `~snp@1.0` device, which enables secure, trust-minimized computation on remote machines. TEEs provide hardware-level isolation and attestation capabilities that allow users to verify that their code is running in a protected environment, exactly as intended, even on untrusted hardware.
+
+The `~snp@1.0` device in HyperBEAM is used to generate and validate proofs that a node is executing inside a Trusted Execution Environment. Nodes executing inside these environments use an ephemeral key pair that provably only exists inside the TEE, and can sign attestations of AO-Core executions in a trust-minimized way.
+
+## Key Features
+
+- Hardware-level isolation for secure computation
+- Remote attestation capabilities
+- Protected execution state
+- Confidential computing support
+- Compatibility with AMD SEV-SNP technology
+
+## Coming Soon
+
+Detailed documentation on the following topics will be added:
+
+- TEE setup and configuration
+- Using the `~snp@1.0` device
+- Verifying TEE attestations
+- Developing for TEEs
+- Security considerations
+- Performance characteristics
+
+If you intend to offer TEE-based computation of AO-Core devices, please see the [HyperBEAM OS repository](https://github.com/permaweb/hb-os) for preliminary details on configuration and deployment.
+--- END OF FILE: docs/run/tee-nodes.md ---
+
diff --git a/docs/llms.txt b/docs/llms.txt
new file mode 100644
index 000000000..31023fbde
--- /dev/null
+++ b/docs/llms.txt
@@ -0,0 +1,155 @@
+Generated: 2025-05-15T13:32:25Z
+
+## HyperBEAM Documentation Summary
+
+This document provides an overview and routes for the HyperBEAM documentation, intended for LLM consumption.
+Key sections include: Introduction (introduction), Running HyperBEAM (run), Building on AO-Core (build), Device Integration (devices), and Resources (resources).
+
+## Documentation Pages by Section
+
+### introduction
+
+* [AO Devices](./introduction/ao-devices.html)
+* [Pathing in AO-Core](./introduction/pathing-in-ao-core.html)
+* [What is AO-Core?](./introduction/what-is-ao-core.html)
+* [What is HyperBEAM?](./introduction/what-is-hyperbeam.html)
+
+### run
+
+* [Configuring Your HyperBEAM Node](./run/configuring-your-machine.html)
+* [Joining or Running a Router Node](./run/joining-running-a-router.html)
+* [Running a HyperBEAM Node](./run/running-a-hyperbeam-node.html)
+* [Trusted Execution Environment (TEE)](./run/tee-nodes.html)
+
+### build
+
+* [Exposing Process State with the Patch Device](./build/exposing-process-state.html)
+* [Extending HyperBEAM](./build/extending-hyperbeam.html)
+* [Getting Started Building on AO-Core](./build/get-started-building-on-ao-core.html)
+* [Serverless Decentralized Compute on AO](./build/serverless-decentralized-compute.html)
+
+### devices
+
+* [Device: ~json@1.0](./devices/json-at-1-0.html)
+* [Device: ~lua@5.3a](./devices/lua-at-5-3a.html)
+* [Device: ~message@1.0](./devices/message-at-1-0.html)
+* [Device: ~meta@1.0](./devices/meta-at-1-0.html)
+* [Devices](./devices/overview.html)
+* [Device: ~process@1.0](./devices/process-at-1-0.html)
+* [Device: ~relay@1.0](./devices/relay-at-1-0.html)
+* [Device: ~scheduler@1.0](./devices/scheduler-at-1-0.html)
+* [Device: ~wasm64@1.0](./devices/wasm64-at-1-0.html)
+
+### resources
+
+* [LLM Context Files](./resources/llms.html)
+* [Frequently Asked Questions](./resources/reference/faq.html)
+* [Glossary](./resources/reference/glossary.html)
+* [Troubleshooting Guide](./resources/reference/troubleshooting.html)
+* [[Module ar_bundles.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_bundles.erl)](./resources/source-code/ar_bundles.html)
+* [[Module ar_deep_hash.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_deep_hash.erl)](./resources/source-code/ar_deep_hash.html)
+* [[Module ar_rate_limiter.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_rate_limiter.erl)](./resources/source-code/ar_rate_limiter.html)
+* [[Module ar_timestamp.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_timestamp.erl)](./resources/source-code/ar_timestamp.html)
+* [[Module ar_tx.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_tx.erl)](./resources/source-code/ar_tx.html)
+* [[Module ar_wallet.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/ar_wallet.erl)](./resources/source-code/ar_wallet.html)
+* [[Module dev_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_cache.erl)](./resources/source-code/dev_cache.html)
+* [[Module dev_cacheviz.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_cacheviz.erl)](./resources/source-code/dev_cacheviz.html)
+* [[Module dev_codec_ans104.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_ans104.erl)](./resources/source-code/dev_codec_ans104.html)
+* [[Module dev_codec_flat.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_flat.erl)](./resources/source-code/dev_codec_flat.html)
+* [[Module dev_codec_httpsig.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_httpsig.erl)](./resources/source-code/dev_codec_httpsig.html)
+* [[Module dev_codec_httpsig_conv.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_httpsig_conv.erl)](./resources/source-code/dev_codec_httpsig_conv.html)
+* [[Module dev_codec_json.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_json.erl)](./resources/source-code/dev_codec_json.html)
+* [[Module dev_codec_structured.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_structured.erl)](./resources/source-code/dev_codec_structured.html)
+* [[Module dev_cron.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_cron.erl)](./resources/source-code/dev_cron.html)
+* [[Module dev_cu.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_cu.erl)](./resources/source-code/dev_cu.html)
+* [[Module dev_dedup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_dedup.erl)](./resources/source-code/dev_dedup.html)
+* [[Module dev_delegated_compute.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_delegated_compute.erl)](./resources/source-code/dev_delegated_compute.html)
+* [[Module dev_faff.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_faff.erl)](./resources/source-code/dev_faff.html)
+* [[Module dev_genesis_wasm.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_genesis_wasm.erl)](./resources/source-code/dev_genesis_wasm.html)
+* [[Module dev_green_zone.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_green_zone.erl)](./resources/source-code/dev_green_zone.html)
+* [[Module dev_hook.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hook.erl)](./resources/source-code/dev_hook.html)
+* [[Module dev_hyperbuddy.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hyperbuddy.erl)](./resources/source-code/dev_hyperbuddy.html)
+* [[Module dev_json_iface.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_json_iface.erl)](./resources/source-code/dev_json_iface.html)
+* [[Module dev_local_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_local_name.erl)](./resources/source-code/dev_local_name.html)
+* [[Module dev_lookup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lookup.erl)](./resources/source-code/dev_lookup.html)
+* [[Module dev_lua.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lua.erl)](./resources/source-code/dev_lua.html)
+* [[Module dev_lua_lib.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lua_lib.erl)](./resources/source-code/dev_lua_lib.html)
+* [[Module dev_lua_test.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lua_test.erl)](./resources/source-code/dev_lua_test.html)
+* [[Module dev_manifest.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_manifest.erl)](./resources/source-code/dev_manifest.html)
+* [[Module dev_message.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_message.erl)](./resources/source-code/dev_message.html)
+* [[Module dev_meta.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_meta.erl)](./resources/source-code/dev_meta.html)
+* [[Module dev_monitor.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_monitor.erl)](./resources/source-code/dev_monitor.html)
+* [[Module dev_multipass.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_multipass.erl)](./resources/source-code/dev_multipass.html)
+* [[Module dev_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_name.erl)](./resources/source-code/dev_name.html)
+* [[Module dev_node_process.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_node_process.erl)](./resources/source-code/dev_node_process.html)
+* [[Module dev_p4.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_p4.erl)](./resources/source-code/dev_p4.html)
+* [[Module dev_patch.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_patch.erl)](./resources/source-code/dev_patch.html)
+* [[Module dev_poda.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_poda.erl)](./resources/source-code/dev_poda.html)
+* [[Module dev_process.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process.erl)](./resources/source-code/dev_process.html)
+* [[Module dev_process_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process_cache.erl)](./resources/source-code/dev_process_cache.html)
+* [[Module dev_process_worker.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process_worker.erl)](./resources/source-code/dev_process_worker.html)
+* [[Module dev_push.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_push.erl)](./resources/source-code/dev_push.html)
+* [[Module dev_relay.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_relay.erl)](./resources/source-code/dev_relay.html)
+* [[Module dev_router.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_router.erl)](./resources/source-code/dev_router.html)
+* [[Module dev_scheduler.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler.erl)](./resources/source-code/dev_scheduler.html)
+* [[Module dev_scheduler_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_cache.erl)](./resources/source-code/dev_scheduler_cache.html)
+* [[Module dev_scheduler_formats.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_formats.erl)](./resources/source-code/dev_scheduler_formats.html)
+* [[Module dev_scheduler_registry.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_registry.erl)](./resources/source-code/dev_scheduler_registry.html)
+* [[Module dev_scheduler_server.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_server.erl)](./resources/source-code/dev_scheduler_server.html)
+* [[Module dev_simple_pay.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_simple_pay.erl)](./resources/source-code/dev_simple_pay.html)
+* [[Module dev_snp.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_snp.erl)](./resources/source-code/dev_snp.html)
+* [[Module dev_snp_nif.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_snp_nif.erl)](./resources/source-code/dev_snp_nif.html)
+* [[Module dev_stack.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_stack.erl)](./resources/source-code/dev_stack.html)
+* [[Module dev_test.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_test.erl)](./resources/source-code/dev_test.html)
+* [[Module dev_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_volume.erl)](./resources/source-code/dev_volume.html)
+* [[Module dev_wasi.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_wasi.erl)](./resources/source-code/dev_wasi.html)
+* [[Module dev_wasm.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_wasm.erl)](./resources/source-code/dev_wasm.html)
+* [[Module hb.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb.erl)](./resources/source-code/hb.html)
+* [[Module hb_ao.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_ao.erl)](./resources/source-code/hb_ao.html)
+* [[Module hb_ao_test_vectors.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_ao_test_vectors.erl)](./resources/source-code/hb_ao_test_vectors.html)
+* [[Module hb_app.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_app.erl)](./resources/source-code/hb_app.html)
+* [[Module hb_beamr.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_beamr.erl)](./resources/source-code/hb_beamr.html)
+* [[Module hb_beamr_io.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_beamr_io.erl)](./resources/source-code/hb_beamr_io.html)
+* [[Module hb_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_cache.erl)](./resources/source-code/hb_cache.html)
+* [[Module hb_cache_control.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_cache_control.erl)](./resources/source-code/hb_cache_control.html)
+* [[Module hb_cache_render.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_cache_render.erl)](./resources/source-code/hb_cache_render.html)
+* [[Module hb_client.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_client.erl)](./resources/source-code/hb_client.html)
+* [[Module hb_crypto.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_crypto.erl)](./resources/source-code/hb_crypto.html)
+* [[Module hb_debugger.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_debugger.erl)](./resources/source-code/hb_debugger.html)
+* [[Module hb_escape.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_escape.erl)](./resources/source-code/hb_escape.html)
+* [[Module hb_event.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_event.erl)](./resources/source-code/hb_event.html)
+* [[Module hb_examples.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_examples.erl)](./resources/source-code/hb_examples.html)
+* [[Module hb_features.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_features.erl)](./resources/source-code/hb_features.html)
+* [[Module hb_gateway_client.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_gateway_client.erl)](./resources/source-code/hb_gateway_client.html)
+* [[Module hb_http.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http.erl)](./resources/source-code/hb_http.html)
+* [[Module hb_http_benchmark_tests.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_benchmark_tests.erl)](./resources/source-code/hb_http_benchmark_tests.html)
+* [[Module hb_http_client.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_client.erl)](./resources/source-code/hb_http_client.html)
+* [[Module hb_http_client_sup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_client_sup.erl)](./resources/source-code/hb_http_client_sup.html)
+* [[Module hb_http_server.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_server.erl)](./resources/source-code/hb_http_server.html)
+* [[Module hb_json.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_json.erl)](./resources/source-code/hb_json.html)
+* [[Module hb_keccak.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_keccak.erl)](./resources/source-code/hb_keccak.html)
+* [[Module hb_logger.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_logger.erl)](./resources/source-code/hb_logger.html)
+* [[Module hb_message.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_message.erl)](./resources/source-code/hb_message.html)
+* [[Module hb_metrics_collector.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_metrics_collector.erl)](./resources/source-code/hb_metrics_collector.html)
+* [[Module hb_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_name.erl)](./resources/source-code/hb_name.html)
+* [[Module hb_opts.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_opts.erl)](./resources/source-code/hb_opts.html)
+* [[Module hb_path.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_path.erl)](./resources/source-code/hb_path.html)
+* [[Module hb_persistent.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_persistent.erl)](./resources/source-code/hb_persistent.html)
+* [[Module hb_private.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_private.erl)](./resources/source-code/hb_private.html)
+* [[Module hb_process_monitor.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_process_monitor.erl)](./resources/source-code/hb_process_monitor.html)
+* [[Module hb_router.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_router.erl)](./resources/source-code/hb_router.html)
+* [[Module hb_singleton.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_singleton.erl)](./resources/source-code/hb_singleton.html)
+* [[Module hb_store.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store.erl)](./resources/source-code/hb_store.html)
+* [[Module hb_store_fs.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_fs.erl)](./resources/source-code/hb_store_fs.html)
+* [[Module hb_store_gateway.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_gateway.erl)](./resources/source-code/hb_store_gateway.html)
+* [[Module hb_store_remote_node.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_remote_node.erl)](./resources/source-code/hb_store_remote_node.html)
+* [[Module hb_store_rocksdb.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_rocksdb.erl)](./resources/source-code/hb_store_rocksdb.html)
+* [[Module hb_structured_fields.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_structured_fields.erl)](./resources/source-code/hb_structured_fields.html)
+* [[Module hb_sup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_sup.erl)](./resources/source-code/hb_sup.html)
+* [[Module hb_test_utils.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_test_utils.erl)](./resources/source-code/hb_test_utils.html)
+* [[Module hb_tracer.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_tracer.erl)](./resources/source-code/hb_tracer.html)
+* [[Module hb_util.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_util.erl)](./resources/source-code/hb_util.html)
+* [[Module hb_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_volume.erl)](./resources/source-code/hb_volume.html)
+* [Source Code Documentation](./resources/source-code/index.html)
+* [The hb application](./resources/source-code/README.html)
+* [[Module rsa_pss.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/rsa_pss.erl)](./resources/source-code/rsa_pss.html)
diff --git a/docs/misc/ao-core-web-apis.md b/docs/misc/ao-core-web-apis.md
new file mode 100644
index 000000000..c57946d24
--- /dev/null
+++ b/docs/misc/ao-core-web-apis.md
@@ -0,0 +1,86 @@
+# Integrating AO-Core into UIs
+
+## Overview
+
+This guide provides a practical approach to using the `patch@1.0` AO-Core device to create RESTful-like APIs for AO processes. If you're familiar with dryruns, this method offers a more efficient alternative by making process data directly accessible via HTTP endpoints from any operational AO Mainnet HyperBEAM node, eliminating the need for repeated dryruns. Responses are cryptographically signed and linked to individual nodes.
+
+## Key Features
+
+- **HTTP Endpoints**: Access process data via HTTP, similar to RESTful APIs
+- **Cryptographic Signatures**: Each response is signed, ensuring data integrity
+- **No DryRuns**: Directly access and update process states without the overhead of dryruns
+- **Real-time State Access**: Get the latest process state without waiting for dryrun computation
+
+## Implementation Steps
+
+### Initial State Synchronization
+
+Add an initial sync at the top of your process code to export the initial state:
+
+```lua
+-- Sync state on spawn
+InitialSync = InitialSync or 'INCOMPLETE'
+if InitialSync == 'INCOMPLETE' then
+ Send({
+ device = 'patch@1.0',
+ cache = {
+ table1 = {
+ [recordId1] = table1[recordId1]
+ },
+ table2 = {
+ [recordId2] = table2[recordId2]
+ }
+ }
+ })
+ InitialSync = 'COMPLETE'
+end
+```
+
+### State Updates During Operation
+
+Incorporate patch messages wherever state changes occur. Example for an auction system:
+
+```lua
+-- Inside any handler that modifies data
+Handlers.add('update-data', function(msg)
+ -- Process your logic...
+ table1[recordId1].field = msg.newValue
+ table2[recordId2] = {
+ field1 = msg.value1,
+ field2 = msg.From
+ }
+
+ -- Export the updated state
+ Send({
+ device = 'patch@1.0',
+ cache = {
+ table1 = {
+ [recordId1] = table1[recordId1]
+ },
+ table2 = {
+ [recordId2] = table2[recordId2]
+ }
+ }
+ })
+
+ -- Rest of handler logic...
+end)
+```
+
+### Accessing Your API
+
+Access your process data via any HyperBEAM node. This provides immediate access to the latest state without the need for dryruns:
+
+- **Latest State**: `GET /YOUR_PROCESS_ID~process@1.0/now/cache`
+- **Pre-computed State**: `GET /YOUR_PROCESS_ID~process@1.0/compute/cache`
+
+## Best Practices
+
+- **Selective Updates**: For large datasets, update only the altered data
+- **Consistent State Updates**: Ensure all state-changing handlers include patch messages
+- **Custom State Naming**: Name the state something other than `cache` if needed, and adjust HTTP requests accordingly
+- **Migration Strategy**: Consider gradually transitioning from dryruns to this approach for existing applications
+
+## Important Note
+
+This approach leverages HyperBEAM milestone 3 functionality and is currently in preview. It is not recommended for applications that may lead to loss of value due to potential changes and existing bugs. If you're currently using dryruns, consider this as a more efficient alternative for state access, but maintain your existing dryrun-based validation where needed.
\ No newline at end of file
diff --git a/docs/misc/building-pre-post-processors.md b/docs/misc/building-pre-post-processors.md
new file mode 100644
index 000000000..56fe7cf3d
--- /dev/null
+++ b/docs/misc/building-pre-post-processors.md
@@ -0,0 +1,91 @@
+# Building Pre/Post-Processors in AO
+
+Pre/post-processors in AO allow you to intercept and potentially modify incoming requests before they are executed by the target device or process. This guide explains how to build a preprocessor, focusing on a common pattern: exempting certain request paths from being relayed or modified.
+
+## Core Concepts
+
+1. **Exempt Routes (`exempt-routes`):** A list of route templates defined in the node's configuration (`Msg1`). If an incoming request (`Msg2`) matches any of these templates, it should bypass the main preprocessor logic (e.g., relaying).
+2. **Exemption Check (`is_exempt/3`):** A function that determines if a request should be exempt. It checks two things:
+ * An optional `is-exempt` message defined in the node's configuration (`Msg1`). If present, this message is resolved, and its result determines exemption.
+ * If `is-exempt` is not found, it matches the incoming request's path against the `exempt-routes` list using `dev_router:match/3`.
+3. **Preprocessing Logic (`preprocess/3`):** The main function that receives the node configuration (`Msg1`), the incoming request (`Msg2`), and options (`Opts`). Based on the result of `is_exempt/3`, it either:
+ * **If Exempt:** Returns the original, parsed list of messages to be executed. This list is found in the `body` key of `Msg2`.
+ * **If Not Exempt:** Performs the main preprocessing action, such as rewriting the request to be relayed to another node. This often involves using the raw, unparsed request singleton found in the `request` key of `Msg2`.
+
+## Implementation Example (Pseudo-code)
+
+This pseudo-code illustrates the flow:
+
+```erlang
+-define(DEFAULT_EXEMPT_ROUTES, [
+ % Default routes that should bypass preprocessing
+ #{ <<"template">> => <<"/~meta@1.0/.*">> },
+ #{ <<"template">> => <<"/~greenzone@1.0/.*">> }
+ % ... other default exempt routes
+]).
+
+%% @doc Check if a request is exempt from preprocessing.
+is_exempt(Msg1, Msg2, Opts) ->
+ case hb_ao:get(<<"is-exempt">>, Msg1, Opts) of
+ not_found ->
+ % No explicit is-exempt message, check against exempt-routes
+ ExemptRoutes =
+ hb_opts:get(
+ exempt_routes,
+ ?DEFAULT_EXEMPT_ROUTES,
+ Msg1
+ ),
+ Req = hb_ao:get(<<"request">>, Msg2, Opts),
+ {_, Matches} =
+ dev_router:match(
+ #{ <<"routes">> => ExemptRoutes },
+ Req,
+ Msg1 % Use NodeMsg (Msg1) for Opts context if needed
+ ),
+ case Matches of
+ no_matching_route -> {ok, false}; % Not exempt
+ _ -> {ok, true} % Exempt
+ end;
+ IsExemptMsg ->
+ % Resolve the custom is-exempt message
+ hb_ao:resolve(IsExemptMsg, Msg2, Opts)
+ end.
+
+%% @doc Preprocess an incoming request.
+preprocess(Msg1, Msg2, Opts) ->
+ case is_exempt(Msg1, Msg2, Opts) of
+ {ok, true} ->
+ % Request is exempt. Return the original parsed message list.
+ % IMPORTANT: Use the 'body' key from Msg2.
+ {ok, hb_ao:get(<<"body">>, Msg2, Opts)};
+ {ok, false} ->
+ % Request is not exempt. Perform preprocessing (e.g., relay).
+ % IMPORTANT: Use the 'request' key from Msg2 for the raw singleton.
+ {ok,
+ [
+ #{ <<"device">> => <<"relay@1.0">> }, % Example: Relay device
+ #{
+ <<"path">> => <<"call">>,
+ <<"target">> => <<"body">>, % Target the 'body' of the relay message
+ <<"body">> =>
+ % Get the raw request singleton
+ hb_ao:get(<<"request">>, Msg2, Opts#{ hashpath => ignore })
+ }
+ ]
+ };
+ {error, Reason} ->
+ % Handle errors from is_exempt resolution
+ {error, Reason}
+ end.
+```
+
+## Key Considerations
+
+* **`body` vs. `request`:** The preprocessor receives the incoming request in two forms within `Msg2`:
+ * `body`: A **parsed list** of AO messages that represent the steps to be executed. Use this when you want to return the original execution plan (i.e., when exempting).
+ * `request`: The **raw, unparsed TABM singleton** message sent by the user. Use this when you need the original message structure, for example, to forward it unmodified in a relay request.
+* **`dev_router:match/3`:** This function is used to match a request (`Req`) against a list of route templates (`Routes`). It's borrowed from the routing logic but is useful here for checking path-based exemptions.
+* **Configuration:** The `exempt-routes` and the optional `is-exempt` message should be configured in the node's options (accessible via `Msg1` or `Opts`).
+* **Error Handling:** Ensure proper error handling, especially when resolving the `is-exempt` message.
+
+By following this pattern, you can create flexible preprocessors that selectively apply logic based on configurable rules and request paths.
\ No newline at end of file
diff --git a/docs/misc/community/contributing-docs.md b/docs/misc/community/contributing-docs.md
new file mode 100644
index 000000000..3565bbd98
--- /dev/null
+++ b/docs/misc/community/contributing-docs.md
@@ -0,0 +1,100 @@
+# Contributing Documentation
+
+This guide explains how to contribute documentation to the HyperBEAM project. Following these steps will help ensure your documentation is properly integrated into the official documentation.
+
+## Overview
+
+The HyperBEAM documentation is built using [MkDocs](https://www.mkdocs.org/) with the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme. All documentation is written in Markdown and organized into logical sections within the `docs/` directory.
+
+## Contribution Process
+
+### 1. Fork the Repository
+
+First, fork the [HyperBEAM repository](https://github.com/permaweb/HyperBEAM) to your GitHub account.
+
+### 2. Choose the Right Location
+
+Review the existing documentation structure in `./docs/` to determine the appropriate location for your content. The documentation is organized into several main sections:
+
+- `overview/`: High-level concepts and architecture
+- `installation-core/`: Setup and configuration guides
+- `components/`: Detailed component documentation
+- `usage/`: Tutorials and usage guides
+- `resources/`: Reference materials and source code documentation
+- `community/`: Contribution guidelines and community resources
+
+### 3. Create Your Documentation
+
+Create a new Markdown file (`.md`) in the appropriate directory. Follow these guidelines:
+
+- Use proper Markdown syntax
+- Include clear headings and subheadings
+- Add code blocks with appropriate language specification
+- Link to related documentation
+- For images:
+ - Upload images to Arweave using [ArDrive](https://ardrive.io/) or your preferred Arweave upload method
+ - Reference images using their Arweave transaction ID (txid) in the format: `https://arweave.net/<txid>`
+ - Example: `![Alt text](https://arweave.net/<txid>)`
+- Follow the existing documentation style and format
+
+### 4. Update the Navigation
+
+Edit `mkdocs.yml` to add your documentation to the navigation:
+
+1. Open `mkdocs.yml`
+2. Find the appropriate section under the `nav:` configuration
+3. Add your entry following the existing indentation and format
+4. Ensure the path to your documentation is correct
+
+### 5. Test Your Changes
+
+Set up a local development environment to test your changes:
+
+```bash
+# Create and activate a virtual environment
+python3 -m venv venv
+source venv/bin/activate # (macOS/Linux) On Windows use `venv\Scripts\activate`
+
+# Install required packages
+pip3 install mkdocs mkdocs-material
+
+# Run the build script
+./docs/build-all.sh
+
+# Start a local server
+cd mkdocs-site
+python3 -m http.server 8000
+```
+
+View your documentation at `http://127.0.0.1:8000/` to ensure everything renders correctly.
+
+### 6. Submit a Pull Request
+
+When your documentation is ready:
+
+1. Create a new branch for your changes
+2. Commit your changes with a descriptive message
+3. Submit a PR with:
+ - A clear title describing the documentation addition
+ - A detailed description explaining:
+ - The purpose of the new documentation
+ - Why it should be added to the official docs
+ - Any related issues or discussions
+ - Screenshots of the rendered documentation (if applicable)
+
+### 7. Review Process
+
+The HyperBEAM team will review your PR and may request changes. Be prepared to:
+
+- Address any feedback
+- Make necessary adjustments
+- Respond to questions about your contribution
+
+Once approved, your documentation will be merged into the main repository.
+
+## Additional Resources
+
+- [Community Guidelines](./guidelines.md)
+- [Development Setup](./setup.md)
+- [MkDocs Documentation](https://www.mkdocs.org/)
+- [Material for MkDocs Documentation](https://squidfunk.github.io/mkdocs-material/)
\ No newline at end of file
diff --git a/docs/contribute/guidelines.md b/docs/misc/community/guidelines.md
similarity index 100%
rename from docs/contribute/guidelines.md
rename to docs/misc/community/guidelines.md
diff --git a/docs/contribute/setup.md b/docs/misc/community/setup.md
similarity index 100%
rename from docs/contribute/setup.md
rename to docs/misc/community/setup.md
diff --git a/docs/compute-unit/configuration.md b/docs/misc/components-compute-unit/configuration.md
similarity index 100%
rename from docs/compute-unit/configuration.md
rename to docs/misc/components-compute-unit/configuration.md
diff --git a/docs/compute-unit/index.md b/docs/misc/components-compute-unit/index.md
similarity index 92%
rename from docs/compute-unit/index.md
rename to docs/misc/components-compute-unit/index.md
index ad6b12247..024e2bbac 100644
--- a/docs/compute-unit/index.md
+++ b/docs/misc/components-compute-unit/index.md
@@ -25,12 +25,12 @@ This architecture separates business logic from external interfaces, making the
## Project Structure
- **domain**: Contains all business logic and public APIs
- - **api**: Implements public interfaces
- - **lib**: Contains business logic components
- - **dal.js**: Defines contracts for driven adapters
+ - **api**: Implements public interfaces
+ - **lib**: Contains business logic components
+ - **dal.js**: Defines contracts for driven adapters
- **effects**: Contains implementations of external interfaces
- - **ao-http**: Exposes the HTTP API consumed by other ao units
+ - **ao-http**: Exposes the HTTP API consumed by other ao units
## Technical Requirements
diff --git a/docs/compute-unit/setup.md b/docs/misc/components-compute-unit/setup.md
similarity index 100%
rename from docs/compute-unit/setup.md
rename to docs/misc/components-compute-unit/setup.md
diff --git a/docs/misc/getting-started-hyperpaths/index.md b/docs/misc/getting-started-hyperpaths/index.md
new file mode 100644
index 000000000..ced7c14f3
--- /dev/null
+++ b/docs/misc/getting-started-hyperpaths/index.md
@@ -0,0 +1,28 @@
+# HyperPATHs
+
+## Overview
+
+HyperPATHs provides a comprehensive set of HTTP endpoints for interacting with HyperBEAM nodes and accessing process data. This section covers various ways to extract value and interact with HyperBEAM through HTTP requests.
+
+## Key Concepts
+
+- **HTTP Endpoints**: Access process data and node information through standardized HTTP endpoints
+- **Cryptographic Signatures**: All responses are cryptographically signed for data integrity
+- **State Management**: Various methods for accessing and updating process states
+- **Node Interaction**: Tools for interacting with HyperBEAM nodes
+
+## Best Practices
+
+1. Always verify cryptographic signatures on responses
+2. Use appropriate caching strategies for frequently accessed data
+3. Implement proper error handling for network requests
+4. Consider rate limits and performance implications
+5. Keep sensitive data secure and use appropriate authentication methods
+
+## Common Use Cases
+
+- Real-time process state monitoring
+- Data synchronization between processes
+- Building web interfaces for AO processes
+- Automated process management and monitoring
+- Cross-process communication and data sharing
\ No newline at end of file
diff --git a/docs/guides/index.md b/docs/misc/index.md
similarity index 100%
rename from docs/guides/index.md
rename to docs/misc/index.md
diff --git a/docs/hyperbeam/api.md b/docs/misc/installation-core/hyperbeam-setup-config/api.md
similarity index 99%
rename from docs/hyperbeam/api.md
rename to docs/misc/installation-core/hyperbeam-setup-config/api.md
index aa0a26c89..85c7e8f0f 100644
--- a/docs/hyperbeam/api.md
+++ b/docs/misc/installation-core/hyperbeam-setup-config/api.md
@@ -1,4 +1,4 @@
-
+- Large messages may be rejected depending on node configuration
\ No newline at end of file
diff --git a/docs/hyperbeam/configuration.md b/docs/misc/installation-core/hyperbeam-setup-config/configuration.md
similarity index 73%
rename from docs/hyperbeam/configuration.md
rename to docs/misc/installation-core/hyperbeam-setup-config/configuration.md
index f6369bea0..93c452d42 100644
--- a/docs/hyperbeam/configuration.md
+++ b/docs/misc/installation-core/hyperbeam-setup-config/configuration.md
@@ -17,12 +17,11 @@ HyperBEAM is a highly configurable node runtime for decentralized applications.
For detailed information about specific aspects of HyperBEAM configuration, please refer to the following documentation:
-- [Configuration Methods](configuration-methods.md) - Different ways to configure HyperBEAM
-- [Configuration Options](configuration-options.md) - Complete reference of all configuration options
-- [Storage Configuration](storage-configuration.md) - Setting up file systems, RocksDB, and other storage backends
-- [Routing Configuration](routing-configuration.md) - Configuring request routing and connectivity
-- [Configuration Examples](configuration-examples.md) - Common deployment scenarios and sample configurations
-- [Configuration Troubleshooting](configuration-troubleshooting.md) - Solving common configuration issues
+- [Configuration Methods](./configuration/configuration-methods.md) - Different ways to configure HyperBEAM
+- [Configuration Options](./configuration/configuration-options.md) - Complete reference of all configuration options
+- [Storage Configuration](./configuration/storage-configuration.md) - Setting up file systems, RocksDB, and other storage backends
+- [Routing Configuration](./configuration/routing-configuration.md) - Configuring request routing and connectivity
+- [Configuration Examples](./configuration/configuration-examples.md) - Common deployment scenarios and sample configurations
## Getting Started
diff --git a/docs/hyperbeam/configuration/configuration-examples.md b/docs/misc/installation-core/hyperbeam-setup-config/configuration/configuration-examples.md
similarity index 100%
rename from docs/hyperbeam/configuration/configuration-examples.md
rename to docs/misc/installation-core/hyperbeam-setup-config/configuration/configuration-examples.md
diff --git a/docs/hyperbeam/configuration/configuration-methods.md b/docs/misc/installation-core/hyperbeam-setup-config/configuration/configuration-methods.md
similarity index 100%
rename from docs/hyperbeam/configuration/configuration-methods.md
rename to docs/misc/installation-core/hyperbeam-setup-config/configuration/configuration-methods.md
diff --git a/docs/hyperbeam/configuration/configuration-options.md b/docs/misc/installation-core/hyperbeam-setup-config/configuration/configuration-options.md
similarity index 100%
rename from docs/hyperbeam/configuration/configuration-options.md
rename to docs/misc/installation-core/hyperbeam-setup-config/configuration/configuration-options.md
diff --git a/docs/hyperbeam/configuration/index.md b/docs/misc/installation-core/hyperbeam-setup-config/configuration/index.md
similarity index 100%
rename from docs/hyperbeam/configuration/index.md
rename to docs/misc/installation-core/hyperbeam-setup-config/configuration/index.md
diff --git a/docs/hyperbeam/configuration/routing-configuration.md b/docs/misc/installation-core/hyperbeam-setup-config/configuration/routing-configuration.md
similarity index 100%
rename from docs/hyperbeam/configuration/routing-configuration.md
rename to docs/misc/installation-core/hyperbeam-setup-config/configuration/routing-configuration.md
diff --git a/docs/hyperbeam/configuration/storage-configuration.md b/docs/misc/installation-core/hyperbeam-setup-config/configuration/storage-configuration.md
similarity index 100%
rename from docs/hyperbeam/configuration/storage-configuration.md
rename to docs/misc/installation-core/hyperbeam-setup-config/configuration/storage-configuration.md
diff --git a/docs/hyperbeam/index.md b/docs/misc/installation-core/hyperbeam-setup-config/index.md
similarity index 100%
rename from docs/hyperbeam/index.md
rename to docs/misc/installation-core/hyperbeam-setup-config/index.md
diff --git a/docs/hyperbeam/setup.md b/docs/misc/installation-core/hyperbeam-setup-config/setup.md
similarity index 52%
rename from docs/hyperbeam/setup.md
rename to docs/misc/installation-core/hyperbeam-setup-config/setup.md
index ea53e7b52..5d497fe90 100644
--- a/docs/hyperbeam/setup.md
+++ b/docs/misc/installation-core/hyperbeam-setup-config/setup.md
@@ -56,21 +56,62 @@ curl http://localhost:10000/~meta@1.0/info
```
If you receive a response with node information, your HyperBEAM installation is working properly.
-## **4. Run HyperBEAM with Mainnet**
+## **4. Create and Run a HyperBEAM Release**
-To start HyperBEAM connected to the mainnet, you can use the `--eval` option with rebar3:
+For a more stable setup, especially when connecting to networks like mainnet or using specific features, it's recommended to create a release.
+### **a. Configure Your Node**
+
+HyperBEAM uses a `config.flat` file for configuration when running as a release. A sample file is included in the repository.
+
+1. Locate the `config.flat` file in the root of the HyperBEAM project directory.
+2. Edit the file to specify your desired settings. For example, to set the port and specify your wallet key file:
+
+ ```
+ port: 10001
+ priv_key_location: /path/to/your/wallet.json
+ # Add other configurations as needed
+ ```
+ Ensure the `priv_key_location` points to the correct path of your Arweave wallet key file.
+
+### **b. Build the Release (with Optional Profiles)**
+
+You can build a standard release or include specific profiles for additional features (like `genesis_wasm`, `rocksdb`, `http3`).
+
+To build a standard release:
```bash
-rebar3 shell --eval "hb:start_mainnet(#{ port => 10001, priv_key_location => <<\"./wallet.json\">>})."
+rebar3 release
```
-To verify that your HyperBEAM node is running correctly, you can check:
+To build a release with specific profiles (e.g., `rocksdb`):
+```bash
+rebar3 as rocksdb release
+```
+
+This command creates a self-contained release package in the `_build/default/rel/hb` directory.
+
+### **c. Run the Release**
+
+Navigate to the release directory and start the HyperBEAM node:
+
+```bash
+cd _build/default/rel/hb
+./bin/hb console
+```
+Replace `console` with `start` to run it in the background.
+
+!!! note "Stopping the Node"
+ To stop a HyperBEAM node started with `./bin/hb start`, run `./bin/hb stop` from the release directory (`_build/default/rel/hb`). If started with `./bin/hb console`, press `Ctrl+C` in the terminal to stop it.
+
+### **d. Verify the Release Node**
+
+Once the node is running, verify it by checking the meta device info endpoint. Use the port you specified in your `config.flat` (e.g., 10001):
```bash
curl http://localhost:10001/~meta@1.0/info
```
-If you receive a response with node information, your HyperBEAM installation is working properly.
+If you receive a response with node information, your HyperBEAM release is configured and running correctly.
## **Next Steps**
diff --git a/docs/hyperbeam/testing.md b/docs/misc/installation-core/hyperbeam-setup-config/testing.md
similarity index 100%
rename from docs/hyperbeam/testing.md
rename to docs/misc/installation-core/hyperbeam-setup-config/testing.md
diff --git a/docs/getting-started/index.md b/docs/misc/installation-core/system-dependencies/index.md
similarity index 100%
rename from docs/getting-started/index.md
rename to docs/misc/installation-core/system-dependencies/index.md
diff --git a/docs/getting-started/installation/dependencies.md b/docs/misc/installation-core/system-dependencies/installation/dependencies.md
similarity index 100%
rename from docs/getting-started/installation/dependencies.md
rename to docs/misc/installation-core/system-dependencies/installation/dependencies.md
diff --git a/docs/getting-started/installation/erlang.md b/docs/misc/installation-core/system-dependencies/installation/erlang.md
similarity index 100%
rename from docs/getting-started/installation/erlang.md
rename to docs/misc/installation-core/system-dependencies/installation/erlang.md
diff --git a/docs/getting-started/installation/index.md b/docs/misc/installation-core/system-dependencies/installation/index.md
similarity index 100%
rename from docs/getting-started/installation/index.md
rename to docs/misc/installation-core/system-dependencies/installation/index.md
diff --git a/docs/getting-started/installation/nodejs.md b/docs/misc/installation-core/system-dependencies/installation/nodejs.md
similarity index 100%
rename from docs/getting-started/installation/nodejs.md
rename to docs/misc/installation-core/system-dependencies/installation/nodejs.md
diff --git a/docs/getting-started/installation/rebar3.md b/docs/misc/installation-core/system-dependencies/installation/rebar3.md
similarity index 100%
rename from docs/getting-started/installation/rebar3.md
rename to docs/misc/installation-core/system-dependencies/installation/rebar3.md
diff --git a/docs/getting-started/installation/rust.md b/docs/misc/installation-core/system-dependencies/installation/rust.md
similarity index 100%
rename from docs/getting-started/installation/rust.md
rename to docs/misc/installation-core/system-dependencies/installation/rust.md
diff --git a/docs/getting-started/requirements.md b/docs/misc/installation-core/system-dependencies/requirements.md
similarity index 100%
rename from docs/getting-started/requirements.md
rename to docs/misc/installation-core/system-dependencies/requirements.md
diff --git a/docs/guides/js-client-guide.md b/docs/misc/js-client-guide.md
similarity index 100%
rename from docs/guides/js-client-guide.md
rename to docs/misc/js-client-guide.md
diff --git a/docs/misc/overview/index.md b/docs/misc/overview/index.md
new file mode 100644
index 000000000..554af770f
--- /dev/null
+++ b/docs/misc/overview/index.md
@@ -0,0 +1,60 @@
+!!! warning "Platform Support"
+ This documentation is currently written specifically for **Ubuntu 22.04**. Support for macOS and other platforms will be added in future updates.
+
+## Overview
+
+HyperBEAM is a client implementation of the AO-Core protocol, written in Erlang. It enables a decentralized computing platform where programs run as independent processes, communicate via asynchronous message passing, and operate across a distributed network of nodes.
+
+For detailed technical information about HyperBEAM's architecture and functionality, see the [HyperBEAM Overview](hyperbeam/index.md).
+
+### What is AO-Core?
+
+AO-Core is a protocol built to enable decentralized computations, offering a series of universal primitives. Instead of enforcing a single, monolithic architecture, AO-Core provides a framework into which any number of different computational models, encapsulated as primitive and composable devices, can be attached.
+
+AO-Core's protocol is built upon the following primitives:
+
+- **Hashpaths**: A mechanism for referencing locations in a program's state-space prior to execution
+- **Unified data structure**: For representing program states as HTTP documents
+- **Attestation protocol**: For expressing attestations of states found at particular hashpaths
+- **Meta-VM**: Allowing various state transformation programs (virtual machines and computational models, expressed in the form of devices) to be executed inside the AO-Core protocol
+
+## Installation Process Overview
+
+Setting up HyperBEAM involves several steps:
+
+1. **Check System Requirements** - Ensure your hardware and operating system meet the [minimum requirements](getting-started/requirements.md).
+2. **Install System Dependencies** - Set up the necessary system packages via the [Installation Guide](getting-started/installation/index.md).
+3. **Setup & Configure HyperBEAM** - Clone, Compile, Configure, and Run [HyperBEAM itself](hyperbeam/setup.md).
+4. **Setup & Configure the Compute Unit** - Clone, Compile, Configure, and Run the [Local Compute Unit](compute-unit/setup.md).
+5. **(Optional) Verify Installation** - Follow guides to ensure everything is working. (We might need to create or link to a verification guide here, e.g., `guides/verification.md`)
+
+### Before You Begin
+
+Before starting the installation process, make sure to:
+
+- Have access to a terminal/command line with administrative privileges.
+- Have a stable internet connection for downloading packages.
+- Allocate sufficient time (approximately 30-60 minutes for a complete setup).
+- Review the [System Requirements](getting-started/requirements.md) first.
+
+## Documentation Structure
+
+This documentation is organized into the following main sections accessible via the top navigation:
+
+- **[Home](.)**: This page - overview and starting points.
+- **[Installation & Core](getting-started/installation/index.md)**: Detailed steps for system dependencies and HyperBEAM setup/configuration.
+- **[Components](compute-unit/index.md)**: Information on related components like the Compute Unit and TEE.
+- **[Usage](guides/index.md)**: Practical guides and examples for using HyperBEAM.
+- **[Resources](source-code-docs/index.md)**: Source code documentation and reference materials (Troubleshooting, Glossary, FAQ).
+- **[Community](contribute/guidelines.md)**: How to contribute and get involved.
+
+## Community and Support
+
+- **GitHub HyperBEAM**: [permaweb/HyperBEAM](https://github.com/permaweb/HyperBEAM)
+- **GitHub Local CU**: [permaweb/local-cu](https://github.com/permaweb/local-cu)
+- **Discord**: [Join the community](https://discord.gg/V3yjzrBxPM)
+- **Issues**: [File a bug report](https://github.com/permaweb/HyperBEAM/issues)
+
+## License
+
+HyperBEAM is open-source software licensed under the [MIT License](https://github.com/permaweb/HyperBEAM/blob/main/LICENSE.md).
diff --git a/docs/misc/setting-up-selecting-devices.md b/docs/misc/setting-up-selecting-devices.md
index 5fe4f6414..9268ad0c3 100644
--- a/docs/misc/setting-up-selecting-devices.md
+++ b/docs/misc/setting-up-selecting-devices.md
@@ -137,8 +137,8 @@ PoDA:
Core payment framework that works with pricing and ledger devices. It requires the following node message settings:
-- `p4_pricing_device`: Estimates request cost
-- `p4_ledger_device`: Acts as payment ledger
+- `p4_pricing-device`: Estimates request cost
+- `p4_ledger-device`: Acts as payment ledger
### ~simple-pay@1.0
@@ -239,8 +239,8 @@ For a node intended for personal use only:
rebar3 shell --eval "hb:start_mainnet(#{
port => 9001,
key_location => 'path/to/my/wallet.key',
- p4_pricing_device => '~faff@1.0',
- p4_ledger_device => '~faff@1.0',
+ p4_pricing-device => '~faff@1.0',
+ p4_ledger-device => '~faff@1.0',
faff_allow_list => ['my-wallet-address']
})."
@@ -255,8 +255,8 @@ For a node offering computation services to the network:
rebar3 shell --eval "hb:start_mainnet(#{
port => 9001,
key_location => 'path/to/my/wallet.key',
- p4_pricing_device => '~simple-pay@1.0',
- p4_ledger_device => '~simple-pay@1.0',
+ p4_pricing-device => '~simple-pay@1.0',
+ p4_ledger-device => '~simple-pay@1.0',
simple_pay_price => 0.01,
preloaded_devices => ['~wasm64@1.0', '~process@1.0', 'dev_stack', 'dev_scheduler']
})."
@@ -272,8 +272,8 @@ For a node running in a Trusted Execution Environment:
rebar3 shell --eval "hb:start_mainnet(#{
port => 9001,
key_location => 'path/to/my/wallet.key',
- p4_pricing_device => '~simple-pay@1.0',
- p4_ledger_device => '~simple-pay@1.0',
+ p4_pricing-device => '~simple-pay@1.0',
+ p4_ledger-device => '~simple-pay@1.0',
simple_pay_price => 0.05,
preloaded_devices => ['~wasm64@1.0', '~process@1.0', 'dev_stack', 'dev_scheduler', '~snp@1.0']
})."
diff --git a/docs/resources/llms.md b/docs/resources/llms.md
new file mode 100644
index 000000000..c0c120334
--- /dev/null
+++ b/docs/resources/llms.md
@@ -0,0 +1,20 @@
+# LLM Context Files
+
+This section provides access to specially formatted files intended for consumption by Large Language Models (LLMs) to provide context about the HyperBEAM documentation.
+
+1. **[LLM Summary (llms.txt)](../llms.txt)**
+ * **Content**: Contains a brief summary of the HyperBEAM documentation structure and a list of relative file paths for all markdown documents included in the build.
+ * **Usage**: Useful for providing an LLM with a high-level overview and the available navigation routes within the documentation.
+
+2. **[LLM Full Content (llms-full.txt)](../llms-full.txt)**
+ * **Content**: A single text file containing the complete, concatenated content of all markdown documents from the specified documentation directories (`begin`, `run`, `guides`, `devices`, `resources`). Each file's content is clearly demarcated.
+ * **Usage**: Ideal for feeding the entire documentation content into an LLM for comprehensive context, analysis, or question-answering based on the full documentation set.
+
+!!! note "Generation Process"
+ These files are automatically generated by the `docs/build-all.sh` script during the documentation build process. They consolidate information from the following directories:
+
+ * `docs/begin`
+ * `docs/run`
+ * `docs/guides`
+ * `docs/devices`
+ * `docs/resources`
diff --git a/docs/reference/faq.md b/docs/resources/reference/faq.md
similarity index 85%
rename from docs/reference/faq.md
rename to docs/resources/reference/faq.md
index 621667639..af7bd79b3 100644
--- a/docs/reference/faq.md
+++ b/docs/resources/reference/faq.md
@@ -25,7 +25,11 @@ You can build a wide range of applications, including:
### Is HyperBEAM open source?
-Yes, HyperBEAM is open-source software licensed under the MIT License.
+Yes, HyperBEAM is open-source software licensed under the Business Source License.
+
+### What is the current focus or phase of HyperBEAM development?
+
+The initial development phase focuses on integrating AO processes more deeply with HyperBEAM. A key part of this is phasing out the reliance on traditional "dryrun" simulations for reading process state. Instead, processes are encouraged to use the [~patch@1.0 device](../../resources/source-code/dev_patch.md) to expose specific parts of their state directly via HyperPATH GET requests. This allows for more efficient and direct state access, particularly for web interfaces and external integrations. You can learn more about this mechanism in the [Exposing Process State with the Patch Device](../../build/exposing-process-state.md) guide.
## Installation and Setup
diff --git a/docs/reference/glossary.md b/docs/resources/reference/glossary.md
similarity index 85%
rename from docs/reference/glossary.md
rename to docs/resources/reference/glossary.md
index 533e8fe42..cdfe3355e 100644
--- a/docs/reference/glossary.md
+++ b/docs/resources/reference/glossary.md
@@ -2,88 +2,88 @@
This glossary provides definitions for terms and concepts used throughout the HyperBEAM documentation. For a comprehensive glossary of permaweb-specific terminology, check out the [permaweb glossary](#permaweb-glossary) section below.
-### AO-Core Protocol
+## AO-Core Protocol
The underlying protocol that HyperBEAM implements, enabling decentralized computing and communication between nodes. AO-Core provides a framework into which any number of different computational models, encapsulated as primitive devices, can be attached.
-### Asynchronous Message Passing
+## Asynchronous Message Passing
A communication paradigm where senders don't wait for receivers to be ready, allowing for non-blocking operations and better scalability.
-### Checkpoint
+## Checkpoint
A saved state of a process that can be used to resume execution from a known point, used for persistence and recovery.
-### Compute Unit (CU)
+## Compute Unit (CU)
The NodeJS component of HyperBEAM that executes WebAssembly modules and handles computational tasks.
-### Decentralized Execution
+## Decentralized Execution
The ability to run processes across a distributed network without centralized control or coordination.
-### Device
+## Device
A functional unit in HyperBEAM that provides specific capabilities to the system, such as storage, networking, or computational resources.
-### Erlang
+## Erlang
The programming language used to implement the HyperBEAM core, known for its robustness and support for building distributed, fault-tolerant applications.
-### ~flat@1.0
+## ~flat@1.0
A format used for encoding settings files in HyperBEAM configuration, using HTTP header styling.
-### Hashpaths
+## Hashpaths
A mechanism for referencing locations in a program's state-space prior to execution. These state-space links are represented as Merklized lists of programs inputs and initial states.
-### HyperBEAM
+## HyperBEAM
The Erlang-based node software that handles message routing, process management, and device coordination in the HyperBEAM ecosystem.
-### Message
+## Message
A data structure used for communication between processes in the HyperBEAM system. Messages can be interpreted as a binary term or as a collection of named functions (a Map of functions).
-### Module
+## Module
A unit of code that can be loaded and executed by the Compute Unit, typically in WebAssembly format.
-### Node
+## Node
An instance of HyperBEAM running on a physical or virtual machine that participates in the distributed network.
-### ~p4@1.0
+## ~p4@1.0
A device that runs as a pre-processor and post-processor in HyperBEAM, enabling a framework for node operators to sell usage of their machine's hardware to execute AO-Core devices.
-### Process
+## Process
An independent unit of computation in HyperBEAM with its own state and execution context.
-### Process ID
+## Process ID
A unique identifier assigned to a process within the HyperBEAM system.
-### ~scheduler@1.0
+## ~scheduler@1.0
A device used to assign a linear hashpath to an execution, such that all users may access it with a deterministic ordering.
-### ~compute-lite@1.0
+## ~compute-lite@1.0
A lightweight device wrapping a local WASM executor, used for executing legacynet AO processes inside HyperBEAM.
-### ~json-iface@1.0
+## ~json-iface@1.0
A device that offers a translation layer between the JSON-encoded message format used by legacy versions and HyperBEAM's native HTTP message format.
-### ~meta@1.0
+## ~meta@1.0
A device used to configure the node's hardware, supported devices, metering and payments information, amongst other configuration options.
-### ~process@1.0
+## ~process@1.0
A device that enables users to create persistent, shared executions that can be accessed by any number of users, each of whom may add additional inputs to its hashpath.
-### ~relay@1.0
+## ~relay@1.0
A device used to relay messages between nodes and the wider HTTP network. It offers an interface for sending and receiving messages using a variety of execution strategies.
-### ~simple-pay@1.0
+## ~simple-pay@1.0
A simple, flexible pricing device that can be used in conjunction with p4@1.0 to offer flat-fees for the execution of AO-Core messages.
-### ~snp@1.0
+## ~snp@1.0
A device used to generate and validate proofs that a node is executing inside a Trusted Execution Environment (TEE).
-### ~wasm64@1.0
+## ~wasm64@1.0
A device used to execute WebAssembly code, using the Web Assembly Micro-Runtime (WAMR) under-the-hood.
-### ~stack@1.0
+## ~stack@1.0
A device used to execute an ordered set of devices over the same inputs, allowing users to create complex combinations of other devices.
-### Trusted Execution Environment (TEE)
+## Trusted Execution Environment (TEE)
A secure area inside a processor that ensures the confidentiality and integrity of code and data loaded within it. Used in HyperBEAM for trust-minimized computation.
-### WebAssembly (WASM)
+## WebAssembly (WASM)
A binary instruction format that serves as a portable compilation target for programming languages, enabling deployment on the web and other environments.
## Permaweb Glossary
@@ -106,7 +106,7 @@ For a more comprehensive glossary of terms used in the permaweb, try the [Permaw
diff --git a/docs/reference/troubleshooting.md b/docs/resources/reference/troubleshooting.md
similarity index 100%
rename from docs/reference/troubleshooting.md
rename to docs/resources/reference/troubleshooting.md
diff --git a/docs/source-code-docs/README.md b/docs/resources/source-code/README.md
similarity index 90%
rename from docs/source-code-docs/README.md
rename to docs/resources/source-code/README.md
index d2a41bff6..7e15fb008 100644
--- a/docs/source-code-docs/README.md
+++ b/docs/resources/source-code/README.md
@@ -28,15 +28,21 @@
@@ -107,6 +108,12 @@ Generate an address from a public key.
`to_address(PubKey, X2) -> any()`
+
+
+### to_ecdsa_address/1 * ###
+
+`to_ecdsa_address(PubKey) -> any()`
+
### to_rsa_address/1 * ###
diff --git a/docs/source-code-docs/dev_cache.md b/docs/resources/source-code/dev_cache.md
similarity index 98%
rename from docs/source-code-docs/dev_cache.md
rename to docs/resources/source-code/dev_cache.md
index 39e626ec5..f94697515 100644
--- a/docs/source-code-docs/dev_cache.md
+++ b/docs/resources/source-code/dev_cache.md
@@ -1,7 +1,7 @@
+# [Module dev_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_cache.erl)
+
-# Module dev_cache #
-* [Description](#description)
A device that looks up an ID from a local store and returns it,
honoring the `accept` key to return the correct format.
diff --git a/docs/source-code-docs/dev_cacheviz.md b/docs/resources/source-code/dev_cacheviz.md
similarity index 91%
rename from docs/source-code-docs/dev_cacheviz.md
rename to docs/resources/source-code/dev_cacheviz.md
index f03060968..60dd68313 100644
--- a/docs/source-code-docs/dev_cacheviz.md
+++ b/docs/resources/source-code/dev_cacheviz.md
@@ -1,7 +1,7 @@
+# [Module dev_cacheviz.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_cacheviz.erl)
+
-# Module dev_cacheviz #
-* [Description](#description)
A device that generates renders (or renderable dot output) of a node's
cache.
diff --git a/docs/source-code-docs/dev_codec_ans104.md b/docs/resources/source-code/dev_codec_ans104.md
similarity index 81%
rename from docs/source-code-docs/dev_codec_ans104.md
rename to docs/resources/source-code/dev_codec_ans104.md
index f6c434b74..a597b00be 100644
--- a/docs/source-code-docs/dev_codec_ans104.md
+++ b/docs/resources/source-code/dev_codec_ans104.md
@@ -1,7 +1,7 @@
+# [Module dev_codec_ans104.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_codec_ans104.erl)
+
-# Module dev_codec_ans104 #
-* [Description](#description)
Codec for managing transformations from `ar_bundles`-style Arweave TX
records to and from TABMs.
@@ -12,7 +12,7 @@ records to and from TABMs.
+add_trusted_node(NodeAddr::binary(), Report::map(), RequesterPubKey::term(), Opts::map()) -> ok
+
+
+
+`NodeAddr`: The joining node's address `Report`: The commitment report provided by the joining node `RequesterPubKey`: The joining node's public key `Opts`: A map of configuration options
+
+returns: ok
+
+Adds a node to the trusted nodes list with its commitment report.
+
+This function updates the trusted nodes configuration:
+1. Retrieves the current trusted nodes map
+2. Adds the new node with its report and public key
+3. Updates the node configuration with the new trusted nodes list
+
+
+
+### become/3 ###
+
+
+
+
+`Opts`: A map of configuration options
+
+returns: `{ok, Map}` on success with confirmation details, or
+`{error, Binary}` if the node is not part of a green zone or
+identity adoption fails.
+
+Clones the identity of a target node in the green zone.
+
+This function performs the following operations:
+1. Retrieves target node location and ID from the configuration
+2. Verifies that the local node has a valid shared AES key
+3. Requests the target node's encrypted key via its key endpoint
+4. Verifies the response is from the expected peer
+5. Decrypts the target node's private key using the shared AES key
+6. Updates the local node's wallet with the target node's identity
+
+Required configuration in Opts map:
+- green_zone_peer_location: Target node's address
+- green_zone_peer_id: Target node's unique identifier
+- priv_green_zone_aes: The shared AES key for the green zone
+
+
+
+### calculate_node_message/3 * ###
+
+`calculate_node_message(RequiredOpts, Req, List) -> any()`
+
+Generate the node message that should be set prior to joining
+a green zone.
+
+This function takes a required opts message, a request message, and an
+`adopt-config` value. The `adopt-config` value can be a boolean, a list of
+fields that should be included in the node message from the request, or a
+binary string of fields to include, separated by commas.
+
+
+
+### decrypt_zone_key/2 * ###
+
+
+
+
+`EncZoneKey`: The encrypted zone AES key (Base64 encoded or binary) `Opts`: A map of configuration options
+
+returns: {ok, DecryptedKey} on success with the decrypted AES key
+
+Decrypts an AES key using the node's RSA private key.
+
+This function handles decryption of the zone key:
+1. Decodes the encrypted key if it's in Base64 format
+2. Extracts the RSA private key components from the wallet
+3. Creates an RSA private key record
+4. Performs private key decryption on the encrypted key
+
+
+
+### default_zone_required_opts/1 * ###
+
+
+
+
+`Opts`: A map of configuration options from which to derive defaults
+
+returns: A map of required configuration options for the green zone
+
+Provides the default required options for a green zone.
+
+This function defines the baseline security requirements for nodes in a green zone:
+1. Restricts loading of remote devices and only allows trusted signers
+2. Limits to preloaded devices from the initiating machine
+3. Enforces specific store configuration
+4. Prevents route changes from the defaults
+5. Requires matching hooks across all peers
+6. Disables message scheduling to prevent conflicts
+7. Enforces a permanent state to prevent further configuration changes
+
+
+
+### encrypt_payload/2 * ###
+
+
+
+
+`AESKey`: The shared AES key (256-bit binary) `RequesterPubKey`: The node's public RSA key
+
+returns: The encrypted AES key
+
+Encrypts an AES key with a node's RSA public key.
+
+This function securely encrypts the shared key for transmission:
+1. Extracts the RSA public key components
+2. Creates an RSA public key record
+3. Performs public key encryption on the AES key
+
+
+
+### finalize_become/5 * ###
+
+`finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> any()`
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Controls which functions are exposed via the device API.
+
+This function defines the security boundary for the green zone device by
+explicitly listing which functions are available through the API.
+
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+Provides information about the green zone device and its API.
+
+This function returns detailed documentation about the device, including:
+1. A high-level description of the device's purpose
+2. Version information
+3. Available API endpoints with their parameters and descriptions
+
+
+
+### init/3 ###
+
+
+
+
+`Opts`: A map of configuration options
+
+returns: `{ok, Binary}` on success with confirmation message, or
+`{error, Binary}` on failure with error message.
+
+Initialize the green zone for a node.
+
+This function performs the following operations:
+1. Validates the node's history to ensure this is a valid initialization
+2. Retrieves or creates a required configuration for the green zone
+3. Ensures a wallet (keypair) exists or creates a new one
+4. Generates a new 256-bit AES key for secure communication
+5. Updates the node's configuration with these cryptographic identities
+
+Config options in Opts map:
+- green_zone_required_config: (Optional) Custom configuration requirements
+- priv_wallet: (Optional) Existing wallet to use instead of creating a new one
+- priv_green_zone_aes: (Optional) Existing AES key, if already part of a zone
+
+
+
+### join/3 ###
+
+
+
+
+`M1`: The join request message with target peer information `M2`: Additional request details, may include adoption preferences `Opts`: A map of configuration options for join operations
+
+returns: `{ok, Map}` on success with join response details, or
+`{error, Binary}` on failure with error message.
+
+Initiates the join process for a node to enter an existing green zone.
+
+This function performs the following operations depending on the state:
+1. Validates the node's history to ensure proper initialization
+2. Checks for target peer information (location and ID)
+3. If target peer is specified:
+a. Generates a commitment report for the peer
+b. Prepares and sends a POST request to the target peer
+c. Verifies the response and decrypts the returned zone key
+d. Updates local configuration with the shared AES key
+4. If no peer is specified, processes the join request locally
+
+Config options in Opts map:
+- green_zone_peer_location: Target peer's address
+- green_zone_peer_id: Target peer's unique identifier
+- green_zone_adopt_config:
+(Optional) Whether to adopt peer's configuration (default: true)
+
+
+
+### join_peer/5 * ###
+
+
+
+
+`PeerLocation`: The target peer's address `PeerID`: The target peer's unique identifier `M2`: May contain ShouldMount flag to enable encrypted volume mounting
+
+returns: `{ok, Map}` on success with confirmation message, or
+`{error, Map|Binary}` on failure with error details
+
+Processes a join request to a specific peer node.
+
+This function handles the client-side join flow when connecting to a peer:
+1. Verifies the node is not already in a green zone
+2. Optionally adopts configuration from the target peer
+3. Generates a hardware-backed commitment report
+4. Sends a POST request to the peer's join endpoint
+5. Verifies the response signature
+6. Decrypts the returned AES key
+7. Updates local configuration with the shared key
+8. Optionally mounts an encrypted volume using the shared key
+
+
+
+### key/3 ###
+
+
+
+
+`Opts`: A map of configuration options
+
+returns: `{ok, Map}` containing the encrypted key and IV on success, or
+`{error, Binary}` if the node is not part of a green zone
+
+Encrypts and provides the node's private key for secure sharing.
+
+This function performs the following operations:
+1. Retrieves the shared AES key and the node's wallet
+2. Verifies that the node is part of a green zone (has a shared AES key)
+3. Generates a random initialization vector (IV) for encryption
+4. Encrypts the node's private key using AES-256-GCM with the shared key
+5. Returns the encrypted key and IV for secure transmission
+
+Required configuration in Opts map:
+- priv_green_zone_aes: The shared AES key for the green zone
+- priv_wallet: The node's wallet containing the private key to encrypt
+
+
+
+### maybe_set_zone_opts/4 * ###
+
+
+
+
+`PeerLocation`: The location of the peer node to join `PeerID`: The ID of the peer node to join `Req`: The request message with adoption preferences `InitOpts`: A map of initial configuration options
+
+returns: `{ok, Map}` with updated configuration on success, or
+`{error, Binary}` if configuration retrieval fails
+
+Adopts configuration from a peer when joining a green zone.
+
+This function handles the conditional adoption of peer configuration:
+1. Checks if adoption is enabled (default: true)
+2. Requests required configuration from the peer
+3. Verifies the authenticity of the configuration
+4. Creates a node message with appropriate settings
+5. Updates the local node configuration
+
+Config options:
+- green_zone_adopt_config: Controls configuration adoption (boolean, list, or binary)
+
+
+
+### rsa_wallet_integration_test/0 * ###
+
+`rsa_wallet_integration_test() -> any()`
+
+Test RSA operations with the existing wallet structure.
+
+This test function verifies that encryption and decryption using the RSA keys
+from the wallet work correctly. It creates a new wallet, encrypts a test
+message with the RSA public key, and then decrypts it with the RSA private
+key, asserting that the decrypted message matches the original.
+
+
+
+### try_mount_encrypted_volume/2 * ###
+
+`try_mount_encrypted_volume(AESKey, Opts) -> any()`
+
+Attempts to mount an encrypted volume using the green zone AES key.
+
+This function handles the complete process of secure storage setup by
+delegating to the dev_volume module, which provides a unified interface
+for volume management.
+
+The encryption key used for the volume is the same AES key used for green zone
+communication, ensuring that only nodes in the green zone can access the data.
+
+
+
+### validate_join/3 * ###
+
+
+
+
+`M1`: Ignored parameter `Req`: The join request containing commitment report and public key `Opts`: A map of configuration options
+
+returns: `{ok, Map}` on success with encrypted AES key, or
+`{error, Binary}` on failure with error message
+
+Validates an incoming join request from another node.
+
+This function handles the server-side join flow when receiving a connection
+request:
+1. Validates the peer's configuration meets required standards
+2. Extracts the commitment report and public key from the request
+3. Verifies the hardware-backed commitment report
+4. Adds the joining node to the trusted nodes list
+5. Encrypts the shared AES key with the peer's public key
+6. Returns the encrypted key to the requesting node
+
+
+
+### validate_peer_opts/2 * ###
+
+
+
+
+`Req`: The request message containing the peer's configuration `Opts`: A map of the local node's configuration options
+
+returns: true if the peer's configuration is valid, false otherwise
+
+Validates that a peer's configuration matches required options.
+
+This function ensures the peer node meets configuration requirements:
+1. Retrieves the local node's required configuration
+2. Gets the peer's options from its message
+3. Adds required configuration to peer's required options list
+4. Verifies the peer's node history is valid
+5. Checks that the peer's options match the required configuration
+
diff --git a/docs/resources/source-code/dev_hook.md b/docs/resources/source-code/dev_hook.md
new file mode 100644
index 000000000..477df1e5f
--- /dev/null
+++ b/docs/resources/source-code/dev_hook.md
@@ -0,0 +1,166 @@
+# [Module dev_hook.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hook.erl)
+
+
+
+
+A generalized interface for `hooking` into HyperBEAM nodes.
+
+
+
+## Description ##
+
+This module allows users to define `hooks` that are executed at various
+points in the lifecycle of nodes and message evaluations.
+
+Hooks are maintained in the `node message` options, under the `on`
+key. Each `hook` may have zero or many `handlers` against which its
+request is executed. A new `handler` of a hook can be registered by simply
+adding a new key to that message. If multiple hooks need to be executed for
+a single event, the key's value can be set to a list of hooks.
+
+`hook`s themselves do not need to be added explicitly. Any device can add
+a hook by simply executing `dev_hook:on(HookName, Req, Opts)`. This
+function does not affect the hashpath of a message and is not exported on
+the device's API, such that it is not possible to call it directly with
+AO-Core resolution.
+
+All handlers are expressed in the form of a message, upon which the hook's
+request is evaluated:
+
+AO(HookMsg, Req, Opts) => {Status, Result}
+
+The `Status` and `Result` of the evaluation can be used at the `hook` caller's
+discretion. If multiple handlers are to be executed for a single `hook`, the
+result of each is used as the input to the next, on the assumption that the
+status of the previous is `ok`. If a non-`ok` status is encountered, the
+evaluation is halted and the result is returned to the caller. This means
+that in most cases, hooks take the form of chainable pipelines of functions,
+passing the most pertinent data in the `body` key of both the request and
+result. Hook definitions can also set the `hook/result` key to `ignore`, if
+the result of the execution should be discarded and the prior value (the
+input to the hook) should be used instead. The `hook/commit-request` key can
+also be set to `true` if the request should be committed by the node before
+execution of the hook.
+
+The default HyperBEAM node implements several useful hooks. They include:
+
+start: Executed when the node starts.
+Req/body: The node's initial configuration.
+Result/body: The node's possibly updated configuration.
+request: Executed when a request is received via the HTTP API.
+Req/body: The sequence of messages that the node will evaluate.
+Req/request: The raw, unparsed singleton request.
+Result/body: The sequence of messages that the node will evaluate.
+step: Executed after each message in a sequence has been evaluated.
+Req/body: The result of the evaluation.
+Result/body: The result of the evaluation.
+response: Executed when a response is sent via the HTTP API.
+Req/body: The result of the evaluation.
+Req/request: The raw, unparsed singleton request that was used to
+generate the response.
+Result/body: The message to be sent in response to the request.
+
+Additionally, this module implements a traditional device API, allowing the
+node operator to register hooks to the node and find those that are
+currently active.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### execute_handler/4 * ###
+
+`execute_handler(HookName, Handler, Req, Opts) -> any()`
+
+Execute a single handler
+Handlers are expressed as messages that can be resolved via AO.
+
+
+
+### execute_handlers/4 * ###
+
+`execute_handlers(HookName, Rest, Req, Opts) -> any()`
+
+Execute a list of handlers in sequence.
+The result of each handler is used as input to the next handler.
+If a handler returns a non-ok status, execution is halted.
+
+
+
+### find/2 ###
+
+`find(HookName, Opts) -> any()`
+
+Get all handlers for a specific hook from the node message options.
+Handlers are stored in the `on` key of this message. The `find/2` variant of
+this function only takes a hook name and node message, and is not called
+directly via the device API. Instead it is used by `on/3` and other internal
+functionality to find handlers when necessary. The `find/3` variant can,
+however, be called directly via the device API.
+
+
+
+### find/3 ###
+
+`find(Base, Req, Opts) -> any()`
+
+
+
+### halt_on_error_test/0 * ###
+
+`halt_on_error_test() -> any()`
+
+Test that pipeline execution halts on error
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Device API information
+
+
+
+### multiple_handlers_test/0 * ###
+
+`multiple_handlers_test() -> any()`
+
+Test that multiple handlers form a pipeline
+
+
+
+### no_handlers_test/0 * ###
+
+`no_handlers_test() -> any()`
+
+Test that hooks with no handlers return the original request
+
+
+
+### on/3 ###
+
+`on(HookName, Req, Opts) -> any()`
+
+Execute a named hook with the provided request and options
+This function finds all handlers for the hook and evaluates them in sequence.
+The result of each handler is used as input to the next handler.
+
+
+
+### single_handler_test/0 * ###
+
+`single_handler_test() -> any()`
+
+Test that a single handler is executed correctly
+
diff --git a/docs/source-code-docs/dev_hyperbuddy.md b/docs/resources/source-code/dev_hyperbuddy.md
similarity index 93%
rename from docs/source-code-docs/dev_hyperbuddy.md
rename to docs/resources/source-code/dev_hyperbuddy.md
index ce63bb197..da40fdaaf 100644
--- a/docs/source-code-docs/dev_hyperbuddy.md
+++ b/docs/resources/source-code/dev_hyperbuddy.md
@@ -1,7 +1,7 @@
+# [Module dev_hyperbuddy.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hyperbuddy.erl)
+
-# Module dev_hyperbuddy #
-* [Description](#description)
A device that renders a REPL-like interface for AO-Core via HTML.
diff --git a/docs/source-code-docs/dev_json_iface.md b/docs/resources/source-code/dev_json_iface.md
similarity index 98%
rename from docs/source-code-docs/dev_json_iface.md
rename to docs/resources/source-code/dev_json_iface.md
index 97bb71f99..dca4dd7b4 100644
--- a/docs/source-code-docs/dev_json_iface.md
+++ b/docs/resources/source-code/dev_json_iface.md
@@ -1,7 +1,7 @@
+# [Module dev_json_iface.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_json_iface.erl)
+
-# Module dev_json_iface #
-* [Description](#description)
A device that provides a way for WASM execution to interact with
the HyperBEAM (and AO) systems, using JSON as a shared data representation.
diff --git a/docs/resources/source-code/dev_local_name.md b/docs/resources/source-code/dev_local_name.md
new file mode 100644
index 000000000..c536e6cec
--- /dev/null
+++ b/docs/resources/source-code/dev_local_name.md
@@ -0,0 +1,130 @@
+# [Module dev_local_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_local_name.erl)
+
+
+
+
+A device for registering and looking up local names.
+
+
+
+## Description ##
+This device uses
+the node message to store a local cache of its known names, and the typical
+non-volatile storage of the node message to store the names long-term.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### default_lookup/4 * ###
+
+`default_lookup(Key, X2, Req, Opts) -> any()`
+
+Handle all other requests by delegating to the lookup function.
+
+
+
+### direct_register/2 ###
+
+`direct_register(Req, Opts) -> any()`
+
+Register a name without checking if the caller is an operator. Exported
+for use by other devices, but not publicly available.
+
+
+
+### find_names/1 * ###
+
+`find_names(Opts) -> any()`
+
+Returns a message containing all known names.
+
+
+
+### generate_test_opts/0 * ###
+
+`generate_test_opts() -> any()`
+
+
+
+### http_test/0 * ###
+
+`http_test() -> any()`
+
+
+
+### info/1 ###
+
+`info(Opts) -> any()`
+
+Export only the `lookup` and `register` functions.
+
+
+
+### load_names/1 * ###
+
+`load_names(Opts) -> any()`
+
+Loads all known names from the cache and returns the new `node message`
+with those names loaded into it.
+
+
+
+### lookup/3 ###
+
+`lookup(X1, Req, Opts) -> any()`
+
+Takes a `key` argument and returns the value of the name, if it exists.
+
+
+
+### lookup_opts_name_test/0 * ###
+
+`lookup_opts_name_test() -> any()`
+
+
+
+### no_names_test/0 * ###
+
+`no_names_test() -> any()`
+
+
+
+### register/3 ###
+
+`register(X1, Req, Opts) -> any()`
+
+Takes a `key` and `value` argument and registers the name. The caller
+must be the node operator in order to register a name.
+
+
+
+### register_test/0 * ###
+
+`register_test() -> any()`
+
+
+
+### unauthorized_test/0 * ###
+
+`unauthorized_test() -> any()`
+
+
+
+### update_names/2 * ###
+
+`update_names(LocalNames, Opts) -> any()`
+
+Updates the node message with the new names. Further HTTP requests will
+use this new message, removing the need to look up the names from non-volatile
+storage.
+
diff --git a/docs/source-code-docs/dev_lookup.md b/docs/resources/source-code/dev_lookup.md
similarity index 80%
rename from docs/source-code-docs/dev_lookup.md
rename to docs/resources/source-code/dev_lookup.md
index 634a15a61..6ed34a647 100644
--- a/docs/source-code-docs/dev_lookup.md
+++ b/docs/resources/source-code/dev_lookup.md
@@ -1,7 +1,7 @@
+# [Module dev_lookup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lookup.erl)
+
-# Module dev_lookup #
-* [Description](#description)
A device that looks up an ID from a local store and returns it, honoring
the `accept` key to return the correct format.
@@ -11,7 +11,7 @@ the `accept` key to return the correct format.
## Function Index ##
-
Fetch a resource from the cache using "target" ID extracted from the message.
@@ -48,3 +48,5 @@ the `accept` key to return the correct format.
`read(M1, M2, Opts) -> any()`
+Fetch a resource from the cache using "target" ID extracted from the message.
+
diff --git a/docs/resources/source-code/dev_lua.md b/docs/resources/source-code/dev_lua.md
new file mode 100644
index 000000000..9101078aa
--- /dev/null
+++ b/docs/resources/source-code/dev_lua.md
@@ -0,0 +1,305 @@
+# [Module dev_lua.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lua.erl)
+
+
+
+
+A device that calls a Lua module upon a request and returns the result.
+
+
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### ao_core_resolution_from_lua_test/0 * ###
+
+`ao_core_resolution_from_lua_test() -> any()`
+
+Run an AO-Core resolution from the Lua environment.
+
+
+
+### ao_core_sandbox_test/0 * ###
+
+`ao_core_sandbox_test() -> any()`
+
+Run an AO-Core resolution from the Lua environment.
+
+
+
+### aos_authority_not_trusted_test/0 * ###
+
+`aos_authority_not_trusted_test() -> any()`
+
+
+
+### aos_process_benchmark_test_/0 * ###
+
+`aos_process_benchmark_test_() -> any()`
+
+Benchmark the performance of Lua executions.
+
+
+
+### compute/4 * ###
+
+`compute(Key, RawBase, Req, Opts) -> any()`
+
+Call the Lua script with the given arguments.
+
+
+
+### decode/1 ###
+
+`decode(EncMsg) -> any()`
+
+Decode a Lua result into a HyperBEAM `structured@1.0` message.
+
+
+
+### decode_params/2 * ###
+
+`decode_params(Rest, State) -> any()`
+
+Decode a list of Lua references, as found in a stack trace, into a
+list of Erlang terms.
+
+
+
+### decode_stacktrace/2 * ###
+
+`decode_stacktrace(StackTrace, State0) -> any()`
+
+Parse a Lua stack trace into a list of messages.
+
+
+
+### decode_stacktrace/3 * ###
+
+`decode_stacktrace(Rest, State, Acc) -> any()`
+
+
+
+### direct_benchmark_test/0 * ###
+
+`direct_benchmark_test() -> any()`
+
+Benchmark the performance of Lua executions.
+
+
+
+### encode/1 ###
+
+`encode(Map) -> any()`
+
+Encode a HyperBEAM `structured@1.0` message into a Lua term.
+
+
+
+### ensure_initialized/3 * ###
+
+`ensure_initialized(Base, Req, Opts) -> any()`
+
+Initialize the Lua VM if it is not already initialized. Optionally takes
+the script as a Binary string. If not provided, the module will be loaded
+from the base message.
+
+
+
+### error_response_test/0 * ###
+
+`error_response_test() -> any()`
+
+
+
+### find_modules/2 * ###
+
+`find_modules(Base, Opts) -> any()`
+
+Find the script in the base message, either by ID or by string.
+
+
+
+### functions/3 ###
+
+`functions(Base, Req, Opts) -> any()`
+
+Return a list of all functions in the Lua environment.
+
+
+
+### generate_lua_process/1 * ###
+
+`generate_lua_process(File) -> any()`
+
+Generate a Lua process message.
+
+
+
+### generate_stack/1 * ###
+
+`generate_stack(File) -> any()`
+
+Generate a stack message for the Lua process.
+
+
+
+### generate_test_message/1 * ###
+
+`generate_test_message(Process) -> any()`
+
+Generate a test message for a Lua process.
+
+
+
+### info/1 ###
+
+`info(Base) -> any()`
+
+All keys that are not directly available in the base message are
+resolved by calling the Lua function in the module of the same name.
+Additionally, we exclude the `keys`, `set`, `encode` and `decode` functions
+which are `message@1.0` core functions, and Lua public utility functions.
+
+
+
+### init/3 ###
+
+`init(Base, Req, Opts) -> any()`
+
+Initialize the device state, loading the script into memory if it is
+a reference.
+
+
+
+### initialize/3 * ###
+
+`initialize(Base, Modules, Opts) -> any()`
+
+Initialize a new Lua state with a given base message and module.
+
+
+
+### invoke_aos_test/0 * ###
+
+`invoke_aos_test() -> any()`
+
+
+
+### invoke_non_compute_key_test/0 * ###
+
+`invoke_non_compute_key_test() -> any()`
+
+Call a non-compute key on a Lua device message and ensure that the
+function of the same name in the script is called.
+
+
+
+### load_modules/2 * ###
+
+`load_modules(Modules, Opts) -> any()`
+
+Load a list of modules for installation into the Lua VM.
+
+
+
+### load_modules/3 * ###
+
+`load_modules(Rest, Opts, Acc) -> any()`
+
+
+
+### load_modules_by_id_test/0 * ###
+
+`load_modules_by_id_test() -> any()`
+
+
+
+### lua_http_hook_test/0 * ###
+
+`lua_http_hook_test() -> any()`
+
+Use a Lua module as a hook on the HTTP server via `~meta@1.0`.
+
+
+
+### multiple_modules_test/0 * ###
+
+`multiple_modules_test() -> any()`
+
+
+
+### normalize/3 ###
+
+`normalize(Base, Req, RawOpts) -> any()`
+
+Restore the Lua state from a snapshot, if it exists.
+
+
+
+### process_response/2 * ###
+
+`process_response(X1, Priv) -> any()`
+
+Process a response to a Luerl invocation. Returns the typical AO-Core
+HyperBEAM response format.
+
+
+
+### pure_lua_process_benchmark_test_/0 * ###
+
+`pure_lua_process_benchmark_test_() -> any()`
+
+
+
+### pure_lua_process_test/0 * ###
+
+`pure_lua_process_test() -> any()`
+
+Call a process whose `execution-device` is set to `lua@5.3a`.
+
+
+
+### sandbox/3 * ###
+
+`sandbox(State, Map, Opts) -> any()`
+
+Sandbox (render inoperable) a set of Lua functions. Each function is
+referred to as if it is a path in AO-Core, with its value being what to
+return to the caller. For example, 'os.exit' would be
+referred to as `os/exit`. If preferred, a list rather than a map may be
+provided, in which case the functions all return `sandboxed`.
+
+
+
+### sandboxed_failure_test/0 * ###
+
+`sandboxed_failure_test() -> any()`
+
+
+
+### simple_invocation_test/0 * ###
+
+`simple_invocation_test() -> any()`
+
+
+
+### snapshot/3 ###
+
+`snapshot(Base, Req, Opts) -> any()`
+
+Snapshot the Lua state from a live computation. Normalizes its `priv`
+state element, then serializes the state to a binary.
+
diff --git a/docs/resources/source-code/dev_lua_lib.md b/docs/resources/source-code/dev_lua_lib.md
new file mode 100644
index 000000000..016e90a0b
--- /dev/null
+++ b/docs/resources/source-code/dev_lua_lib.md
@@ -0,0 +1,86 @@
+# [Module dev_lua_lib.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_lua_lib.erl)
+
+
+
+
+A module for providing AO library functions to the Lua environment.
+
+
+
+## Description ##
+
+This module contains the implementation of the functions, each by the name
+that should be used in the `ao` table in the Lua environment. Every export
+is imported into the Lua environment.
+
+Each function adheres closely to the Luerl calling convention, adding the
+appropriate node message as a third argument:
+
+fun(Args, State, NodeMsg) -> {ResultTerms, NewState}
+
+As Lua allows for multiple return values, each function returns a list of
+terms to grant to the caller. Matching the tuple convention used by AO-Core,
+the first term is typically the status, and the second term is the result.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### exec_test/2 * ###
+
+`exec_test(State, Function) -> any()`
+
+Generate an EUnit test for a given function.
+
+
+
+### exec_test_/0 * ###
+
+`exec_test_() -> any()`
+
+Main entrypoint for Lua tests.
+
+
+
+### new_state/1 * ###
+
+`new_state(File) -> any()`
+
+Create a new Lua environment for a given script.
+
+
+
+### parse_spec/1 ###
+
+`parse_spec(Str) -> any()`
+
+Parse a string representation of test descriptions received from the
+command line via the `LUA_TESTS` environment variable.
+
+Supported syntax in loose BNF/RegEx:
+
+Definitions := (ModDef,)+
+ModDef := ModName(TestDefs)?
+ModName := ModuleInLUA_SCRIPTS|(FileName[.lua])?
+TestDefs := (:TestDef)+
+TestDef := TestName
+
+File names ending in `.lua` are assumed to be relative paths from the current
+working directory. Module names lacking the `.lua` extension are assumed to
+be modules found in the `LUA_SCRIPTS` environment variable (defaulting to
+`scripts/`).
+
+For example, to run a single test one could call the following:
+
+LUA_TESTS=~/src/LuaScripts/test.yourTest rebar3 lua-tests
+
+To specify that one would like to run all of the tests in the
+`scripts/test.lua` file and two tests from the `scripts/test2.lua` file, the
+user could provide the following test definition:
+
+LUA_TESTS="test,scripts/test2.userTest1|userTest2" rebar3 lua-tests
+
+
+
+### suite/2 * ###
+
+`suite(File, Funcs) -> any()`
+
+Generate an EUnit test suite for a given Lua script. If `Funcs` is
+the atom `tests` we find all of the global functions in the script, then
+filter for those ending in `_test` in a similar fashion to Eunit.
+
+
+
+### terminates_with/2 * ###
+
+`terminates_with(String, Suffix) -> any()`
+
+Check if a string terminates with a given suffix.
+
diff --git a/docs/source-code-docs/dev_manifest.md b/docs/resources/source-code/dev_manifest.md
similarity index 93%
rename from docs/source-code-docs/dev_manifest.md
rename to docs/resources/source-code/dev_manifest.md
index 4e3ee825b..4ee517450 100644
--- a/docs/source-code-docs/dev_manifest.md
+++ b/docs/resources/source-code/dev_manifest.md
@@ -1,7 +1,7 @@
+# [Module dev_manifest.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_manifest.erl)
+
-# Module dev_manifest #
-* [Description](#description)
An Arweave path manifest resolution device.
diff --git a/docs/source-code-docs/dev_message.md b/docs/resources/source-code/dev_message.md
similarity index 96%
rename from docs/source-code-docs/dev_message.md
rename to docs/resources/source-code/dev_message.md
index 2c2673997..714013bc9 100644
--- a/docs/source-code-docs/dev_message.md
+++ b/docs/resources/source-code/dev_message.md
@@ -1,7 +1,7 @@
+# [Module dev_message.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_message.erl)
+
-# Module dev_message #
-* [Description](#description)
The identity device: For non-reserved keys, it simply returns a key
from the message as it is found in the message's underlying Erlang map.
@@ -22,7 +22,7 @@ implement a case-insensitive key lookup rather than delegating to
maps:get/2.
Return a message with only the relevant commitments for a given request.
@@ -119,6 +119,12 @@ Return the committers of a message that are present in the given request.
`committers(X1, X2, NodeOpts) -> any()`
+
+
+### deep_unset_test/0 * ###
+
+`deep_unset_test() -> any()`
+
### exec_for_commitment/5 * ###
diff --git a/docs/source-code-docs/dev_meta.md b/docs/resources/source-code/dev_meta.md
similarity index 57%
rename from docs/source-code-docs/dev_meta.md
rename to docs/resources/source-code/dev_meta.md
index cf2bc0361..a7f2d5d51 100644
--- a/docs/source-code-docs/dev_meta.md
+++ b/docs/resources/source-code/dev_meta.md
@@ -1,7 +1,7 @@
+# [Module dev_meta.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_meta.erl)
+
-# Module dev_meta #
-* [Description](#description)
The hyperbeam meta device, which is the default entry point
for all messages processed by the machine.
@@ -21,8 +21,9 @@ the AO-Core resolver has returned a result.
Validate that the request is signed by the operator of the node, then
allow them to update the node message.
@@ -56,6 +57,29 @@ Attempt to adopt changes to a node message.
Test that we can set the node message if the request is signed by the
owner of the node.
+
+
+### build/3 ###
+
+`build(X1, X2, NodeMsg) -> any()`
+
+Emits the version number and commit hash of the HyperBEAM node source,
+if available.
+
+We include the short hash separately, as the length of this hash may change in
+the future, depending on the git version/config used to build the node.
+Subsequently, rather than embedding the `git-short-hash-length`, for the
+avoidance of doubt, we include the short hash separately, as well as its long
+hash.
+
+
+
+### buildinfo_test/0 * ###
+
+`buildinfo_test() -> any()`
+
+Test that version information is available and returned correctly.
+
### claim_node_test/0 * ###
@@ -95,7 +119,7 @@ message.
`halt_request_test() -> any()`
-Test that we can halt a request if the preprocessor returns an error.
+Test that we can halt a request if the hook returns an error.
@@ -122,7 +146,7 @@ other messages are routed to the `handle_resolve/2` function.
Handle an AO-Core request, which is a list of messages. We apply
the node's pre-processor to the request first, and then resolve the request
using the node's AO-Core implementation if its response was `ok`.
-After execution, we run the node's `postprocessor` message on the result of
+After execution, we run the node's `response` hook on the result of
the request before returning the result it grants back to the user.
@@ -151,6 +175,21 @@ Get/set the node message. If the request is a `POST`, we check that the
request is signed by the owner of the node. If not, we return the node message
as-is, aside all keys that are private (according to `hb_private`).
+
+
+### is/2 ###
+
+`is(Request, NodeMsg) -> any()`
+
+Check if the request in question is signed by a given `role` on the node.
+The `role` can be one of `operator` or `initiator`.
+
+
+
+### is/3 ###
+
+`is(X1, Request, NodeMsg) -> any()`
+
### maybe_sign/2 * ###
@@ -173,7 +212,7 @@ Get the HTTP status code from a transaction (if it exists).
`modify_request_test() -> any()`
-Test that a preprocessor can modify a request.
+Test that a hook can modify a request.
@@ -191,20 +230,26 @@ Test that a permanent node message cannot be changed.
Test that we can't get the node message if the requested key is private.
-
+
+
+### request_response_hooks_test/0 * ###
+
+`request_response_hooks_test() -> any()`
+
+
-### resolve_processor/5 * ###
+### resolve_hook/4 * ###
-`resolve_processor(PathKey, Processor, Req, Query, NodeMsg) -> any()`
+`resolve_hook(HookName, InitiatingRequest, Body, NodeMsg) -> any()`
-Execute a message from the node message upon the user's request. The
-invocation of the processor provides a request of the following form:
+Execute a hook from the node message upon the user's request. The
+invocation of the hook provides a request of the following form:
```
- /path => preprocess | postprocess
+ /path => request | response
/request => the original request singleton
- /body => list of messages the user wishes to process
+ /body => parsed sequence of messages to process | the execution result
```
diff --git a/docs/source-code-docs/dev_monitor.md b/docs/resources/source-code/dev_monitor.md
similarity index 92%
rename from docs/source-code-docs/dev_monitor.md
rename to docs/resources/source-code/dev_monitor.md
index 1e5309b56..f092fb0a0 100644
--- a/docs/source-code-docs/dev_monitor.md
+++ b/docs/resources/source-code/dev_monitor.md
@@ -1,6 +1,7 @@
+# [Module dev_monitor.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_monitor.erl)
+
-# Module dev_monitor #
diff --git a/docs/source-code-docs/dev_multipass.md b/docs/resources/source-code/dev_multipass.md
similarity index 91%
rename from docs/source-code-docs/dev_multipass.md
rename to docs/resources/source-code/dev_multipass.md
index 57f4780a1..1cf3fec8a 100644
--- a/docs/source-code-docs/dev_multipass.md
+++ b/docs/resources/source-code/dev_multipass.md
@@ -1,7 +1,7 @@
+# [Module dev_multipass.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_multipass.erl)
+
-# Module dev_multipass #
-* [Description](#description)
A device that triggers repass events until a certain counter has been
reached.
diff --git a/docs/resources/source-code/dev_name.md b/docs/resources/source-code/dev_name.md
new file mode 100644
index 000000000..9edd22fca
--- /dev/null
+++ b/docs/resources/source-code/dev_name.md
@@ -0,0 +1,97 @@
+# [Module dev_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_name.erl)
+
+
+
+
+A device for resolving names to their corresponding values, through the
+use of a `resolver` interface.
+
+
+
+## Description ##
+Each `resolver` is a message that can be
+given a `key` and returns an associated value. The device will attempt to
+match the key against each resolver in turn, and return the value of the
+first resolver that matches.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### execute_resolver/3 * ###
+
+`execute_resolver(Key, Resolver, Opts) -> any()`
+
+Execute a resolver with the given key and return its value.
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Configure the `default` key to proxy to the `resolver/4` function.
+Exclude the `keys` and `set` keys from being processed by this device, as
+these are needed to modify the base message itself.
+
+
+
+### load_and_execute_test/0 * ###
+
+`load_and_execute_test() -> any()`
+
+Test that we can resolve messages from a name loaded with the device.
+
+
+
+### match_resolver/3 * ###
+
+`match_resolver(Key, Resolvers, Opts) -> any()`
+
+Find the first resolver that matches the key and return its value.
+
+
+
+### message_lookup_device_resolver/1 * ###
+
+`message_lookup_device_resolver(Msg) -> any()`
+
+
+
+### multiple_resolvers_test/0 * ###
+
+`multiple_resolvers_test() -> any()`
+
+
+
+### no_resolvers_test/0 * ###
+
+`no_resolvers_test() -> any()`
+
+
+
+### resolve/4 * ###
+
+`resolve(Key, X2, Req, Opts) -> any()`
+
+Resolve a name to its corresponding value. The name is given by the key
+called. For example, `GET /~name@1.0/hello&load=false` grants the value of
+`hello`. If the `load` key is set to `true`, the value is treated as a
+pointer and its contents is loaded from the cache. For example,
+`GET /~name@1.0/reference` yields the message at the path specified by the
+`reference` key.
+
+
+
+### single_resolver_test/0 * ###
+
+`single_resolver_test() -> any()`
+
diff --git a/docs/resources/source-code/dev_node_process.md b/docs/resources/source-code/dev_node_process.md
new file mode 100644
index 000000000..70a546cd9
--- /dev/null
+++ b/docs/resources/source-code/dev_node_process.md
@@ -0,0 +1,97 @@
+# [Module dev_node_process.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_node_process.erl)
+
+
+
+
+A device that implements the singleton pattern for processes specific
+to an individual node.
+
+
+
+## Description ##
+
+This device uses the `local-name@1.0` device to
+register processes with names locally, persisting them across reboots.
+
+Definitions of singleton processes are expected to be found with their
+names in the `node_processes` section of the node message.
+
+## Function Index ##
+
+
+
Spawn a new process according to the process definition found in the
+node message, and register it with the given name.
+
+
+
+
+## Function Details ##
+
+
+
+### augment_definition/2 * ###
+
+`augment_definition(BaseDef, Opts) -> any()`
+
+Augment the given process definition with the node's address.
+
+
+
+### generate_test_opts/0 * ###
+
+`generate_test_opts() -> any()`
+
+Helper function to generate a test environment and its options.
+
+
+
+### generate_test_opts/1 * ###
+
+`generate_test_opts(Defs) -> any()`
+
+
+
+### info/1 ###
+
+`info(Opts) -> any()`
+
+Register a default handler for the device. Inherits `keys` and `set`
+from the default device.
+
+
+
+### lookup/4 * ###
+
+`lookup(Name, Base, Req, Opts) -> any()`
+
+Lookup a process by name.
+
+
+
+### lookup_execute_test/0 * ###
+
+`lookup_execute_test() -> any()`
+
+Test that a process can be spawned, executed upon, and its result retrieved.
+
+
+
+### lookup_no_spawn_test/0 * ###
+
+`lookup_no_spawn_test() -> any()`
+
+
+
+### lookup_spawn_test/0 * ###
+
+`lookup_spawn_test() -> any()`
+
+
+
+### spawn_register/2 * ###
+
+`spawn_register(Name, Opts) -> any()`
+
+Spawn a new process according to the process definition found in the
+node message, and register it with the given name.
+
diff --git a/docs/source-code-docs/dev_p4.md b/docs/resources/source-code/dev_p4.md
similarity index 66%
rename from docs/source-code-docs/dev_p4.md
rename to docs/resources/source-code/dev_p4.md
index 2dba4ac4a..081a4814a 100644
--- a/docs/source-code-docs/dev_p4.md
+++ b/docs/resources/source-code/dev_p4.md
@@ -1,7 +1,7 @@
+# [Module dev_p4.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_p4.erl)
+
-# Module dev_p4 #
-* [Description](#description)
The HyperBEAM core payment ledger.
@@ -16,8 +16,8 @@ the node should fulfil services for users.
The device requires the following node message settings in order to function:
-- `p4_pricing_device`: The device that will estimate the cost of a request.
-- `p4_ledger_device`: The device that will act as a payment ledger.
+- `p4_pricing-device`: The device that will estimate the cost of a request.
+- `p4_ledger-device`: The device that will act as a payment ledger.
The pricing device should implement the following keys:
@@ -33,10 +33,10 @@ key may return `infinity` if the node will not serve a user under any
circumstances. Else, the value returned by the `price` key will be passed to
the ledger device as the `amount` key.
-The ledger device should implement the following keys:
+A ledger device should implement the following keys:
```
-POST /credit?message=PaymentMessage&request=RequestMessagePOST /debit?amount=PriceMessage&type=pre|post&request=RequestMessage
+POST /credit?message=PaymentMessage&request=RequestMessagePOST /debit?amount=PriceMessage&request=RequestMessageGET /balance?request=RequestMessage
```
The `type` key is optional and defaults to `pre`. If `type` is set to `post`,
@@ -47,8 +47,8 @@ check whether the debit would succeed before execution.
@@ -80,6 +80,19 @@ Simple test of p4's capabilities with the `faff@1.0` device.
The node operator may elect to make certain routes non-chargable, using
the `routes` syntax also used to declare routes in `router@1.0`.
+
+
+### lua_pricing_test/0 * ###
+
+`lua_pricing_test() -> any()`
+
+Ensure that Lua modules can be used as pricing and ledger devices. Our
+modules come in two parts:
+- A `process` module which is executed as a persistent `local-process` on the
+node, and which maintains the state of the ledger.
+- A `client` module, which is executed as a `p4@1.0` device, marshalling
+requests to the `process` module.
+
### non_chargable_route_test/0 * ###
@@ -88,23 +101,23 @@ the `routes` syntax also used to declare routes in `router@1.0`.
Test that a non-chargable route is not charged for.
-
+
-### postprocess/3 ###
+### request/3 ###
-`postprocess(State, RawResponse, NodeMsg) -> any()`
+`request(State, Raw, NodeMsg) -> any()`
-Postprocess the request after it has been fulfilled.
+Estimate the cost of a transaction and decide whether to proceed with
+a request. The default behavior if `pricing-device` or `p4_balances` are
+not set is to proceed, so it is important that a user initialize them.
-
+
-### preprocess/3 ###
+### response/3 ###
-`preprocess(State, Raw, NodeMsg) -> any()`
+`response(State, RawResponse, NodeMsg) -> any()`
-Estimate the cost of a transaction and decide whether to proceed with
-a request. The default behavior if `pricing_device` or `p4_balances` are
-not set is to proceed, so it is important that a user initialize them.
+Postprocess the request after it has been fulfilled.
diff --git a/docs/resources/source-code/dev_patch.md b/docs/resources/source-code/dev_patch.md
new file mode 100644
index 000000000..92cf5373c
--- /dev/null
+++ b/docs/resources/source-code/dev_patch.md
@@ -0,0 +1,125 @@
+# [Module dev_patch.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_patch.erl)
+
+
+
+
+A device that can be used to reorganize a message: Moving data from
+one path inside it to another.
+
+
+
+## Description ##
+
+This device's function runs in two modes:
+
+1. When using `all` to move all data at the path given in `from` to the
+path given in `to`.
+2. When using `patches` to move all submessages in the source to the target,
+_if_ they have a `method` key of `PATCH` or a `device` key of `patch@1.0`.
+
+Source and destination paths may be prepended by `base:` or `req:` keys to
+indicate that they are relative to either of the messages that the
+computation is being performed on.
+
+The search order for finding the source and destination keys is as follows,
+where `X` is either `from` or `to`:
+
+1. The `patch-X` key of the execution message.
+2. The `X` key of the execution message.
+3. The `patch-X` key of the request message.
+4. The `X` key of the request message.
+
+Additionally, this device implements the standard computation device keys,
+allowing it to be used as an element of an execution stack pipeline, etc.
+
+## Function Index ##
+
+
+
Find relevant PATCH messages in the given source key of the execution
+and request messages, and apply them to the given destination key of the
+request.
@@ -62,6 +77,8 @@ determine which commitments to add.
`is_user_signed(Tx) -> any()`
+Determines if a user committed
+
### pfiltermap/2 * ###
diff --git a/docs/source-code-docs/dev_process.md b/docs/resources/source-code/dev_process.md
similarity index 99%
rename from docs/source-code-docs/dev_process.md
rename to docs/resources/source-code/dev_process.md
index a6dbf153f..f9a4074ea 100644
--- a/docs/source-code-docs/dev_process.md
+++ b/docs/resources/source-code/dev_process.md
@@ -1,7 +1,7 @@
+# [Module dev_process.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process.erl)
+
-# Module dev_process #
-* [Description](#description)
This module contains the device implementation of AO processes
in AO-Core.
diff --git a/docs/source-code-docs/dev_process_cache.md b/docs/resources/source-code/dev_process_cache.md
similarity index 97%
rename from docs/source-code-docs/dev_process_cache.md
rename to docs/resources/source-code/dev_process_cache.md
index 27f730f86..59f195d20 100644
--- a/docs/source-code-docs/dev_process_cache.md
+++ b/docs/resources/source-code/dev_process_cache.md
@@ -1,7 +1,7 @@
+# [Module dev_process_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process_cache.erl)
+
-# Module dev_process_cache #
-* [Description](#description)
A wrapper around the hb_cache module that provides a more
convenient interface for reading the result of a process at a given slot or
diff --git a/docs/source-code-docs/dev_process_worker.md b/docs/resources/source-code/dev_process_worker.md
similarity index 96%
rename from docs/source-code-docs/dev_process_worker.md
rename to docs/resources/source-code/dev_process_worker.md
index af181ae74..4e34029e3 100644
--- a/docs/source-code-docs/dev_process_worker.md
+++ b/docs/resources/source-code/dev_process_worker.md
@@ -1,7 +1,7 @@
+# [Module dev_process_worker.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_process_worker.erl)
+
-# Module dev_process_worker #
-* [Description](#description)
A long-lived process worker that keeps state in memory between
calls.
diff --git a/docs/resources/source-code/dev_push.md b/docs/resources/source-code/dev_push.md
new file mode 100644
index 000000000..f966c2457
--- /dev/null
+++ b/docs/resources/source-code/dev_push.md
@@ -0,0 +1,196 @@
+# [Module dev_push.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_push.erl)
+
+
+
+
+`push@1.0` takes a message or slot number, evaluates it, and recursively
+pushes the resulting messages to other processes.
+
+
+
+## Description ##
+The `push`ing mechanism
+continues until there are no remaining messages to push.
+
+## Function Index ##
+
+
+
+
+
+
+
+## Function Details ##
+
+
+
+### additional_keys/3 * ###
+
+`additional_keys(Origin, ToSched, Opts) -> any()`
+
+Set the necessary keys in order for the recipient to know where the
+message came from.
+
+
+
+### do_push/3 * ###
+
+`do_push(Process, Assignment, Opts) -> any()`
+
+Push a message or slot number, including its downstream results.
+
+
+
+### extract/2 * ###
+
+`extract(X1, Raw) -> any()`
+
+Return either the `target` or the `hint`.
+
+
+
+### find_type/2 * ###
+
+`find_type(Req, Opts) -> any()`
+
+
+
+### full_push_test_/0 * ###
+
+`full_push_test_() -> any()`
+
+
+
+### is_async/3 * ###
+
+`is_async(Process, Req, Opts) -> any()`
+
+Determine if the push is asynchronous.
+
+
+
+### multi_process_push_test_/0 * ###
+
+`multi_process_push_test_() -> any()`
+
+
+
+### normalize_message/2 * ###
+
+`normalize_message(MsgToPush, Opts) -> any()`
+
+Augment the message with from-* keys, if it doesn't already have them.
+
+
+
+### parse_redirect/1 * ###
+
+`parse_redirect(Location) -> any()`
+
+
+
+### ping_pong_script/1 * ###
+
+`ping_pong_script(Limit) -> any()`
+
+
+
+### push/3 ###
+
+`push(Base, Req, Opts) -> any()`
+
+Push either a message or an assigned slot number. If a `Process` is
+provided in the `body` of the request, it will be scheduled (initializing
+it if it does not exist). Otherwise, the message specified by the given
+`slot` key will be pushed.
+
+Optional parameters:
+`/result-depth`: The depth to which the full contents of the result
+will be included in the response. Default: 1, returning
+the full result of the first message, but only the 'tree'
+of downstream messages.
+`/push-mode`: Whether or not the push should be done asynchronously.
+Default: `sync`, pushing synchronously.
+
+
+
+### push_prompts_encoding_change_test/0 * ###
+
+`push_prompts_encoding_change_test() -> any()`
+
+
+
+### push_result_message/4 * ###
+
+`push_result_message(TargetProcess, MsgToPush, Origin, Opts) -> any()`
+
+Push a downstream message result. The `Origin` map contains information
+about the origin of the message: The process that originated the message,
+the slot number from which it was sent, and the outbox key of the message,
+and the depth to which downstream results should be included in the message.
+
+
+
+### push_with_mode/3 * ###
+
+`push_with_mode(Process, Req, Opts) -> any()`
+
+
+
+### push_with_redirect_hint_test_disabled/0 * ###
+
+`push_with_redirect_hint_test_disabled() -> any()`
+
+
+
+### remote_schedule_result/3 * ###
+
+`remote_schedule_result(Location, SignedReq, Opts) -> any()`
+
+
+
+### reply_script/0 * ###
+
+`reply_script() -> any()`
+
+
+
+### schedule_initial_message/3 * ###
+
+`schedule_initial_message(Base, Req, Opts) -> any()`
+
+Push a message or a process, prior to pushing the resulting slot number.
+
+
+
+### schedule_result/4 * ###
+
+`schedule_result(TargetProcess, MsgToPush, Origin, Opts) -> any()`
+
+Add the necessary keys to the message to be scheduled, then schedule it.
+If the remote scheduler does not support the given codec, it will be
+downgraded and re-signed.
+
+
+
+### schedule_result/5 * ###
+
+`schedule_result(TargetProcess, MsgToPush, Codec, Origin, Opts) -> any()`
+
+
+
+### split_target/1 * ###
+
+`split_target(RawTarget) -> any()`
+
+Split the target into the process ID and the optional query string.
+
+
+
+### target_process/2 * ###
+
+`target_process(MsgToPush, Opts) -> any()`
+
+Find the target process ID for a message to push.
+
diff --git a/docs/source-code-docs/dev_relay.md b/docs/resources/source-code/dev_relay.md
similarity index 76%
rename from docs/source-code-docs/dev_relay.md
rename to docs/resources/source-code/dev_relay.md
index 054dac672..2828b72eb 100644
--- a/docs/source-code-docs/dev_relay.md
+++ b/docs/resources/source-code/dev_relay.md
@@ -1,7 +1,7 @@
+# [Module dev_relay.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_relay.erl)
+
-# Module dev_relay #
-* [Description](#description)
This module implements the relay device, which is responsible for
relaying messages between nodes and other HTTP(S) endpoints.
@@ -27,7 +27,7 @@ Example usage:
## Function Index ##
-
Test that the preprocess/3 function re-routes a request to remote
peers, according to the node's routing table.
@@ -65,19 +65,19 @@ Defaults to `false`.
Execute a request in the same way as `call/3`, but asynchronously. Always
returns `<<"OK">>`.
-
+
-### preprocess/3 ###
+### request/3 ###
-`preprocess(M1, M2, Opts) -> any()`
+`request(Msg1, Msg2, Opts) -> any()`
Preprocess a request to check if it should be relayed to a different node.
-
+
-### preprocessor_reroute_to_nearest_test/0 * ###
+### request_hook_reroute_to_nearest_test/0 * ###
-`preprocessor_reroute_to_nearest_test() -> any()`
+`request_hook_reroute_to_nearest_test() -> any()`
Test that the `preprocess/3` function re-routes a request to remote
peers, according to the node's routing table.
diff --git a/docs/source-code-docs/dev_router.md b/docs/resources/source-code/dev_router.md
similarity index 53%
rename from docs/source-code-docs/dev_router.md
rename to docs/resources/source-code/dev_router.md
index e2e840c0b..c513e1b3d 100644
--- a/docs/source-code-docs/dev_router.md
+++ b/docs/resources/source-code/dev_router.md
@@ -1,7 +1,7 @@
+# [Module dev_router.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_router.erl)
+
-# Module dev_router #
-* [Description](#description)
A device that routes outbound messages from the node to their
appropriate network recipients via HTTP.
@@ -42,9 +42,13 @@ The structure of the routes should be as follows:
@@ -121,11 +125,39 @@ Implements the load distribution strategies if given a cluster.
`device_call_from_singleton_test() -> any()`
-
+
+
+### dynamic_route_provider_test/0 * ###
+
+`dynamic_route_provider_test() -> any()`
+
+
+
+### dynamic_router_test/0 * ###
+
+`dynamic_router_test() -> any()`
+
+Example of a Lua module being used as the `route_provider` for a
+HyperBEAM node. The module utilized in this example dynamically adjusts the
+likelihood of routing to a given node, depending upon price and performance.
+also include preprocessing support for routing
-### dynamic_routes_provider_test/0 * ###
+
-`dynamic_routes_provider_test() -> any()`
+### dynamic_routing_by_performance/0 * ###
+
+`dynamic_routing_by_performance() -> any()`
+
+
+
+### dynamic_routing_by_performance_test_/0 * ###
+
+`dynamic_routing_by_performance_test_() -> any()`
+
+Demonstrates routing tables being dynamically created and adjusted
+according to the real-time performance of nodes. This test utilizes the
+`dynamic-router` script to manage routes and recalculate weights based on the
+reported performance.
@@ -152,6 +184,14 @@ Calculate the minimum distance between two numbers
(either progressing backwards or forwards), assuming a
256-bit field.
+
+
+### find_target_path/2 * ###
+
+`find_target_path(Msg, Opts) -> any()`
+
+Find the target path to route for a request message.
+
### generate_hashpaths/1 * ###
@@ -170,6 +210,23 @@ Calculate the minimum distance between two numbers
`get_routes_test() -> any()`
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Exported function for getting device info, controls which functions are
+exposed via the device API.
+
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+HTTP info response providing information about this device
+
### load_routes/1 * ###
@@ -180,6 +237,22 @@ Load the current routes for the node. Allows either explicit routes from
the node message's `routes` key, or dynamic routes generated by resolving the
`route_provider` message.
+
+
+### local_dynamic_router_test/0 * ###
+
+`local_dynamic_router_test() -> any()`
+
+Example of a Lua module being used as the `route_provider` for a
+HyperBEAM node. The module utilized in this example dynamically adjusts the
+likelihood of routing to a given node, depending upon price and performance.
+
+
+
+### local_process_route_provider_test/0 * ###
+
+`local_process_route_provider_test() -> any()`
+
### lowest_distance/1 * ###
@@ -194,20 +267,42 @@ Find the node with the lowest distance to the given hashpath.
`lowest_distance(Nodes, X) -> any()`
+
+
+### match/3 ###
+
+`match(Base, Req, Opts) -> any()`
+
+Find the first matching template in a list of known routes. Allows the
+path to be specified by either the explicit `path` (for internal use by this
+module), or `route-path` for use by external devices and users.
+
-### match_routes/3 ###
+### match_routes/3 * ###
`match_routes(ToMatch, Routes, Opts) -> any()`
-Find the first matching template in a list of known routes.
-
### match_routes/4 * ###
`match_routes(ToMatch, Routes, Keys, Opts) -> any()`
+
+
+### preprocess/3 ###
+
+`preprocess(Msg1, Msg2, Opts) -> any()`
+
+Preprocess a request to check if it should be relayed to a different node.
+
+
+
+### register/3 ###
+
+`register(M1, M2, Opts) -> any()`
+
### relay_nearest_test/0 * ###
@@ -252,6 +347,12 @@ function, taking only the request message and the `Opts` map.
`route(X1, Msg, Opts) -> any()`
+
+
+### route_provider_test/0 * ###
+
+`route_provider_test() -> any()`
+
### route_regex_matches_test/0 * ###
@@ -272,12 +373,6 @@ function, taking only the request message and the `Opts` map.
Device function that returns all known routes.
-
-
-### routes_provider_test/0 * ###
-
-`routes_provider_test() -> any()`
-
### simulate/4 * ###
@@ -302,11 +397,11 @@ Device function that returns all known routes.
`strategy_suite_test_() -> any()`
-
+
-### template_matches/2 * ###
+### template_matches/3 * ###
-`template_matches(ToMatch, Template) -> any()`
+`template_matches(ToMatch, Template, Opts) -> any()`
Check if a message matches a message template or path regex.
diff --git a/docs/source-code-docs/dev_scheduler.md b/docs/resources/source-code/dev_scheduler.md
similarity index 86%
rename from docs/source-code-docs/dev_scheduler.md
rename to docs/resources/source-code/dev_scheduler.md
index 2734cf742..e9a1cb10d 100644
--- a/docs/source-code-docs/dev_scheduler.md
+++ b/docs/resources/source-code/dev_scheduler.md
@@ -1,7 +1,7 @@
+# [Module dev_scheduler.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler.erl)
+
-# Module dev_scheduler #
-* [Description](#description)
A simple scheduler scheme for AO.
@@ -28,12 +28,13 @@ Process: `#{ id, Scheduler: #{ Authority } }`
Generate a _transformed_ process message, not as they are generated
@@ -198,6 +199,17 @@ Get the assignments for a process, and whether the request was truncated.
`get_local_schedule_test() -> any()`
+
+
+### get_location/3 * ###
+
+`get_location(Msg1, Req, Opts) -> any()`
+
+Search for the location of the scheduler in the scheduler-location
+cache. If an address is provided, we search for the location of that
+specific scheduler. Otherwise, we return the location record for the current
+node's scheduler, if it has been established.
+
### get_remote_schedule/5 * ###
@@ -316,6 +328,14 @@ we redirect to the remote scheduler or proxy based on the node opts.
This device uses a default_handler to route requests to the correct
function.
+
+
+### location/3 ###
+
+`location(Msg1, Msg2, Opts) -> any()`
+
+Router for `record` requests. Expects either a `POST` or `GET` request.
+
### many_clients/1 * ###
@@ -356,6 +376,15 @@ Get the node URL from a redirect.
`post_legacy_schedule(ProcID, OnlyCommitted, Node, Opts) -> any()`
+
+
+### post_location/3 * ###
+
+`post_location(Msg1, RawReq, Opts) -> any()`
+
+Generate a new scheduler location record and register it. We both send
+the new scheduler-location to the given registry, and return it to the caller.
+
### post_remote_schedule/4 * ###
@@ -392,14 +421,13 @@ Get the assignments for a process.
`redirect_to_hint_test() -> any()`
-
+
-### register/3 ###
+### register_location_on_boot_test/0 * ###
-`register(Msg1, Req, Opts) -> any()`
+`register_location_on_boot_test() -> any()`
-Generate a new scheduler location record and register it. We both send
-the new scheduler-location to the given registry, and return it to the caller.
+Test that a scheduler location is registered on boot.
diff --git a/docs/source-code-docs/dev_scheduler_cache.md b/docs/resources/source-code/dev_scheduler_cache.md
similarity index 91%
rename from docs/source-code-docs/dev_scheduler_cache.md
rename to docs/resources/source-code/dev_scheduler_cache.md
index a4fe1f53b..2ddd9866a 100644
--- a/docs/source-code-docs/dev_scheduler_cache.md
+++ b/docs/resources/source-code/dev_scheduler_cache.md
@@ -1,6 +1,7 @@
+# [Module dev_scheduler_cache.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_cache.erl)
+
-# Module dev_scheduler_cache #
@@ -58,7 +59,7 @@ Write an assignment message into the cache.
### write_location/2 ###
-`write_location(LocationMsg, Opts) -> any()`
+`write_location(LocMsg, Opts) -> any()`
Write the latest known scheduler location for an address.
diff --git a/docs/source-code-docs/dev_scheduler_formats.md b/docs/resources/source-code/dev_scheduler_formats.md
similarity index 97%
rename from docs/source-code-docs/dev_scheduler_formats.md
rename to docs/resources/source-code/dev_scheduler_formats.md
index 0ef6053a9..4c356b752 100644
--- a/docs/source-code-docs/dev_scheduler_formats.md
+++ b/docs/resources/source-code/dev_scheduler_formats.md
@@ -1,7 +1,7 @@
+# [Module dev_scheduler_formats.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_formats.erl)
+
-# Module dev_scheduler_formats #
-* [Description](#description)
This module is used by dev_scheduler in order to produce outputs that
are compatible with various forms of AO clients.
diff --git a/docs/resources/source-code/dev_scheduler_registry.md b/docs/resources/source-code/dev_scheduler_registry.md
new file mode 100644
index 000000000..4415c8ff7
--- /dev/null
+++ b/docs/resources/source-code/dev_scheduler_registry.md
@@ -0,0 +1,95 @@
+# [Module dev_scheduler_registry.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_registry.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
Find a process associated with the processor ID in the local registry
+If the process is not found and GenIfNotHosted is true, it attempts to create a new one.
+
+
+
+
+## Function Details ##
+
+
+
+### create_and_find_process_test/0 * ###
+
+`create_and_find_process_test() -> any()`
+
+
+
+### create_multiple_processes_test/0 * ###
+
+`create_multiple_processes_test() -> any()`
+
+
+
+### find/1 ###
+
+`find(ProcID) -> any()`
+
+Find a process associated with the processor ID in the local registry
+If the process is not found, it will not create a new one
+
+
+
+### find/2 ###
+
+`find(ProcID, GenIfNotHosted) -> any()`
+
+Find a process associated with the processor ID in the local registry
+If the process is not found and `GenIfNotHosted` is true, it attempts to create a new one
+
+
+
+### find/3 ###
+
+`find(ProcID, GenIfNotHosted, Opts) -> any()`
+
+Same as `find/2` but with additional options passed when spawning a new process (if needed)
+
+
+
+### find_non_existent_process_test/0 * ###
+
+`find_non_existent_process_test() -> any()`
+
+
+
+### get_all_processes_test/0 * ###
+
+`get_all_processes_test() -> any()`
+
+
+
+### get_processes/0 ###
+
+`get_processes() -> any()`
+
+Return a list of all currently registered ProcID.
+
+
+
+### get_wallet/0 ###
+
+`get_wallet() -> any()`
+
+
+
+### maybe_new_proc/3 * ###
+
+`maybe_new_proc(ProcID, GenIfNotHosted, Opts) -> any()`
+
+
+
+### start/0 ###
+
+`start() -> any()`
+
diff --git a/docs/source-code-docs/dev_scheduler_server.md b/docs/resources/source-code/dev_scheduler_server.md
similarity index 96%
rename from docs/source-code-docs/dev_scheduler_server.md
rename to docs/resources/source-code/dev_scheduler_server.md
index 0c44aa2bb..ab8f015fd 100644
--- a/docs/source-code-docs/dev_scheduler_server.md
+++ b/docs/resources/source-code/dev_scheduler_server.md
@@ -1,7 +1,7 @@
+# [Module dev_scheduler_server.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_scheduler_server.erl)
+
-# Module dev_scheduler_server #
-* [Description](#description)
A long-lived server that schedules messages for a process.
diff --git a/docs/source-code-docs/dev_simple_pay.md b/docs/resources/source-code/dev_simple_pay.md
similarity index 88%
rename from docs/source-code-docs/dev_simple_pay.md
rename to docs/resources/source-code/dev_simple_pay.md
index ef4da71cb..b65230680 100644
--- a/docs/source-code-docs/dev_simple_pay.md
+++ b/docs/resources/source-code/dev_simple_pay.md
@@ -1,7 +1,7 @@
+# [Module dev_simple_pay.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_simple_pay.erl)
+
-# Module dev_simple_pay #
-* [Description](#description)
A simple device that allows the operator to specify a price for a
request and then charge the user for it, on a per message basis.
@@ -19,7 +19,7 @@ definition.
@@ -84,12 +84,6 @@ Check if the request is from the operator.
Adjust a user's balance, normalizing their wallet ID first.
-
-
-### test_opts/0 * ###
-
-`test_opts() -> any()`
-
### test_opts/1 * ###
diff --git a/docs/source-code-docs/dev_snp.md b/docs/resources/source-code/dev_snp.md
similarity index 67%
rename from docs/source-code-docs/dev_snp.md
rename to docs/resources/source-code/dev_snp.md
index 46fd84bf2..9eb969850 100644
--- a/docs/source-code-docs/dev_snp.md
+++ b/docs/resources/source-code/dev_snp.md
@@ -1,7 +1,7 @@
+# [Module dev_snp.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_snp.erl)
+
-# Module dev_snp #
-* [Description](#description)
This device offers an interface for validating AMD SEV-SNP commitments,
as well as generating them, if called in an appropriate environment.
@@ -14,8 +14,8 @@ as well as generating them, if called in an appropriate environment.
Generate an commitment report and emit it as a message, including all of
the necessary data to generate the nonce (ephemeral node address + node
message ID), as well as the expected measurement (firmware, kernel, and VMSAs
-hashes).
Validates if a given message parameter matches a trusted value from the SNP trusted list
+Returns {ok, true} if the message is trusted, {ok, false} otherwise.
Verify an commitment report message; validating the identity of a
remote node, its ephemeral private address, and the integrity of the report.
@@ -52,17 +52,6 @@ hashes).
Generate the nonce to use in the commitment report.
-
-
-### init/3 ###
-
-`init(M1, M2, Opts) -> any()`
-
-Should take in options to set for the device such as kernel, initrd, firmware,
-and append hashes and make them available to the device. Only runnable once,
-and only if the operator is not set to an address (and thus, the node has not
-had any priviledged access).
-
### is_debug/1 * ###
@@ -91,9 +80,8 @@ Ensure that the report data matches the expected report data.
`trusted(Msg1, Msg2, NodeOpts) -> any()`
-Default implementation of a resolver for trusted software. Searches the
-`trusted` key in the base message for a list of trusted values, and checks
-if the value in the request message is a member of that list.
+Validates if a given message parameter matches a trusted value from the SNP trusted list
+Returns {ok, true} if the message is trusted, {ok, false} otherwise
diff --git a/docs/source-code-docs/dev_snp_nif.md b/docs/resources/source-code/dev_snp_nif.md
similarity index 96%
rename from docs/source-code-docs/dev_snp_nif.md
rename to docs/resources/source-code/dev_snp_nif.md
index caf34085b..34d8e95fd 100644
--- a/docs/source-code-docs/dev_snp_nif.md
+++ b/docs/resources/source-code/dev_snp_nif.md
@@ -1,6 +1,7 @@
+# [Module dev_snp_nif.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_snp_nif.erl)
+
-# Module dev_snp_nif #
diff --git a/docs/source-code-docs/dev_stack.md b/docs/resources/source-code/dev_stack.md
similarity index 99%
rename from docs/source-code-docs/dev_stack.md
rename to docs/resources/source-code/dev_stack.md
index 553ed071d..d00bc24cf 100644
--- a/docs/source-code-docs/dev_stack.md
+++ b/docs/resources/source-code/dev_stack.md
@@ -1,7 +1,7 @@
+# [Module dev_stack.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_stack.erl)
+
-# Module dev_stack #
-* [Description](#description)
A device that contains a stack of other devices, and manages their
execution.
diff --git a/docs/source-code-docs/dev_test.md b/docs/resources/source-code/dev_test.md
similarity index 50%
rename from docs/source-code-docs/dev_test.md
rename to docs/resources/source-code/dev_test.md
index 8bc9cf45f..854ac1d7d 100644
--- a/docs/source-code-docs/dev_test.md
+++ b/docs/resources/source-code/dev_test.md
@@ -1,15 +1,18 @@
+# [Module dev_test.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_test.erl)
+
-# Module dev_test #
## Function Index ##
-
Find a test worker's PID and send it an update message.
@@ -32,6 +35,15 @@ slot number in the results key.
`compute_test() -> any()`
+
+
+### delay/3 ###
+
+`delay(Msg1, Req, Opts) -> any()`
+
+Does nothing, just sleeps `Req/duration or 750` ms and returns the
+appropriate form in order to be used as a hook.
+
### device_with_function_key_module_test/0 * ###
@@ -40,6 +52,14 @@ slot number in the results key.
Tests the resolution of a default function.
+
+
+### increment_counter/3 ###
+
+`increment_counter(Msg1, Msg2, Opts) -> any()`
+
+Find a test worker's PID and send it an increment message.
+
### info/1 ###
@@ -49,6 +69,15 @@ Tests the resolution of a default function.
Exports a default_handler function that can be used to test the
handler resolution mechanism.
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+Exports a default_handler function that can be used to test the
+handler resolution mechanism.
+
### init/3 ###
@@ -66,14 +95,6 @@ Example `init/3` handler. Sets the `Already-Seen` key to an empty list.
Example implementation of an `imported` function for a WASM
executor.
-
-
-### postprocess/3 ###
-
-`postprocess(Msg, X2, Opts) -> any()`
-
-Set the `postprocessor-called` key to true in the HTTP server.
-
### restore/3 ###
@@ -103,3 +124,11 @@ Do nothing when asked to snapshot.
`test_func(X1) -> any()`
+
+
+### update_state/3 ###
+
+`update_state(Msg, Msg2, Opts) -> any()`
+
+Find a test worker's PID and send it an update message.
+
diff --git a/docs/resources/source-code/dev_volume.md b/docs/resources/source-code/dev_volume.md
new file mode 100644
index 000000000..210df23d9
--- /dev/null
+++ b/docs/resources/source-code/dev_volume.md
@@ -0,0 +1,271 @@
+# [Module dev_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_volume.erl)
+
+
+
+
+Secure Volume Management for HyperBEAM Nodes.
+
+
+
+## Description ##
+
+This module handles encrypted storage operations for HyperBEAM, providing
+a robust and secure approach to data persistence. It manages the complete
+lifecycle of encrypted volumes from detection to creation, formatting, and
+mounting.
+
+Key responsibilities:
+- Volume detection and initialization
+- Encrypted partition creation and formatting
+- Secure mounting using cryptographic keys
+- Store path reconfiguration to use mounted volumes
+- Automatic handling of various system states
+(new device, existing partition, etc.)
+
+The primary entry point is the `mount/3` function, which orchestrates the
+entire process based on the provided configuration parameters. This module
+works alongside `hb_volume` which provides the low-level operations for
+device manipulation.
+
+Security considerations:
+- Ensures data at rest is protected through LUKS encryption
+- Provides proper volume sanitization and secure mounting
+- IMPORTANT: This module only applies configuration set in node options and
+does NOT accept disk operations via HTTP requests. It cannot format arbitrary
+disks as all operations are safeguarded by host operating system permissions
+enforced upon the HyperBEAM environment.
+
+## Function Index ##
+
+
+
+
+
+`Device`: The base device to check. `Partition`: The partition to check. `PartitionType`: The type of partition to check. `VolumeName`: The name of the volume to check. `MountPoint`: The mount point to check. `StorePath`: The store path to check. `Key`: The key to check. `Opts`: The options to check.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Check if the base device exists and if it does, check if the partition exists.
+
+
+
+### check_partition/8 * ###
+
+
+
+
+`Device`: The base device to check. `Partition`: The partition to check. `PartitionType`: The type of partition to check. `VolumeName`: The name of the volume to check. `MountPoint`: The mount point to check. `StorePath`: The store path to check. `Key`: The key to check. `Opts`: The options to check.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Check if the partition exists. If it does, attempt to mount it.
+If it doesn't exist, create it, format it with encryption and mount it.
+
+
+
+### create_and_mount_partition/8 * ###
+
+
+
+
+`Device`: The device to create the partition on. `Partition`: The partition to create. `PartitionType`: The type of partition to create. `Key`: The key to create the partition with. `MountPoint`: The mount point to mount the partition to. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Create, format and mount a new partition.
+
+
+
+### decrypt_volume_key/2 * ###
+
+
+
+
+`Opts`: A map of configuration options.
+
+returns: `{ok, DecryptedKey}` on successful decryption, or
+`{error, Binary}` if decryption fails.
+
+Decrypts an encrypted volume key using the node's private key.
+
+This function takes an encrypted key (typically sent by a client who encrypted
+it with the node's public key) and decrypts it using the node's private RSA key.
+
+
+
+### format_and_mount/6 * ###
+
+
+
+
+`Partition`: The partition to format and mount. `Key`: The key to format and mount the partition with. `MountPoint`: The mount point to mount the partition to. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Format and mount a newly created partition.
+
+
+
+### info/1 ###
+
+`info(X1) -> any()`
+
+Exported function for getting device info, controls which functions are
+exposed via the device API.
+
+
+
+### info/3 ###
+
+`info(Msg1, Msg2, Opts) -> any()`
+
+HTTP info response providing information about this device
+
+
+
+### mount/3 ###
+
+
+
+
+`M1`: Base message for context. `M2`: Request message with operation details. `Opts`: A map of configuration options for volume operations.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Handles the complete process of secure encrypted volume mounting.
+
+This function performs the following operations depending on the state:
+1. Validates the encryption key is present
+2. Checks if the base device exists
+3. Checks if the partition exists on the device
+4. If the partition exists, attempts to mount it
+5. If the partition doesn't exist, creates it, formats it with encryption
+and mounts it
+6. Updates the node's store configuration to use the mounted volume
+
+Config options in Opts map:
+- volume_key: (Required) The encryption key
+- volume_device: Base device path
+- volume_partition: Partition path
+- volume_partition_type: Filesystem type
+- volume_name: Name for encrypted volume
+- volume_mount_point: Where to mount
+- volume_store_path: Store path on volume
+
+
+
+### mount_existing_partition/6 * ###
+
+
+
+
+`Partition`: The partition to mount. `Key`: The key to mount. `MountPoint`: The mount point to mount. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Mount an existing partition.
+
+
+
+### mount_formatted_partition/6 * ###
+
+
+
+
+`Partition`: The partition to mount. `Key`: The key to mount the partition with. `MountPoint`: The mount point to mount the partition to. `VolumeName`: The name of the volume to mount. `StorePath`: The store path to mount. `Opts`: The options to mount.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Mount a newly formatted partition.
+
+
+
+### public_key/3 ###
+
+
+
+
+`Opts`: A map of configuration options.
+
+returns: `{ok, Map}` containing the node's public key on success, or
+`{error, Binary}` if the node's wallet is not available.
+
+Returns the node's public key for secure key exchange.
+
+This function retrieves the node's wallet and extracts the public key
+for encryption purposes. It allows users to securely exchange encryption keys
+by first encrypting their volume key with the node's public key.
+
+The process ensures that sensitive keys are never transmitted in plaintext.
+The encrypted key can then be securely sent to the node, which will decrypt it
+using its private key before using it for volume encryption.
+
+
+
+### update_node_config/2 * ###
+
+
+
+
+`NewStore`: The new store to update the node's configuration with. `Opts`: The options to update the node's configuration with.
+
+returns: `{ok, Binary}` on success with operation result message, or
+`{error, Binary}` on failure with error message.
+
+Update the node's configuration with the new store.
+
+
+
+### update_store_path/2 * ###
+
+
Get the info map for a device, optionally giving it a message if the
@@ -118,7 +118,10 @@ actually takes.
@@ -139,6 +142,12 @@ according to the `Message2` passed to it.
Call the device's `set` function.
+
+
+### device_set/5 * ###
+
+`device_set(Msg, Key, Value, Mode, Opts) -> any()`
+
### do_resolve_many/2 * ###
@@ -430,11 +439,13 @@ The resolver is composed of a series of discrete phases:
4: Persistent-resolver lookup.
5: Device lookup.
6: Execution.
-7: Cryptographic linking.
-8: Result caching.
-9: Notify waiters.
-10: Fork worker.
-11: Recurse or terminate.
+7: Execution of the `step` hook.
+8: Subresolution.
+9: Cryptographic linking.
+10: Result caching.
+11: Notify waiters.
+12: Fork worker.
+13: Recurse or terminate.
diff --git a/docs/source-code-docs/hb_ao_test_vectors.md b/docs/resources/source-code/hb_ao_test_vectors.md
similarity index 94%
rename from docs/source-code-docs/hb_ao_test_vectors.md
rename to docs/resources/source-code/hb_ao_test_vectors.md
index 1af8d4fc7..967e70f79 100644
--- a/docs/source-code-docs/hb_ao_test_vectors.md
+++ b/docs/resources/source-code/hb_ao_test_vectors.md
@@ -1,7 +1,7 @@
+# [Module hb_ao_test_vectors.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_ao_test_vectors.erl)
+
-# Module hb_ao_test_vectors #
-* [Description](#description)
Uses a series of different `Opts` values to test the resolution engine's
execution under different circumstances.
@@ -12,7 +12,7 @@ execution under different circumstances.
@@ -31,6 +31,12 @@ Delay the event server until prometheus is started.
`handle_events() -> any()`
+
+
+### handle_tracer/3 * ###
+
+`handle_tracer(Topic, X, Opts) -> any()`
+
### increment/3 ###
diff --git a/docs/source-code-docs/hb_examples.md b/docs/resources/source-code/hb_examples.md
similarity index 96%
rename from docs/source-code-docs/hb_examples.md
rename to docs/resources/source-code/hb_examples.md
index 959b78dd3..c5fc7b2ca 100644
--- a/docs/source-code-docs/hb_examples.md
+++ b/docs/resources/source-code/hb_examples.md
@@ -1,7 +1,7 @@
+# [Module hb_examples.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_examples.erl)
+
-# Module hb_examples #
-* [Description](#description)
This module contains end-to-end tests for Hyperbeam, accessing through
the HTTP interface.
diff --git a/docs/source-code-docs/hb_features.md b/docs/resources/source-code/hb_features.md
similarity index 93%
rename from docs/source-code-docs/hb_features.md
rename to docs/resources/source-code/hb_features.md
index 9d3de7a2f..1bb652ad2 100644
--- a/docs/source-code-docs/hb_features.md
+++ b/docs/resources/source-code/hb_features.md
@@ -1,7 +1,7 @@
+# [Module hb_features.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_features.erl)
+
-# Module hb_features #
-* [Description](#description)
A module that exports a list of feature flags that the node supports
using the `-ifdef` macro.
diff --git a/docs/source-code-docs/hb_gateway_client.md b/docs/resources/source-code/hb_gateway_client.md
similarity index 69%
rename from docs/source-code-docs/hb_gateway_client.md
rename to docs/resources/source-code/hb_gateway_client.md
index 6c50d7d55..6a072f0bb 100644
--- a/docs/source-code-docs/hb_gateway_client.md
+++ b/docs/resources/source-code/hb_gateway_client.md
@@ -1,7 +1,7 @@
+# [Module hb_gateway_client.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_gateway_client.erl)
+
-# Module hb_gateway_client #
-* [Description](#description)
Implementation of Arweave's GraphQL API to gain access to specific
items of data stored on the network.
@@ -18,11 +18,12 @@ module will be deprecated.
## Function Index ##
-
Takes a list of messages with name and value fields, and formats
+them as a GraphQL tags argument.
@@ -35,6 +36,14 @@ gateway, then returns {ok, ParsedMsg}.
+
+### ao_dataitem_test/0 * ###
+
+`ao_dataitem_test() -> any()`
+
+Test optimistic index
+
### data/2 ###
@@ -68,6 +77,22 @@ where `<id>` is the base64-url-encoded transaction ID.
Gives the fields of a transaction that are needed to construct an
ANS-104 message.
+
+
+### l1_transaction_test/0 * ###
+
+`l1_transaction_test() -> any()`
+
+Test l1 message from graphql
+
+
+
+### l2_dataitem_test/0 * ###
+
+`l2_dataitem_test() -> any()`
+
+Test l2 message from graphql
+
### normalize_null/1 * ###
@@ -140,3 +165,12 @@ Find the location of the scheduler based on its ID, through GraphQL.
Test that we can get the scheduler location.
+
+
+### subindex_to_tags/1 * ###
+
+`subindex_to_tags(Subindex) -> any()`
+
+Takes a list of messages with `name` and `value` fields, and formats
+them as a GraphQL `tags` argument.
+
diff --git a/docs/source-code-docs/hb_http.md b/docs/resources/source-code/hb_http.md
similarity index 91%
rename from docs/source-code-docs/hb_http.md
rename to docs/resources/source-code/hb_http.md
index 4f47194a8..e364fdf86 100644
--- a/docs/source-code-docs/hb_http.md
+++ b/docs/resources/source-code/hb_http.md
@@ -1,6 +1,7 @@
+# [Module hb_http.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http.erl)
+
-# Module hb_http #
@@ -17,7 +18,7 @@ require special handling in order to be converted to a normalized message.
@@ -115,11 +116,27 @@ Safe wrapper for prometheus_gauge:inc/2.
`log(Type, Event, X3, Reason, Opts) -> any()`
-
+
+
+### maybe_invoke_monitor/2 * ###
+
+`maybe_invoke_monitor(Details, Opts) -> any()`
+
+Invoke the HTTP monitor message with AO-Core, if it is set in the
+node message key. We invoke the given message with the `body` set to a signed
+version of the details. This allows node operators to configure their machine
+to record duration statistics into customized data stores, computations, or
+processes etc. Additionally, we include the `http_reference` value, if set in
+the given `opts`.
+
+We use `hb_ao:get` rather than `hb_opts:get`, as settings configured
+by the `~router@1.0` route `opts` key are unable to generate atoms.
-### method_to_list/1 * ###
+
-`method_to_list(X1) -> any()`
+### method_to_bin/1 * ###
+
+`method_to_bin(X1) -> any()`
@@ -133,6 +150,16 @@ Safe wrapper for prometheus_gauge:inc/2.
`parse_peer(Peer, Opts) -> any()`
+
+
+### record_duration/2 * ###
+
+`record_duration(Details, Opts) -> any()`
+
+Record the duration of the request in an async process. We write the
+data to prometheus if the application is enabled, as well as invoking the
+`http_monitor` if appropriate.
+
### record_response_status/3 * ###
diff --git a/docs/source-code-docs/hb_http_client_sup.md b/docs/resources/source-code/hb_http_client_sup.md
similarity index 84%
rename from docs/source-code-docs/hb_http_client_sup.md
rename to docs/resources/source-code/hb_http_client_sup.md
index 951032fc7..fb0d04e9a 100644
--- a/docs/source-code-docs/hb_http_client_sup.md
+++ b/docs/resources/source-code/hb_http_client_sup.md
@@ -1,7 +1,7 @@
+# [Module hb_http_client_sup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_client_sup.erl)
+
-# Module hb_http_client_sup #
-* [Description](#description)
The supervisor for the gun HTTP client wrapper.
diff --git a/docs/source-code-docs/hb_http_server.md b/docs/resources/source-code/hb_http_server.md
similarity index 58%
rename from docs/source-code-docs/hb_http_server.md
rename to docs/resources/source-code/hb_http_server.md
index 867be3cc8..946292de2 100644
--- a/docs/source-code-docs/hb_http_server.md
+++ b/docs/resources/source-code/hb_http_server.md
@@ -1,7 +1,7 @@
+# [Module hb_http_server.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_server.erl)
+
-# Module hb_http_server #
-* [Description](#description)
A router that attaches a HTTP server to the AO-Core resolver.
@@ -23,8 +23,9 @@ the execution parameters of all downstream requests to be controlled.
@@ -85,6 +86,12 @@ the server ID, which can be used to lookup the node message.
`new_server(RawNodeMsg) -> any()`
+Trigger the creation of a new HTTP server node. Accepts a `NodeMsg`
+message, which is used to configure the server. This function executes the
+`start` hook on the node, giving it the opportunity to modify the `NodeMsg`
+before it is used to configure the server. The `start` hook gives and
+expects the node message to be in the `body` key.
+
### read_body/1 * ###
@@ -105,14 +112,34 @@ Helper to grab the full body of a HTTP request, even if it's chunked.
`set_default_opts(Opts) -> any()`
+
+
+### set_node_opts_test/0 * ###
+
+`set_node_opts_test() -> any()`
+
+Ensure that the `start` hook can be used to modify the node options. We
+do this by creating a message with a device that has a `start` key. This
+key takes the message's body (the anticipated node options) and returns a
+modified version of that body, which will be used to configure the node. We
+then check that the node options were modified as we expected.
+
### set_opts/1 ###
`set_opts(Opts) -> any()`
-Update the `Opts` map that the HTTP server uses for all future
-requests.
+Merges the provided `Opts` with uncommitted values from `Request`,
+preserves the http_server value, and updates node_history by prepending
+the `Request`. If a server reference exists, updates the Cowboy environment
+variable 'node_msg' with the resulting options map.
+
+
+
+### set_opts/2 ###
+
+`set_opts(Request, Opts) -> any()`
diff --git a/docs/source-code-docs/hb_json.md b/docs/resources/source-code/hb_json.md
similarity index 92%
rename from docs/source-code-docs/hb_json.md
rename to docs/resources/source-code/hb_json.md
index 60e7ab780..f8dfe8b70 100644
--- a/docs/source-code-docs/hb_json.md
+++ b/docs/resources/source-code/hb_json.md
@@ -1,7 +1,7 @@
+# [Module hb_json.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_json.erl)
+
-# Module hb_json #
-* [Description](#description)
Wrapper for encoding and decoding JSON.
diff --git a/docs/resources/source-code/hb_keccak.md b/docs/resources/source-code/hb_keccak.md
new file mode 100644
index 000000000..ec3d4f0bb
--- /dev/null
+++ b/docs/resources/source-code/hb_keccak.md
@@ -0,0 +1,77 @@
+# [Module hb_keccak.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_keccak.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
+`KeyValuePairs`: A list of {Name, Value} pairs to check. `Opts`: The original options map to return if validation succeeds.
+
+returns: `{ok, Opts}` if all required options are present, or
+`{error, <<"Missing required parameters: ", MissingOptsStr/binary>>}`
+where `MissingOptsStr` is a comma-separated list of missing option names.
+
+Utility function to check for required options in a list.
+Takes a list of {Name, Value} pairs and returns:
+- {ok, Opts} when all required options are present (Value =/= not_found)
+- {error, ErrorMsg} with a message listing all missing options when any are not_found
+
### config_lookup/2 * ###
@@ -108,6 +128,12 @@ Get an environment variable or configuration key.
Parse a `flat@1.0` encoded file into a map, matching the types of the
keys to those in the default message.
+
+
+### load_bin/1 ###
+
+`load_bin(Bin) -> any()`
+
### mimic_default_types/2 ###
@@ -125,3 +151,17 @@ Mimic the types of the default message for a given map.
Get an option from environment variables, optionally consulting the
`hb_features` of the node if a conditional default tuple is provided.
+
+
+### validate_node_history/1 ###
+
+`validate_node_history(Opts) -> any()`
+
+Validate that the node_history length is within an acceptable range.
+
+
+
+### validate_node_history/3 ###
+
+`validate_node_history(Opts, MinLength, MaxLength) -> any()`
+
diff --git a/docs/source-code-docs/hb_path.md b/docs/resources/source-code/hb_path.md
similarity index 99%
rename from docs/source-code-docs/hb_path.md
rename to docs/resources/source-code/hb_path.md
index 09dce1a75..d3811c713 100644
--- a/docs/source-code-docs/hb_path.md
+++ b/docs/resources/source-code/hb_path.md
@@ -1,7 +1,7 @@
+# [Module hb_path.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_path.erl)
+
-# Module hb_path #
-* [Description](#description)
This module provides utilities for manipulating the paths of a
message: Its request path (referred to in messages as just the `Path`), and
diff --git a/docs/source-code-docs/hb_persistent.md b/docs/resources/source-code/hb_persistent.md
similarity index 98%
rename from docs/source-code-docs/hb_persistent.md
rename to docs/resources/source-code/hb_persistent.md
index ff1e672ab..bfb5bdc31 100644
--- a/docs/source-code-docs/hb_persistent.md
+++ b/docs/resources/source-code/hb_persistent.md
@@ -1,7 +1,7 @@
+# [Module hb_persistent.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_persistent.erl)
+
-# Module hb_persistent #
-* [Description](#description)
Creates and manages long-lived AO-Core resolution processes.
diff --git a/docs/source-code-docs/hb_private.md b/docs/resources/source-code/hb_private.md
similarity index 97%
rename from docs/source-code-docs/hb_private.md
rename to docs/resources/source-code/hb_private.md
index 71a48cb89..4da4ce7d0 100644
--- a/docs/source-code-docs/hb_private.md
+++ b/docs/resources/source-code/hb_private.md
@@ -1,7 +1,7 @@
+# [Module hb_private.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_private.erl)
+
-# Module hb_private #
-* [Description](#description)
This module provides basic helper utilities for managing the
private element of a message, which can be used to store state that is
diff --git a/docs/source-code-docs/hb_process_monitor.md b/docs/resources/source-code/hb_process_monitor.md
similarity index 91%
rename from docs/source-code-docs/hb_process_monitor.md
rename to docs/resources/source-code/hb_process_monitor.md
index 191209a48..b154476be 100644
--- a/docs/source-code-docs/hb_process_monitor.md
+++ b/docs/resources/source-code/hb_process_monitor.md
@@ -1,6 +1,7 @@
+# [Module hb_process_monitor.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_process_monitor.erl)
+
-# Module hb_process_monitor #
diff --git a/docs/source-code-docs/hb_router.md b/docs/resources/source-code/hb_router.md
similarity index 83%
rename from docs/source-code-docs/hb_router.md
rename to docs/resources/source-code/hb_router.md
index ba0fccb88..94038cc12 100644
--- a/docs/source-code-docs/hb_router.md
+++ b/docs/resources/source-code/hb_router.md
@@ -1,6 +1,7 @@
+# [Module hb_router.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_router.erl)
+
-# Module hb_router #
diff --git a/docs/source-code-docs/hb_singleton.md b/docs/resources/source-code/hb_singleton.md
similarity index 91%
rename from docs/source-code-docs/hb_singleton.md
rename to docs/resources/source-code/hb_singleton.md
index 57f0feb89..652f2b669 100644
--- a/docs/source-code-docs/hb_singleton.md
+++ b/docs/resources/source-code/hb_singleton.md
@@ -1,8 +1,7 @@
+# [Module hb_singleton.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_singleton.erl)
+
-# Module hb_singleton #
-* [Description](#description)
-* [Data Types](#types)
A parser that translates AO-Core HTTP API requests in TABM format
into an ordered list of messages to evaluate.
@@ -73,10 +72,10 @@ tabm_message() = map()
## Function Index ##
-
@@ -119,6 +118,12 @@ Step 3: Apply types to values and remove specifiers.
`basic_hashpath_to_test() -> any()`
+
+
+### build/3 * ###
+
+`build(I, Rest, ScopedKeys) -> any()`
+
### build_messages/2 * ###
@@ -135,17 +140,11 @@ Step 5: Merge the base message with the scoped messages.
Attempt Cowboy URL decode, then sanitize the result.
-
-
-### do_build/3 * ###
-
-`do_build(I, Rest, ScopedKeys) -> any()`
-
### from/1 ###
-`from(RawMsg) -> any()`
+`from(Path) -> any()`
Normalize a singleton TABM message into a list of executable AO-Core
messages.
@@ -253,6 +252,15 @@ Extrapolate the inlined key-value pair from a path segment. If the
key has a value, it may provide a type (as with typical keys), but if a
value is not provided, it is assumed to be a boolean `true`.
+
+
+### parse_inlined_keys/2 * ###
+
+`parse_inlined_keys(InlinedMsgBin, Msg) -> any()`
+
+Parse inlined key-value pairs from a path segment. Each key-value pair
+is separated by `&` and is of the form `K=V`.
+
### parse_part/1 * ###
diff --git a/docs/source-code-docs/hb_store.md b/docs/resources/source-code/hb_store.md
similarity index 98%
rename from docs/source-code-docs/hb_store.md
rename to docs/resources/source-code/hb_store.md
index 4b1e57aa6..0e8ad542f 100644
--- a/docs/source-code-docs/hb_store.md
+++ b/docs/resources/source-code/hb_store.md
@@ -1,6 +1,7 @@
+# [Module hb_store.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store.erl)
+
-# Module hb_store #
diff --git a/docs/source-code-docs/hb_store_fs.md b/docs/resources/source-code/hb_store_fs.md
similarity index 97%
rename from docs/source-code-docs/hb_store_fs.md
rename to docs/resources/source-code/hb_store_fs.md
index 366d18296..9e55fef1b 100644
--- a/docs/source-code-docs/hb_store_fs.md
+++ b/docs/resources/source-code/hb_store_fs.md
@@ -1,6 +1,7 @@
+# [Module hb_store_fs.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_fs.erl)
+
-# Module hb_store_fs #
diff --git a/docs/source-code-docs/hb_store_gateway.md b/docs/resources/source-code/hb_store_gateway.md
similarity index 89%
rename from docs/source-code-docs/hb_store_gateway.md
rename to docs/resources/source-code/hb_store_gateway.md
index 99b800f27..2411d5d80 100644
--- a/docs/source-code-docs/hb_store_gateway.md
+++ b/docs/resources/source-code/hb_store_gateway.md
@@ -1,7 +1,7 @@
+# [Module hb_store_gateway.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_gateway.erl)
+
-# Module hb_store_gateway #
-* [Description](#description)
A store module that reads data from the nodes Arweave gateway and
GraphQL routes, additionally including additional store-specific routes.
@@ -11,7 +11,7 @@ GraphQL routes, additionally including additional store-specific routes.
## Function Index ##
-
@@ -113,6 +113,14 @@ that the default routes allow access to the item. If the test below were to
produce the same result, despite an empty 'only' route list, then we would
know that the module is not respecting the route list.
+
+
+### store_opts_test/0 * ###
+
+`store_opts_test() -> any()`
+
+Test to verify store opts is being set for Data-Protocol ao
+
### type/2 ###
diff --git a/docs/source-code-docs/hb_store_remote_node.md b/docs/resources/source-code/hb_store_remote_node.md
similarity index 96%
rename from docs/source-code-docs/hb_store_remote_node.md
rename to docs/resources/source-code/hb_store_remote_node.md
index 9c8735f03..34a886174 100644
--- a/docs/source-code-docs/hb_store_remote_node.md
+++ b/docs/resources/source-code/hb_store_remote_node.md
@@ -1,7 +1,7 @@
+# [Module hb_store_remote_node.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_remote_node.erl)
+
-# Module hb_store_remote_node #
-* [Description](#description)
A store module that reads data from another AO node.
diff --git a/docs/source-code-docs/hb_store_rocksdb.md b/docs/resources/source-code/hb_store_rocksdb.md
similarity index 98%
rename from docs/source-code-docs/hb_store_rocksdb.md
rename to docs/resources/source-code/hb_store_rocksdb.md
index 3cb291511..1da975be5 100644
--- a/docs/source-code-docs/hb_store_rocksdb.md
+++ b/docs/resources/source-code/hb_store_rocksdb.md
@@ -1,8 +1,7 @@
+# [Module hb_store_rocksdb.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_store_rocksdb.erl)
+
-# Module hb_store_rocksdb #
-* [Description](#description)
-* [Data Types](#types)
A process wrapper over rocksdb storage.
diff --git a/docs/source-code-docs/hb_structured_fields.md b/docs/resources/source-code/hb_structured_fields.md
similarity index 99%
rename from docs/source-code-docs/hb_structured_fields.md
rename to docs/resources/source-code/hb_structured_fields.md
index 8d6981ea7..5aa7d74fc 100644
--- a/docs/source-code-docs/hb_structured_fields.md
+++ b/docs/resources/source-code/hb_structured_fields.md
@@ -1,8 +1,7 @@
+# [Module hb_structured_fields.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_structured_fields.erl)
+
-# Module hb_structured_fields #
-* [Description](#description)
-* [Data Types](#types)
A module for parsing and converting between Erlang and HTTP Structured
Fields, as described in RFC-9651.
diff --git a/docs/source-code-docs/hb_sup.md b/docs/resources/source-code/hb_sup.md
similarity index 91%
rename from docs/source-code-docs/hb_sup.md
rename to docs/resources/source-code/hb_sup.md
index b06c3eeec..3b29e9f87 100644
--- a/docs/source-code-docs/hb_sup.md
+++ b/docs/resources/source-code/hb_sup.md
@@ -1,6 +1,7 @@
+# [Module hb_sup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_sup.erl)
+
-# Module hb_sup #
__Behaviours:__ [`supervisor`](supervisor.md).
diff --git a/docs/source-code-docs/hb_test_utils.md b/docs/resources/source-code/hb_test_utils.md
similarity index 93%
rename from docs/source-code-docs/hb_test_utils.md
rename to docs/resources/source-code/hb_test_utils.md
index b96035c80..46ba4d961 100644
--- a/docs/source-code-docs/hb_test_utils.md
+++ b/docs/resources/source-code/hb_test_utils.md
@@ -1,7 +1,7 @@
+# [Module hb_test_utils.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_test_utils.erl)
+
-# Module hb_test_utils #
-* [Description](#description)
Simple utilities for testing HyperBEAM.
diff --git a/docs/resources/source-code/hb_tracer.md b/docs/resources/source-code/hb_tracer.md
new file mode 100644
index 000000000..a880ee8be
--- /dev/null
+++ b/docs/resources/source-code/hb_tracer.md
@@ -0,0 +1,78 @@
+# [Module hb_tracer.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_tracer.erl)
+
+
+
+
+A module for tracing the flow of requests through the system.
+
+
+
+## Description ##
+This allows for tracking the lifecycle of a request from HTTP receipt through processing and response.
+
+## Function Index ##
+
+
+
Print the trace of the current stack, up to the first non-hyperbeam
@@ -202,17 +202,23 @@ Format a string with an indentation level.
Format a map as either a single line or a multi-line string depending
on the value of the `debug_print_map_line_threshold` runtime option.
-
+
-### format_trace/2 * ###
+### format_trace/1 ###
-`format_trace(Rest, Prefixes) -> any()`
+`format_trace(Stack) -> any()`
Format a stack trace as a list of strings, one for each stack frame.
Each stack frame is formatted if it matches the `stack_print_prefixes`
option. At the first frame that does not match a prefix in the
`stack_print_prefixes` option, the rest of the stack is not formatted.
+
+
+### format_trace/2 * ###
+
+`format_trace(Rest, Prefixes) -> any()`
+
### format_trace_short/1 ###
@@ -278,7 +284,8 @@ as well as a standard map of HyperBEAM runtime options.
`human_id(Bin) -> any()`
Convert a native binary ID to a human readable ID. If the ID is already
-a human readable ID, it is returned as is.
+a human readable ID, it is returned as is. If it is an ethereum address, it
+is returned as is.
@@ -333,6 +340,20 @@ Is the given module part of HyperBEAM?
Determine whether a binary is human-readable.
+
+
+### is_ordered_list/1 ###
+
+`is_ordered_list(Msg) -> any()`
+
+Determine if the message given is an ordered list, starting from 1.
+
+
+
+### is_ordered_list/2 * ###
+
+`is_ordered_list(N, Msg) -> any()`
+
### is_string_list/1 ###
diff --git a/docs/resources/source-code/hb_volume.md b/docs/resources/source-code/hb_volume.md
new file mode 100644
index 000000000..8df22b076
--- /dev/null
+++ b/docs/resources/source-code/hb_volume.md
@@ -0,0 +1,146 @@
+# [Module hb_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_volume.erl)
+
+
+
+
+
+
+## Function Index ##
+
+
+
+
+
diff --git a/docs/resources/source-code/index.md b/docs/resources/source-code/index.md
new file mode 100644
index 000000000..e0bb2d593
--- /dev/null
+++ b/docs/resources/source-code/index.md
@@ -0,0 +1,23 @@
+# Source Code Documentation
+
+Welcome to the source code documentation for HyperBEAM. This section provides detailed insights into the codebase, helping developers understand the structure, functionality, and implementation details of HyperBEAM and its components.
+
+## Overview
+
+HyperBEAM is built with a modular architecture to ensure scalability, maintainability, and extensibility. The source code is organized into distinct components, each serving a specific purpose within the ecosystem.
+
+## Sections
+
+- **HyperBEAM Core**: The main framework that orchestrates data processing, storage, and routing.
+- **Compute Unit**: Handles computational tasks and integrates with the HyperBEAM core for distributed processing.
+- **Trusted Execution Environment (TEE)**: Ensures secure execution of sensitive operations.
+- **Client Libraries**: Tools and SDKs for interacting with HyperBEAM, including the JavaScript client.
+
+## Getting Started
+
+To explore the source code, you can clone the repository from [GitHub](https://github.com/permaweb/HyperBEAM).
+
+## Navigation
+
+Use the navigation menu to dive into specific parts of the codebase. Each module includes detailed documentation, code comments, and examples to assist in understanding and contributing to the project.
+
diff --git a/docs/source-code-docs/rsa_pss.md b/docs/resources/source-code/rsa_pss.md
similarity index 98%
rename from docs/source-code-docs/rsa_pss.md
rename to docs/resources/source-code/rsa_pss.md
index 068573148..afa0543e3 100644
--- a/docs/source-code-docs/rsa_pss.md
+++ b/docs/resources/source-code/rsa_pss.md
@@ -1,8 +1,7 @@
+# [Module rsa_pss.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/rsa_pss.erl)
+
-# Module rsa_pss #
-* [Description](#description)
-* [Data Types](#types)
Distributed under the Mozilla Public License v2.0.
diff --git a/docs/source-code-docs/stylesheet.css b/docs/resources/source-code/stylesheet.css
similarity index 100%
rename from docs/source-code-docs/stylesheet.css
rename to docs/resources/source-code/stylesheet.css
diff --git a/docs/run/configuring-your-machine.md b/docs/run/configuring-your-machine.md
new file mode 100644
index 000000000..1154297b4
--- /dev/null
+++ b/docs/run/configuring-your-machine.md
@@ -0,0 +1,157 @@
+# Configuring Your HyperBEAM Node
+
+This guide details the various ways to configure your HyperBEAM node's behavior, including ports, storage, keys, and logging.
+
+## Configuration (`config.flat`)
+
+The primary way to configure your HyperBEAM node is through a `config.flat` file located in the node's working directory or specified by the `HB_CONFIG_LOCATION` environment variable.
+
+This file uses a simple `Key = Value.` format (note the period at the end of each line).
+
+**Example `config.flat`:**
+
+```erlang
+% Set the HTTP port
+port = 8080.
+
+% Specify the Arweave key file
+priv_key_location = "/path/to/your/wallet.json".
+
+% Set the data store directory
+% Note: Storage configuration can be complex. See below.
+% store = [{local, [{root, <<"./node_data_mainnet">>}]}]. % Example of complex config, not for config.flat
+
+% Enable verbose logging for specific modules
+% debug_print = [hb_http, dev_router]. % Example of complex config, not for config.flat
+```
+
+Below is a reference of commonly used configuration keys. Remember that `config.flat` only supports simple key-value pairs (Atoms, Strings, Integers, Booleans). For complex configurations (Lists, Maps), you must use environment variables or `hb:start_mainnet/1`.
+
+### Core Configuration
+
+These options control fundamental HyperBEAM behavior.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `port` | Integer | 8734 | HTTP API port |
+| `hb_config_location` | String | "config.flat" | Path to configuration file |
+| `priv_key_location` | String | "hyperbeam-key.json" | Path to operator wallet key file |
+| `mode` | Atom | debug | Execution mode (debug, prod) |
+
+### Server & Network Configuration
+
+These options control networking behavior and HTTP settings.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `host` | String | "localhost" | Choice of remote node for non-local tasks |
+| `gateway` | String | "https://arweave.net" | Default gateway |
+| `bundler_ans104` | String | "https://up.arweave.net:443" | Location of ANS-104 bundler |
+| `protocol` | Atom | http2 | Protocol for HTTP requests (http1, http2, http3) |
+| `http_client` | Atom | gun | HTTP client to use (gun, httpc) |
+| `http_connect_timeout` | Integer | 5000 | HTTP connection timeout in milliseconds |
+| `http_keepalive` | Integer | 120000 | HTTP keepalive time in milliseconds |
+| `http_request_send_timeout` | Integer | 60000 | HTTP request send timeout in milliseconds |
+| `relay_http_client` | Atom | httpc | HTTP client for the relay device |
+
+
+### Security & Identity
+
+These options control identity and security settings.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `scheduler_location_ttl` | Integer | 604800000 | TTL for scheduler registration (7 days in ms) |
+
+
+### Caching & Storage
+
+These options control caching behavior. **Note:** Detailed storage configuration (`store` option) involves complex data structures and cannot be set via `config.flat`.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `cache_lookup_hueristics` | Boolean | false | Whether to use caching heuristics or always consult the local data store |
+| `access_remote_cache_for_client` | Boolean | false | Whether to access data from remote caches for client requests |
+| `store_all_signed` | Boolean | true | Whether the node should store all signed messages |
+| `await_inprogress` | Atom/Boolean | named | Whether to await in-progress executions (false, named, true) |
+
+
+### Execution & Processing
+
+These options control how HyperBEAM executes messages and processes.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `scheduling_mode` | Atom | local_confirmation | When to inform recipients about scheduled assignments (aggressive, local_confirmation, remote_confirmation) |
+| `compute_mode` | Atom | lazy | Whether to execute more messages after returning a result (aggressive, lazy) |
+| `process_workers` | Boolean | true | Whether the node should use persistent processes |
+| `client_error_strategy` | Atom | throw | What to do if a client error occurs |
+| `wasm_allow_aot` | Boolean | false | Allow ahead-of-time compilation for WASM |
+
+### Device Management
+
+These options control how HyperBEAM manages devices.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `load_remote_devices` | Boolean | false | Whether to load devices from remote signers |
+
+
+### Debug & Development
+
+These options control debugging and development features.
+
+| Option | Type | Default | Description |
+|--------|------|---------|-------------|
+| `debug_stack_depth` | Integer | 40 | Maximum stack depth for debug printing |
+| `debug_print_map_line_threshold` | Integer | 30 | Maximum lines for map printing |
+| `debug_print_binary_max` | Integer | 60 | Maximum binary size for debug printing |
+| `debug_print_indent` | Integer | 2 | Indentation for debug printing |
+| `debug_print_trace` | Atom | short | Trace mode (short, false) |
+| `short_trace_len` | Integer | 5 | Length of short traces |
+| `debug_hide_metadata` | Boolean | true | Whether to hide metadata in debug output |
+| `debug_ids` | Boolean | false | Whether to print IDs in debug output |
+| `debug_hide_priv` | Boolean | true | Whether to hide private data in debug output |
+
+
+**Note:** For the *absolute complete* and most up-to-date list, including complex options not suitable for `config.flat`, refer to the `default_message/0` function in the `hb_opts` module source code.
+
+## Overrides (Environment Variables & Args)
+
+You can override settings from `config.flat` or provide values if the file is missing using environment variables or command-line arguments.
+
+**Using Environment Variables:**
+
+Environment variables typically use an `HB_` prefix followed by the configuration key in uppercase.
+
+* **`HB_PORT=<port>`:** Overrides `hb_port`.
+ * Example: `HB_PORT=8080 rebar3 shell`
+* **`HB_KEY=<path>`:** Overrides `hb_key`.
+ * Example: `HB_KEY=~/.keys/arweave_key.json rebar3 shell`
+* **`HB_STORE=<path>`:** Overrides `hb_store`.
+ * Example: `HB_STORE=./node_data_1 rebar3 shell`
+* **`HB_PRINT=<modules>`:** Overrides `hb_print`. `<modules>` can be `true` (or `1`), or a comma-separated list of modules/topics (e.g., `hb_path,hb_ao,ao_result`).
+ * Example: `HB_PRINT=hb_http,dev_router rebar3 shell`
+* **`HB_CONFIG_LOCATION=<path>`:** Specifies a custom location for the configuration file.
+
+**Using `erl_opts` (Direct Erlang VM Arguments):**
+
+You can also pass arguments directly to the Erlang VM using the `-<key> <value>` format within `erl_opts`. This is generally less common for application configuration than `config.flat` or environment variables.
+
+```bash
+rebar3 shell --erl_opts "-hb_port 8080 -hb_key path/to/key.json"
+```
+
+**Order of Precedence:**
+
+1. Command-line arguments (`erl_opts`).
+2. Settings in `config.flat`.
+3. Environment variables (`HB_*`).
+4. Default values from `hb_opts.erl`.
+
+## Configuration in Releases
+
+When running a release build (see [Running a HyperBEAM Node](./running-a-hyperbeam-node.md)), configuration works similarly:
+
+1. A `config.flat` file will be present in the release directory (e.g., `_build/default/rel/hb/config.flat`). Edit this file to set your desired parameters for the release environment.
+2. Environment variables (`HB_*`) can still be used to override the settings in the release's `config.flat` when starting the node using the `bin/hb` script.
diff --git a/docs/run/joining-running-a-router.md b/docs/run/joining-running-a-router.md
new file mode 100644
index 000000000..268990cf2
--- /dev/null
+++ b/docs/run/joining-running-a-router.md
@@ -0,0 +1,68 @@
+# Joining or Running a Router Node
+
+Router nodes play a crucial role in the HyperBEAM network by directing incoming HTTP requests to appropriate worker nodes capable of handling the requested computation or data retrieval. They act as intelligent load balancers and entry points into the AO ecosystem.
+
+!!! info "Advanced Topic"
+ Configuring and running a production-grade router involves considerations beyond the scope of this introductory guide, including network topology, security, high availability, and performance tuning.
+
+## What is a Router?
+
+In HyperBEAM, the `dev_router` module (and associated logic) implements routing functionality. A node configured as a router typically:
+
+1. Receives external HTTP requests (HyperPATH calls).
+2. Parses the request path to determine the target process, device, and desired operation.
+3. Consults its routing table or logic to select an appropriate downstream worker node (which might be itself or another node).
+4. Forwards the request to the selected worker.
+5. Receives the response from the worker.
+6. Returns the response to the original client.
+
+Routers often maintain information about the capabilities and load of worker nodes they know about.
+
+## Configuring Routing Behavior
+
+Routing logic is primarily configured through node options, often managed via `hb_opts` or environment variables when starting the node. Key aspects include:
+
+* **Route Definitions:** Defining patterns (templates) and corresponding downstream targets (worker node URLs or internal handlers). Routes are typically ordered by precedence.
+* **Load Balancing Strategy:** How the router chooses among multiple potential workers for a given route (e.g., round-robin, least connections, latency-based).
+* **Worker Discovery/Management:** How the router learns about available worker nodes and their status.
+
+**Example Configuration Snippet (Conceptual - from `hb_opts` or config file):**
+
+```erlang
+{
+ routes,
+ [
+ #{ template => "/~meta@1.0/.*", target => self }, % Handle meta locally
+ #{ template => "/PROCESS_ID1~process@1.0/.*", target => "http://worker1.example.com" },
+ #{ template => "/PROCESS_ID2~process@1.0/.*", target => "http://worker2.example.com" },
+ #{ template => "/.*~wasm64@1.0/.*", target => ["http://wasm_worker1", "http://wasm_worker2"], strategy => round_robin }, % Route WASM requests
+ #{ template => "/.*", target => "http://default_worker.example.com" } % Default fallback
+ ]
+},
+{ router_load_balancing_strategy, latency_aware }
+```
+
+*(Note: The actual configuration format and options should be verified in the `hb_opts.erl` and `dev_router.erl` source code.)*
+
+## Running a Simple Router
+
+While a dedicated router setup is complex, any HyperBEAM node implicitly performs some level of routing, especially if it needs to interact with other nodes (e.g., via the `~relay@1.0` device). The default configuration might route certain requests internally or have basic forwarding capabilities.
+
+To run a node that explicitly acts *more* like a router, you would typically configure it with specific `routes` pointing to other worker nodes, potentially disabling local execution for certain devices it intends to forward.
+
+## Joining an Existing Router Network
+
+As a user or developer, you typically don't *run* the main public routers (like `router-1.forward.computer`). Instead, you configure your client applications (or your own local node if it needs to relay requests) to *use* these public routers as entry points.
+
+When making HyperPATH calls, you simply target the public router's URL:
+
+```
+https://<router-url>/<process-id>~<device>/...
+```
+The router handles directing your request to an appropriate compute node.
+
+## Further Exploration
+
+* Examine the `dev_router.erl` source code for detailed implementation.
+* Review the available configuration options in `hb_opts.erl` related to routing (`routes`, strategies, etc.).
+* Consult community channels or advanced documentation for best practices on deploying production routers.
diff --git a/docs/run/running-a-hyperbeam-node.md b/docs/run/running-a-hyperbeam-node.md
new file mode 100644
index 000000000..30f9df072
--- /dev/null
+++ b/docs/run/running-a-hyperbeam-node.md
@@ -0,0 +1,250 @@
+# Running a HyperBEAM Node
+
+This guide provides the basics for running your own HyperBEAM node, installing dependencies, and connecting to the AO network.
+
+## System Dependencies
+
+To successfully build and run a HyperBEAM node, your system needs several software dependencies installed.
+
+=== "macOS"
+ Install core dependencies using [Homebrew](https://brew.sh/):
+
+ ```bash
+ brew install cmake git pkg-config openssl ncurses
+ ```
+
+=== "Linux (Debian/Ubuntu)"
+ Install core dependencies using `apt`:
+ ```bash
+ sudo apt-get update && sudo apt-get install -y --no-install-recommends \
+ build-essential \
+ cmake \
+ git \
+ pkg-config \
+ ncurses-dev \
+ libssl-dev \
+ sudo \
+ curl \
+ ca-certificates
+ ```
+
+=== "Windows (WSL)"
+ Using the Windows Subsystem for Linux (WSL) with a distribution like Ubuntu is recommended. Follow the Linux (Debian/Ubuntu) instructions within your WSL environment.
+
+
+
+### Erlang/OTP
+
+HyperBEAM is built on Erlang/OTP. You need a compatible version installed (check the `rebar.config` or project documentation for specific version requirements, **typically OTP 27**).
+
+Installation methods:
+
+=== "macOS (brew)"
+ ```bash
+ brew install erlang
+ ```
+
+=== "Linux (apt)"
+ ```bash
+ sudo apt install erlang
+ ```
+
+
+=== "Source Build"
+ Download from [erlang.org](https://www.erlang.org/downloads) and follow the build instructions for your platform.
+
+### Rebar3
+
+Rebar3 is the build tool for Erlang projects.
+
+Installation methods:
+
+=== "macOS (brew)"
+ ```bash
+ brew install rebar3
+ ```
+
+=== "Linux / macOS (Direct Download)"
+ Get the `rebar3` binary from the [official website](https://rebar3.org/). Place the downloaded `rebar3` file in your system's `PATH` (e.g., `/usr/local/bin`) and make it executable (`chmod +x rebar3`).
+
+
+
+### Node.js
+
+Node.js might be required for certain JavaScript-related tools or dependencies.
+
+Installation methods:
+
+=== "macOS (brew)"
+ ```bash
+ brew install node
+ ```
+
+=== "Linux (apt)"
+ ```bash
+ # Check your distribution's recommended method, might need nodesource repo
+ sudo apt install nodejs npm
+ ```
+
+=== "asdf (Recommended)"
+ `asdf-vm` with the `asdf-nodejs` plugin is recommended.
+ ```bash
+ asdf plugin add nodejs https://github.com/asdf-vm/asdf-nodejs.git
+ asdf install nodejs # e.g., lts
+ asdf global nodejs
+ ```
+
+### Rust
+
+Rust is needed if you intend to work with or build components involving WebAssembly (WASM) or certain Native Implemented Functions (NIFs) used by some devices (like `~snp@1.0`).
+
+The recommended way to install Rust on **all platforms** is via `rustup`:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+source "$HOME/.cargo/env" # Or follow the instructions provided by rustup
+```
+
+## Prerequisites for Running
+
+Before starting a node, ensure you have:
+
+* Installed the [system dependencies](#system-dependencies) mentioned above.
+* Cloned the [HyperBEAM repository](https://github.com/permaweb/HyperBEAM) (`git clone ...`).
+* Compiled the source code (`rebar3 compile` in the repo directory).
+* An Arweave **wallet keyfile** (e.g., generated via [Wander](https://www.wander.app)). The path to this file is typically set via the `hb_key` configuration option (see [Configuring Your HyperBEAM Node](./configuring-your-machine.md)).
+
+## Starting a Basic Node
+
+The simplest way to start a HyperBEAM node for development or testing is using `rebar3` from the repository's root directory:
+
+```bash
+rebar3 shell
+```
+
+This command:
+
+1. Starts the Erlang Virtual Machine (BEAM) with all HyperBEAM modules loaded.
+2. Initializes the node with default settings (from `hb_opts.erl`).
+3. Starts the default HTTP server (typically on **port 10000**), making the node accessible via HyperPATHs.
+4. Drops you into an interactive Erlang shell where you can interact with the running node.
+
+This basic setup is suitable for local development and exploring HyperBEAM's functionalities.
+
+## Optional Build Profiles
+
+HyperBEAM uses build profiles to enable optional features, often requiring extra dependencies. To run a node with specific profiles enabled, use `rebar3 as ... shell`:
+
+**Available Profiles (Examples):**
+
+* `genesis_wasm`: Enables Genesis WebAssembly support.
+* `rocksdb`: Enables the RocksDB storage backend.
+* `http3`: Enables HTTP/3 support.
+
+**Example Usage:**
+
+```bash
+# Start with RocksDB profile
+rebar3 as rocksdb shell
+
+# Start with RocksDB and Genesis WASM profiles
+rebar3 as rocksdb, genesis_wasm shell
+```
+
+*Note: Choose profiles **before** starting the shell, as they affect compile-time options.*
+
+## Node Configuration
+
+HyperBEAM offers various configuration options (port, key file, data storage, logging, etc.). These are primarily set using a `config.flat` file and can be overridden by environment variables or command-line arguments.
+
+See the dedicated **[Configuring Your HyperBEAM Node](./configuring-your-machine.md)** guide for detailed information on all configuration methods and options.
+
+## Verify Installation
+
+To quickly check if your node is running and accessible, you can send a request to its `~meta@1.0` device (assuming default port 10000):
+
+```bash
+curl http://localhost:10000/~meta@1.0/info
+```
+
+A JSON response containing node information indicates success.
+
+## Running for Production (Mainnet)
+
+While you can connect to the main AO network using the `rebar3 shell` for testing purposes (potentially using specific configurations or helper functions like `hb:start_mainnet/1` if available and applicable), the standard and recommended method for a stable production deployment (like running on the mainnet) is to build and run a **release**.
+
+**1. Build the Release:**
+
+From the root of the HyperBEAM repository, build the release package. You might include specific profiles needed for your mainnet setup (e.g., `rocksdb` if you intend to use it):
+
+```bash
+# Build release with default profile
+rebar3 release
+
+# Or, build with specific profiles (example)
+# rebar3 as rocksdb release
+```
+
+This command compiles the project and packages it along with the Erlang Runtime System (ERTS) and all dependencies into a directory, typically `_build/default/rel/hb`.
+
+**2. Configure the Release:**
+
+Navigate into the release directory (e.g., `cd _build/default/rel/hb`). Ensure you have a correctly configured `config.flat` file here. See the [configuration guide](./configuring-your-machine.md) for details on setting mainnet parameters (port, key file location, store path, specific peers, etc.). Environment variables can also be used to override settings in the release's `config.flat` when starting the node.
+
+**3. Start the Node:**
+
+Use the generated start script (`bin/hb`) to run the node:
+
+```bash
+# Start the node in the foreground (logs to console)
+./bin/hb console
+
+# Start the node as a background daemon
+./bin/hb start
+
+# Check the status
+./bin/hb ping
+./bin/hb status
+
+# Stop the node
+./bin/hb stop
+```
+
+Consult the generated `bin/hb` script or Erlang/OTP documentation for more advanced start-up options (e.g., attaching a remote shell).
+
+Running as a release provides a more robust, isolated, and manageable way to operate a node compared to running directly from the `rebar3 shell`.
+
+## Stopping the Node (rebar3 shell)
+
+To stop the node running *within the `rebar3 shell`*, press `Ctrl+C` twice or use the Erlang command `q().`.
+
+## Next Steps
+
+* **Configure Your Node:** Deep dive into [configuration options](./configuring-your-machine.md).
+* **TEE Nodes:** Learn about running nodes in [Trusted Execution Environments](./tee-nodes.md) for enhanced security.
+* **Routers:** Understand how to configure and run a [router node](./joining-running-a-router.md).
diff --git a/docs/tee/index.md b/docs/run/tee-nodes.md
similarity index 100%
rename from docs/tee/index.md
rename to docs/run/tee-nodes.md
diff --git a/docs/source-code-docs/dev_cron.md b/docs/source-code-docs/dev_cron.md
deleted file mode 100644
index 99761ec7c..000000000
--- a/docs/source-code-docs/dev_cron.md
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
-# Module dev_cron #
-
-
-
-## Function Index ##
-
-
-
-add_trusted_node(NodeAddr::binary(), Report::map(), RequesterPubKey::term(), Opts::map()) -> ok
-
-
-
-`NodeAddr`: The joining node's address. `Report`: The commitment report provided by the joining node. `RequesterPubKey`: The joining node's public key. `Opts`: A map of configuration options.
-
-returns: ok.
-
-Add a joining node's details to the trusted nodes list.
-Updates the local configuration with the new trusted node's commitment report
-and public key.
-
-
-
-### become/3 ###
-
-
-
-
-`M1`: The message containing the target node's encrypted private key and IV. `M2`: Ignored parameter. `Opts`: A map of configuration options. Must include `priv_green_zone_aes`.
-
-returns: {ok, Map} on success, where Map includes:
-- status: 200
-- message: confirmation text
-- node: the target node's address
-Returns {error, Reason} if the node is not part of the green zone.
-
-Clone the identity of a target node.
-Allows a node to adopt the identity of a target node by retrieving the target
-node's encrypted private key and IV, decrypting it using the shared AES key,
-and updating the local node's wallet with the target node's keypair.
-
-
-
-### calculate_node_message/3 * ###
-
-`calculate_node_message(RequiredOpts, Req, List) -> any()`
-
-Generate the node message that should be set prior to joining a green zone.
-This function takes a required opts message, a request message, and an `adopt-config`
-value. The `adopt-config` value can be a boolean, a list of fields that should be
-included in the node message from the request, or a binary string of fields to
-include, separated by commas.
-
-
-
-### decrypt_zone_key/2 * ###
-
-
-
-
-`EncZoneKey`: The encrypted zone AES key (Base64 encoded or binary). `Opts`: A map of configuration options.
-
-returns: {ok, DecryptedKey} on success, where DecryptedKey is the shared AES key.
-
-Decrypt the zone AES key using the node's RSA private key.
-Decrypts the encrypted zone AES key using the RSA private key from the node's
-wallet.
-
-
-
-### default_zone_required_opts/1 * ###
-
-`default_zone_required_opts(Opts) -> any()`
-
-The default required options for a green zone. These are intended as
-sane basic requirements for a green zone:
-- The node will not load remote devices (or trust extra peers).
-- The node will use only the default preloaded devices (found on the
-initiating machine).
-- The node uses the default store configuration.
-- The node will not change its routes from the defaults.
-- The peer's preprocessor and postprocessor are the same as the local node's.
-- The node will not schedule messages. Without coordination, peers in the
-green zone will schedule messages without regard for avoiding
-double-assignment of slots.
-- The node must be in a permanent state (no further configuration changes
-being allowed).
-Each of these options is derived from the present node's configuration.
-
-
-
-### encrypt_payload/2 * ###
-
-
-
-
-`AESKey`: The shared AES key (256-bit binary). `RequesterPubKey`: The requester's public RSA key.
-
-returns: The AES key encrypted with the RSA public key.
-
-Encrypt the shared AES key with the requester's RSA public key.
-Encrypts the shared AES key using the RSA public key provided by the joining
-node. The RSA public key is extracted from a tuple and converted into a
-record suitable for encryption.
-
-
-
-### finalize_become/5 * ###
-
-`finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> any()`
-
-
-
-### init/3 ###
-
-
-
-
-`M1`: Ignored parameter. `M2`: Optionally contains a `required-config` map. If not provided, the
- default required config (derived from the nodes base configuration)
- will be used. `Opts`: A map containing configuration options. If the wallet is not already
- provided (under key `priv_wallet`), a new one will be created.
-
-returns: {ok, Msg} where Msg is a binary confirmation message.
-
-Initialize the green zone.
-Sets up the node's cryptographic identity by ensuring that a wallet (keypair)
-exists and generating a shared AES key for secure communication. The wallet,
-AES key, and an empty trusted nodes list are stored in the node's configuration.
-
-
-
-### join/3 ###
-
-
-
-
-`M1`: The join request message containing a header with the target peer's
- address. `M2`: Ignored parameter. `Opts`: A map of configuration options.
-
-returns: {ok, Map} on success with join response details, or {error, Reason}
-on failure.
-
-Initiate the join process for a node (Node B).
-
-When Node B wishes to join an existing green zone, it sends a GET request to
-its local join endpoint.
-This request includes a header with the target peer's address (Node A).
-
-Based on the presence of a peer address:
-- If the target peer is specified, Node B internally routes the request to
-the join_peer flow, where it generates an commitment report and prepares
-a POST request to forward to Node A.
-- If no peer address is present, the join request is processed locally via
-the validate_join flow.
-
-
-
-### join_peer/5 * ###
-
-
-
-
-`M1`: Ignored parameter. `M2`: Ignored parameter. `Opts`: A map of configuration options.
-
-returns: {ok, Map} on success with a confirmation message, or {error, Map} on failure.
-
-Process an internal join request when a target peer is specified.
-
-In this flow (executed on Node B):
-1. Node B generates an commitment report and prepares a POST request.
-2. It then forwards the POST request to Node A's join endpoint.
-3. Upon receiving a response from Node A, Node B decrypts the returned
-zone-key (an encrypted shared AES key) using its local private key, then
-updates its configuration with the shared AES key.
-
-
-
-### key/3 ###
-
-
-
-
-`M1`: Ignored parameter. `M2`: Ignored parameter. `Opts`: A map of configuration options. Must include keys `priv_wallet`
- and `priv_green_zone_aes`.
-
-returns: {ok, Map} on success, where Map contains:
-- status: 200
-- encrypted_key: the encrypted private key (Base64 encoded)
-- iv: the initialization vector (Base64 encoded)
-Returns {error, Reason} if the node is not part of the green zone.
-
-Retrieve and encrypt the node's private key.
-Encrypts the node's private key using the shared AES key in AES-256-GCM mode.
-It returns the encrypted key along with the initialization vector (IV) needed
-for decryption.
-
-
-
-### maybe_set_zone_opts/4 * ###
-
-`maybe_set_zone_opts(PeerLocation, PeerID, Req, InitOpts) -> any()`
-
-If the operator requests it, the node can automatically adopt the
-necessary configuration to join a green zone. `adopt-config` can be a boolean,
-a list of fields that should be included in the node message, alongside the
-required config of the green zone they are joining.
-
-
-
-### rsa_wallet_integration_test/0 * ###
-
-`rsa_wallet_integration_test() -> any()`
-
-Test RSA operations with the existing wallet structure.
-This test function verifies that encryption and decryption using the RSA keys
-from the wallet work correctly. It creates a new wallet, encrypts a test
-message with the RSA public key, and then decrypts it with the RSA private
-key, asserting that the decrypted message matches the original.
-
-
-
-### validate_join/3 * ###
-
-
-
-
-`M1`: Ignored parameter. `Req`: The join request message containing the commitment report and
- other join details. `Opts`: A map of configuration options.
-
-returns: {ok, Map} on success with join response details, or {error, Reason}
-if verification fails.
-
-Validate an incoming join request.
-
-When Node A receives a POST join request from Node B, this routine is executed:
-1. It extracts the commitment report, the requesting node's address, and
-the encoded public key.
-2. It verifies the commitment report included in the request.
-3. If the report is valid, Node A adds Node B to its list of trusted nodes.
-4. Node A then encrypts the shared AES key (zone-key) with Node B's public
-key and returns it along with its public key.
-
-
-
-### validate_peer_opts/2 * ###
-
-`validate_peer_opts(Req, Opts) -> any()`
-
diff --git a/docs/source-code-docs/dev_lua.md b/docs/source-code-docs/dev_lua.md
deleted file mode 100644
index 8b6982deb..000000000
--- a/docs/source-code-docs/dev_lua.md
+++ /dev/null
@@ -1,220 +0,0 @@
-
-
-# Module dev_lua #
-* [Description](#description)
-
-A device that calls a Lua script upon a request and returns the result.
-
-
-
-## Function Index ##
-
-
-
-
-
-
-
-## Function Details ##
-
-
-
-### aos_process_benchmark_test_/0 * ###
-
-`aos_process_benchmark_test_() -> any()`
-
-Benchmark the performance of Lua executions.
-
-
-
-### compute/4 * ###
-
-`compute(Key, RawBase, Req, Opts) -> any()`
-
-Call the Lua script with the given arguments.
-
-
-
-### decode/1 * ###
-
-`decode(Map) -> any()`
-
-Decode a Lua result into a HyperBEAM `structured@1.0` message.
-
-
-
-### direct_benchmark_test/0 * ###
-
-`direct_benchmark_test() -> any()`
-
-Benchmark the performance of Lua executions.
-
-
-
-### encode/1 * ###
-
-`encode(Map) -> any()`
-
-Encode a HyperBEAM `structured@1.0` message into a Lua result.
-
-
-
-### ensure_initialized/3 * ###
-
-`ensure_initialized(Base, Req, Opts) -> any()`
-
-Initialize the Lua VM if it is not already initialized. Optionally takes
-the script as a Binary string. If not provided, the script will be loaded
-from the base message.
-
-
-
-### execute_aos_call/1 * ###
-
-`execute_aos_call(Base) -> any()`
-
-
-
-### execute_aos_call/2 * ###
-
-`execute_aos_call(Base, Req) -> any()`
-
-
-
-### find_script/2 * ###
-
-`find_script(Base, Opts) -> any()`
-
-Find the script in the base message, either by ID or by string.
-
-
-
-### generate_lua_process/1 * ###
-
-`generate_lua_process(File) -> any()`
-
-Generate a Lua process message.
-
-
-
-### generate_stack/1 * ###
-
-`generate_stack(File) -> any()`
-
-Generate a stack message for the Lua process.
-
-
-
-### generate_test_message/1 * ###
-
-`generate_test_message(Process) -> any()`
-
-Generate a test message for a Lua process.
-
-
-
-### info/1 ###
-
-`info(Base) -> any()`
-
-All keys that are not directly available in the base message are
-resolved by calling the Lua function in the script of the same name.
-
-
-
-### init/3 ###
-
-`init(Base, Req, Opts) -> any()`
-
-Initialize the device state, loading the script into memory if it is
-a reference.
-
-
-
-### initialize/3 * ###
-
-`initialize(Base, Script, Opts) -> any()`
-
-Initialize a new Lua state with a given base message and script.
-
-
-
-### invoke_aos_test/0 * ###
-
-`invoke_aos_test() -> any()`
-
-
-
-### invoke_non_compute_key_test/0 * ###
-
-`invoke_non_compute_key_test() -> any()`
-
-Call a non-compute key on a Lua device message and ensure that the
-function of the same name in the script is called.
-
-
-
-### lua_http_preprocessor_test/0 * ###
-
-`lua_http_preprocessor_test() -> any()`
-
-Use a Lua script as a preprocessor on the HTTP server via `~meta@1.0`.
-
-
-
-### normalize/3 ###
-
-`normalize(Base, Req, RawOpts) -> any()`
-
-Restore the Lua state from a snapshot, if it exists.
-
-
-
-### pure_lua_process_benchmark_test_/0 * ###
-
-`pure_lua_process_benchmark_test_() -> any()`
-
-
-
-### pure_lua_process_test/0 * ###
-
-`pure_lua_process_test() -> any()`
-
-Call a process whose `execution-device` is set to `lua@5.3a`.
-
-
-
-### sandbox/3 * ###
-
-`sandbox(State, Map, Opts) -> any()`
-
-Sandbox (render inoperable) a set of Lua functions. Each function is
-referred to as if it is a path in AO-Core, with its value being what to
-return to the caller. For example, 'os.exit' would be referred to as
-referred to as `os/exit`. If preferred, a list rather than a map may be
-provided, in which case the functions all return `sandboxed`.
-
-
-
-### sandboxed_failure_test/0 * ###
-
-`sandboxed_failure_test() -> any()`
-
-
-
-### simple_invocation_test/0 * ###
-
-`simple_invocation_test() -> any()`
-
-
-
-### snapshot/3 ###
-
-`snapshot(Base, Req, Opts) -> any()`
-
-Snapshot the Lua state from a live computation. Normalizes its `priv`
-state element, then serializes the state to a binary.
-
diff --git a/docs/source-code-docs/dev_patch.md b/docs/source-code-docs/dev_patch.md
deleted file mode 100644
index fc329d0a5..000000000
--- a/docs/source-code-docs/dev_patch.md
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-# Module dev_patch #
-* [Description](#description)
-
-A device that finds `PATCH` requests in the `results/outbox`
-of its message, and applies them to it.
-
-
-
-## Description ##
-This can be useful for processes
-whose computation would like to manipulate data outside of the `results` key
-of its message.
-
-## Function Index ##
-
-
-
+ Join AO-Core's peer-to-peer network and
+ build towards a decentralized future.
+
+
+
+
+
+
+
+
+ Bringing further capabilities to its network
+ of 7m+ smart contracts.
+
+
+
+
+ Core fundamentals supported by Arweave as
+ the permanent data ledger and AO as the
+ decentralized supercomputer.
+
+
+
+
+ Permissionless in combination with reduced
+ costs.
+
+
+
+
+ Permissionless in combination with reduced
+ costs.
+
+
+
+
+
+
+ {% else %}
+
+ {% block site_nav %}
+ {% if nav %}
+ {% if page.meta and page.meta.hide %}
+ {% set hidden = "hidden" if "navigation" in page.meta.hide %}
+ {% endif %}
+
+
+
+ {% include "partials/nav.html" %}
+
+
+
+ {% endif %}
+ {% if "toc.integrate" not in features %}
+ {% if page.meta and page.meta.hide %}
+ {% set hidden = "hidden" if "toc" in page.meta.hide %}
+ {% endif %}
+
+ {% endblock %}
+ {% include "partials/javascripts/content.html" %}
+
+ {% if "navigation.top" in features %}
+ {% include "partials/top.html" %}
+ {% endif %}
+ {% endif %}
+
+
+ {% block footer %}
+ {% include "partials/footer.html" %}
+ {% endblock %}
+
+
+
+
+ {% if "navigation.instant.progress" in features %}
+ {% include "partials/progress.html" %}
+ {% endif %}
+ {% if config.extra.consent %}
+
+
+
+
+ {% include "partials/javascripts/consent.html" %}
+ {% endif %}
+ {% block config %}
+ {% set _ = namespace() %}
+ {% set _.tags = config.extra.tags %}
+ {%- if config.extra.version -%}
+ {%- set mike = config.plugins.mike -%}
+ {%- if not mike or mike.config.version_selector -%}
+ {%- set _.version = config.extra.version -%}
+ {%- endif -%}
+ {%- endif -%}
+
+ {% endblock %}
+ {% block scripts %}
+
+ {% for script in config.extra_javascript %}
+ {{ script | script_tag }}
+ {% endfor %}
+ {% endblock %}
+
+
\ No newline at end of file
diff --git a/docs/theme/templates/partials/header.html b/docs/theme/templates/partials/header.html
new file mode 100644
index 000000000..80de1259e
--- /dev/null
+++ b/docs/theme/templates/partials/header.html
@@ -0,0 +1,56 @@
+{#- This file was automatically generated - do not edit -#} {% set class =
+"md-header" %} {% if "navigation.tabs.sticky" in features %} {% set class =
+class ~ " md-header--shadow md-header--lifted" %} {% elif "navigation.tabs" not
+in features %} {% set class = class ~ " md-header--shadow" %} {% endif %}
+
+
+
+ {% if "navigation.tabs.sticky" in features %} {% if "navigation.tabs" in
+ features %} {% include "partials/tabs.html" %} {% endif %} {% endif %}
+
diff --git a/mkdocs.yml b/mkdocs.yml
index 3fcc31900..02649fbca 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -1,149 +1,154 @@
-site_name: 'HyperBEAM Documentation'
+site_name: 'HyperBEAM - Documentation'
repo_url: https://github.com/permaweb/HyperBEAM
repo_name: 'permaweb/HyperBEAM'
-site_url: https://permaweb.github.io/
+site_url: http://[::]:8000/
docs_dir: docs
site_dir: mkdocs-site
+use_directory_urls: false
+
nav:
- - Home: 'index.md'
- - Getting Started:
- - Overview: 'getting-started/index.md'
- - System Requirements: 'getting-started/requirements.md'
- - Installation:
- - Overview: 'getting-started/installation/index.md'
- - System Dependencies: 'getting-started/installation/dependencies.md'
- - Erlang: 'getting-started/installation/erlang.md'
- - Rebar3: 'getting-started/installation/rebar3.md'
- - Node.js: 'getting-started/installation/nodejs.md'
- - Rust: 'getting-started/installation/rust.md'
- - HyperBEAM:
- - Overview: 'hyperbeam/index.md'
- - Setup: 'hyperbeam/setup.md'
- - Configuration:
- - Overview: 'hyperbeam/configuration/index.md'
- - Configuration Methods: 'hyperbeam/configuration/configuration-methods.md'
- - Configuration Options: 'hyperbeam/configuration/configuration-options.md'
- - Storage Configuration: 'hyperbeam/configuration/storage-configuration.md'
- - Routing Configuration: 'hyperbeam/configuration/routing-configuration.md'
- - Configuration Examples: 'hyperbeam/configuration/configuration-examples.md'
- - Testing: 'hyperbeam/testing.md'
- # - API Reference: 'hyperbeam/api.md'
- - Compute Unit:
- - Overview: 'compute-unit/index.md'
- - Setup: 'compute-unit/setup.md'
- - Configuration: 'compute-unit/configuration.md'
- - Trusted Execution (TEE):
- - Overview: 'tee/index.md'
- - Guides:
- - Overview: 'guides/index.md'
- - HB JavaScript Client: 'guides/js-client-guide.md'
- - Source Code:
- - Overview: 'source-code-docs/index.md'
- - Modules:
- - ar_bundles: 'source-code-docs/ar_bundles.md'
- - ar_deep_hash: 'source-code-docs/ar_deep_hash.md'
- - ar_rate_limiter: 'source-code-docs/ar_rate_limiter.md'
- - ar_timestamp: 'source-code-docs/ar_timestamp.md'
- - ar_tx: 'source-code-docs/ar_tx.md'
- - ar_wallet: 'source-code-docs/ar_wallet.md'
- - dev_cache: 'source-code-docs/dev_cache.md'
- - dev_cacheviz: 'source-code-docs/dev_cacheviz.md'
- - dev_codec_ans104: 'source-code-docs/dev_codec_ans104.md'
- - dev_codec_flat: 'source-code-docs/dev_codec_flat.md'
- - dev_codec_httpsig: 'source-code-docs/dev_codec_httpsig.md'
- - dev_codec_httpsig_conv: 'source-code-docs/dev_codec_httpsig_conv.md'
- - dev_codec_json: 'source-code-docs/dev_codec_json.md'
- - dev_codec_structured: 'source-code-docs/dev_codec_structured.md'
- - dev_cron: 'source-code-docs/dev_cron.md'
- - dev_cu: 'source-code-docs/dev_cu.md'
- - dev_dedup: 'source-code-docs/dev_dedup.md'
- - dev_delegated_compute: 'source-code-docs/dev_delegated_compute.md'
- - dev_faff: 'source-code-docs/dev_faff.md'
- - dev_genesis_wasm: 'source-code-docs/dev_genesis_wasm.md'
- - dev_green_zone: 'source-code-docs/dev_green_zone.md'
- - dev_hyperbuddy: 'source-code-docs/dev_hyperbuddy.md'
- - dev_json_iface: 'source-code-docs/dev_json_iface.md'
- - dev_lookup: 'source-code-docs/dev_lookup.md'
- - dev_lua: 'source-code-docs/dev_lua.md'
- - dev_manifest: 'source-code-docs/dev_manifest.md'
- - dev_message: 'source-code-docs/dev_message.md'
- - dev_meta: 'source-code-docs/dev_meta.md'
- - dev_monitor: 'source-code-docs/dev_monitor.md'
- - dev_multipass: 'source-code-docs/dev_multipass.md'
- - dev_p4: 'source-code-docs/dev_p4.md'
- - dev_patch: 'source-code-docs/dev_patch.md'
- - dev_poda: 'source-code-docs/dev_poda.md'
- - dev_process: 'source-code-docs/dev_process.md'
- - dev_process_cache: 'source-code-docs/dev_process_cache.md'
- - dev_process_worker: 'source-code-docs/dev_process_worker.md'
- - dev_push: 'source-code-docs/dev_push.md'
- - dev_relay: 'source-code-docs/dev_relay.md'
- - dev_router: 'source-code-docs/dev_router.md'
- - dev_scheduler: 'source-code-docs/dev_scheduler.md'
- - dev_scheduler_cache: 'source-code-docs/dev_scheduler_cache.md'
- - dev_scheduler_formats: 'source-code-docs/dev_scheduler_formats.md'
- - dev_scheduler_registry: 'source-code-docs/dev_scheduler_registry.md'
- - dev_scheduler_server: 'source-code-docs/dev_scheduler_server.md'
- - dev_simple_pay: 'source-code-docs/dev_simple_pay.md'
- - dev_snp: 'source-code-docs/dev_snp.md'
- - dev_snp_nif: 'source-code-docs/dev_snp_nif.md'
- - dev_stack: 'source-code-docs/dev_stack.md'
- - dev_test: 'source-code-docs/dev_test.md'
- - dev_wasi: 'source-code-docs/dev_wasi.md'
- - dev_wasm: 'source-code-docs/dev_wasm.md'
- - hb: 'source-code-docs/hb.md'
- - hb_ao: 'source-code-docs/hb_ao.md'
- - hb_ao_test_vectors: 'source-code-docs/hb_ao_test_vectors.md'
- - hb_app: 'source-code-docs/hb_app.md'
- - hb_beamr: 'source-code-docs/hb_beamr.md'
- - hb_beamr_io: 'source-code-docs/hb_beamr_io.md'
- - hb_cache: 'source-code-docs/hb_cache.md'
- - hb_cache_control: 'source-code-docs/hb_cache_control.md'
- - hb_cache_render: 'source-code-docs/hb_cache_render.md'
- - hb_client: 'source-code-docs/hb_client.md'
- - hb_crypto: 'source-code-docs/hb_crypto.md'
- - hb_debugger: 'source-code-docs/hb_debugger.md'
- - hb_escape: 'source-code-docs/hb_escape.md'
- - hb_event: 'source-code-docs/hb_event.md'
- - hb_examples: 'source-code-docs/hb_examples.md'
- - hb_features: 'source-code-docs/hb_features.md'
- - hb_gateway_client: 'source-code-docs/hb_gateway_client.md'
- - hb_http: 'source-code-docs/hb_http.md'
- - hb_http_benchmark_tests: 'source-code-docs/hb_http_benchmark_tests.md'
- - hb_http_client: 'source-code-docs/hb_http_client.md'
- - hb_http_client_sup: 'source-code-docs/hb_http_client_sup.md'
- - hb_http_server: 'source-code-docs/hb_http_server.md'
- - hb_json: 'source-code-docs/hb_json.md'
- - hb_logger: 'source-code-docs/hb_logger.md'
- - hb_message: 'source-code-docs/hb_message.md'
- - hb_metrics_collector: 'source-code-docs/hb_metrics_collector.md'
- - hb_name: 'source-code-docs/hb_name.md'
- - hb_opts: 'source-code-docs/hb_opts.md'
- - hb_path: 'source-code-docs/hb_path.md'
- - hb_persistent: 'source-code-docs/hb_persistent.md'
- - hb_private: 'source-code-docs/hb_private.md'
- - hb_process_monitor: 'source-code-docs/hb_process_monitor.md'
- - hb_router: 'source-code-docs/hb_router.md'
- - hb_singleton: 'source-code-docs/hb_singleton.md'
- - hb_store: 'source-code-docs/hb_store.md'
- - hb_store_fs: 'source-code-docs/hb_store_fs.md'
- - hb_store_gateway: 'source-code-docs/hb_store_gateway.md'
- - hb_store_remote_node: 'source-code-docs/hb_store_remote_node.md'
- - hb_store_rocksdb: 'source-code-docs/hb_store_rocksdb.md'
- - hb_structured_fields: 'source-code-docs/hb_structured_fields.md'
- - hb_sup: 'source-code-docs/hb_sup.md'
- - hb_test_utils: 'source-code-docs/hb_test_utils.md'
- - hb_util: 'source-code-docs/hb_util.md'
- - rsa_pss: 'source-code-docs/rsa_pss.md'
- - Reference:
- - Troubleshooting: 'reference/troubleshooting.md'
- - Glossary: 'reference/glossary.md'
- - FAQ: 'reference/faq.md'
- - Contribute:
- - Development Setup: 'contribute/setup.md'
- - Contribution Guidelines: 'contribute/guidelines.md'
+ - Introduction:
+ - What is AO-Core?: 'introduction/what-is-ao-core.md'
+ - What is HyperBEAM?: 'introduction/what-is-hyperbeam.md'
+ - AO Devices: 'introduction/ao-devices.md'
+ - Pathing in AO-Core: 'introduction/pathing-in-ao-core.md'
+ - Run a Node:
+ - Running a HyperBEAM node: 'run/running-a-hyperbeam-node.md'
+ - Configuring your machine: 'run/configuring-your-machine.md'
+ - TEE nodes: 'run/tee-nodes.md'
+ - Joining/running a router: 'run/joining-running-a-router.md'
+ - Build on HyperBEAM:
+ - Get started building on AO-Core: 'build/get-started-building-on-ao-core.md'
+ - Exposing process state: 'build/exposing-process-state.md'
+ - Serverless decentralized compute: 'build/serverless-decentralized-compute.md'
+ - Extending HyperBEAM: 'build/extending-hyperbeam.md'
+ - Devices:
+ - Overview: 'devices/overview.md'
+ - '~meta@1.0': 'devices/meta-at-1-0.md'
+ - '~process@1.0': 'devices/process-at-1-0.md'
+ - '~message@1.0': 'devices/message-at-1-0.md'
+ - '~wasm64@1.0': 'devices/wasm64-at-1-0.md'
+ - '~lua@5.3a': 'devices/lua-at-5-3a.md'
+ - '~json@1.0': 'devices/json-at-1-0.md'
+ - '~scheduler@1.0': 'devices/scheduler-at-1-0.md'
+ - '~relay@1.0': 'devices/relay-at-1-0.md'
+ - Resources:
+ # - Overview: 'resources/source-code/index.md'
+ - FAQ: 'resources/reference/faq.md'
+ - LLMs.txt: 'resources/llms.md'
+ - Glossary: 'resources/reference/glossary.md'
+ - Source Code Modules:
+ - Modules:
+ - ar_bundles: 'resources/source-code/ar_bundles.md'
+ - ar_deep_hash: 'resources/source-code/ar_deep_hash.md'
+ - ar_rate_limiter: 'resources/source-code/ar_rate_limiter.md'
+ - ar_timestamp: 'resources/source-code/ar_timestamp.md'
+ - ar_tx: 'resources/source-code/ar_tx.md'
+ - ar_wallet: 'resources/source-code/ar_wallet.md'
+ - dev_cache: 'resources/source-code/dev_cache.md'
+ - dev_cacheviz: 'resources/source-code/dev_cacheviz.md'
+ - dev_codec_ans104: 'resources/source-code/dev_codec_ans104.md'
+ - dev_codec_flat: 'resources/source-code/dev_codec_flat.md'
+ - dev_codec_httpsig_conv: 'resources/source-code/dev_codec_httpsig_conv.md'
+ - dev_codec_httpsig: 'resources/source-code/dev_codec_httpsig.md'
+ - dev_codec_json: 'resources/source-code/dev_codec_json.md'
+ - dev_codec_structured: 'resources/source-code/dev_codec_structured.md'
+ - dev_cron: 'resources/source-code/dev_cron.md'
+ - dev_cu: 'resources/source-code/dev_cu.md'
+ - dev_dedup: 'resources/source-code/dev_dedup.md'
+ - dev_delegated_compute: 'resources/source-code/dev_delegated_compute.md'
+ - dev_faff: 'resources/source-code/dev_faff.md'
+ - dev_genesis_wasm: 'resources/source-code/dev_genesis_wasm.md'
+ - dev_green_zone: 'resources/source-code/dev_green_zone.md'
+ - dev_hyperbuddy: 'resources/source-code/dev_hyperbuddy.md'
+ - dev_json_iface: 'resources/source-code/dev_json_iface.md'
+ - dev_local_name: 'resources/source-code/dev_local_name.md'
+ - dev_lookup: 'resources/source-code/dev_lookup.md'
+ - dev_lua_lib: 'resources/source-code/dev_lua_lib.md'
+ - dev_lua_test: 'resources/source-code/dev_lua_test.md'
+ - dev_lua: 'resources/source-code/dev_lua.md'
+ - dev_manifest: 'resources/source-code/dev_manifest.md'
+ - dev_message: 'resources/source-code/dev_message.md'
+ - dev_meta: 'resources/source-code/dev_meta.md'
+ - dev_monitor: 'resources/source-code/dev_monitor.md'
+ - dev_multipass: 'resources/source-code/dev_multipass.md'
+ - dev_name: 'resources/source-code/dev_name.md'
+ - dev_node_process: 'resources/source-code/dev_node_process.md'
+ - dev_p4: 'resources/source-code/dev_p4.md'
+ - dev_patch: 'resources/source-code/dev_patch.md'
+ - dev_poda: 'resources/source-code/dev_poda.md'
+ - dev_process_cache: 'resources/source-code/dev_process_cache.md'
+ - dev_process_worker: 'resources/source-code/dev_process_worker.md'
+ - dev_process: 'resources/source-code/dev_process.md'
+ - dev_push: 'resources/source-code/dev_push.md'
+ - dev_relay: 'resources/source-code/dev_relay.md'
+ - dev_router: 'resources/source-code/dev_router.md'
+ - dev_scheduler_cache: 'resources/source-code/dev_scheduler_cache.md'
+ - dev_scheduler_formats: 'resources/source-code/dev_scheduler_formats.md'
+ - dev_scheduler_registry: 'resources/source-code/dev_scheduler_registry.md'
+ - dev_scheduler_server: 'resources/source-code/dev_scheduler_server.md'
+ - dev_scheduler: 'resources/source-code/dev_scheduler.md'
+ - dev_simple_pay: 'resources/source-code/dev_simple_pay.md'
+ - dev_snp_nif: 'resources/source-code/dev_snp_nif.md'
+ - dev_snp: 'resources/source-code/dev_snp.md'
+ - dev_stack: 'resources/source-code/dev_stack.md'
+ - dev_test: 'resources/source-code/dev_test.md'
+ - dev_wasi: 'resources/source-code/dev_wasi.md'
+ - dev_wasm: 'resources/source-code/dev_wasm.md'
+ - hb_ao_test_vectors: 'resources/source-code/hb_ao_test_vectors.md'
+ - hb_ao: 'resources/source-code/hb_ao.md'
+ - hb_app: 'resources/source-code/hb_app.md'
+ - hb_beamr_io: 'resources/source-code/hb_beamr_io.md'
+ - hb_beamr: 'resources/source-code/hb_beamr.md'
+ - hb_cache_control: 'resources/source-code/hb_cache_control.md'
+ - hb_cache_render: 'resources/source-code/hb_cache_render.md'
+ - hb_cache: 'resources/source-code/hb_cache.md'
+ - hb_client: 'resources/source-code/hb_client.md'
+ - hb_crypto: 'resources/source-code/hb_crypto.md'
+ - hb_debugger: 'resources/source-code/hb_debugger.md'
+ - hb_escape: 'resources/source-code/hb_escape.md'
+ - hb_event: 'resources/source-code/hb_event.md'
+ - hb_examples: 'resources/source-code/hb_examples.md'
+ - hb_features: 'resources/source-code/hb_features.md'
+ - hb_gateway_client: 'resources/source-code/hb_gateway_client.md'
+ - hb_http_benchmark_tests: 'resources/source-code/hb_http_benchmark_tests.md'
+ - hb_http_client_sup: 'resources/source-code/hb_http_client_sup.md'
+ - hb_http_client: 'resources/source-code/hb_http_client.md'
+ - hb_http_server: 'resources/source-code/hb_http_server.md'
+ - hb_http: 'resources/source-code/hb_http.md'
+ - hb_json: 'resources/source-code/hb_json.md'
+ - hb_logger: 'resources/source-code/hb_logger.md'
+ - hb_message: 'resources/source-code/hb_message.md'
+ - hb_metrics_collector: 'resources/source-code/hb_metrics_collector.md'
+ - hb_name: 'resources/source-code/hb_name.md'
+ - hb_opts: 'resources/source-code/hb_opts.md'
+ - hb_path: 'resources/source-code/hb_path.md'
+ - hb_persistent: 'resources/source-code/hb_persistent.md'
+ - hb_private: 'resources/source-code/hb_private.md'
+ - hb_process_monitor: 'resources/source-code/hb_process_monitor.md'
+ - hb_router: 'resources/source-code/hb_router.md'
+ - hb_singleton: 'resources/source-code/hb_singleton.md'
+ - hb_store_fs: 'resources/source-code/hb_store_fs.md'
+ - hb_store_gateway: 'resources/source-code/hb_store_gateway.md'
+ - hb_store_remote_node: 'resources/source-code/hb_store_remote_node.md'
+ - hb_store_rocksdb: 'resources/source-code/hb_store_rocksdb.md'
+ - hb_store: 'resources/source-code/hb_store.md'
+ - hb_structured_fields: 'resources/source-code/hb_structured_fields.md'
+ - hb_sup: 'resources/source-code/hb_sup.md'
+ - hb_test_utils: 'resources/source-code/hb_test_utils.md'
+ - hb_tracer: 'resources/source-code/hb_tracer.md'
+ - hb_util: 'resources/source-code/hb_util.md'
+ - hb_volume: 'resources/source-code/hb_volume.md'
+ - hb: 'resources/source-code/hb.md'
+ - rsa_pss: 'resources/source-code/rsa_pss.md'
+ # - Troubleshooting: 'resources/reference/troubleshooting.md'
+
+ # - Community:
+ # - Contribute Overview: 'community/guidelines.md'
+ # - Development Setup: 'community/setup.md'
+ # - Contributing Documentation: 'community/contributing-docs.md'
markdown_extensions:
- attr_list
@@ -161,6 +166,7 @@ markdown_extensions:
pygments_lang_class: true
- pymdownx.inlinehilite
- pymdownx.snippets
+ - pymdownx.superfences
- pymdownx.tabbed:
alternate_style: true
- pymdownx.critic
@@ -168,23 +174,31 @@ markdown_extensions:
- def_list
- pymdownx.tasklist:
custom_checkbox: true
+ - toc:
+ toc_depth: 2
+ permalink: true
theme:
name: material
+ custom_dir: docs/theme/templates
language: en
- logo: assets/images/favicon.png
+ logo: https://arweave.net/e8SdCkAlqpMqvBSUuHu7sYpfZWoJsRKG7XuK0EXon_0
+ favicon: https://arweave.net/zMT0qotUQUmPUYhGcgLr80XhG7GRmYXeLWWGitok6Ao
icon:
repo: fontawesome/brands/github
features:
- - navigation.instant
+ # - navigation.instant
- navigation.instant.progress
+ - navigation.instant.prefetch
- navigation.tracking
- # - navigation.sections
+ - navigation.sections
- navigation.path
- - navigation.top
- # - navigation.expand
+ - navigation.expand
+ - navigation.tabs
+ - navigation.tabs.sticky
- navigation.indexes
- - toc.follow
+ - navigation.prune
+ - toc.integrate
- content.tooltips
- content.code.copy
- content.code.select
@@ -194,33 +208,45 @@ theme:
# Palette toggle for light mode
- media: "(prefers-color-scheme: light)"
scheme: default
- primary: black
- accent: green
+ primary: white
+ accent: blue
toggle:
icon: material/brightness-7
name: Switch to dark mode
# Palette toggle for dark mode
- media: "(prefers-color-scheme: dark)"
- scheme: slate
- primary: black
- accent: green
+ scheme: default
+ primary: white
+ accent: blue
toggle:
- icon: material/brightness-4
+ icon: material/brightness-7
name: Switch to light mode
font:
- text: Inter
- nav_style: dark
+ text: DM Sans
+ nav_style: default
highlightjs: true
+
plugins:
- search
+ - git-revision-date-localized
extra_css:
- - assets/css/style.css
- - assets/css/custom.css
+ - assets/style.css
+extra_javascript:
+ - js/utc-time.js
+ - js/custom-header.js
+ - js/parallax.js
+ - js/navigation.js
+ - js/toc-highlight.js
+ - js/disable-preload-transition.js
+ - js/header-scroll.js
+
extra:
social:
- icon: fontawesome/brands/github
- link: https://github.com/permaweb/HyperBEAM
+ link: https://github.com/permaweb/hyperBEAM
name: GitHub
+ generator: false
+
diff --git a/native/dev_snp_nif/src/digest.rs b/native/dev_snp_nif/src/digest.rs
index 6ab8e7d78..5adb3bcb9 100644
--- a/native/dev_snp_nif/src/digest.rs
+++ b/native/dev_snp_nif/src/digest.rs
@@ -106,7 +106,7 @@ pub fn compute_launch_digest<'a>(env: Env<'a>, input_map: Term<'a>) -> NifResult
// vcpu_type: CpuType::try_from(args.vcpu_type).unwrap(),
// vmm_type: Some(VMMType::try_from(args.vmm_type).unwrap()),
// guest_features: GuestFeatures(args.guest_features),
- vcpus: 1,
+ vcpus: 32,
vcpu_type: CpuType::EpycV4,
vmm_type: Some(VMMType::QEMU),
guest_features: GuestFeatures(0x1),
diff --git a/native/hb_keccak/hb_keccak.c b/native/hb_keccak/hb_keccak.c
new file mode 100644
index 000000000..f65e39b2f
--- /dev/null
+++ b/native/hb_keccak/hb_keccak.c
@@ -0,0 +1,174 @@
+/** libkeccak-tiny
+ *
+ * A single-file implementation of SHA-3 and SHAKE.
+ *
+ * Implementor: David Leon Gil
+ * License: CC0, attribution kindly requested. Blame taken too,
+ * but not liability.
+ */
+#include "include/hb_keccak.h"
+
+#include
+#include
+#include
+#include
+
+/******** The Keccak-f[1600] permutation ********/
+
+/*** Constants. ***/
+static const uint8_t rho[24] = \
+ { 1, 3, 6, 10, 15, 21,
+ 28, 36, 45, 55, 2, 14,
+ 27, 41, 56, 8, 25, 43,
+ 62, 18, 39, 61, 20, 44};
+static const uint8_t pi[24] = \
+ {10, 7, 11, 17, 18, 3,
+ 5, 16, 8, 21, 24, 4,
+ 15, 23, 19, 13, 12, 2,
+ 20, 14, 22, 9, 6, 1};
+static const uint64_t RC[24] = \
+ {1ULL, 0x8082ULL, 0x800000000000808aULL, 0x8000000080008000ULL,
+ 0x808bULL, 0x80000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL,
+ 0x8aULL, 0x88ULL, 0x80008009ULL, 0x8000000aULL,
+ 0x8000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL,
+ 0x8000000000008002ULL, 0x8000000000000080ULL, 0x800aULL, 0x800000008000000aULL,
+ 0x8000000080008081ULL, 0x8000000000008080ULL, 0x80000001ULL, 0x8000000080008008ULL};
+
+/*** Helper macros to unroll the permutation. ***/
+#define rol(x, s) (((x) << s) | ((x) >> (64 - s)))
+#define REPEAT6(e) e e e e e e
+#define REPEAT24(e) REPEAT6(e e e e)
+#define REPEAT5(e) e e e e e
+#define FOR5(v, s, e) \
+ v = 0; \
+ REPEAT5(e; v += s;)
+
+/*** Keccak-f[1600] ***/
+static inline void keccakf(void* state) {
+ uint64_t* a = (uint64_t*)state;
+ uint64_t b[5] = {0};
+ uint64_t t = 0;
+ uint8_t x, y;
+
+ for (int i = 0; i < 24; i++) {
+ // Theta
+ FOR5(x, 1,
+ b[x] = 0;
+ FOR5(y, 5,
+ b[x] ^= a[x + y]; ))
+ FOR5(x, 1,
+ FOR5(y, 5,
+ a[y + x] ^= b[(x + 4) % 5] ^ rol(b[(x + 1) % 5], 1); ))
+ // Rho and pi
+ t = a[1];
+ x = 0;
+ REPEAT24(b[0] = a[pi[x]];
+ a[pi[x]] = rol(t, rho[x]);
+ t = b[0];
+ x++; )
+ // Chi
+ FOR5(y,
+ 5,
+ FOR5(x, 1,
+ b[x] = a[y + x];)
+ FOR5(x, 1,
+ a[y + x] = b[x] ^ ((~b[(x + 1) % 5]) & b[(x + 2) % 5]); ))
+ // Iota
+ a[0] ^= RC[i];
+ }
+}
+
+/******** The FIPS202-defined functions. ********/
+
+/*** Some helper macros. ***/
+
+#define _(S) do { S } while (0)
+#define FOR(i, ST, L, S) \
+ _(for (size_t i = 0; i < L; i += ST) { S; })
+#define mkapply_ds(NAME, S) \
+ static inline void NAME(uint8_t* dst, \
+ const uint8_t* src, \
+ size_t len) { \
+ FOR(i, 1, len, S); \
+ }
+#define mkapply_sd(NAME, S) \
+ static inline void NAME(const uint8_t* src, \
+ uint8_t* dst, \
+ size_t len) { \
+ FOR(i, 1, len, S); \
+ }
+
+mkapply_ds(xorin, dst[i] ^= src[i]) // xorin
+mkapply_sd(setout, dst[i] = src[i]) // setout
+
+#define P keccakf
+#define Plen 200
+
+// Fold P*F over the full blocks of an input.
+#define foldP(I, L, F) \
+ while (L >= rate) { \
+ F(a, I, rate); \
+ P(a); \
+ I += rate; \
+ L -= rate; \
+ }
+
+/** The sponge-based hash construction. **/
+static inline int hash(uint8_t* out, size_t outlen,
+ const uint8_t* in, size_t inlen,
+ size_t rate, uint8_t delim) {
+ if ((out == NULL) || ((in == NULL) && inlen != 0) || (rate >= Plen)) {
+ return -1;
+ }
+ uint8_t a[Plen] = {0};
+ // Absorb input.
+ foldP(in, inlen, xorin);
+ // Xor in the DS and pad frame.
+ a[inlen] ^= delim;
+ a[rate - 1] ^= 0x80;
+ // Xor in the last block.
+ xorin(a, in, inlen);
+ // Apply P
+ P(a);
+ // Squeeze output.
+ foldP(out, outlen, setout);
+ setout(a, out, outlen);
+ memset(a, 0, 200);
+ return 0;
+}
+
+/*** Helper macros to define SHA3 and SHAKE instances. ***/
+#define defshake(bits) \
+ int shake##bits(uint8_t* out, size_t outlen, \
+ const uint8_t* in, size_t inlen) { \
+ return hash(out, outlen, in, inlen, 200 - (bits / 4), 0x1f); \
+ }
+
+
+#define defalgo(algoname, bits, pad) \
+ int algoname ## bits(uint8_t* out, size_t outlen, \
+ const uint8_t* in, size_t inlen) { \
+ if (outlen > (bits/8)) { \
+ return -1; \
+ } \
+ return hash(out, outlen, in, inlen, 200 - (bits / 4), pad); \
+ }
+
+#define defsha3(bits) defalgo(sha3_, bits, 0x06)
+#define defkeccak(bits) defalgo(keccak_, bits, 0x01)
+
+/*** FIPS202 SHAKE VOFs ***/
+defshake(128)
+defshake(256)
+
+/*** FIPS202 SHA3 FOFs ***/
+defsha3(224)
+defsha3(256)
+defsha3(384)
+defsha3(512)
+
+/*** ORIGINAL KECCAK SUBMISSION ***/
+defkeccak(224)
+defkeccak(256)
+defkeccak(384)
+defkeccak(512)
\ No newline at end of file
diff --git a/native/hb_keccak/hb_keccak_nif.c b/native/hb_keccak/hb_keccak_nif.c
new file mode 100644
index 000000000..4f6a26237
--- /dev/null
+++ b/native/hb_keccak/hb_keccak_nif.c
@@ -0,0 +1,40 @@
+#include "erl_nif.h"
+#include "include/hb_keccak.h"
+#include
+#include
+
+static ERL_NIF_TERM nif_sha3_256(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
+ ErlNifBinary input;
+ if (!enif_inspect_binary(env, argv[0], &input)) {
+ return enif_make_badarg(env);
+ }
+
+ uint8_t output[32];
+ sha3_256(output, 32, input.data, input.size); // this is the actual C implementation
+
+ ERL_NIF_TERM result;
+ uint8_t* bin = enif_make_new_binary(env, 32, &result);
+ memcpy(bin, output, 32);
+ return result;
+}
+
+static ERL_NIF_TERM nif_keccak_256(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
+ ErlNifBinary input;
+ if (!enif_inspect_binary(env, argv[0], &input)) {
+ return enif_make_badarg(env);
+ }
+ uint8_t output[32];
+ keccak_256(output, 32, input.data, input.size);
+
+ ERL_NIF_TERM result;
+ uint8_t* bin = enif_make_new_binary(env, 32, &result);
+ memcpy(bin, output, 32);
+ return result;
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"sha3_256", 1, nif_sha3_256},
+ {"keccak_256", 1, nif_keccak_256}
+};
+
+ERL_NIF_INIT(hb_keccak, nif_funcs, NULL, NULL, NULL, NULL)
diff --git a/native/hb_keccak/include/hb_keccak.h b/native/hb_keccak/include/hb_keccak.h
new file mode 100644
index 000000000..96aa3143b
--- /dev/null
+++ b/native/hb_keccak/include/hb_keccak.h
@@ -0,0 +1,23 @@
+#ifndef KECCAK_FIPS202_H
+#define KECCAK_FIPS202_H
+#define __STDC_WANT_LIB_EXT1__ 1
+#include
+#include
+
+#define decshake(bits) \
+ int shake##bits(uint8_t*, size_t, const uint8_t*, size_t);
+
+#define decsha3(bits) \
+ int sha3_##bits(uint8_t*, size_t, const uint8_t*, size_t);
+
+#define deckeccak(bits) \
+ int keccak_##bits(uint8_t*, size_t, const uint8_t*, size_t);
+
+decshake(128)
+decshake(256)
+decsha3(224)
+decsha3(256)
+decsha3(384)
+decsha3(512)
+deckeccak(256)
+#endif
\ No newline at end of file
diff --git a/rebar.config b/rebar.config
index 5880e576f..6efdcf601 100644
--- a/rebar.config
+++ b/rebar.config
@@ -42,6 +42,9 @@
{overrides, []}.
{pre_hooks, [
+ {compile, "bash -c \"echo '-define(HB_BUILD_SOURCE, <<\\\"$(git rev-parse HEAD)\\\">>).\n' > ${REBAR_ROOT_DIR}/_build/hb_buildinfo.hrl\""},
+ {compile, "bash -c \"echo '-define(HB_BUILD_SOURCE_SHORT, <<\\\"$(git rev-parse --short HEAD)\\\">>).\n' >> ${REBAR_ROOT_DIR}/_build/hb_buildinfo.hrl\""},
+ {compile, "bash -c \"echo '-define(HB_BUILD_TIME, $(date +%s)).\n' >> ${REBAR_ROOT_DIR}/_build/hb_buildinfo.hrl\""},
{compile, "make -C \"${REBAR_ROOT_DIR}\" wamr"}
]}.
@@ -56,6 +59,7 @@
{"(linux|darwin|solaris)", clean, "rm -rf \"${REBAR_ROOT_DIR}/_build\" \"${REBAR_ROOT_DIR}/priv\""},
{"(linux|darwin|solaris)", compile, "echo 'Post-compile hooks executed'"},
{ compile, "rm -f native/hb_beamr/*.o native/hb_beamr/*.d"},
+ { compile, "rm -f native/hb_keccak/*.o native/hb_keccak/*.d"},
{ compile, "mkdir -p priv/html"},
{ compile, "cp -R src/html/* priv/html"}
]}.
@@ -78,7 +82,11 @@
"./native/hb_beamr/hb_driver.c",
"./native/hb_beamr/hb_helpers.c",
"./native/hb_beamr/hb_logging.c"
- ]}
+ ]},
+ {"./priv/hb_keccak.so", [
+ "./native/hb_keccak/hb_keccak.c",
+ "./native/hb_keccak/hb_keccak_nif.c"
+ ]}
]}.
{deps, [
@@ -102,7 +110,7 @@
{eunit_opts, [verbose]}.
{relx, [
- {release, {'hb', "0.0.1"}, [hb, b64fast, cowboy, gun, prometheus, prometheus_cowboy]},
+ {release, {'hb', "0.0.1"}, [hb, b64fast, cowboy, gun, luerl, prometheus, prometheus_cowboy]},
{include_erts, true},
{extended_start_script, true},
{overlay, [
@@ -123,12 +131,17 @@
[
{shell, "--sname hb --setcookie hb-debug --eval hb_debugger:start()."}
]
+ },
+ {'lua-test',
+ [
+ {eunit, "--module dev_lua_test"}
+ ]
}
]}.
{edoc_opts, [
{doclet, edown_doclet},
- {dir, "docs/source-code-docs"},
+ {dir, "docs/resources/source-code"},
{preprocess, true},
{preprocess, true},
{private, true},
diff --git a/scripts/dynamic-router.lua b/scripts/dynamic-router.lua
new file mode 100644
index 000000000..4e0ca921b
--- /dev/null
+++ b/scripts/dynamic-router.lua
@@ -0,0 +1,434 @@
+--- A dynamic route generator in an AO `~process@1.0'.
+--- This generator grants a routing table, found at `/now/routes', that is
+--- compatible with the `~router@1.0' interface. Subsequently, it can be
+--- used for routing by HyperBEAM nodes via setting the `route-provider'
+--- node message key.
+---
+--- The configuration options are as follows:
+--- /is-admissible = A message to call with the registration request's body. Should
+--- return a boolean indicating whether the peer is admissible.
+--- /sampling-rate = The frequency at which random sampling of registered nodes
+--- should be performed, rather than scored routing. Default = 0.1.
+--- /pricing-weight = The level to which pricing should be preferred relative to
+--- performance in the scoring algorithm. Default = 1.
+--- /performance-weight = The level to which performance should be preferred
+--- relative to pricing in the scoring algorithm.
+--- Default = 1.
+--- /score-preference = The level to which the scoring algorithm influences
+--- routing decisions amongst scored route generations.
+--- Default = 1, yielding an exponential decay in
+--- preference for better performing nodes.
+--- /performance-period = Alters the rate at which performance scores are modified
+--- by new performance ratings. A lower period implies faster
+--- changes to the score.
+--- /recalculate-every = The number of messages to process between recalculating
+--- the routing table. Default = 1000.
+-- Populate any unset configuration keys on `state' with their defaults, and
+-- ensure the routes table exists. Returns the same (mutated) state table.
+local function ensure_defaults(state)
+ state.routes = state.routes or {}
+ -- Default admission check: a message that resolves to `"true"` for every
+ -- registration request (i.e. all peers admissible).
+ state["is-admissible"] =
+ state["is-admissible"] or {
+ path = "/default",
+ default = "true"
+ }
+ state["sampling-rate"] = state["sampling-rate"] or 0.1
+ state["pricing-weight"] = state["pricing-weight"] or 1
+ state["performance-weight"] = state["performance-weight"] or 1
+ state["score-preference"] = state["score-preference"] or 1
+ state["recalculate-every"] = state["recalculate-every"] or 1000
+ state["performance-period"] = state["performance-period"] or 1000
+ -- New nodes start with a deliberately poor performance score (in ms).
+ state["initial-performance"] = state["initial-performance"] or 30000
+ return state
+end
+
+-- Find the current route message for a template. If no existing route matches
+-- via `~router@1.0/match', returns a fresh route skeleton whose `reference'
+-- points at the next free index of the routes table.
+local function current_route(routes, template, opts)
+ -- Find the existing route that matches the template, if it exists.
+ local status, res =
+ ao.resolve({
+ path = "/~router@1.0/match",
+ ["route-path"] = template, -- Only supports binary templates for now.
+ routes = routes
+ })
+ if status == "ok" then
+ -- We found an existing route for this template. Return it as-is.
+ return res
+ else
+ -- We haven't found a route for this template, so we need to create a new
+ -- one. We set the reference to the next available index in the routes
+ -- table.
+ return {
+ strategy = "By-Weight",
+ template = template,
+ nodes = {},
+ reference = "routes/" .. tostring(#routes + 1)
+ }
+ end
+end
+
+-- Compute the decay for a given score, modulated by the score preference.
+-- Exponential in `-preference * score': score 0 maps to 1, and larger scores
+-- (worse percentile) decay toward 0 faster as the preference grows.
+local function decay(state, score)
+ return math.exp(-state["score-preference"] * score)
+end
+
+-- Calculate statistics for a given key across all nodes in a route.
+-- Returns count/total/max/min/mean, the sorted list of values, and a
+-- `percentile(node)' closure giving the fraction of values strictly below
+-- the node's own value. NOTE(review): with an empty `nodes' list, `mean'
+-- becomes 0/0 (nan) and `min' stays unset -- callers only use the closure
+-- inside a loop over the same nodes, so this is currently unreachable.
+local function calculate_stats(nodes, key)
+ local stats = {
+ count = 0,
+ total = 0,
+ max = 0,
+ mean = 0,
+ values = {}
+ }
+
+ for _, n in ipairs(nodes) do
+ stats.count = stats.count + 1
+ stats.total = stats.total + n[key]
+ if n[key] > stats.max then
+ stats.max = n[key]
+ end
+ -- `min' is created lazily on the first iteration.
+ if stats.min == nil or n[key] < stats.min then
+ stats.min = n[key]
+ end
+ table.insert(stats.values, n[key])
+ end
+
+ stats.mean = stats.total / stats.count
+
+ -- Add a function that returns the percentile of a node for the given key.
+ table.sort(stats.values)
+ stats.percentile =
+ function(n)
+ local n_key = n[key]
+ -- Linear scan of the sorted values; returns at the first value
+ -- >= the node's own, so a node always finds itself.
+ for ix, v in ipairs(stats.values) do
+ if n_key <= v then
+ return (ix-1) / stats.count
+ end
+ end
+ end
+
+ return stats
+end
+
+-- Compute the scores for all routes. Outputs a single weight value per node,
+-- where a higher value indicates that the node should be picked more frequently.
+-- Each of the 'scoring' factors, in their natural state, are worse if they are
+-- higher. Higher price and slower response times are negative factors for nodes.
+-- This function rectifies that, and scores each node relative to the performance
+-- of each of their peers. Mutates `route.nodes[*].weight' in place and returns
+-- the same route table. (`opts' is currently unused.)
+local function recalculate_scores(state, route, opts)
+ -- TODO: Refactor such that this does not have `O(:facepalm:)` properties...
+
+ -- Calculate stats for each relevant performance characteristic.
+ local perf_stats = calculate_stats(route.nodes, "performance")
+ local price_stats = calculate_stats(route.nodes, "price")
+
+ -- Calculate the multipliers for performance and price from their weights.
+ local total_weight = state["performance-weight"] + state["pricing-weight"]
+ local perf_weight = state["performance-weight"] / total_weight
+ local pricing_weight = state["pricing-weight"] / total_weight
+
+ -- Calculate the score per node.
+ for ix, node in ipairs(route.nodes) do
+ -- The performance score for the node on the route is moderated by the
+ -- sampling rate, which guarantees every node a minimum baseline score
+ -- so that new and improving nodes are still given a chance to be
+ -- selected.
+ local perf_percentile = perf_stats.percentile(node)
+ local perf_score =
+ (decay(state, perf_percentile) * (1 - state["sampling-rate"]))
+ + state["sampling-rate"]
+ -- The price score for the node on the route should be scaled by the
+ -- pricing weight. It is not moderated by the sampling rate, as we want
+ -- to ensure that the node is selected if it has a low price. New nodes
+ -- can improve their likelihood of being selected by lowering their price.
+ local price_percentile = price_stats.percentile(node)
+ local price_score = decay(state, price_percentile)
+
+ -- Calculate the final weight. In order to do this we:
+ -- 1. Apply the factor weights to the calculated scores.
+ -- 2. Sum them.
+ node.weight =
+ ((perf_score * perf_weight) + (price_score * pricing_weight))
+
+ ao.event("debug_scores",
+ {
+ "calculated_score", {
+ node = ix,
+ prefix = node.prefix,
+ perf = node.performance,
+ perf_percentile = perf_percentile,
+ perf_weight = perf_weight,
+ perf_score = perf_score,
+ price = node.price,
+ price_percentile = price_percentile,
+ pricing_weight = pricing_weight,
+ price_score = price_score,
+ result = node.weight
+ }
+ }
+ )
+ end
+
+ return route
+end
+
+-- Append a new node (from a registration request) to the route matching the
+-- request's template, seeding it with the configured initial performance
+-- score, then write the route back into the state at its reference path.
+-- (`opts' is only forwarded to `current_route', which does not use it yet.)
+local function add_node(state, req, opts)
+ local route = current_route(state.routes, req.route.template, opts)
+ local reference = route.reference .. "/nodes/" .. tostring(#route.nodes + 1)
+ table.insert(route.nodes, {
+ prefix = req.route.prefix,
+ price = req.route.price,
+ topup = req.route.topup,
+ performance = state["initial-performance"],
+ reference = reference,
+ -- `http_reference' lets downstream HTTP machinery report durations
+ -- back against this exact node path.
+ opts = { http_reference = reference }
+ })
+
+ local new_state = ao.set(state, route.reference, route)
+ return new_state
+end
+
+-- Compute the new routes, with their weights, based on the current routes and
+-- a new route. Note: `recalculate_scores' mutates each route's nodes in
+-- place, so reassigning the loop variable `r' is not needed for the update
+-- to take effect on `state.routes'.
+function recalculate(state, _, opts)
+ state = ensure_defaults(state)
+
+ for _, r in ipairs(state.routes) do
+ r = recalculate_scores(state, r, opts)
+ end
+
+ return "ok", state
+end
+
+-- Register a new host to a route. The registration request is first passed
+-- through the configured `is-admissible' check; admissible peers are added
+-- as a node on the matching route and the route weights are recalculated.
+-- Returns `"ok", state' in all cases (rejections leave state unmodified).
+function register(state, assignment, opts)
+ state = ensure_defaults(state)
+ local req = assignment.body
+ req.path = state["is-admissible"].path or "is-admissible"
+ local status, is_admissible = ao.resolve(state["is-admissible"], req)
+
+ ao.event("is-admissible result:", { status, is_admissible })
+ if status == "ok" and is_admissible == "true" then
+ -- Forward `opts' so the whole call chain receives the same options.
+ -- (The original dropped it; `add_node' declares an `opts' parameter.)
+ state = add_node(state, req, opts)
+ return recalculate(state, assignment, opts)
+ else
+ -- If the registration is untrusted, signal the issue via an event and
+ -- return the state unmodified.
+ ao.event("error", { "untrusted peer requested", req })
+ return "ok", state
+ end
+end
+
+-- Update the performance of a host by its reference. Applies an exponential
+-- moving average: the node's stored score is blended with the new duration
+-- using `1 / performance-period' as the update factor, so lower periods
+-- react faster. Returns `"ok", state' (state unmodified when the request
+-- carries no reference).
+function duration(state, assignment, opts)
+ state = ensure_defaults(state)
+
+ local req = assignment.body
+ local reference = req.reference
+ if reference == nil then
+ ao.event("debug_dynrouter",
+ {
+ "ignoring duration update for request without reference: ",
+ req["request-path"]
+ }
+ )
+ -- BUGFIX: return the `"ok", state' pair like every other handler.
+ -- The original returned only `state', making the state table the
+ -- status value seen by `compute''s caller.
+ return "ok", state
+ end
+ ao.event("debug_dynrouter", {"applying_duration", req.reference})
+ reference = reference .. "/performance"
+ local duration = req.duration
+ local change_factor = 1 / state["performance-period"]
+
+ -- Get the performance of the route at `reference'
+ local status, performance = ao.resolve(state, reference)
+
+ -- Modify the node's existing performance score, weighted by the change
+ -- factor, to give more weight to the existing performance score. Each node
+ -- is given a poor performance score (30000ms) to start, then will slowly
+ -- improve its performance score over time.
+ performance =
+ (performance * (1 - change_factor)) + (duration * change_factor)
+
+ ao.event("debug_perf",
+ {"Received performance", {
+ reference = reference,
+ performance = performance,
+ update_duration = duration,
+ change_factor = change_factor,
+ }
+ })
+
+ state = ao.set(state, reference, performance)
+
+ ao.event("debug_router",
+ {
+ "State after performance set",
+ { state = state, performance = performance }
+ }
+ )
+ return "ok", state
+end
+
+-- Entry point for scheduled messages: dispatch on the request path to the
+-- matching handler. Unknown paths simply initialize defaults and return the
+-- state untouched.
+function compute(state, assignment, opts)
+ local handlers = {
+ ["register"] = register,
+ ["recalculate"] = recalculate,
+ ["performance"] = duration
+ }
+ local handler = handlers[assignment.body.path]
+ if handler ~= nil then
+ return handler(state, assignment, opts)
+ end
+ -- No relevant path: ensure the state is initialized and return it.
+ state = ensure_defaults(state)
+ return "ok", state
+end
+
+--- Tests
+-- Test: registering two providers on the same template yields one route with
+-- two nodes, correct prices, and the default initial performance score.
+function register_test()
+ local state = {}
+ -- Simulate a register call upon a default state.
+ local req = {
+ path = "register",
+ route = {
+ prefix = "host1",
+ price = 5,
+ template = "/test-key"
+ }
+ }
+ _, state = register(state, { body = req }, {})
+
+ -- We must now have exactly one route in state.routes.
+ if #state.routes ~= 1 then
+ error("Expected 1 route after register, got "..tostring(#state.routes))
+ end
+
+ -- Verify the node, price and default performance.
+ local r = state.routes[1]
+ ao.event("debug_router", { "route:", r })
+ if r.nodes[1].prefix ~= "host1" then
+ -- BUGFIX: report the field actually checked (`prefix', not `node').
+ error("Expected prefix='host1', got "..tostring(r.nodes[1].prefix))
+ end
+ if r.nodes[1].price ~= 5 then
+ -- BUGFIX: the message previously claimed the expected price was 0.5.
+ error("Expected price=5, got "..tostring(r.nodes[1].price))
+ end
+ if r.nodes[1].performance ~= state["initial-performance"] then
+ error("Expected performance=" ..
+ tostring(state["initial-performance"]) ..
+ ", got " .. tostring(r.nodes[1].performance)
+ )
+ end
+
+ -- Register another provider on the route.
+ req = {
+ path = "register",
+ route = {
+ prefix = "host2",
+ price = 10,
+ template = "/test-key"
+ }
+ }
+ _, state = register(state, { body = req }, {})
+
+ ao.event("debug_router", {"state after second registration", state})
+
+ if #state.routes[1].nodes ~= 2 then
+ error("Expected 2 nodes after second registration, got "
+ .. tostring(#state.routes[1].nodes))
+ end
+
+ return "ok"
+end
+
+ -- Test 2: performance updates and weight recalculation
+-- Test: performance updates and weight recalculation. Two nodes start with
+-- equal weights; after node 1 reports fast durations and node 2 a very slow
+-- one, recalculation must leave node 1's weight unchanged (it sets the
+-- benchmark) and decrease node 2's.
+function performance_test()
+ -- Create a new state with a fast performance-period, giving rapid changes
+ -- to the performance score of nodes.
+ local state = {
+ ["performance-period"] = 6
+ }
+
+ -- Add a node to a new route on the state
+ local register_req = {
+ path = "register",
+ route = {
+ prefix = "host1",
+ price = 5,
+ template = "/test-key"
+ }
+ }
+ _, state = register(state, { body = register_req }, {})
+
+ -- Modify the request and add another node.
+ register_req.route.prefix = "host2"
+ _, state = register(state, { body = register_req }, {})
+
+ -- Get the references for the nodes on the route and validate it.
+ local node1_ref = state.routes[1].nodes[1].reference
+ local node2_ref = state.routes[1].nodes[2].reference
+
+ if node1_ref ~= "routes/1/nodes/1" then
+ error("Invalid reference. Received: " .. node1_ref)
+ end
+ if node2_ref ~= "routes/1/nodes/2" then
+ error("Invalid reference. Received: " .. node2_ref)
+ end
+
+ -- Record the starting scores for the nodes
+ local t0_node1_score = state.routes[1].nodes[1].weight
+ -- BUGFIX: read node 2's weight (the original read nodes[1] again, so the
+ -- equality check below compared node 1 against itself).
+ local t0_node2_score = state.routes[1].nodes[2].weight
+
+ if t0_node1_score ~= t0_node2_score then
+ error("Initial node scores should be equal. Received: "
+ .. tostring(t0_node1_score) .. " and " .. tostring(t0_node2_score))
+ end
+
+ -- Post 2 performance updates for the first node, improving its performance.
+ local perf_req = {
+ path = "duration",
+ host = "host1",
+ reference = node1_ref,
+ duration = 200
+ }
+ _, state = duration(state, { body = perf_req }, {})
+ _, state = duration(state, { body = perf_req }, {})
+ -- Post a performance update for the second node, with very poor performance
+ perf_req.reference = node2_ref
+ perf_req.duration = 55500
+ ao.event("debug_router", {"perf_req node 2", perf_req})
+ _, state = duration(state, { body = perf_req }, {})
+
+ ao.event("debug_router",
+ {"state after performance updates", {
+ state = state
+ }}
+ )
+
+ -- now trigger a recalc
+ _, state = recalculate(state, { body = { path = "recalculate" } }, {})
+
+ ao.event("debug_router",
+ {"Nodes after recalculation", state.routes[1].nodes}
+ )
+
+ -- Record the post-recalculation scores for the nodes
+ local t1_node1_score = state.routes[1].nodes[1].weight
+ local t1_node2_score = state.routes[1].nodes[2].weight
+
+ ao.event("debug_router_scores", {
+ t0_n1 = t0_node1_score,
+ t1_n1 = t1_node1_score,
+ t0_n2 = t0_node2_score,
+ t1_n2 = t1_node2_score
+ })
+
+ if t1_node1_score ~= t0_node1_score then
+ error("Node 1 sets the benchmark: It's score should stay the same.")
+ end
+
+ if t1_node2_score >= t0_node2_score then
+ error("Node 2 score should have decreased!")
+ end
+
+ return "ok"
+end
\ No newline at end of file
diff --git a/scripts/meta-test.lua b/scripts/meta-test.lua
new file mode 100644
index 000000000..6c763177c
--- /dev/null
+++ b/scripts/meta-test.lua
@@ -0,0 +1,30 @@
+--- A module that tests the `dev_lua_test' EUnit wrapper integration.
+
+-- Return a simple result to the calling test suite. The minimal case: a
+-- bare "ok" status with no accompanying message.
+function basic_test()
+ return "ok"
+end
+
+-- Return a message to the calling test suite: an "ok" status paired with a
+-- two-key result table.
+function return_message_test()
+ return "ok", { key1 = "Value1", key2 = "Value2" }
+end
+
+-- This function returns an `{error, _}` tuple which -- if it were to be picked
+-- up as a test by `dev_lua_tests' -- would cause the test suite to fail.
+-- Subsequently, by virtue of the code not being executed, we gain confidence
+-- that the test suite generator is differentiating between relevant and
+-- irrelevant functions correctly. It's a curious mechanism, but it is useful.
+function meta_test_ignored()
+ return "error", "This should not be picked up as a test!"
+end
+
+-- Test that the test environment granted by the generator allows us to execute
+-- calls with the `ao.resolve' function, outside of the sandbox. Currently,
+-- `dev_lua_test' does not support sandboxing.
+function sandbox_test()
+ -- Simply return an AO call to the test suite. If the router device is not
+ -- available, this will cause the test to fail.
+ local status, res = ao.resolve({ path = "/~router@1.0/routes/1/template" })
+ return status, res
+end
\ No newline at end of file
diff --git a/scripts/p4-payment-client.lua b/scripts/p4-payment-client.lua
new file mode 100644
index 000000000..20a1921ae
--- /dev/null
+++ b/scripts/p4-payment-client.lua
@@ -0,0 +1,84 @@
+--- A simple script that can be used as a `~p4@1.0` ledger device, marshalling
+--- requests to a local process.
+
+-- Find the user's balance in the current ledger state by resolving
+-- `<ledger-path>/now/balance/<target>'. A failed resolution is treated as a
+-- zero balance rather than an error.
+function balance(base, request)
+ local status, res = ao.resolve({
+ path =
+ base["ledger-path"]
+ .. "/now/balance/"
+ .. request["target"]
+ })
+ ao.event({ "client received balance response",
+ { status = status, res = res, target = request["target"] } }
+ )
+ -- If the balance request fails (most likely because the user has no balance),
+ -- return a balance of 0.
+ if status ~= "ok" then
+ return "ok", 0
+ end
+
+ -- We have successfully retrieved the balance, so return it.
+ return "ok", res
+end
+
+-- Debit the user's balance in the current ledger state: POST the request to
+-- the ledger process's schedule, then read back the account balance at the
+-- resulting slot to confirm the debit landed.
+-- NOTE(review): returns only `"ok"' (no base/state), unlike the other
+-- handlers in these scripts -- confirm the caller does not expect a pair.
+function debit(base, request)
+ ao.event({ "client starting debit", { request = request, base = base } })
+ local status, res = ao.resolve({
+ path = "(" .. base["ledger-path"] .. ")/schedule",
+ method = "POST",
+ body = request
+ })
+ ao.event({ "client received schedule response", { status = status, res = res } })
+ status, res = ao.resolve({
+ path = base["ledger-path"] .. "/compute/balance/" .. request["account"],
+ slot = res.slot
+ })
+ ao.event({ "confirmed balance", { status = status, res = res } })
+ return "ok"
+end
+
+--- Poll an external ledger for credit events. If new credit notices have been
+--- sent by the external ledger, push them to the local ledger, one slot at a
+--- time, until the local `last-credit' catches up with the external one.
+--- Returns `"ok", base' on success or `"error", base' on any failed resolve.
+function poll(base, req)
+ local status, local_last_credit = ao.resolve({
+ path = base["ledger-path"] .. "/now/last-credit"
+ })
+ if status ~= "ok" then
+ ao.event(
+ { "error getting local last credit",
+ { status = status, res = local_last_credit } }
+ )
+ return "error", base
+ end
+
+ local status, external_last_credit = ao.resolve({
+ path = base["external-ledger"] .. "/now/last-credit"
+ })
+ if status ~= "ok" then
+ ao.event({ "error getting external last credit",
+ { status = status, res = external_last_credit } })
+ return "error", base
+ end
+
+ ao.event({ "Retreived sync data. Last credit info:",
+ {
+ local_last_credit = local_last_credit,
+ external_last_credit = external_last_credit }
+ }
+ )
+ -- BUGFIX: declare `res' locally; the original assigned it inside the loop
+ -- without `local', leaking a global variable.
+ local res
+ while local_last_credit < external_last_credit do
+ status, res = ao.resolve({
+ path = base["external-ledger"] .. "/push",
+ slot = local_last_credit + 1
+ })
+ if status ~= "ok" then
+ ao.event({ "error pushing slot", { status = status, res = res } })
+ return "error", base
+ end
+ local_last_credit = local_last_credit + 1
+ end
+
+ return "ok", base
+end
\ No newline at end of file
diff --git a/scripts/p4-payment-process.lua b/scripts/p4-payment-process.lua
new file mode 100644
index 000000000..7bb60c2a2
--- /dev/null
+++ b/scripts/p4-payment-process.lua
@@ -0,0 +1,97 @@
+--- A ledger that allows account balances to be debited and credited by a
+--- specified address.
+
+-- Check if the request is a valid debit/credit request. Two signatures are
+-- required: the assignment itself must be committed by the scheduler, and
+-- the inner request (`assignment.body') must be committed by the operator.
+-- Returns true only when both checks pass.
+local function is_valid_request(base, assignment)
+ -- First, validate that the assignment is signed by the scheduler.
+ local scheduler = base.scheduler
+ local status, res = ao.resolve(assignment, "committers")
+ ao.event({
+ "assignment committers resp:",
+ { status = status, res = res, scheduler = scheduler }
+ })
+
+ if status ~= "ok" then
+ return false
+ end
+
+ local valid = false
+ for _, committer in ipairs(res) do
+ if committer == scheduler then
+ valid = true
+ end
+ end
+
+ if not valid then
+ return false
+ end
+
+ -- Next, validate that the request is signed by the operator.
+ local operator = base.operator
+ status, res = ao.resolve(assignment.body, "committers")
+ ao.event({
+ "request committers resp:",
+ { status = status, res = res, operator = operator }
+ })
+
+ if status ~= "ok" then
+ return false
+ end
+
+ for _, committer in ipairs(res) do
+ if committer == operator then
+ return true
+ end
+ end
+
+ return false
+end
+
+-- Debit the specified account by the given amount. Requires a valid
+-- scheduler+operator signed request; on failure the error is recorded in
+-- `base.result' and the balances are left untouched. Balances may go
+-- negative: no funds check is performed here.
+function debit(base, assignment)
+ ao.event({ "process debit starting", { assignment = assignment } })
+ if not is_valid_request(base, assignment) then
+ base.result = { status = "error", error = "Operator signature required." }
+ ao.event({ "debit error", base.result })
+ return "ok", base
+ end
+ ao.event({ "process debit valid", { assignment = assignment } })
+ base.balance = base.balance or {}
+ base.balance[assignment.body.account] =
+ (base.balance[assignment.body.account] or 0) - assignment.body.quantity
+
+ ao.event({ "process debit success", { balances = base.balance } })
+ return "ok", base
+end
+
+-- Credit the specified account by the given amount. Defined via `_G' because
+-- `credit-notice' contains a hyphen and is not a legal Lua identifier.
+-- Requires a valid scheduler+operator signed request, mirroring `debit'.
+_G["credit-notice"] = function (base, assignment)
+ ao.event({ "credit-notice", { assignment = assignment }, { balances = base.balance } })
+ if not is_valid_request(base, assignment) then
+ base.result = { status = "error", error = "Operator signature required." }
+ return "ok", base
+ end
+ ao.event({ "is valid", { req = assignment.body } })
+ base.balance = base.balance or {}
+ base.balance[assignment.body.recipient] =
+ (base.balance[assignment.body.recipient] or 0) + assignment.body.quantity
+ ao.event({ "credit", { ["new balances"] = base.balance } })
+ return "ok", base
+end
+
+--- Index function, called by the `~process@1.0` device for scheduled messages.
+--- We route each to the appropriate function based on the request path.
+--- NOTE(review): the `balance' branch calls a global `balance/2' that is not
+--- defined in this file (it exists in p4-payment-client.lua) -- confirm both
+--- scripts are loaded into the same Lua state, or this branch errors.
+--- NOTE(review): an unknown path at slot > 0 falls off the end and returns
+--- nil -- confirm the process device tolerates a nil result.
+function compute(base, assignment, opts)
+ ao.event({ "compute", { assignment = assignment }, { balances = base.balance } })
+ if assignment.body.path == "debit" then
+ return debit(base, assignment.body)
+ elseif assignment.body.path == "credit-notice" then
+ return _G["credit-notice"](base, assignment.body)
+ elseif assignment.body.path == "balance" then
+ return balance(base, assignment.body)
+ elseif assignment.slot == 0 then
+ -- Initialization on the first slot: ensure the balance table exists.
+ base.balance = base.balance or {}
+ return "ok", base
+ end
+end
diff --git a/src/ar_wallet.erl b/src/ar_wallet.erl
index 6e87405eb..bf4304bec 100644
--- a/src/ar_wallet.erl
+++ b/src/ar_wallet.erl
@@ -57,14 +57,16 @@ verify({{rsa, PublicExpnt}, Pub}, Data, Sig, DigestType) when PublicExpnt =:= 65
%% @doc Generate an address from a public key.
to_address(Pubkey) ->
to_address(Pubkey, ?DEFAULT_KEY_TYPE).
-to_address(PubKey, {rsa, 65537}) when bit_size(PubKey) == 256 ->
+to_address(PubKey, _) when bit_size(PubKey) == 256 ->
%% Small keys are not secure, nobody is using them, the clause
%% is for backwards-compatibility.
PubKey;
-to_address({{_, _, PubKey}, {_, PubKey}}, {rsa, 65537}) ->
+to_address({{_, _, PubKey}, {_, PubKey}}, _) ->
to_address(PubKey);
to_address(PubKey, {rsa, 65537}) ->
- to_rsa_address(PubKey).
+ to_rsa_address(PubKey);
+to_address(PubKey, {ecdsa, 256}) ->
+ to_ecdsa_address(PubKey).
%% @doc Generate a new wallet public and private key, with a corresponding keyfile.
%% The provided key is used as part of the file name.
@@ -194,6 +196,9 @@ to_rsa_address(PubKey) ->
hash_address(PubKey) ->
crypto:hash(sha256, PubKey).
+to_ecdsa_address(PubKey) ->
+ hb_keccak:key_to_ethereum_address(PubKey).
+
%%%===================================================================
%%% Private functions.
%%%===================================================================
diff --git a/src/dev_cache.erl b/src/dev_cache.erl
index 771e4526e..6bae07eff 100644
--- a/src/dev_cache.erl
+++ b/src/dev_cache.erl
@@ -24,6 +24,7 @@
read(_M1, M2, Opts) ->
Location = hb_ao:get(<<"target">>, M2, Opts),
?event({read, {key_extracted, Location}}),
+ ?event(debug_gateway, cache_read),
case hb_cache:read(Location, Opts) of
{ok, Res} ->
?event({read, {cache_result, ok, Res}}),
@@ -48,7 +49,8 @@ read(_M1, M2, Opts) ->
not_found ->
% The cache does not have this ID,but it may still be an explicit
% `data/' path.
- Store = hb_opts:get(store, [], Opts),
+ % Store = hb_opts:get(store, [], Opts),
+ Store = maps:get(store, Opts),
?event(dev_cache, {read, {location, Location}, {store, Store}}),
hb_store:read(Store, Location)
end.
diff --git a/src/dev_codec_ans104.erl b/src/dev_codec_ans104.erl
index 76829ea76..3b694daf9 100644
--- a/src/dev_codec_ans104.erl
+++ b/src/dev_codec_ans104.erl
@@ -6,11 +6,11 @@
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
-%% The size at which a value should be made into a body item, instead of a
-%% tag.
+%%% The size at which a value should be made into a body item, instead of a
+%%% tag.
-define(MAX_TAG_VAL, 128).
-%% The list of TX fields that users can set directly. Data is excluded because
-%% it may be set by the codec in order to support nested messages.
+%%% The list of TX fields that users can set directly. Data is excluded because
+%%% it may be set by the codec in order to support nested messages.
-define(TX_KEYS,
[
<<"id">>,
@@ -20,11 +20,25 @@
<<"signature">>
]
).
-%% The list of tags that a user is explicitly committing to when they sign an
-%% ANS-104 message.
+%%% The list of keys that should be forced into the tag list, rather than being
+%%% encoded as fields in the TX record.
+-define(FORCED_TAG_FIELDS,
+ [
+ <<"quantity">>,
+ <<"manifest">>,
+ <<"data_size">>,
+ <<"data_tree">>,
+ <<"data_root">>,
+ <<"reward">>,
+ <<"denomination">>,
+ <<"signature_type">>
+ ]
+).
+%%% The list of tags that a user is explicitly committing to when they sign an
+%%% ANS-104 message.
-define(COMMITTED_TAGS, ?TX_KEYS ++ [<<"data">>]).
-%% List of tags that should be removed during `to'. These relate to the nested
-%% ar_bundles format that is used by the `ans104@1.0' codec.
+%%% List of tags that should be removed during `to'. These relate to the nested
+%%% ar_bundles format that is used by the `ans104@1.0' codec.
-define(FILTERED_TAGS,
[
<<"bundle-format">>,
@@ -274,7 +288,7 @@ do_from(RawTX) ->
}
end;
_ ->
- Address = hb_util:human_id(ar_wallet:to_address(TX#tx.owner)),
+ Address = hb_util:human_id(ar_wallet:to_address(TX#tx.owner, TX#tx.signature_type)),
WithoutBaseCommitment =
maps:without(
[
@@ -454,10 +468,12 @@ to(RawTABM) when is_map(RawTABM) ->
end,
M
),
- NormalizedMsgKeyMap = hb_ao:normalize_keys(MsgKeyMap),
+ MsgKeyMap2 = hb_ao:normalize_keys(MsgKeyMap),
% Iterate through the default fields, replacing them with the values from
% the message map if they are present.
- {RemainingMap, BaseTXList} =
+ ForcedTagFields = maps:with(?FORCED_TAG_FIELDS, MsgKeyMap2),
+ NormalizedMsgKeyMap = maps:without(?FORCED_TAG_FIELDS, MsgKeyMap2),
+ {RemainingMapWithoutForcedTags, BaseTXList} =
lists:foldl(
fun({Field, Default}, {RemMap, Acc}) ->
NormKey = hb_ao:normalize_key(Field),
@@ -483,6 +499,7 @@ to(RawTABM) when is_map(RawTABM) ->
{NormalizedMsgKeyMap, []},
hb_message:default_tx_list()
),
+ RemainingMap = maps:merge(RemainingMapWithoutForcedTags, ForcedTagFields),
% Rebuild the tx record from the new list of fields and values.
TXWithoutTags = list_to_tuple([tx | lists:reverse(BaseTXList)]),
% Calculate which set of the remaining keys will be used as tags.
@@ -677,4 +694,33 @@ only_committed_maintains_target_test() ->
?event({only_committed, OnlyCommitted}),
Encoded = hb_message:convert(OnlyCommitted, <<"ans104@1.0">>, <<"structured@1.0">>, #{}),
?event({encoded, Encoded}),
- ?assertEqual(TX, Encoded).
\ No newline at end of file
+ ?assertEqual(TX, Encoded).
+
+quantity_field_is_ignored_in_from_test() ->
+ % Ensure that converting from a signed TX with a quantity field results
+ % in a message _without_ a quantity field.
+ TX =
+ ar_bundles:sign_item(
+ #tx {
+ tags = [
+ {<<"test-key">>, <<"value">>}
+ ],
+ quantity = 100
+ },
+ ar_wallet:new()
+ ),
+ ?event({tx, TX}),
+ EncodedMsg = from(TX),
+ ?assertEqual(not_found, hb_ao:get(<<"quantity">>, EncodedMsg, #{})).
+
+quantity_key_encoded_as_tag_test() ->
+ % Ensure that the reciprocal behavior works: converting a message with
+ % a quantity key should yield a tag, rather than a quantity field.
+ Msg = #{ <<"quantity">> => <<"100">> },
+ EncodedTX = to(Msg),
+ ?event({msg, Msg}),
+ ?assertEqual(0, EncodedTX#tx.quantity),
+ % Ensure that converting back to a message yields the original.
+ DecodedMsg2 = from(EncodedTX),
+ ?event({decoded_msg2, DecodedMsg2}),
+ ?assert(hb_message:match(Msg, DecodedMsg2) == true).
diff --git a/src/dev_codec_flat.erl b/src/dev_codec_flat.erl
index be0c43393..a63a66153 100644
--- a/src/dev_codec_flat.erl
+++ b/src/dev_codec_flat.erl
@@ -14,11 +14,12 @@ verify(Msg, Req, Opts) -> dev_codec_httpsig:verify(Msg, Req, Opts).
committed(Msg, Req, Opts) -> dev_codec_httpsig:committed(Msg, Req, Opts).
%% @doc Convert a flat map to a TABM.
-from(Bin) when is_binary(Bin) -> Bin;
+from(Bin) when is_binary(Bin) ->
+ hb_util:ok(deserialize(Bin));
from(Map) when is_map(Map) ->
maps:fold(
fun(Path, Value, Acc) ->
- inject_at_path(hb_path:term_to_path_parts(Path), from(Value), Acc)
+ inject_at_path(hb_path:term_to_path_parts(Path), Value, Acc)
end,
#{},
Map
@@ -153,9 +154,10 @@ path_list_test() ->
).
binary_passthrough_test() ->
- Bin = <<"raw binary">>,
- ?assertEqual(Bin, dev_codec_flat:from(Bin)),
- ?assertEqual(Bin, dev_codec_flat:to(Bin)).
+ % Note: Modified for changes to the `from/1' function.
+ Bin = <<"raw: binary">>,
+ ?assertEqual(#{<<"raw">> => <<"binary">>}, dev_codec_flat:from(Bin)),
+ ?assertEqual(Bin, dev_codec_flat:to(Bin)).
deep_nesting_test() ->
Flat = #{<<"a/b/c/d">> => <<"deep">>},
diff --git a/src/dev_cron.erl b/src/dev_cron.erl
index 4623d2195..fab70dc68 100644
--- a/src/dev_cron.erl
+++ b/src/dev_cron.erl
@@ -180,13 +180,13 @@ parse_time(BinString) ->
stop_once_test() ->
% Start a new node
Node = hb_http_server:start_node(),
- % Set up a standard test worker (even though long_task doesn't use its state)
+ % Set up a standard test worker (even though delay doesn't use its state)
TestWorkerPid = spawn(fun test_worker/0),
TestWorkerNameId = hb_util:human_id(crypto:strong_rand_bytes(32)),
hb_name:register({<<"test">>, TestWorkerNameId}, TestWorkerPid),
- % Create a "once" task targeting the long_task function
+ % Create a "once" task targeting the delay function
OnceUrlPath = <<"/~cron@1.0/once?test-id=", TestWorkerNameId/binary,
- "&cron-path=/~test-device@1.0/long_task">>,
+ "&cron-path=/~test-device@1.0/delay">>,
{ok, OnceTaskID} = hb_http:get(Node, OnceUrlPath, #{}),
?event({'cron:stop_once:test:created', {task_id, OnceTaskID}}),
% Give a short delay to ensure the task has started and called handle,
diff --git a/src/dev_delegated_compute.erl b/src/dev_delegated_compute.erl
index a93fde7c5..3af06da0b 100644
--- a/src/dev_delegated_compute.erl
+++ b/src/dev_delegated_compute.erl
@@ -79,7 +79,7 @@ do_compute(ProcID, Msg2, Opts) ->
<<"relay-path">> =>
<<
"/result/",
- (integer_to_binary(Slot))/binary,
+ (hb_util:bin(Slot))/binary,
"?process-id=",
ProcID/binary
>>,
diff --git a/src/dev_faff.erl b/src/dev_faff.erl
index b3f345be3..68f717a93 100644
--- a/src/dev_faff.erl
+++ b/src/dev_faff.erl
@@ -24,13 +24,9 @@
estimate(_, Msg, NodeMsg) ->
?event(payment, {estimate, {msg, Msg}}),
% Check if the address is in the allow-list.
- case hb_ao:get(<<"type">>, Msg, <<"pre">>, NodeMsg) of
- <<"pre">> ->
- case is_admissible(Msg, NodeMsg) of
- true -> {ok, 0};
- false -> {ok, <<"infinity">>}
- end;
- <<"post">> -> {ok, 0}
+ case is_admissible(Msg, NodeMsg) of
+ true -> {ok, 0};
+ false -> {ok, <<"infinity">>}
end.
%% @doc Check whether all of the signers of the request are in the allow-list.
diff --git a/src/dev_genesis_wasm.erl b/src/dev_genesis_wasm.erl
index cd1c80865..6d31ae932 100644
--- a/src/dev_genesis_wasm.erl
+++ b/src/dev_genesis_wasm.erl
@@ -32,7 +32,11 @@ compute(Msg, Msg2, Opts) ->
{ok, Msg4} =
hb_ao:resolve(
Msg3,
- {as, <<"patch@1.0">>, Msg2},
+ {
+ as,
+ <<"patch@1.0">>,
+ Msg2#{ <<"patch-from">> => <<"/results/outbox">> }
+ },
Opts
),
% Return the patched message.
diff --git a/src/dev_green_zone.erl b/src/dev_green_zone.erl
index 8c26b4a22..78609b7bd 100644
--- a/src/dev_green_zone.erl
+++ b/src/dev_green_zone.erl
@@ -1,145 +1,236 @@
%%% @doc The green zone device, which provides secure communication and identity
-%%% management between trusted nodes. It handles node initialization, joining
-%%% existing green zones, key exchange, and node identity cloning. All operations
-%%% are protected by hardware commitment and encryption.
+%%% management between trusted nodes.
+%%%
+%%% It handles node initialization, joining existing green zones, key exchange,
+%%% and node identity cloning. All operations are protected by hardware
+%%% commitment and encryption.
-module(dev_green_zone).
--export([join/3, init/3, become/3, key/3]).
+-export([info/1, info/3, join/3, init/3, become/3, key/3]).
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("public_key/include/public_key.hrl").
-%% @doc The default required options for a green zone. These are intended as
-%% sane basic requirements for a green zone:
-%% - The node will not load remote devices (or trust extra peers).
-%% - The node will use only the default preloaded devices (found on the
-%% initiating machine).
-%% - The node uses the default store configuration.
-%% - The node will not change its routes from the defaults.
-%% - The peer's preprocessor and postprocessor are the same as the local node's.
-%% - The node will not schedule messages. Without coordination, peers in the
-%% green zone will schedule messages without regard for avoiding
-%% double-assignment of slots.
-%% - The node must be in a permanent state (no further configuration changes
-%% being allowed).
-%% Each of these options is derived from the present node's configuration.
+%% @doc Controls which functions are exposed via the device API.
+%%
+%% This function defines the security boundary for the green zone device by
+%% explicitly listing which functions are available through the API.
+%%
+%% @param _ Ignored parameter
+%% @returns A map with the `exports' key containing a list of allowed functions
+info(_) ->
+ #{ exports => [info, init, join, become, key] }.
+
+%% @doc Provides information about the green zone device and its API.
+%%
+%% This function returns detailed documentation about the device, including:
+%% 1. A high-level description of the device's purpose
+%% 2. Version information
+%% 3. Available API endpoints with their parameters and descriptions
+%%
+%% @param _Msg1 Ignored parameter
+%% @param _Msg2 Ignored parameter
+%% @param _Opts A map of configuration options
+%% @returns {ok, Map} containing the device information and documentation
+info(_Msg1, _Msg2, _Opts) ->
+ InfoBody = #{
+ <<"description">> =>
+ <<"Green Zone secure communication and identity management for trusted nodes">>,
+ <<"version">> => <<"1.0">>,
+ <<"api">> => #{
+ <<"info">> => #{
+ <<"description">> => <<"Get device info">>
+ },
+ <<"init">> => #{
+ <<"description">> => <<"Initialize the green zone">>,
+ <<"details">> =>
+ <<"Sets up the node's cryptographic identity with wallet and AES key">>
+ },
+ <<"join">> => #{
+ <<"description">> => <<"Join an existing green zone">>,
+ <<"required_node_opts">> => #{
+ <<"green_zone_peer_location">> => <<"Target peer's address">>,
+ <<"green_zone_peer_id">> => <<"Target peer's unique identifier">>
+ },
+ <<"optional_node_opts">> => #{
+ <<"green_zone_adopt_config">> =>
+ <<"Whether to adopt peer's configuration (default: true)">>
+ }
+ },
+ <<"key">> => #{
+ <<"description">> => <<"Retrieve and encrypt the node's private key">>,
+ <<"details">> =>
+ <<"Returns the node's private key encrypted with the shared AES key">>
+ },
+ <<"become">> => #{
+ <<"description">> => <<"Clone the identity of a target node">>,
+ <<"required_node_opts">> => #{
+ <<"green_zone_peer_location">> => <<"Target peer's address">>,
+ <<"green_zone_peer_id">> => <<"Target peer's unique identifier">>
+ }
+ }
+ }
+ },
+ {ok, #{<<"status">> => 200, <<"body">> => InfoBody}}.
+
+%% @doc Provides the default required options for a green zone.
+%%
+%% This function defines the baseline security requirements for nodes in a green zone:
+%% 1. Restricts loading of remote devices and only allows trusted signers
+%% 2. Limits to preloaded devices from the initiating machine
+%% 3. Enforces specific store configuration
+%% 4. Prevents route changes from the defaults
+%% 5. Requires matching hooks across all peers
+%% 6. Disables message scheduling to prevent conflicts
+%% 7. Enforces a permanent state to prevent further configuration changes
+%%
+%% @param Opts A map of configuration options from which to derive defaults
+%% @returns A map of required configuration options for the green zone
+-spec default_zone_required_opts(Opts :: map()) -> map().
default_zone_required_opts(Opts) ->
- #{
- trusted_device_signers => hb_opts:get(trusted_device_signers, [], Opts),
+ #{
+ trusted_device_signers => hb_opts:get(trusted_device_signers, [], Opts),
load_remote_devices => hb_opts:get(load_remote_devices, false, Opts),
preload_devices => hb_opts:get(preload_devices, [], Opts),
- store => hb_opts:get(store, [], Opts),
+ % store => hb_opts:get(store, [], Opts),
routes => hb_opts:get(routes, [], Opts),
- preprocessor => hb_opts:get(preprocessor, undefined, Opts),
- postprocessor => hb_opts:get(postprocessor, undefined, Opts),
+ on => hb_opts:get(on, undefined, Opts),
scheduling_mode => disabled,
initialized => permanent
- }.
+ }.
+
-%% @doc Initialize the green zone.
-%% Sets up the node's cryptographic identity by ensuring that a wallet (keypair)
-%% exists and generating a shared AES key for secure communication. The wallet,
-%% AES key, and an empty trusted nodes list are stored in the node's configuration.
-%% @param M1 Ignored parameter.
-%% @param M2 Optionally contains a `required-config' map. If not provided, the
-%% default required config (derived from the nodes base configuration)
-%% will be used.
-%% @param Opts A map containing configuration options. If the wallet is not already
-%% provided (under key `priv_wallet'), a new one will be created.
-%% @returns {ok, Msg} where Msg is a binary confirmation message.
--spec init(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, binary()}.
-init(_M1, M2, Opts) ->
+%% @doc Initialize the green zone for a node.
+%%
+%% This function performs the following operations:
+%% 1. Validates the node's history to ensure this is a valid initialization
+%% 2. Retrieves or creates a required configuration for the green zone
+%% 3. Ensures a wallet (keypair) exists or creates a new one
+%% 4. Generates a new 256-bit AES key for secure communication
+%% 5. Updates the node's configuration with these cryptographic identities
+%%
+%% Config options in Opts map:
+%% - green_zone_required_config: (Optional) Custom configuration requirements
+%% - priv_wallet: (Optional) Existing wallet to use instead of creating a new one
+%% - priv_green_zone_aes: (Optional) Existing AES key, if already part of a zone
+%%
+%% @param _M1 Ignored parameter
+%% @param _M2 May contain a `required-config' map for custom requirements
+%% @param Opts A map of configuration options
+%% @returns `{ok, Binary}' on success with confirmation message, or
+%% `{error, Binary}' on failure with error message.
+-spec init(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, binary()} | {error, binary()}.
+init(_M1, _M2, Opts) ->
?event(green_zone, {init, start}),
- RequiredConfig =
- hb_ao:get(
- <<"required-config">>,
- M2,
- default_zone_required_opts(Opts),
- Opts
- ),
- % Check if a wallet exists; create one if absent.
- NodeWallet = case hb_opts:get(priv_wallet, undefined, Opts) of
- undefined ->
- ?event(green_zone, {init, wallet, missing}),
- hb:wallet();
- ExistingWallet ->
- ?event(green_zone, {init, wallet, found}),
- ExistingWallet
- end,
- % Generate a new 256-bit AES key if we have not already joined
- % a green zone.
- GreenZoneAES =
- case hb_opts:get(priv_green_zone_aes, undefined, Opts) of
- undefined ->
- ?event(green_zone, {init, aes_key, generated}),
- crypto:strong_rand_bytes(32);
- ExistingAES ->
- ?event(green_zone, {init, aes_key, found}),
- ExistingAES
- end,
- ?event(green_zone, {init, aes_key, generated}),
- % Store the wallet, AES key, and an empty trusted nodes map.
- ok = hb_http_server:set_opts(Opts#{
- priv_wallet => NodeWallet,
- priv_green_zone_aes => GreenZoneAES,
- trusted_nodes => #{},
- green_zone_required_opts => RequiredConfig
- }),
- ?event(green_zone, {init, complete}),
- {ok, <<"Green zone initialized successfully.">>}.
+ case hb_opts:validate_node_history(Opts) of
+ {ok, 1} ->
+ RequiredConfig = hb_opts:get(
+ <<"green_zone_required_config">>,
+ default_zone_required_opts(Opts),
+ Opts
+ ),
+ ?event(green_zone, {init, required_config, RequiredConfig}),
+ % Check if a wallet exists; create one if absent.
+ NodeWallet = case hb_opts:get(priv_wallet, undefined, Opts) of
+ undefined ->
+ ?event(green_zone, {init, wallet, missing}),
+ hb:wallet();
+ ExistingWallet ->
+ ?event(green_zone, {init, wallet, found}),
+ ExistingWallet
+ end,
+ % Generate a new 256-bit AES key if we have not already joined
+ % a green zone.
+ GreenZoneAES =
+ case hb_opts:get(priv_green_zone_aes, undefined, Opts) of
+ undefined ->
+ ?event(green_zone, {init, aes_key, generated}),
+ crypto:strong_rand_bytes(32);
+ ExistingAES ->
+ ?event(green_zone, {init, aes_key, found}),
+ ExistingAES
+ end,
+ % Store the wallet, AES key, and an empty trusted nodes map.
+ hb_http_server:set_opts(Opts#{
+ priv_wallet => NodeWallet,
+ priv_green_zone_aes => GreenZoneAES,
+ trusted_nodes => #{},
+ green_zone_required_opts => RequiredConfig
+ }),
+ ?event(green_zone, {init, complete}),
+ {ok, <<"Green zone initialized successfully.">>};
+ {error, Reason} ->
+ {error, Reason}
+ end.
+
-%% @doc Initiate the join process for a node (Node B).
-%%
-%% When Node B wishes to join an existing green zone, it sends a GET request to
-%% its local join endpoint.
-%% This request includes a header with the target peer's address (Node A).
-%%
-%% Based on the presence of a peer address:
-%% - If the target peer is specified, Node B internally routes the request to
-%% the join_peer flow, where it generates an commitment report and prepares
-%% a POST request to forward to Node A.
-%% - If no peer address is present, the join request is processed locally via
-%% the validate_join flow.
-%%
-%% @param M1 The join request message containing a header with the target peer's
-%% address.
-%% @param M2 Ignored parameter.
-%% @param Opts A map of configuration options.
-%% @returns {ok, Map} on success with join response details, or {error, Reason}
-%% on failure.
+%% @doc Initiates the join process for a node to enter an existing green zone.
+%%
+%% This function performs the following operations depending on the state:
+%% 1. Validates the node's history to ensure proper initialization
+%% 2. Checks for target peer information (location and ID)
+%% 3. If target peer is specified:
+%% a. Generates a commitment report for the peer
+%% b. Prepares and sends a POST request to the target peer
+%% c. Verifies the response and decrypts the returned zone key
+%% d. Updates local configuration with the shared AES key
+%% 4. If no peer is specified, processes the join request locally
+%%
+%% Config options in Opts map:
+%% - green_zone_peer_location: Target peer's address
+%% - green_zone_peer_id: Target peer's unique identifier
+%% - green_zone_adopt_config:
+%% (Optional) Whether to adopt peer's configuration (default: true)
+%%
+%% @param M1 The join request message with target peer information
+%% @param M2 Additional request details, may include adoption preferences
+%% @param Opts A map of configuration options for join operations
+%% @returns `{ok, Map}' on success with join response details, or
+%% `{error, Binary}' on failure with error message.
-spec join(M1 :: term(), M2 :: term(), Opts :: map()) ->
{ok, map()} | {error, binary()}.
join(M1, M2, Opts) ->
?event(green_zone, {join, start}),
- PeerLocation = hb_ao:get(<<"peer-location">>, M1, undefined, Opts),
- PeerID = hb_ao:get(<<"peer-id">>, M1, undefined, Opts),
- ?event(green_zone, {join_peer, PeerLocation, PeerID}),
- if (PeerLocation =:= undefined) or (PeerID =:= undefined) ->
- validate_join(M1, M2, Opts);
- true ->
- join_peer(PeerLocation, PeerID, M1, M2, Opts)
- end.
-
+ case hb_opts:validate_node_history(Opts, 0, 1) of
+ {ok, _N} ->
+ PeerLocation = hb_opts:get(<<"green_zone_peer_location">>, undefined, Opts),
+ PeerID = hb_opts:get(<<"green_zone_peer_id">>, undefined, Opts),
+ ?event(green_zone, {join_peer, PeerLocation, PeerID}),
+ if (PeerLocation =:= undefined) or (PeerID =:= undefined) ->
+ validate_join(M1, M2, Opts);
+ true ->
+ join_peer(PeerLocation, PeerID, M1, M2, Opts)
+ end;
+ {error, Reason} ->
+ {error, Reason}
+ end.
-%% @doc Retrieve and encrypt the node's private key.
-%% Encrypts the node's private key using the shared AES key in AES-256-GCM mode.
-%% It returns the encrypted key along with the initialization vector (IV) needed
-%% for decryption.
-%% @param M1 Ignored parameter.
-%% @param M2 Ignored parameter.
-%% @param Opts A map of configuration options. Must include keys `priv_wallet'
-%% and `priv_green_zone_aes'.
-%% @returns {ok, Map} on success, where Map contains:
-%% - status: 200
-%% - encrypted_key: the encrypted private key (Base64 encoded)
-%% - iv: the initialization vector (Base64 encoded)
-%% Returns {error, Reason} if the node is not part of the green zone.
--spec key(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, map()} | {error, binary()}.
+%% @doc Encrypts and provides the node's private key for secure sharing.
+%%
+%% This function performs the following operations:
+%% 1. Retrieves the shared AES key and the node's wallet
+%% 2. Verifies that the node is part of a green zone (has a shared AES key)
+%% 3. Generates a random initialization vector (IV) for encryption
+%% 4. Encrypts the node's private key using AES-256-GCM with the shared key
+%% 5. Returns the encrypted key and IV for secure transmission
+%%
+%% Required configuration in Opts map:
+%% - priv_green_zone_aes: The shared AES key for the green zone
+%% - priv_wallet: The node's wallet containing the private key to encrypt
+%%
+%% @param _M1 Ignored parameter
+%% @param _M2 Ignored parameter
+%% @param Opts A map of configuration options
+%% @returns `{ok, Map}' containing the encrypted key and IV on success, or
+%% `{error, Binary}' if the node is not part of a green zone
+-spec key(M1 :: term(), M2 :: term(), Opts :: map()) ->
+ {ok, map()} | {error, binary()}.
key(_M1, _M2, Opts) ->
?event(green_zone, {get_key, start}),
% Retrieve the shared AES key and the node's wallet.
GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, Opts),
{{KeyType, Priv, Pub}, _PubKey} = hb_opts:get(priv_wallet, undefined, Opts),
- ?event(green_zone, {get_key, wallet, hb_util:human_id(ar_wallet:to_address(Pub))}),
+ ?event(green_zone,
+ {get_key, wallet, hb_util:human_id(ar_wallet:to_address(Pub))}),
case GreenZoneAES of
undefined ->
% Log error if no shared AES key is found.
@@ -156,35 +247,45 @@ key(_M1, _M2, Opts) ->
<<>>,
true
),
-
+
% Log successful encryption of the private key.
?event(green_zone, {get_key, encrypt, complete}),
{ok, #{
<<"status">> => 200,
- <<"encrypted_key">> => base64:encode(<<EncryptedKey/binary, Tag/binary>>),
+ <<"encrypted_key">> =>
+ base64:encode(<<EncryptedKey/binary, Tag/binary>>),
<<"iv">> => base64:encode(IV)
}}
end.
-%% @doc Clone the identity of a target node.
-%% Allows a node to adopt the identity of a target node by retrieving the target
-%% node's encrypted private key and IV, decrypting it using the shared AES key,
-%% and updating the local node's wallet with the target node's keypair.
-%% @param M1 The message containing the target node's encrypted private key and IV.
-%% @param M2 Ignored parameter.
-%% @param Opts A map of configuration options. Must include `priv_green_zone_aes'.
-%% @returns {ok, Map} on success, where Map includes:
-%% - status: 200
-%% - message: confirmation text
-%% - node: the target node's address
-%% Returns {error, Reason} if the node is not part of the green zone.
+%% @doc Clones the identity of a target node in the green zone.
+%%
+%% This function performs the following operations:
+%% 1. Retrieves target node location and ID from the configuration
+%% 2. Verifies that the local node has a valid shared AES key
+%% 3. Requests the target node's encrypted key via its key endpoint
+%% 4. Verifies the response is from the expected peer
+%% 5. Decrypts the target node's private key using the shared AES key
+%% 6. Updates the local node's wallet with the target node's identity
+%%
+%% Required configuration in Opts map:
+%% - green_zone_peer_location: Target node's address
+%% - green_zone_peer_id: Target node's unique identifier
+%% - priv_green_zone_aes: The shared AES key for the green zone
+%%
+%% @param _M1 Ignored parameter
+%% @param _M2 Ignored parameter
+%% @param Opts A map of configuration options
+%% @returns `{ok, Map}' on success with confirmation details, or
+%% `{error, Binary}' if the node is not part of a green zone or
+%% identity adoption fails.
-spec become(M1 :: term(), M2 :: term(), Opts :: map()) ->
{ok, map()} | {error, binary()}.
-become(_M1, M2, Opts) ->
+become(_M1, _M2, Opts) ->
?event(green_zone, {become, start}),
% 1. Retrieve the target node's address from the incoming message.
- NodeLocation = hb_ao:get(<<"peer-location">>, M2, Opts),
- NodeID = hb_ao:get(<<"peer-id">>, M2, Opts),
+ NodeLocation = hb_opts:get(<<"green_zone_peer_location">>, undefined, Opts),
+ NodeID = hb_opts:get(<<"green_zone_peer_id">>, undefined, Opts),
% 2. Check if the local node has a valid shared AES key.
GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, Opts),
case GreenZoneAES of
@@ -195,14 +296,17 @@ become(_M1, M2, Opts) ->
_ ->
% 3. Request the target node's encrypted key from its key endpoint.
?event(green_zone, {become, getting_key, NodeLocation, NodeID}),
- {ok, KeyResp} = hb_http:get(NodeLocation, <<"/~greenzone@1.0/key">>, Opts),
+ {ok, KeyResp} = hb_http:get(NodeLocation,
+ <<"/~greenzone@1.0/key">>, Opts),
Signers = hb_message:signers(KeyResp),
- case hb_message:verify(KeyResp, Signers) and lists:member(NodeID, Signers) of
+ case hb_message:verify(KeyResp, Signers) and
+ lists:member(NodeID, Signers) of
false ->
% The response is not from the expected peer.
{error, <<"Received incorrect response from peer!">>};
true ->
- finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts)
+ finalize_become(KeyResp, NodeLocation, NodeID,
+ GreenZoneAES, Opts)
end
end.
@@ -253,62 +357,65 @@ finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) ->
<<"peer-id">> => NodeID
}}.
-%%%--------------------------------------------------------------------
-%%% Internal Functions
-%%%--------------------------------------------------------------------
-
-%% @doc Process an internal join request when a target peer is specified.
-%%
-%% In this flow (executed on Node B):
-%% 1. Node B generates an commitment report and prepares a POST request.
-%% 2. It then forwards the POST request to Node A's join endpoint.
-%% 3. Upon receiving a response from Node A, Node B decrypts the returned
-%% zone-key (an encrypted shared AES key) using its local private key, then
-%% updates its configuration with the shared AES key.
-%%
-%% @param Peer The target peer's (Node A's) address.
-%% @param M1 Ignored parameter.
-%% @param M2 Ignored parameter.
-%% @param Opts A map of configuration options.
-%% @returns {ok, Map} on success with a confirmation message, or {error, Map} on failure.
+%% @doc Processes a join request to a specific peer node.
+%%
+%% This function handles the client-side join flow when connecting to a peer:
+%% 1. Verifies the node is not already in a green zone
+%% 2. Optionally adopts configuration from the target peer
+%% 3. Generates a hardware-backed commitment report
+%% 4. Sends a POST request to the peer's join endpoint
+%% 5. Verifies the response signature
+%% 6. Decrypts the returned AES key
+%% 7. Updates local configuration with the shared key
+%% 8. Optionally mounts an encrypted volume using the shared key
+%%
+%% @param PeerLocation The target peer's address
+%% @param PeerID The target peer's unique identifier
+%% @param _M1 Ignored parameter
+%% @param M2 May contain ShouldMount flag to enable encrypted volume mounting
+%% @param InitOpts A map of initial configuration options
+%% @returns `{ok, Map}' on success with confirmation message, or
+%% `{error, Map|Binary}' on failure with error details
-spec join_peer(
PeerLocation :: binary(),
PeerID :: binary(),
M1 :: term(),
M2 :: term(),
- Opts :: map()) -> {ok, map()} | {error, map()}.
+ Opts :: map()) -> {ok, map()} | {error, map() | binary()}.
join_peer(PeerLocation, PeerID, _M1, M2, InitOpts) ->
- % Check here if the node is already part of a green zone.
- GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, InitOpts),
- case (GreenZoneAES == undefined) andalso maybe_set_zone_opts(PeerLocation, PeerID, M2, InitOpts) of
- {ok, Opts} ->
- Wallet = hb_opts:get(priv_wallet, undefined, Opts),
- {ok, Report} = dev_snp:generate(#{}, #{}, Opts),
- WalletPub = element(2, Wallet),
- MergedReq = hb_ao:set(
- Report,
- <<"public-key">>,
- base64:encode(term_to_binary(WalletPub)),
- Opts
- ),
- % Create an committed join request using the wallet.
- Req = hb_message:commit(MergedReq, Wallet),
- ?event({join_req, Req}),
- ?event({verify_res, hb_message:verify(Req)}),
- % Log that the commitment report is being sent to the peer.
- ?event(green_zone, {join, sending_commitment, PeerLocation, PeerID, Req}),
- case hb_http:post(PeerLocation, <<"/~greenzone@1.0/join">>, Req, Opts) of
- {ok, Resp} ->
- % Log the response received from the peer.
- ?event(green_zone, {join, join_response, PeerLocation, PeerID, Resp}),
+ % Check here if the node is already part of a green zone.
+ GreenZoneAES = hb_opts:get(priv_green_zone_aes, undefined, InitOpts),
+ case (GreenZoneAES == undefined) andalso
+ maybe_set_zone_opts(PeerLocation, PeerID, M2, InitOpts) of
+ {ok, Opts} ->
+ Wallet = hb_opts:get(priv_wallet, undefined, Opts),
+ {ok, Report} = dev_snp:generate(#{}, #{}, Opts),
+ WalletPub = element(2, Wallet),
+ ?event(green_zone, {remove_uncommitted, Report}),
+ MergedReq = hb_ao:set(
+ Report,
+ <<"public-key">>,
+ base64:encode(term_to_binary(WalletPub)),
+ Opts
+ ),
+ % Create a committed join request using the wallet.
+ Req = hb_message:commit(MergedReq, Wallet),
+ ?event({join_req, {explicit, Req}}),
+ ?event({verify_res, hb_message:verify(Req)}),
+ % Log that the commitment report is being sent to the peer.
+ ?event(green_zone, {join, sending_commitment, PeerLocation, PeerID, Req}),
+ case hb_http:post(PeerLocation, <<"/~greenzone@1.0/join">>, Req, Opts) of
+ {ok, Resp} ->
+ % Log the response received from the peer.
+ ?event(green_zone, {join, join_response, PeerLocation, PeerID, Resp}),
% Ensure that the response is from the expected peer, avoiding
% the risk of a man-in-the-middle attack.
Signers = hb_message:signers(Resp),
- ?event(green_zone, {join, signers, Signers}),
- IsVerified = hb_message:verify(Resp, Signers),
- ?event(green_zone, {join, verify, IsVerified}),
- IsPeerSigner = lists:member(PeerID, Signers),
- ?event(green_zone, {join, peer_is_signer, IsPeerSigner, PeerID}),
+ ?event(green_zone, {join, signers, Signers}),
+ IsVerified = hb_message:verify(Resp, Signers),
+ ?event(green_zone, {join, verify, IsVerified}),
+ IsPeerSigner = lists:member(PeerID, Signers),
+ ?event(green_zone, {join, peer_is_signer, IsPeerSigner, PeerID}),
case IsPeerSigner andalso IsVerified of
false ->
% The response is not from the expected peer.
@@ -324,48 +431,75 @@ join_peer(PeerLocation, PeerID, _M1, M2, InitOpts) ->
% shared AES key.
?event(green_zone, {oldOpts, {explicit, InitOpts}}),
?event(green_zone, {newOpts, {explicit, Opts}}),
- hb_http_server:set_opts(Opts#{
+ NewOpts = Opts#{
priv_green_zone_aes => AESKey
- }),
- ?event(successfully_joined_greenzone),
- {ok, #{ <<"body">> => <<"Node joined green zone successfully.">>, <<"status">> => 200}}
+ },
+ hb_http_server:set_opts(NewOpts),
+ ?event(successfully_joined_greenzone),
+ try_mount_encrypted_volume(AESKey, NewOpts),
+ {ok, #{
+ <<"body">> =>
+ <<"Node joined green zone successfully.">>,
+ <<"status">> => 200
+ }}
end;
- {error, Reason} ->
- {error, #{<<"status">> => 400, <<"reason">> => Reason}};
- {unavailable, Reason} ->
- ?event(green_zone, {
+ {error, Reason} ->
+ {error, #{<<"status">> => 400, <<"reason">> => Reason}};
+ {unavailable, Reason} ->
+ ?event(green_zone, {
join_error,
peer_unavailable,
PeerLocation,
PeerID,
Reason
}),
- {error, #{
+ {error, #{
<<"status">> => 503,
<<"body">> => <<"Peer node is unreachable.">>
}}
- end;
- false ->
- ?event(green_zone, {join, already_joined}),
- {error, <<"Node already part of green zone.">>};
+ end;
+ false ->
+ ?event(green_zone, {join, already_joined}),
+ {error, <<"Node already part of green zone.">>};
{error, Reason} ->
% Log the error and return the initial options.
?event(green_zone, {join, error, Reason}),
{error, Reason}
- end.
+ end.
-%% @doc If the operator requests it, the node can automatically adopt the
-%% necessary configuration to join a green zone. `adopt-config' can be a boolean,
-%% a list of fields that should be included in the node message, alongside the
-%% required config of the green zone they are joining.
+%% @doc Adopts configuration from a peer when joining a green zone.
+%%
+%% This function handles the conditional adoption of peer configuration:
+%% 1. Checks if adoption is enabled (default: true)
+%% 2. Requests required configuration from the peer
+%% 3. Verifies the authenticity of the configuration
+%% 4. Creates a node message with appropriate settings
+%% 5. Updates the local node configuration
+%%
+%% Config options:
+%% - green_zone_adopt_config: Controls configuration adoption (boolean, list, or binary)
+%%
+%% @param PeerLocation The location of the peer node to join
+%% @param PeerID The ID of the peer node to join
+%% @param Req The request message with adoption preferences
+%% @param InitOpts A map of initial configuration options
+%% @returns `{ok, Map}' with updated configuration on success, or
+%% `{error, Binary}' if configuration retrieval fails
+-spec maybe_set_zone_opts(
+ PeerLocation :: binary(),
+ PeerID :: binary(),
+ Req :: map(),
+ InitOpts :: map()) -> {ok, map()} | {error, binary()}.
maybe_set_zone_opts(PeerLocation, PeerID, Req, InitOpts) ->
- case hb_ao:get(<<"adopt-config">>, Req, true, InitOpts) of
+ case hb_opts:get(<<"green_zone_adopt_config">>, true, InitOpts) of
false ->
% The node operator does not want to adopt the peer's config. Return
% the initial options unchanged.
{ok, InitOpts};
AdoptConfig ->
- ?event(green_zone, {adopt_config, AdoptConfig, PeerLocation, PeerID, InitOpts}),
+ ?event(green_zone,
+ {adopt_config, AdoptConfig, PeerLocation, PeerID, InitOpts}
+ ),
% Request the required config from the peer.
RequiredConfigRes =
hb_http:get(
@@ -373,52 +507,73 @@ maybe_set_zone_opts(PeerLocation, PeerID, Req, InitOpts) ->
<<"/~meta@1.0/info/green_zone_required_opts">>,
InitOpts
),
- % Ensure the response is okay.
- ?event({req_opts_get_result, RequiredConfigRes}),
- case RequiredConfigRes of
- {error, Reason} ->
- % Log the error and return the initial options.
- ?event(green_zone, {join_error, get_req_opts_failed, Reason}),
- {error, <<"Could not get required config from peer.">>};
- {ok, RequiredConfig} ->
- % Print the required config response.
- Signers = hb_message:signers(RequiredConfig),
- ?event(green_zone, {req_conf_signers, {explicit, Signers}}),
- % Extract and log the verification steps
- IsVerified = hb_message:verify(RequiredConfig, Signers),
- ?event(green_zone, {req_opts, {verified, IsVerified}, {signers, Signers}}),
- % Combined check
- case lists:member(PeerID, Signers) andalso IsVerified of
- false ->
- % The response is not from the expected peer.
- {error, <<"Peer gave invalid signature for required config.">>};
- true ->
- % Generate the node message that should be set prior to
- % joining a green zone.
- NodeMessage =
- calculate_node_message(RequiredConfig, Req, AdoptConfig),
- % Adopt the node message.
- dev_meta:adopt_node_message(NodeMessage, InitOpts)
- end
- end
+ % Ensure the response is okay.
+ ?event({req_opts_get_result, RequiredConfigRes}),
+ case RequiredConfigRes of
+ {error, Reason} ->
+ % Log the error and return the initial options.
+ ?event(green_zone,
+ {join_error, get_req_opts_failed, Reason}
+ ),
+ {error, <<"Could not get required config from peer.">>};
+ {ok, RequiredConfig} ->
+ % Print the required config response.
+ Signers = hb_message:signers(RequiredConfig),
+ ?event(green_zone, {req_conf_signers, {explicit, Signers}}),
+ % Extract and log the verification steps
+ IsVerified = hb_message:verify(RequiredConfig, Signers),
+ ?event(green_zone,
+ {req_opts, {verified, IsVerified}, {signers, Signers}}
+ ),
+ % Combined check
+ case lists:member(PeerID, Signers) andalso IsVerified of
+ false ->
+ % The response is not from the expected peer.
+ {
+ error,
+ <<"Peer gave invalid signature for required config.">>
+ };
+ true ->
+ % Generate the node message that should be set prior
+ % to joining a green zone.
+ NodeMessage =
+ calculate_node_message(
+ RequiredConfig,
+ Req,
+ AdoptConfig
+ ),
+ % Adopt the node message.
+ hb_http_server:set_opts(NodeMessage, InitOpts)
+ end
+ end
end.
-%% @doc Generate the node message that should be set prior to joining a green zone.
-%% This function takes a required opts message, a request message, and an `adopt-config'
-%% value. The `adopt-config' value can be a boolean, a list of fields that should be
-%% included in the node message from the request, or a binary string of fields to
-%% include, separated by commas.
+%% @doc Generate the node message that should be set prior to joining
+%% a green zone.
+%%
+%% This function takes a required opts message, a request message, and an
+%% `adopt-config' value. The `adopt-config' value can be a boolean, a list of
+%% fields that should be included in the node message from the request, or a
+%% binary string of fields to include, separated by commas.
+%%
+%% @param RequiredOpts The required configuration options from the peer node.
+%% @param Req The request message containing configuration options.
+%% @param AdoptConfig Boolean, list, or binary string indicating which fields
+%% to adopt.
+%% @returns A map containing the merged configuration to be used as the
+%% node message.
calculate_node_message(RequiredOpts, Req, true) ->
% Remove irrelevant fields from the request.
StrippedReq =
maps:without(
[
- <<"adopt-config">>, <<"peer-location">>,
- <<"peer-id">>, <<"path">>, <<"method">>
+ <<"green_zone_adopt_config">>, <<"green_zone_peer_location">>,
+ <<"green_zone_peer_id">>, <<"path">>, <<"method">>
],
hb_message:uncommitted(Req)
),
- % Convert atoms to binaries in RequiredOpts to prevent binary_to_existing_atom errors
+ % Convert atoms to binaries in RequiredOpts to prevent
+ % binary_to_existing_atom errors.
% The required config should override the request, if necessary.
maps:merge(StrippedReq, RequiredOpts);
calculate_node_message(RequiredOpts, Req, <<"true">>) ->
@@ -428,30 +583,34 @@ calculate_node_message(RequiredOpts, Req, List) when is_list(List) ->
calculate_node_message(RequiredOpts, Req, BinList) when is_binary(BinList) ->
calculate_node_message(RequiredOpts, hb_util:list(BinList), Req).
-%% @doc Validate an incoming join request.
-%%
-%% When Node A receives a POST join request from Node B, this routine is executed:
-%% 1. It extracts the commitment report, the requesting node's address, and
-%% the encoded public key.
-%% 2. It verifies the commitment report included in the request.
-%% 3. If the report is valid, Node A adds Node B to its list of trusted nodes.
-%% 4. Node A then encrypts the shared AES key (zone-key) with Node B's public
-%% key and returns it along with its public key.
-%%
-%% @param M1 Ignored parameter.
-%% @param Req The join request message containing the commitment report and
-%% other join details.
-%% @param Opts A map of configuration options.
-%% @returns {ok, Map} on success with join response details, or {error, Reason}
-%% if verification fails.
+%%%--------------------------------------------------------------------
+%%% Internal Functions
+%%%--------------------------------------------------------------------
+
+%% @doc Validates an incoming join request from another node.
+%%
+%% This function handles the server-side join flow when receiving a connection
+%% request:
+%% 1. Validates the peer's configuration meets required standards
+%% 2. Extracts the commitment report and public key from the request
+%% 3. Verifies the hardware-backed commitment report
+%% 4. Adds the joining node to the trusted nodes list
+%% 5. Encrypts the shared AES key with the peer's public key
+%% 6. Returns the encrypted key to the requesting node
+%%
+%% @param M1 Ignored parameter
+%% @param Req The join request containing commitment report and public key
+%% @param Opts A map of configuration options
+%% @returns `{ok, Map}' on success with encrypted AES key, or
+%% `{error, Binary}' on failure with error message
-spec validate_join(M1 :: term(), Req :: map(), Opts :: map()) ->
{ok, map()} | {error, binary()}.
validate_join(_M1, Req, Opts) ->
- case validate_peer_opts(Req, Opts) of
- true -> do_nothing;
- false -> throw(invalid_join_request)
- end,
- ?event(green_zone, {join, start}),
+ case validate_peer_opts(Req, Opts) of
+ true -> do_nothing;
+ false -> throw(invalid_join_request)
+ end,
+ ?event(green_zone, {join, start}),
% Retrieve the commitment report and address from the join request.
Report = hb_ao:get(<<"report">>, Req, Opts),
NodeAddr = hb_ao:get(<<"address">>, Req, Opts),
@@ -496,89 +655,117 @@ validate_join(_M1, Req, Opts) ->
Error
end.
+%% @doc Validates that a peer's configuration matches required options.
+%%
+%% This function ensures the peer node meets configuration requirements:
+%% 1. Retrieves the local node's required configuration
+%% 2. Gets the peer's options from its message
+%% 3. Adds required configuration to peer's required options list
+%% 4. Verifies the peer's node history is valid
+%% 5. Checks that the peer's options match the required configuration
+%%
+%% @param Req The request message containing the peer's configuration
+%% @param Opts A map of the local node's configuration options
+%% @returns true if the peer's configuration is valid, false otherwise
+-spec validate_peer_opts(Req :: map(), Opts :: map()) -> boolean().
validate_peer_opts(Req, Opts) ->
?event(green_zone, {validate_peer_opts, start, Req}),
- % Get the required config from the local node's configuration.
- RequiredConfig =
+ % Get the required config from the local node's configuration.
+ RequiredConfig =
hb_ao:normalize_keys(
hb_opts:get(green_zone_required_opts, #{}, Opts)),
?event(green_zone, {validate_peer_opts, required_config, RequiredConfig}),
-
PeerOpts =
hb_ao:normalize_keys(
hb_ao:get(<<"node-message">>, Req, undefined, Opts)),
?event(green_zone, {validate_peer_opts, peer_opts, PeerOpts}),
-
% Add the required config itself to the required options of the peer. This
% enforces that the new peer will also enforce the required config on peers
% that join them.
- FullRequiredOpts = RequiredConfig#{
- green_zone_required_opts => RequiredConfig
- },
- ?event(green_zone, {validate_peer_opts, full_required_opts, FullRequiredOpts}),
-
+ FullRequiredOpts = RequiredConfig#{
+ green_zone_required_opts => RequiredConfig
+ },
+ ?event(green_zone,
+ {validate_peer_opts, full_required_opts, FullRequiredOpts}
+ ),
% Debug: Check if PeerOpts is a map
- ?event(green_zone, {validate_peer_opts, is_map_peer_opts, is_map(PeerOpts)}),
-
+ ?event(green_zone,
+ {validate_peer_opts, is_map_peer_opts, is_map(PeerOpts)}
+ ),
% Debug: Get node_history safely
NodeHistory = hb_ao:get(<<"node_history">>, PeerOpts, [], Opts),
?event(green_zone, {validate_peer_opts, node_history, NodeHistory}),
-
% Debug: Check length of node_history
- HistoryCheck = case is_list(NodeHistory) of
- true -> length(NodeHistory) =< 1;
+ case NodeHistory of
+ List when length(List) =< 1 ->
+ ?event(green_zone,
+ {validate_peer_opts, history_check, correct_length}
+ ),
+ % Debug: Try the match check separately
+ try
+ MatchCheck =
+ hb_message:match(PeerOpts, FullRequiredOpts, only_present) ==
+ true,
+ ?event(green_zone,
+ {validate_peer_opts, match_check, MatchCheck}
+ ),
+ % Final result
+ ?event(green_zone,
+ {validate_peer_opts, final_result, MatchCheck}
+ ),
+ MatchCheck
+ catch
+ Error:Reason:Stacktrace ->
+ ?event(green_zone,
+ {validate_peer_opts,
+ match_error,
+ {Error, Reason, Stacktrace}
+ }
+ ),
+ false
+ end;
false -> {error, not_a_list}
- end,
- ?event(green_zone, {validate_peer_opts, history_check, HistoryCheck}),
-
- % Debug: Try the match check separately
- MatchCheck = try
- Result = hb_message:match(PeerOpts, FullRequiredOpts, only_present),
- ?event(green_zone, {validate_peer_opts, match_check, Result}),
- Result
- catch
- Error:Reason:Stacktrace ->
- ?event(green_zone, {validate_peer_opts, match_error, {Error, Reason, Stacktrace}}),
- false
- end,
-
- % Final result
- FinalResult = MatchCheck andalso (HistoryCheck =:= true),
- ?event(green_zone, {validate_peer_opts, final_result, FinalResult}),
- FinalResult.
+ end.
-%% @doc Add a joining node's details to the trusted nodes list.
-%% Updates the local configuration with the new trusted node's commitment report
-%% and public key.
-%% @param NodeAddr The joining node's address.
-%% @param Report The commitment report provided by the joining node.
-%% @param RequesterPubKey The joining node's public key.
-%% @param Opts A map of configuration options.
-%% @returns ok.
+%% @doc Adds a node to the trusted nodes list with its commitment report.
+%%
+%% This function updates the trusted nodes configuration:
+%% 1. Retrieves the current trusted nodes map
+%% 2. Adds the new node with its report and public key
+%% 3. Updates the node configuration with the new trusted nodes list
+%%
+%% @param NodeAddr The joining node's address
+%% @param Report The commitment report provided by the joining node
+%% @param RequesterPubKey The joining node's public key
+%% @param Opts A map of configuration options
+%% @returns ok
-spec add_trusted_node(
NodeAddr :: binary(),
Report :: map(),
RequesterPubKey :: term(), Opts :: map()) -> ok.
add_trusted_node(NodeAddr, Report, RequesterPubKey, Opts) ->
- % Retrieve the current trusted nodes map.
- TrustedNodes = hb_opts:get(trusted_nodes, #{}, Opts),
- % Add the joining node's details to the trusted nodes.
- UpdatedTrustedNodes = maps:put(NodeAddr, #{
- report => Report,
- public_key => RequesterPubKey
- }, TrustedNodes),
- % Update configuration with the new trusted nodes and AES key.
- ok = hb_http_server:set_opts(Opts#{
- trusted_nodes => UpdatedTrustedNodes
- }).
+ % Retrieve the current trusted nodes map.
+ TrustedNodes = hb_opts:get(trusted_nodes, #{}, Opts),
+ % Add the joining node's details to the trusted nodes.
+ UpdatedTrustedNodes = maps:put(NodeAddr, #{
+ report => Report,
+ public_key => RequesterPubKey
+ }, TrustedNodes),
+ % Update configuration with the new trusted nodes and AES key.
+ ok = hb_http_server:set_opts(Opts#{
+ trusted_nodes => UpdatedTrustedNodes
+ }).
-%% @doc Encrypt the shared AES key with the requester's RSA public key.
-%% Encrypts the shared AES key using the RSA public key provided by the joining
-%% node. The RSA public key is extracted from a tuple and converted into a
-%% record suitable for encryption.
-%% @param AESKey The shared AES key (256-bit binary).
-%% @param RequesterPubKey The requester's public RSA key.
-%% @returns The AES key encrypted with the RSA public key.
+%% @doc Encrypts an AES key with a node's RSA public key.
+%%
+%% This function securely encrypts the shared key for transmission:
+%% 1. Extracts the RSA public key components
+%% 2. Creates an RSA public key record
+%% 3. Performs public key encryption on the AES key
+%%
+%% @param AESKey The shared AES key (256-bit binary)
+%% @param RequesterPubKey The node's public RSA key
+%% @returns The encrypted AES key
-spec encrypt_payload(AESKey :: binary(), RequesterPubKey :: term()) -> binary().
encrypt_payload(AESKey, RequesterPubKey) ->
?event(green_zone, {encrypt_payload, start}),
@@ -592,12 +779,17 @@ encrypt_payload(AESKey, RequesterPubKey) ->
?event(green_zone, {encrypt_payload, complete}),
Encrypted.
-%% @doc Decrypt the zone AES key using the node's RSA private key.
-%% Decrypts the encrypted zone AES key using the RSA private key from the node's
-%% wallet.
-%% @param EncZoneKey The encrypted zone AES key (Base64 encoded or binary).
-%% @param Opts A map of configuration options.
-%% @returns {ok, DecryptedKey} on success, where DecryptedKey is the shared AES key.
+%% @doc Decrypts an AES key using the node's RSA private key.
+%%
+%% This function handles decryption of the zone key:
+%% 1. Decodes the encrypted key if it's in Base64 format
+%% 2. Extracts the RSA private key components from the wallet
+%% 3. Creates an RSA private key record
+%% 4. Performs private key decryption on the encrypted key
+%%
+%% @param EncZoneKey The encrypted zone AES key (Base64 encoded or binary)
+%% @param Opts A map of configuration options
+%% @returns {ok, DecryptedKey} on success with the decrypted AES key
-spec decrypt_zone_key(EncZoneKey :: binary(), Opts :: map()) ->
{ok, binary()} | {error, binary()}.
decrypt_zone_key(EncZoneKey, Opts) ->
@@ -607,7 +799,8 @@ decrypt_zone_key(EncZoneKey, Opts) ->
false -> EncZoneKey
end,
% Get wallet and extract key components
- {{_KeyType = {rsa, E}, Priv, Pub}, _PubKey} = hb_opts:get(priv_wallet, #{}, Opts),
+ {{_KeyType = {rsa, E}, Priv, Pub}, _PubKey} =
+ hb_opts:get(priv_wallet, #{}, Opts),
% Create RSA private key record
RSAPrivKey = #'RSAPrivateKey'{
publicExponent = E,
@@ -618,8 +811,36 @@ decrypt_zone_key(EncZoneKey, Opts) ->
?event(green_zone, {decrypt_zone_key, complete}),
{ok, DecryptedKey}.
+%% @doc Attempts to mount an encrypted volume using the green zone AES key.
+%%
+%% This function handles the complete process of secure storage setup by
+%% delegating to the dev_volume module, which provides a unified interface
+%% for volume management.
+%%
+%% The encryption key used for the volume is the same AES key used for green zone
+%% communication, ensuring that only nodes in the green zone can access the data.
+%%
+%% @param AESKey The AES key obtained from joining the green zone.
+%% @param Opts A map of configuration options.
+%% @returns ok (implicit) in all cases, with detailed event logs of the results.
+try_mount_encrypted_volume(AESKey, Opts) ->
+ ?event(green_zone, {try_mount_encrypted_volume, start}),
+ % Set up options for volume mounting with default paths
+ VolumeOpts = Opts#{
+ volume_key => AESKey
+ },
+ % Call the dev_volume:mount function to handle the complete process
+ case dev_volume:mount(undefined, undefined, VolumeOpts) of
+ {ok, Result} ->
+ ?event(green_zone, {volume_mount, success, Result}),
+ ok;
+ {error, Error} ->
+ ?event(green_zone, {volume_mount, error, Error}),
+ ok % Still return ok as this is an optional operation
+ end.
%% @doc Test RSA operations with the existing wallet structure.
+%%
%% This test function verifies that encryption and decryption using the RSA keys
%% from the wallet work correctly. It creates a new wallet, encrypts a test
%% message with the RSA public key, and then decrypts it with the RSA private
@@ -648,4 +869,5 @@ rsa_wallet_integration_test() ->
% Verify roundtrip
?assertEqual(PlainText, Decrypted),
% Verify wallet structure
- ?assertEqual(KeyType, {rsa, 65537}).
\ No newline at end of file
+ ?assertEqual(KeyType, {rsa, 65537}).
+
diff --git a/src/dev_hook.erl b/src/dev_hook.erl
new file mode 100644
index 000000000..2cb17f3e9
--- /dev/null
+++ b/src/dev_hook.erl
@@ -0,0 +1,287 @@
+%%% @doc A generalized interface for `hooking' into HyperBEAM nodes.
+%%%
+%%% This module allows users to define `hooks' that are executed at various
+%%% points in the lifecycle of nodes and message evaluations.
+%%%
+%%% Hooks are maintained in the `node message' options, under the `on'
+%%% key. Each `hook' may have zero or many `handlers' against which their
+%%% request is executed. A new `handler' of a hook can be registered by simply
+%%% adding a new key to that message. If multiple hooks need to be executed for
+%%% a single event, the key's value can be set to a list of hooks.
+%%%
+%%% `hook's themselves do not need to be added explicitly. Any device can add
+%%% a hook by simply executing `dev_hook:on(HookName, Req, Opts)`. This
+%%% function is does not affect the hashpath of a message and is not exported on
+%%% the device's API, such that it is not possible to call it directly with
+%%% AO-Core resolution.
+%%%
+%%% All handlers are expressed in the form of a message, upon which the hook's
+%%% request is evaluated:
+%%%
+%%% AO(HookMsg, Req, Opts) => {Status, Result}
+%%%
+%%% The `Status' and `Result' of the evaluation can be used at the `hook' caller's
+%%% discretion. If multiple handlers are to be executed for a single `hook', the
+%%% result of each is used as the input to the next, on the assumption that the
+%%% status of the previous is `ok'. If a non-`ok' status is encountered, the
+%%% evaluation is halted and the result is returned to the caller. This means
+%%% that in most cases, hooks take the form of chainable pipelines of functions,
+%%% passing the most pertinent data in the `body' key of both the request and
+%%% result. Hook definitions can also set the `hook/result' key to `ignore', if
+%%% the result of the execution should be discarded and the prior value (the
+%%% input to the hook) should be used instead. The `hook/commit-request' key can
+%%% also be set to `true' if the request should be committed by the node before
+%%% execution of the hook.
+%%%
+%%% The default HyperBEAM node implements several useful hooks. They include:
+%%%
+%%% start: Executed when the node starts.
+%%% Req/body: The node's initial configuration.
+%%% Result/body: The node's possibly updated configuration.
+%%% request: Executed when a request is received via the HTTP API.
+%%% Req/body: The sequence of messages that the node will evaluate.
+%%% Req/request: The raw, unparsed singleton request.
+%%% Result/body: The sequence of messages that the node will evaluate.
+%%% step: Executed after each message in a sequence has been evaluated.
+%%% Req/body: The result of the evaluation.
+%%% Result/body: The result of the evaluation.
+%%% response: Executed when a response is sent via the HTTP API.
+%%% Req/body: The result of the evaluation.
+%%% Req/request: The raw, unparsed singleton request that was used to
+%%% generate the response.
+%%% Result/body: The message to be sent in response to the request.
+%%%
+%%% Additionally, this module implements a traditional device API, allowing the
+%%% node operator to register hooks to the node and find those that are
+%%% currently active.
+-module(dev_hook).
+-export([info/1, on/3, find/2, find/3]).
+-include("include/hb.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%% @doc Device API information
+info(_) ->
+ #{ excludes => [<<"on">>] }.
+
+%% @doc Execute a named hook with the provided request and options
+%% This function finds all handlers for the hook and evaluates them in sequence.
+%% The result of each handler is used as input to the next handler.
+on(HookName, Req, Opts) ->
+ ?event(hook, {attempting_execution_for_hook, HookName}),
+ % Get all handlers for this hook from the options
+ Handlers = find(HookName, Opts),
+ % If no handlers are found, return the original request with ok status
+ case Handlers of
+ [] ->
+ ?event(hook, {no_handlers_for_hook, HookName}),
+ {ok, Req};
+ _ ->
+ % Execute each handler in sequence, passing the result of each to
+ % the next as input.
+ execute_handlers(HookName, Handlers, Req, Opts)
+ end.
+
+%% @doc Get all handlers for a specific hook from the node message options.
+%% Handlers are stored in the `on' key of this message. The `find/2' variant of
+%% this function only takes a hook name and node message, and is not called
+%% directly via the device API. Instead it is used by `on/3' and other internal
+%% functionality to find handlers when necessary. The `find/3' variant can,
+%% however, be called directly via the device API.
+find(HookName, Opts) ->
+ find(#{}, #{ <<"target">> => <<"body">>, <<"body">> => HookName }, Opts).
+find(_Base, Req, Opts) ->
+ HookName = maps:get(maps:get(<<"target">>, Req, <<"body">>), Req),
+ case maps:get(HookName, hb_opts:get(on, #{}, Opts), []) of
+ Handler when is_map(Handler) ->
+ % If a single handler is found, wrap it in a list.
+ [Handler];
+ Handlers when is_list(Handlers) ->
+ % If multiple handlers are found, return them as is
+ Handlers;
+ _ ->
+ % If no handlers are found or the value is invalid, return an empty
+ % list.
+ []
+ end.
+
+%% @doc Execute a list of handlers in sequence.
+%% The result of each handler is used as input to the next handler.
+%% If a handler returns a non-ok status, execution is halted.
+execute_handlers(_HookName, [], Req, _Opts) ->
+ % If no handlers remain, return the final request with ok status
+ {ok, Req};
+execute_handlers(HookName, [Handler|Rest], Req, Opts) ->
+ % Execute the current handler
+ ?event(hook, {executing_handler, HookName, Handler, Req}),
+ % Check the status of the execution
+ case execute_handler(HookName, Handler, Req, Opts) of
+ {ok, NewReq} ->
+ % If status is ok, continue with the next handler
+ ?event(hook, {handler_executed_successfully, HookName, NewReq}),
+ execute_handlers(HookName, Rest, NewReq, Opts);
+ {error, _} = Error ->
+ % If status is error, halt execution and return the error
+ ?event({handler_error, HookName, Error}),
+ Error;
+ Other ->
+ % If status is unknown, convert to error and halt execution
+ ?event({unexpected_handler_result, HookName, Other}),
+ {failure,
+ <<
+ "Handler for hook `",
+ (hb_ao:normalize_key(HookName))/binary,
+ "` returned unexpected result."
+ >>
+ }
+ end.
+
+%% @doc Execute a single handler
+%% Handlers are expressed as messages that can be resolved via AO.
+execute_handler(<<"step">>, Handler, Req, Opts = #{ on := On = #{ <<"step">> := _ }}) ->
+ % The `step' hook is a special case: It is executed during the course of
+ % a resolution, and as such, the key must be removed from the node message
+ % before execution of the handler. Failure to do so will result in infinite
+ % recursion.
+ execute_handler(
+ <<"step">>,
+ maps:remove(<<"step">>, Handler),
+ Req,
+ Opts#{ on => maps:remove(<<"step">>, On) }
+ );
+execute_handler(HookName, Handler, Req, Opts) ->
+ try
+ % Resolve the handler message, setting the path to the handler name if
+ % it is not already set. We ensure to ignore the hashpath such that the
+ % handler does not affect the hashpath of a request's output. If the
+ % `hook/commit` key is set to `true`, the handler request will be
+ % committed before execution.
+ BaseReq =
+ Req#{
+ <<"path">> => hb_ao:get(<<"path">>, Handler, HookName, Opts),
+ <<"method">> => hb_ao:get(<<"method">>, Handler, <<"GET">>, Opts)
+ },
+ PreparedReq =
+ case hb_ao:get(<<"hook/commit-request">>, Handler, false, Opts) of
+ true -> hb_message:commit(BaseReq, Opts);
+ false -> BaseReq
+ end,
+ ?event(hook,
+ {resolving_handler,
+ {name, HookName},
+ {handler, Handler},
+ {req, {explicit, PreparedReq}}
+ }
+ ),
+ % Resolve the prepared request upon the handler.
+ {Status, Res} =
+ hb_ao:resolve(
+ Handler,
+ PreparedReq,
+ Opts#{ hashpath => ignore }
+ ),
+ ?event(hook,
+ {handler_result,
+ {name, HookName},
+ {status, Status},
+ {res, Res}
+ }
+ ),
+ case {Status, hb_ao:get(<<"hook/result">>, Handler, <<"return">>, Opts)} of
+ {ok, <<"ignore">>} -> {Status, Req};
+ {ok, <<"return">>} -> {Status, Res};
+ {ok, <<"error">>} -> {error, Res};
+ _ -> {Status, Res}
+ end
+ catch
+ Error:Reason:Stacktrace ->
+ % If an exception occurs during execution, log it and return an error.
+ ?event(hook, {handler_exception, Error, Reason, Stacktrace}),
+ {failure, <<
+ "Handler for hook `",
+ (hb_ao:normalize_key(HookName))/binary,
+ "` raised an exception: ",
+ (iolist_to_binary(io_lib:format("~p:~p", [Error, Reason])))/binary
+ >>}
+ end.
+
+%%% Tests
+
+%% @doc Test that hooks with no handlers return the original request
+no_handlers_test() ->
+ Req = #{ <<"test">> => <<"value">> },
+ Opts = #{},
+ {ok, Result} = on(<<"test_hook">>, Req, Opts),
+ ?assertEqual(Req, Result).
+
+%% @doc Test that a single handler is executed correctly
+single_handler_test() ->
+ % Create a message with a mock handler that adds a key to the request.
+ Handler = #{
+ <<"device">> => #{
+ <<"test-hook">> =>
+ fun(_, Req, _) ->
+ {ok, Req#{ <<"handler_executed">> => true }}
+ end
+ }
+ },
+ Req = #{ <<"test">> => <<"value">> },
+ Opts = #{ on => #{ <<"test-hook">> => Handler }},
+ {ok, Result} = on(<<"test-hook">>, Req, Opts),
+ ?assertEqual(true, maps:get(<<"handler_executed">>, Result)).
+
+%% @doc Test that multiple handlers form a pipeline
+multiple_handlers_test() ->
+ % Create mock handlers that modify the request in sequence
+ Handler1 = #{
+ <<"device">> => #{
+ <<"test-hook">> =>
+ fun(_, Req, _) ->
+ {ok, Req#{ <<"handler1">> => true }}
+ end
+ }
+ },
+ Handler2 = #{
+ <<"device">> => #{
+ <<"test-hook">> =>
+ fun(_, Req, _) ->
+ {ok, Req#{ <<"handler2">> => true }}
+ end
+ }
+ },
+ Req = #{ <<"test">> => <<"value">> },
+ Opts = #{ on => #{ <<"test-hook">> => [Handler1, Handler2] }},
+ {ok, Result} = on(<<"test-hook">>, Req, Opts),
+ ?assertEqual(true, maps:get(<<"handler1">>, Result)),
+ ?assertEqual(true, maps:get(<<"handler2">>, Result)).
+
+%% @doc Test that pipeline execution halts on error
+halt_on_error_test() ->
+ % Create handlers where the second one returns an error
+ Handler1 = #{
+ <<"device">> => #{
+ <<"test-hook">> =>
+ fun(_, Req, _) ->
+ {ok, Req#{ <<"handler1">> => true }}
+ end
+ }
+ },
+ Handler2 = #{
+ <<"device">> => #{
+ <<"test-hook">> =>
+ fun(_, _, _) ->
+ {error, <<"Error in handler2">>}
+ end
+ }
+ },
+ Handler3 = #{
+ <<"device">> => #{
+ <<"test-hook">> =>
+ fun(_, Req, _) ->
+ {ok, Req#{ <<"handler3">> => true }}
+ end
+ }
+ },
+ Req = #{ <<"test">> => <<"value">> },
+ Opts = #{ on => #{ <<"test-hook">> => [Handler1, Handler2, Handler3] }},
+ {error, Result} = on(<<"test-hook">>, Req, Opts),
+ ?assertEqual(<<"Error in handler2">>, Result).
\ No newline at end of file
diff --git a/src/dev_hyperbuddy.erl b/src/dev_hyperbuddy.erl
index 3af5af5e6..ad98bb05d 100644
--- a/src/dev_hyperbuddy.erl
+++ b/src/dev_hyperbuddy.erl
@@ -10,11 +10,13 @@ info() ->
routes => #{
<<"index">> => <<"index.html">>,
<<"console">> => <<"console.html">>,
+ <<"graph">> => <<"graph.html">>,
<<"styles.css">> => <<"styles.css">>,
<<"metrics.js">> => <<"metrics.js">>,
<<"devices.js">> => <<"devices.js">>,
<<"utils.js">> => <<"utils.js">>,
- <<"main.js">> => <<"main.js">>
+ <<"main.js">> => <<"main.js">>,
+ <<"graph.js">> => <<"graph.js">>
}
}.
@@ -47,6 +49,7 @@ format(Base, _, _) ->
%% listed in the `routes' field of the `info/0' return value.
serve(<<"keys">>, M1, _M2, _Opts) -> dev_message:keys(M1);
serve(<<"set">>, M1, M2, Opts) -> dev_message:set(M1, M2, Opts);
+serve(<<"graph-data">>, _, _, Opts) -> hb_cache_render:get_graph_data(Opts);
serve(Key, _, _, _) ->
?event({hyperbuddy_serving, Key}),
case maps:get(Key, maps:get(routes, info(), no_routes), undefined) of
diff --git a/src/dev_json_iface.erl b/src/dev_json_iface.erl
index 71edda2f8..01e40c0af 100644
--- a/src/dev_json_iface.erl
+++ b/src/dev_json_iface.erl
@@ -133,12 +133,18 @@ message_to_json_struct(RawMsg, Features) ->
end,
Data = hb_ao:get(<<"data">>, {as, <<"message@1.0">>, MsgWithoutCommitments}, <<>>, #{}),
Target = hb_ao:get(<<"target">>, {as, <<"message@1.0">>, MsgWithoutCommitments}, <<>>, #{}),
+
+ % Ethereum addresses are already encoded
+ EncodedOwner = case byte_size(Owner) of
+ 42 -> Owner;
+ _ -> hb_util:encode(Owner)
+ end,
% Set "From" if From-Process is Tag or set with "Owner" address
From =
hb_ao:get(
<<"from-process">>,
{as, <<"message@1.0">>, MsgWithoutCommitments},
- hb_util:encode(Owner),
+ EncodedOwner,
#{}
),
Sig = hb_ao:get(<<"signature">>, {as, <<"message@1.0">>, MsgWithoutCommitments}, <<>>, #{}),
@@ -147,7 +153,7 @@ message_to_json_struct(RawMsg, Features) ->
% NOTE: In Arweave TXs, these are called "last_tx"
<<"Anchor">> => Last,
% NOTE: When sent to ao "Owner" is the wallet address
- <<"Owner">> => hb_util:encode(Owner),
+ <<"Owner">> => EncodedOwner,
<<"From">> => case ?IS_ID(From) of true -> safe_to_id(From); false -> From end,
<<"Tags">> => prepare_tags(TABM),
<<"Target">> => safe_to_id(Target),
diff --git a/src/dev_local_name.erl b/src/dev_local_name.erl
index 1a4d20f0c..61401b916 100644
--- a/src/dev_local_name.erl
+++ b/src/dev_local_name.erl
@@ -35,7 +35,7 @@ default_lookup(Key, _, Req, Opts) ->
%% @doc Takes a `key' and `value' argument and registers the name. The caller
%% must be the node operator in order to register a name.
register(_, Req, Opts) ->
- case dev_meta:is_operator(Req, Opts) of
+ case dev_meta:is(admin, Req, Opts) of
false ->
{error,
#{
diff --git a/src/dev_lookup.erl b/src/dev_lookup.erl
index 405883905..bb009d16b 100644
--- a/src/dev_lookup.erl
+++ b/src/dev_lookup.erl
@@ -5,6 +5,7 @@
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
+%% @doc Fetch a resource from the cache using the "target" ID extracted from the message.
read(_M1, M2, Opts) ->
ID = hb_ao:get(<<"target">>, M2, Opts),
?event({lookup, {id, ID}, {opts, Opts}}),
diff --git a/src/dev_lua.erl b/src/dev_lua.erl
index 9e13ba3b6..18ac19b1e 100644
--- a/src/dev_lua.erl
+++ b/src/dev_lua.erl
@@ -1,10 +1,12 @@
-%%% @doc A device that calls a Lua script upon a request and returns the result.
+%%% @doc A device that calls a Lua module upon a request and returns the result.
-module(dev_lua).
--export([info/1, init/3, snapshot/3, normalize/3]).
+-export([info/1, init/3, snapshot/3, normalize/3, functions/3]).
+%%% Public Utilities
+-export([encode/1, decode/1]).
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
-%%% The set of functions that will be sandboxed by default if `sandbox` is set
-%%% to only `true`. Setting `sandbox` to a map allows the invoker to specify
+%%% The set of functions that will be sandboxed by default if `sandbox' is set
+%%% to only `true'. Setting `sandbox' to a map allows the invoker to specify
%%% which functions should be sandboxed and what to return instead. Providing
%%% a list instead of a map will result in all functions being sandboxed and
%%% returning `sandboxed'.
@@ -27,11 +29,15 @@
]).
%% @doc All keys that are not directly available in the base message are
-%% resolved by calling the Lua function in the script of the same name.
+%% resolved by calling the Lua function in the module of the same name.
+%% Additionally, we exclude the `keys', `set', `encode' and `decode' functions
+%% which are `message@1.0' core functions, and Lua public utility functions.
info(Base) ->
#{
default => fun compute/4,
- excludes => [<<"keys">>, <<"set">>] ++ maps:keys(Base)
+ excludes =>
+ [<<"keys">>, <<"set">>, <<"encode">>, <<"decode">>]
+ ++ maps:keys(Base)
}.
%% @doc Initialize the device state, loading the script into memory if it is
@@ -39,34 +45,8 @@ info(Base) ->
init(Base, Req, Opts) ->
ensure_initialized(Base, Req, Opts).
-%% @doc Find the script in the base message, either by ID or by string.
-find_script(Base, Opts) ->
- case hb_ao:get(<<"script-id">>, {as, <<"message@1.0">>, Base}, Opts) of
- not_found ->
- case hb_ao:get(<<"script">>, {as, <<"message@1.0">>, Base}, Opts) of
- not_found ->
- {error, <<"missing-script-id-or-script">>};
- Script ->
- {ok, Script}
- end;
- ScriptID ->
- case hb_cache:read(ScriptID, Opts) of
- {ok, Script} ->
- Data = hb_ao:get(<<"data">>, Script, #{}),
- ?event(debug_lua, { data, Data}),
- {ok, Data};
- {error, Error} ->
- {error, #{
- <<"status">> => 404,
- <<"body">> =>
- <<"Lua script '", ScriptID/binary, "' not available.">>,
- <<"cache-error">> => Error
- }}
- end
- end.
-
%% @doc Initialize the Lua VM if it is not already initialized. Optionally takes
-%% the script as a Binary string. If not provided, the script will be loaded
+%% the script as a Binary string. If not provided, the module will be loaded
%% from the base message.
ensure_initialized(Base, _Req, Opts) ->
case hb_private:from_message(Base) of
@@ -75,28 +55,152 @@ ensure_initialized(Base, _Req, Opts) ->
{ok, Base};
_ ->
?event(debug_lua, initializing_lua_state),
- case find_script(Base, Opts) of
- {ok, Script} ->
- initialize(Base, Script, Opts);
+ case find_modules(Base, Opts) of
+ {ok, Modules} ->
+ initialize(Base, Modules, Opts);
Error ->
Error
end
end.
-%% @doc Initialize a new Lua state with a given base message and script.
-initialize(Base, Script, Opts) ->
+%% @doc Find the script in the base message, either by ID or by string.
+find_modules(Base, Opts) ->
+ case hb_ao:get(<<"module">>, {as, <<"message@1.0">>, Base}, Opts) of
+ not_found ->
+ {error, <<"no-modules-found">>};
+ Module when is_binary(Module) ->
+ find_modules(Base#{ <<"module">> => [Module] }, Opts);
+ Module when is_map(Module) ->
+ % If the module is a map, check its content type to see if it is
+ % a literal Lua module, or a map of modules with content types.
+ case hb_ao:get(<<"content-type">>, Module, Opts) of
+ CT when CT == <<"application/lua">> orelse CT == <<"text/x-lua">> ->
+ find_modules(Base#{ <<"module">> => [Module] }, Opts);
+ _ ->
+                % If the module is not a literal Lua module, assume it is a
+                % map of modules with content types, and recurse.
+ find_modules(Base#{ <<"module">> => maps:values(Module) }, Opts)
+ end;
+ Modules when is_list(Modules) ->
+            % We have found a list of modules, load them.
+ load_modules(Modules, Opts)
+ end.
+
+%% @doc Load a list of modules for installation into the Lua VM.
+load_modules(Modules, Opts) -> load_modules(Modules, Opts, []).
+load_modules([], _Opts, Acc) ->
+ {ok, lists:reverse(Acc)};
+load_modules([ModuleID | Rest], Opts, Acc) when ?IS_ID(ModuleID) ->
+ case hb_cache:read(ModuleID, Opts) of
+ {ok, Module} when is_binary(Module) ->
+ % The ID referred to a binary module item, so we add it to the list
+ % as-is.
+ load_modules(Rest, Opts, [{ModuleID, Module}|Acc]);
+ {ok, ModuleMsg} when is_map(ModuleMsg) ->
+ % We read a message from the store, so we recurse upon the output,
+            % as if the module message had been given directly.
+ load_modules([ModuleMsg|Rest], Opts, Acc);
+ not_found ->
+ {error, #{
+ <<"status">> => 404,
+ <<"body">> => <<"Lua module '", ModuleID/binary, "' not found.">>
+ }}
+ end;
+load_modules([Module | Rest], Opts, Acc) when is_map(Module) ->
+ % We have found a message with a Lua module inside. Search for the binary
+ % of the program in the body and the data.
+ ModuleBin =
+ hb_ao:get_first(
+ [
+ {Module, <<"body">>},
+ {Module, <<"data">>}
+ ],
+ Module,
+ Opts
+ ),
+ case ModuleBin of
+ not_found ->
+ {error, #{
+ <<"status">> => 404,
+ <<"body">> =>
+ <<
+ """
+ Lua module not loadable. Lua modules must have a
+ `body' element set to a binary of the code to load.
+ """
+ >>,
+ <<"module">> => Module
+ }};
+ ModuleBin ->
+ % Get the `name' key from the script message if it exists, or
+ % return the module ID as the module name.
+ Name =
+ hb_ao:get_first(
+ [
+ {Module, <<"name">>},
+ {Module, <<"id">>}
+ ],
+ Module,
+ Opts
+ ),
+ % Load the module into the Lua state.
+ load_modules(Rest, Opts, [{Name, ModuleBin}|Acc])
+ end.
+
+%% @doc Initialize a new Lua state with a given base message and module.
+initialize(Base, Modules, Opts) ->
State0 = luerl:init(),
- {ok, _, State1} = luerl:do_dec(Script, State0),
+ % Load each script into the Lua state.
+ State1 =
+ lists:foldl(
+ fun({ModuleID, ModuleBin}, StateIn) ->
+ {ok, _, StateOut} =
+ luerl:do_dec(
+ ModuleBin,
+ [{name, hb_util:list(ModuleID)}],
+ StateIn
+ ),
+ StateOut
+ end,
+ State0,
+ Modules
+ ),
+ % Apply any sandboxing rules to the state.
State2 =
case hb_ao:get(<<"sandbox">>, {as, <<"message@1.0">>, Base}, false, Opts) of
false -> State1;
true -> sandbox(State1, ?DEFAULT_SANDBOX, Opts);
Spec -> sandbox(State1, Spec, Opts)
end,
- {ok, State3} = add_ao_core_resolver(Base, State2, Opts),
+ % Install the AO-Core Lua library into the state.
+ {ok, State3} = dev_lua_lib:install(Base, State2, Opts),
% Return the base message with the state added to it.
{ok, hb_private:set(Base, <<"state">>, State3, Opts)}.
+%%% @doc Return a list of all functions in the Lua environment.
+functions(Base, _Req, Opts) ->
+ case hb_private:get(<<"state">>, Base, Opts) of
+ not_found ->
+ {error, not_found};
+ State ->
+ {ok, [Res], _S2} =
+ luerl:do_dec(
+ <<
+ """
+ local __tests = {}
+ for k, v in pairs(_G) do
+ if type(v) == "function" then
+ table.insert(__tests, k)
+ end
+ end
+ return __tests
+ """
+ >>,
+ State
+ ),
+ {ok, hb_util:message_to_ordered_list(decode(Res))}
+ end.
+
%% @doc Sandbox (render inoperable) a set of Lua functions. Each function is
%% referred to as if it is a path in AO-Core, with its value being what to
%% return to the caller. For example, 'os.exit' would be referred to as
@@ -113,76 +217,6 @@ sandbox(State, [Path | Rest], Opts) ->
{ok, NextState} = luerl:set_table_keys_dec(Path, <<"sandboxed">>, State),
sandbox(NextState, Rest, Opts).
-%% @doc Add a HTTP-style AO-Core resolution function to the Lua environment.
-%% Optionally, limit the devices that the environment can make use of.
-add_ao_core_resolver(Base, State, Opts) ->
- % Calculate and set the new `preloaded_devices' option.
- AllDevs = hb_opts:get(preloaded_devices, Opts),
- DevSandboxDef =
- hb_ao:get(
- <<"device-sandbox">>,
- {as, <<"message@1.0">>, Base},
- false,
- Opts
- ),
- AdmissibleDevs =
- case DevSandboxDef of
- false -> AllDevs;
- DevNames ->
- lists:map(
- fun(Name) ->
- [Dev] =
- lists:filter(
- fun(X) ->
- hb_ao:get(<<"name">>, X, Opts) == Name
- end,
- AllDevs
- ),
- Dev
- end,
- hb_util:message_to_ordered_list(DevNames)
- )
- end,
- ?event({adding_ao_core_resolver, {devs, AdmissibleDevs}}),
- ExecOpts = Opts#{ preloaded_devices => AdmissibleDevs },
- % Initialize the AO-Core resolver.
- BaseAOTable =
- case luerl:get_table_keys_dec([ao], State) of
- {ok, nil, _} ->
- ?event(no_ao_table),
- #{};
- {ok, ExistingTable, _} ->
- ?event({existing_ao_table, ExistingTable}),
- decode(ExistingTable)
- end,
- ?event({base_ao_table, BaseAOTable}),
- {ok, State2} =
- luerl:set_table_keys_dec(
- [ao],
- encode(BaseAOTable),
- State
- ),
- % Add the AO-Core resolver to the base AO table.
- luerl:set_table_keys_dec(
- [ao, resolve],
- fun([EncodedMsg], ExecState) ->
- AOMsg = decode(luerl:decode(EncodedMsg, ExecState)),
- ?event({ao_core_resolver, {msg, AOMsg}}),
- ParsedMsgs = hb_singleton:from(AOMsg),
- ?event({parsed_msgs_to_resolve, ParsedMsgs}),
- try hb_ao:resolve_many(ParsedMsgs, ExecOpts) of
- {Status, Res} ->
- ?event({resolved_msgs, {status, Status}, {res, Res}}),
- {[Status, encode(Res)], ExecState}
- catch
- Error ->
- ?event({ao_core_resolver_error, Error}),
- {error, Error}
- end
- end,
- State2
- ).
-
%% @doc Call the Lua script with the given arguments.
compute(Key, RawBase, Req, Opts) ->
?event(debug_lua, compute_called),
@@ -220,29 +254,65 @@ compute(Key, RawBase, Req, Opts) ->
),
?event(debug_lua, parameters_found),
% Call the VM function with the given arguments.
- ?event({calling_lua_func, {function, Function}, {args, Params}, {req, Req}}),
- ?event(debug_lua, calling_lua_func),
- % ?event(debug_lua, {lua_params, Params}),
- case luerl:call_function_dec([Function], encode(Params), State) of
- {ok, [LuaResult], NewState} when is_map(LuaResult) ->
- ?event(debug_lua, got_lua_result),
- Result = decode(LuaResult),
- ?event(debug_lua, decoded_result),
- {ok, Result#{
- <<"priv">> => OldPriv#{
+ ?event(lua,
+ {calling_lua_func,
+ {function, Function},
+ {args, Params},
+ {req, Req}
+ }
+ ),
+ process_response(
+ try luerl:call_function_dec(
+ [Function],
+ encode(Params),
+ State
+ )
+ catch
+ _:Reason:Stacktrace -> {error, Reason, Stacktrace}
+ end,
+ OldPriv
+ ).
+
+%% @doc Process a response to a Luerl invocation. Returns the typical AO-Core
+%% HyperBEAM response format.
+process_response({ok, [Result], NewState}, Priv) ->
+ process_response({ok, [<<"ok">>, Result], NewState}, Priv);
+process_response({ok, [Status, MsgResult], NewState}, Priv) ->
+ % If the result is a HyperBEAM device return (`{Status, Msg}'), decode it
+ % and add the previous `priv' element back into the resulting message.
+ case decode(MsgResult) of
+ Msg when is_map(Msg) ->
+ ?event(lua, {response, {status, Status}, {msg, Msg}}),
+ {hb_util:atom(Status), Msg#{
+ <<"priv">> => Priv#{
<<"state">> => NewState
}
}};
- {ok, [LuaResult], _NewState} ->
- ?event(debug_lua, got_lua_result),
- {ok, LuaResult};
- {lua_error, Error, Details} ->
- {error, #{
- <<"status">> => 500,
- <<"body">> => Error,
- <<"details">> => Details
- }}
- end.
+ NonMsgRes -> {hb_util:atom(Status), NonMsgRes}
+ end;
+process_response({lua_error, RawError, State}, _Priv) ->
+ % An error occurred while calling the Lua function. Parse the stack trace
+ % and return it.
+ Error = try decode(luerl:decode(RawError, State)) catch _:_ -> RawError end,
+ StackTrace = decode_stacktrace(luerl:get_stacktrace(State), State),
+ ?event(lua_error, {lua_error, Error, {stacktrace, StackTrace}}),
+ {error, #{
+ <<"status">> => 500,
+ <<"body">> => Error,
+ <<"trace">> => hb_ao:normalize_keys(StackTrace)
+ }};
+process_response({error, Reason, Trace}, _Priv) ->
+ % An Erlang error occurred while calling the Lua function. Return it.
+ ?event(lua_error, {trace, Trace}),
+ TraceBin = iolist_to_binary(hb_util:format_trace(Trace)),
+ ?event(lua_error, {formatted, TraceBin}),
+ ReasonBin = iolist_to_binary(io_lib:format("~p", [Reason])),
+ {error, #{
+ <<"status">> => 500,
+ <<"body">> =>
+ << "Erlang error while running Lua: ", ReasonBin/binary >>,
+ <<"trace">> => TraceBin
+ }}.
%% @doc Snapshot the Lua state from a live computation. Normalizes its `priv'
%% state element, then serializes the state to a binary.
@@ -293,34 +363,143 @@ normalize(Base, _Req, RawOpts) ->
end.
%% @doc Decode a Lua result into a HyperBEAM `structured@1.0' message.
-decode(Map = [{_K, _V} | _]) when is_list(Map) ->
- maps:map(fun(_, V) -> decode(V) end, maps:from_list(Map));
+decode(EncMsg = [{_K, _V} | _]) when is_list(EncMsg) ->
+ decode(maps:map(fun(_, V) -> decode(V) end, maps:from_list(EncMsg)));
+decode(Msg) when is_map(Msg) ->
+ % If the message is an ordered list encoded as a map, decode it to a list.
+ case hb_util:is_ordered_list(Msg) of
+ true ->
+ lists:map(fun decode/1, hb_util:message_to_ordered_list(Msg));
+ false ->
+ Msg
+ end;
decode(Other) ->
Other.
-%% @doc Encode a HyperBEAM `structured@1.0' message into a Lua result.
+%% @doc Encode a HyperBEAM `structured@1.0' message into a Lua term.
encode(Map) when is_map(Map) ->
- maps:to_list(maps:map(fun(_, V) -> encode(V) end, Map));
+ case hb_util:is_ordered_list(Map) of
+ true -> encode(hb_util:message_to_ordered_list(Map));
+ false -> maps:to_list(maps:map(fun(_, V) -> encode(V) end, Map))
+ end;
encode(List) when is_list(List) ->
lists:map(fun encode/1, List);
+encode(Atom) when is_atom(Atom) and (Atom /= false) and (Atom /= true)->
+ hb_util:bin(Atom);
encode(Other) ->
Other.
+%% @doc Parse a Lua stack trace into a list of messages.
+decode_stacktrace(StackTrace, State0) ->
+ decode_stacktrace(StackTrace, State0, []).
+decode_stacktrace([], _State, Acc) ->
+ lists:reverse(Acc);
+decode_stacktrace([{FuncBin, ParamRefs, FileInfo} | Rest], State0, Acc) ->
+ %% Decode all the Lua table refs into Erlang terms
+ DecodedParams = decode_params(ParamRefs, State0),
+ %% Pull out the line number
+ Line = proplists:get_value(line, FileInfo),
+ File = proplists:get_value(file, FileInfo, undefined),
+ ?event(debug_lua_stack, {stack_file, FileInfo}),
+ %% Build our message‐map
+ Entry = #{
+ <<"function">> => FuncBin,
+ <<"parameters">> => hb_util:list_to_numbered_map(DecodedParams)
+ },
+ MaybeLine =
+ if is_binary(File) andalso is_integer(Line) ->
+ #{
+ <<"line">> =>
+ iolist_to_binary(
+ io_lib:format("~s:~p", [File, Line])
+ )
+ };
+ is_integer(Line) ->
+ #{ <<"line">> => Line };
+ true ->
+ #{}
+ end,
+ decode_stacktrace(Rest, State0, [maps:merge(Entry, MaybeLine)|Acc]).
+
+%% @doc Decode a list of Lua references, as found in a stack trace, into a
+%% list of Erlang terms.
+decode_params([], _State) -> [];
+decode_params([Tref|Rest], State) ->
+ Decoded = decode(luerl:decode(Tref, State)),
+ [Decoded|decode_params(Rest, State)].
+
%%% Tests
simple_invocation_test() ->
{ok, Script} = file:read_file("test/test.lua"),
Base = #{
<<"device">> => <<"lua@5.3a">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Script
+ },
<<"parameters">> => []
},
?assertEqual(2, hb_ao:get(<<"assoctable/b">>, Base, #{})).
+load_modules_by_id_test() ->
+ % Start a node to ensure the HTTP services are available.
+ _Node = hb_http_server:start_node(#{}),
+ Module = <<"DosEHUAqhl_O5FH3vDqPlgGsG92Guxcm6nrwqnjsDKg">>,
+ {ok, Acc} = load_modules([Module], #{}),
+ [{_,Code}|_] = Acc,
+    <<Prefix:8/binary, _/binary>> = Code,
+ ?assertEqual(<<"function">>, Prefix).
+
+multiple_modules_test() ->
+ {ok, Module} = file:read_file("test/test.lua"),
+ Module2 =
+ <<
+ """
+ function test_second_script()
+ return 4
+ end
+ """
+ >>,
+ Base = #{
+ <<"device">> => <<"lua@5.3a">>,
+ <<"module">> => [
+ #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
+ #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module2
+ }
+ ],
+ <<"parameters">> => []
+ },
+ ?assertEqual(2, hb_ao:get(<<"assoctable/b">>, Base, #{})),
+ ?assertEqual(4, hb_ao:get(<<"test_second_script">>, Base, #{})).
+
+error_response_test() ->
+ {ok, Module} = file:read_file("test/test.lua"),
+ Base = #{
+ <<"device">> => <<"lua@5.3a">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
+ <<"parameters">> => []
+ },
+ ?assertEqual(
+ {error, <<"Very bad, but Lua caught it.">>},
+ hb_ao:resolve(Base, <<"error_response">>, #{})
+ ).
+
sandboxed_failure_test() ->
- {ok, Script} = file:read_file("test/test.lua"),
+ {ok, Module} = file:read_file("test/test.lua"),
Base = #{
<<"device">> => <<"lua@5.3a">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
<<"parameters">> => [],
<<"sandbox">> => true
},
@@ -328,10 +507,13 @@ sandboxed_failure_test() ->
%% @doc Run an AO-Core resolution from the Lua environment.
ao_core_sandbox_test() ->
- {ok, Script} = file:read_file("test/test.lua"),
+ {ok, Module} = file:read_file("test/test.lua"),
Base = #{
<<"device">> => <<"lua@5.3a">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
<<"parameters">> => [],
<<"device-sandbox">> => [<<"message@1.0">>]
},
@@ -340,10 +522,13 @@ ao_core_sandbox_test() ->
%% @doc Run an AO-Core resolution from the Lua environment.
ao_core_resolution_from_lua_test() ->
- {ok, Script} = file:read_file("test/test.lua"),
+ {ok, Module} = file:read_file("test/test.lua"),
Base = #{
<<"device">> => <<"lua@5.3a">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
<<"parameters">> => []
},
{ok, Res} = hb_ao:resolve(Base, <<"ao_resolve">>, #{}),
@@ -352,10 +537,13 @@ ao_core_resolution_from_lua_test() ->
%% @doc Benchmark the performance of Lua executions.
direct_benchmark_test() ->
BenchTime = 3,
- {ok, Script} = file:read_file("test/test.lua"),
+ {ok, Module} = file:read_file("test/test.lua"),
Base = #{
<<"device">> => <<"lua@5.3a">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
<<"parameters">> => []
},
Iterations = hb:benchmark(
@@ -375,10 +563,13 @@ direct_benchmark_test() ->
%% @doc Call a non-compute key on a Lua device message and ensure that the
%% function of the same name in the script is called.
invoke_non_compute_key_test() ->
- {ok, Script} = file:read_file("test/test.lua"),
+ {ok, Module} = file:read_file("test/test.lua"),
Base = #{
<<"device">> => <<"lua@5.3a">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
<<"test-value">> => 42
},
{ok, Result1} = hb_ao:resolve(Base, <<"hello">>, #{}),
@@ -394,16 +585,21 @@ invoke_non_compute_key_test() ->
?event({result2, Result2}),
?assertEqual(<<"Alice">>, hb_ao:get(<<"hello">>, Result2, #{})).
-%% @doc Use a Lua script as a preprocessor on the HTTP server via `~meta@1.0'.
-lua_http_preprocessor_test() ->
- {ok, Script} = file:read_file("test/test.lua"),
+%% @doc Use a Lua module as a hook on the HTTP server via `~meta@1.0'.
+lua_http_hook_test() ->
+ {ok, Module} = file:read_file("test/test.lua"),
Node = hb_http_server:start_node(
#{
- preprocessor =>
- #{
- <<"device">> => <<"lua@5.3a">>,
- <<"script">> => Script
- }
+ on => #{
+ <<"request">> =>
+ #{
+ <<"device">> => <<"lua@5.3a">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ }
+ }
+ }
}),
{ok, Res} = hb_http:get(Node, <<"/hello?hello=world">>, #{}),
?assertMatch(#{ <<"body">> := <<"i like turtles">> }, Res).
@@ -472,7 +668,8 @@ aos_authority_not_trusted_test() ->
<<"type">> => <<"Message">>,
<<"data">> => <<"1 + 1">>,
<<"random-seed">> => rand:uniform(1337),
- <<"action">> => <<"Eval">>
+ <<"action">> => <<"Eval">>,
+ <<"from-process">> => <<"1234">>
}, GuestWallet)
}, GuestWallet
@@ -518,13 +715,16 @@ aos_process_benchmark_test_() ->
%% @doc Generate a Lua process message.
generate_lua_process(File) ->
Wallet = hb:wallet(),
- {ok, Script} = file:read_file(File),
+ {ok, Module} = file:read_file(File),
hb_message:commit(#{
<<"device">> => <<"process@1.0">>,
<<"type">> => <<"Process">>,
<<"scheduler-device">> => <<"scheduler@1.0">>,
<<"execution-device">> => <<"lua@5.3a">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
<<"authority">> => [
hb:address(),
<<"E3FJ53E6xtAzcftBpaw2E1H4ZM9h6qy6xz9NXh5lhEQ">>
@@ -538,6 +738,15 @@ generate_lua_process(File) ->
generate_test_message(Process) ->
ProcID = hb_message:id(Process, all),
Wallet = hb:wallet(),
+ Code = """
+ Count = 0
+ function add()
+ Send({Target = 'Foo', Data = 'Bar' });
+ Count = Count + 1
+ end
+ add()
+ return Count
+ """,
hb_message:commit(#{
<<"path">> => <<"schedule">>,
<<"method">> => <<"POST">>,
@@ -546,7 +755,10 @@ generate_test_message(Process) ->
#{
<<"target">> => ProcID,
<<"type">> => <<"Message">>,
- <<"data">> => <<"Count = 0\n function add() Send({Target = 'Foo', Data = 'Bar' }); Count = Count + 1 end\n add()\n return Count">>,
+ <<"body">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => list_to_binary(Code)
+ },
<<"random-seed">> => rand:uniform(1337),
<<"action">> => <<"Eval">>
},
@@ -559,7 +771,7 @@ generate_test_message(Process) ->
%% @doc Generate a stack message for the Lua process.
generate_stack(File) ->
Wallet = hb:wallet(),
- {ok, Script} = file:read_file(File),
+ {ok, Module} = file:read_file(File),
Msg1 = #{
<<"device">> => <<"Stack@1.0">>,
<<"device-stack">> =>
@@ -571,11 +783,14 @@ generate_stack(File) ->
<<"function">> => <<"json_result">>,
<<"passes">> => 2,
<<"stack-keys">> => [<<"init">>, <<"compute">>],
- <<"script">> => Script,
+ <<"module">> => Module,
<<"process">> =>
hb_message:commit(#{
<<"type">> => <<"Process">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Module
+ },
<<"scheduler">> => hb:address(),
<<"authority">> => hb:address()
}, Wallet)
@@ -583,21 +798,21 @@ generate_stack(File) ->
{ok, Msg2} = hb_ao:resolve(Msg1, <<"init">>, #{}),
Msg2.
-execute_aos_call(Base) ->
- Req =
- hb_message:commit(#{
- <<"action">> => <<"Eval">>,
- <<"function">> => <<"json_result">>,
- <<"data">> => <<"return 2">>
- },
- hb:wallet()
- ),
- execute_aos_call(Base, Req).
-execute_aos_call(Base, Req) ->
- hb_ao:resolve(Base,
- #{
- <<"path">> => <<"compute">>,
- <<"body">> => Req
- },
- #{}
- ).
\ No newline at end of file
+% execute_aos_call(Base) ->
+% Req =
+% hb_message:commit(#{
+% <<"action">> => <<"Eval">>,
+% <<"function">> => <<"json_result">>,
+% <<"data">> => <<"return 2">>
+% },
+% hb:wallet()
+% ),
+% execute_aos_call(Base, Req).
+% execute_aos_call(Base, Req) ->
+% hb_ao:resolve(Base,
+% #{
+% <<"path">> => <<"compute">>,
+% <<"body">> => Req
+% },
+% #{}
+% ).
\ No newline at end of file
diff --git a/src/dev_lua_lib.erl b/src/dev_lua_lib.erl
new file mode 100644
index 000000000..502fe59a9
--- /dev/null
+++ b/src/dev_lua_lib.erl
@@ -0,0 +1,184 @@
+%%% @doc A module for providing AO library functions to the Lua environment.
+%%% This module contains the implementation of the functions, each by the name
+%%% that should be used in the `ao' table in the Lua environment. Every export
+%%% is imported into the Lua environment.
+%%%
+%%% Each function adheres closely to the Luerl calling convention, adding the
+%%% appropriate node message as a third argument:
+%%%
+%%% fun(Args, State, NodeMsg) -> {ResultTerms, NewState}
+%%%
+%%% As Lua allows for multiple return values, each function returns a list of
+%%% terms to grant to the caller. Matching the tuple convention used by AO-Core,
+%%% the first term is typically the status, and the second term is the result.
+-module(dev_lua_lib).
+%%% Library functions. Each exported function is _automatically_ added to the
+%%% Lua environment, except for the `install/3' function, which is used to
+%%% install the library in the first place.
+-export([resolve/3, set/3, event/3, install/3]).
+-include("include/hb.hrl").
+
+%% @doc Install the library into the given Lua environment.
+install(Base, State, Opts) ->
+ % Calculate and set the new `preloaded_devices' option.
+ AllDevs = hb_opts:get(preloaded_devices, Opts),
+ DevSandboxDef =
+ hb_ao:get(
+ <<"device-sandbox">>,
+ {as, <<"message@1.0">>, Base},
+ false,
+ Opts
+ ),
+ AdmissibleDevs =
+ case DevSandboxDef of
+ false -> AllDevs;
+ DevNames ->
+ lists:map(
+ fun(Name) ->
+ [Dev] =
+ lists:filter(
+ fun(X) ->
+ hb_ao:get(<<"name">>, X, Opts) == Name
+ end,
+ AllDevs
+ ),
+ Dev
+ end,
+ hb_util:message_to_ordered_list(DevNames)
+ )
+ end,
+ ?event({adding_ao_core_resolver, {device_sandbox, AdmissibleDevs}}),
+ ExecOpts = Opts#{ preloaded_devices => AdmissibleDevs },
+ % Initialize the AO-Core resolver.
+ BaseAOTable =
+ case luerl:get_table_keys_dec([ao], State) of
+ {ok, nil, _} ->
+ ?event(no_ao_table),
+ #{};
+ {ok, ExistingTable, _} ->
+ ?event({existing_ao_table, ExistingTable}),
+ dev_lua:decode(ExistingTable)
+ end,
+ ?event({base_ao_table, BaseAOTable}),
+ {ok, State2} =
+ luerl:set_table_keys_dec(
+ [ao],
+ dev_lua:encode(BaseAOTable),
+ State
+ ),
+ {
+ ok,
+ lists:foldl(
+ fun(FuncName, StateIn) ->
+ {ok, StateOut} =
+ luerl:set_table_keys_dec(
+ [ao, FuncName],
+ fun(RawArgs, ImportState) ->
+ ?event(lua_import, {calling_import, {func, FuncName}}),
+ % Decode the arguments from the Lua environment.
+ Args =
+ lists:map(
+ fun(Arg) ->
+ dev_lua:decode(
+ luerl:decode(Arg, ImportState)
+ )
+ end,
+ RawArgs
+ ),
+ % Call the function with the decoded arguments.
+ {Res, ResState} =
+ ?MODULE:FuncName(Args, ImportState, ExecOpts),
+ % Encode the response for return to Lua
+ return(Res, ResState)
+ end,
+ StateIn
+ ),
+ StateOut
+ end,
+ State2,
+ [
+ FuncName
+ ||
+ {FuncName, _} <- dev_lua_lib:module_info(exports),
+ FuncName /= module_info,
+ FuncName /= ?FUNCTION_NAME
+ ]
+ )
+ }.
+
+%% @doc Helper function for returning a result from a Lua function.
+return(Result, ExecState) ->
+ ?event(lua_import, {import_returning, {result, Result}}),
+ TableEncoded = dev_lua:encode(Result),
+ {ReturnParams, ResultingState} =
+ lists:foldr(
+ fun(LuaEncoded, {Params, StateIn}) ->
+ {NewParam, NewState} = luerl:encode(LuaEncoded, StateIn),
+ {[NewParam | Params], NewState}
+ end,
+ {[], ExecState},
+ TableEncoded
+ ),
+ ?event({lua_encoded, ReturnParams}),
+ {ReturnParams, ResultingState}.
+
+%% @doc A wrapper function for performing AO-Core resolutions. Offers both the
+%% single-message (using `hb_singleton:from/1' to parse) and multiple-message
+%% (using `hb_ao:resolve_many/2') variants.
+resolve([SingletonMsg], ExecState, ExecOpts) ->
+ ?event({ao_core_resolver, {msg, SingletonMsg}}),
+ ParsedMsgs = hb_singleton:from(SingletonMsg),
+ ?event({parsed_msgs_to_resolve, ParsedMsgs}),
+ resolve({many, ParsedMsgs}, ExecState, ExecOpts);
+resolve([Base, Path], ExecState, ExecOpts) when is_binary(Path) ->
+ PathParts = hb_path:term_to_path_parts(Path, ExecOpts),
+ resolve({many, [Base] ++ PathParts}, ExecState, ExecOpts);
+resolve(Msgs, ExecState, ExecOpts) when is_list(Msgs) ->
+ resolve({many, Msgs}, ExecState, ExecOpts);
+resolve({many, Msgs}, ExecState, ExecOpts) ->
+ MaybeAsMsgs = lists:map(fun convert_as/1, Msgs),
+ try hb_ao:resolve_many(MaybeAsMsgs, ExecOpts) of
+ {Status, Res} ->
+ ?event({resolved_msgs, {status, Status}, {res, Res}, {exec_opts, ExecOpts}}),
+ {[Status, Res], ExecState}
+ catch
+ Error ->
+ ?event(lua_error, {ao_core_resolver_error, Error}),
+ {[<<"error">>, Error], ExecState}
+ end.
+
+%% @doc Converts any `as' terms from Lua to their HyperBEAM equivalents.
+convert_as([<<"as">>, Device, RawMsg]) ->
+ {as, Device, RawMsg};
+convert_as(Other) ->
+ Other.
+
+%% @doc Wrapper for `hb_ao''s `set' functionality.
+set([Base, Key, Value], ExecState, ExecOpts) ->
+ ?event({ao_core_set, {base, Base}, {key, Key}, {value, Value}}),
+ NewRes = hb_ao:set(Base, Key, Value, ExecOpts),
+ ?event({ao_core_set_result, {result, NewRes}}),
+ {[NewRes], ExecState};
+set([Base, NewValues], ExecState, ExecOpts) ->
+ ?event({ao_core_set, {base, Base}, {new_values, NewValues}}),
+ NewRes = hb_ao:set(Base, NewValues, ExecOpts),
+ ?event({ao_core_set_result, {result, NewRes}}),
+ {[NewRes], ExecState}.
+
+%% @doc Allows Lua scripts to signal events using the HyperBEAM host's internal
+%% event system.
+event([Event], ExecState, Opts) ->
+ ?event({recalling_event, Event}),
+ event([global, Event], ExecState, Opts);
+event([Group, Event], State, Opts) when is_list(Event) ->
+ event([Group, list_to_tuple(Event)], State, Opts);
+event([Group, Event], ExecState, Opts) ->
+ ?event(
+ lua_event,
+ {event,
+ {group, Group},
+ {event, Event}
+ }
+ ),
+ ?event(Group, Event),
+ {[<<"ok">>], ExecState}.
\ No newline at end of file
diff --git a/src/dev_lua_test.erl b/src/dev_lua_test.erl
new file mode 100644
index 000000000..399e7c769
--- /dev/null
+++ b/src/dev_lua_test.erl
@@ -0,0 +1,165 @@
+%%% @doc A wrapper module for generating and executing EUnit tests for all Lua modules.
+%%% When executed with `rebar3 lua-test`, this module will be invoked and scan the
+%%% `scripts' directory for all Lua files, and generate an EUnit test suite for
+%%% each one. By default, an individual test is generated for each function in
+%%% the global `_G' table that ends in `_test'.
+%%%
+%%% In order to specify other tests to run instead, the user may employ the
+%%% `LUA_TESTS' and `LUA_SCRIPTS' environment variables. The syntax for these
+%%% variables is described in the function documentation for `parse_spec'.
+%%%
+-module(dev_lua_test).
+-export([parse_spec/1]).
+-include_lib("include/hb.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+%% @doc Parse a string representation of test descriptions received from the
+%% command line via the `LUA_TESTS' environment variable.
+%%
+%% Supported syntax in loose BNF/RegEx:
+%%
+%% Definitions := (ModDef,)+
+%% ModDef := ModName(TestDefs)?
+%% ModName := ModuleInLUA_SCRIPTS|(FileName[.lua])?
+%% TestDefs := (:TestDef)+
+%% TestDef := TestName
+%%
+%% File names ending in `.lua' are assumed to be relative paths from the current
+%% working directory. Module names lacking the `.lua' extension are assumed to
+%% be modules found in the `LUA_SCRIPTS' environment variable (defaulting to
+%% `scripts/').
+%%
+%% For example, to run a single test one could call the following:
+%%
+%% LUA_TESTS=~/src/LuaScripts/test.yourTest rebar3 lua-tests
+%%
+%% To specify that one would like to run all of the tests in the
+%% `scripts/test.lua' file and two tests from the `scripts/test2.lua' file, the
+%% user could provide the following test definition:
+%%
+%% LUA_TESTS="test,scripts/test2.userTest1|userTest2" rebar3 lua-tests
+%%
+parse_spec(Str) when is_list(Str) ->
+ parse_spec(hb_util:bin(Str));
+parse_spec(tests) ->
+ % The user has not given a test spec, so we default to running all tests in
+ % the `LUA_SCRIPTS' directory (defaulting to `scripts/').
+ {ok, Files} = file:list_dir(ScriptDir = hb_opts:get(lua_scripts)),
+ RelevantFiles =
+ lists:filter(
+ fun(File) ->
+ terminates_with(File, <<"lua">>)
+ end,
+ Files
+ ),
+ ?event({loading_scripts, RelevantFiles}),
+ [
+ {
+ <<
+ (hb_util:bin(ScriptDir))/binary,
+ "/",
+ (hb_util:bin(File))/binary
+ >>,
+ tests
+ }
+ ||
+ File <- RelevantFiles
+ ];
+parse_spec(Str) ->
+ lists:map(
+ fun(ModDef) ->
+ [ModName|TestDefs] = binary:split(ModDef, <<":">>, [global, trim_all]),
+ ScriptDir = hb_util:bin(hb_opts:get(lua_scripts)),
+ File =
+ case terminates_with(ModName, <<".lua">>) of
+ true -> ModName;
+ false -> << ScriptDir/binary, "/", ModName/binary, ".lua" >>
+ end,
+ Tests =
+ case TestDefs of
+ [] -> tests;
+ TestDefs -> TestDefs
+ end,
+ {File, Tests}
+ end,
+ binary:split(Str, <<",">>, [global, trim_all])
+ ).
+
+%% @doc Main entrypoint for Lua tests.
+exec_test_() ->
+ ScriptDefs = hb_opts:get(lua_tests),
+ lists:map(
+ fun({File, Funcs}) -> suite(File, Funcs) end,
+ ScriptDefs
+ ).
+
+%% @doc Generate an EUnit test suite for a given Lua script. If the `Funcs' is
+%% the atom `tests' we find all of the global functions in the script, then
+%% filter for those ending in `_test' in a similar fashion to Eunit.
+suite(File, Funcs) ->
+ {ok, State} = new_state(File),
+ {foreach,
+ fun() -> ok end,
+ fun(_) -> ok end,
+ lists:map(
+ fun(FuncName) ->
+ {
+ hb_util:list(File) ++ ":" ++ hb_util:list(FuncName),
+ fun() -> exec_test(State, FuncName) end
+ }
+ end,
+ case Funcs of
+ tests ->
+ lists:filter(
+ fun(FuncName) ->
+ terminates_with(FuncName, <<"_test">>)
+ end,
+ hb_ao:get(<<"functions">>, State, #{})
+ );
+ FuncNames -> FuncNames
+ end
+ )
+ }.
+
+%% @doc Create a new Lua environment for a given script.
+new_state(File) ->
+ ?event(debug_lua_test, {generating_state_for, File}),
+ {ok, Module} = file:read_file(hb_util:list(File)),
+ {ok, _} =
+ hb_ao:resolve(
+ #{
+ <<"device">> => <<"lua@5.3a">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"name">> => File,
+ <<"body">> => Module
+ }
+ },
+ <<"init">>,
+ #{}
+ ).
+
+%% @doc Generate an EUnit test for a given function.
+exec_test(State, Function) ->
+ {Status, Result} =
+ hb_ao:resolve(
+ State,
+ #{ <<"path">> => Function, <<"parameters">> => [] },
+ #{}
+ ),
+ case Status of
+ ok -> ok;
+ error ->
+ hb_util:debug_print(Result, <<"Lua">>, Function, 1),
+ ?assertEqual(
+ ok,
+ Status
+ )
+ end.
+
+%%% Utility functions.
+
+%% @doc Check if a string terminates with a given suffix.
+terminates_with(String, Suffix) ->
+ binary:longest_common_suffix(lists:map(fun hb_util:bin/1, [String, Suffix]))
+ == byte_size(Suffix).
\ No newline at end of file
diff --git a/src/dev_message.erl b/src/dev_message.erl
index 7d6e36b5c..5b70bd267 100644
--- a/src/dev_message.erl
+++ b/src/dev_message.erl
@@ -428,7 +428,7 @@ set(Message1, NewValuesMsg, Opts) ->
KeysToSet =
lists:filter(
fun(Key) ->
- not lists:member(Key, ?DEVICE_KEYS) andalso
+ not lists:member(Key, ?DEVICE_KEYS ++ [<<"set-mode">>]) andalso
(maps:get(Key, NewValuesMsg, undefined) =/= undefined)
end,
NewValuesKeys
@@ -482,7 +482,12 @@ set(Message1, NewValuesMsg, Opts) ->
},
Opts
),
- ?event({setting, {committed_keys, CommittedKeys}, {keys_to_set, KeysToSet}, {message, Message1}}),
+ ?event(
+ {setting,
+ {committed_keys, CommittedKeys},
+ {keys_to_set, KeysToSet},
+ {message, Message1}
+ }),
OverwrittenCommittedKeys =
lists:filtermap(
fun(Key) ->
@@ -497,8 +502,15 @@ set(Message1, NewValuesMsg, Opts) ->
CommittedKeys
),
?event({setting, {overwritten_committed_keys, OverwrittenCommittedKeys}}),
- % Combine with deep merge
- Merged = hb_private:set_priv(hb_util:deep_merge(BaseValues, NewValues), OriginalPriv),
+ % Combine with deep merge or if `set-mode` is `explicit' then just merge.
+ Merged =
+ hb_private:set_priv(
+ case maps:get(<<"set-mode">>, NewValuesMsg, <<"deep">>) of
+ <<"explicit">> -> maps:merge(BaseValues, NewValues);
+ _ -> hb_util:deep_merge(BaseValues, NewValues)
+ end,
+ OriginalPriv
+ ),
case OverwrittenCommittedKeys of
[] -> {ok, Merged};
_ ->
@@ -636,6 +648,32 @@ unset_with_set_test() ->
?assertMatch({ok, Msg3} when ?IS_EMPTY_MESSAGE(Msg3),
hb_ao:resolve(Msg1, Msg2, #{ hashpath => ignore })).
+deep_unset_test() ->
+ Opts = #{ hashpath => ignore },
+ Msg1 = #{
+ <<"test-key1">> => <<"Value1">>,
+ <<"deep">> => #{
+ <<"test-key2">> => <<"Value2">>,
+ <<"test-key3">> => <<"Value3">>
+ }
+ },
+ Msg2 = hb_ao:set(Msg1, #{ <<"deep/test-key2">> => unset }, Opts),
+ ?assertEqual(#{
+ <<"test-key1">> => <<"Value1">>,
+ <<"deep">> => #{ <<"test-key3">> => <<"Value3">> }
+ },
+ Msg2
+ ),
+ Msg3 = hb_ao:set(Msg2, <<"deep/test-key3">>, unset, Opts),
+ ?assertEqual(#{
+ <<"test-key1">> => <<"Value1">>,
+ <<"deep">> => #{}
+ },
+ Msg3
+ ),
+ Msg4 = hb_ao:set(Msg3, #{ <<"deep">> => unset }, Opts),
+ ?assertEqual(#{ <<"test-key1">> => <<"Value1">> }, Msg4).
+
set_ignore_undefined_test() ->
Msg1 = #{ <<"test-key">> => <<"Value1">> },
Msg2 = #{ <<"path">> => <<"set">>, <<"test-key">> => undefined },
diff --git a/src/dev_meta.erl b/src/dev_meta.erl
index 082e7aa4e..a0e428fd3 100644
--- a/src/dev_meta.erl
+++ b/src/dev_meta.erl
@@ -7,11 +7,11 @@
%%% resolver. Additionally, a post-processor can be set, which is executed after
%%% the AO-Core resolver has returned a result.
-module(dev_meta).
--export([info/1, info/3, handle/2, adopt_node_message/2]).
-%%% Public API
--export([is_operator/2]).
+-export([info/1, info/3, build/3, handle/2, adopt_node_message/2, is/2, is/3]).
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
+%%% Include the auto-generated build info header file.
+-include_lib("../_build/hb_buildinfo.hrl").
%% @doc Ensure that the helper function `adopt_node_message/2' is not exported.
%% The naming of this method carefully avoids a clash with the exported `info/3'
@@ -22,27 +22,26 @@
%% info call will match the three-argument version of the function. If in the
%% future the `request' is added as an argument to AO-Core's internal `info'
%% function, we will need to find a different approach.
-info(_) -> #{ exports => [info] }.
+info(_) -> #{ exports => [info, build] }.
-%% @doc Utility function for determining if a request is from the `operator' of
-%% the node.
-is_operator(Request, NodeMsg) ->
- RequestSigners = hb_message:signers(Request),
- Operator =
- hb_opts:get(
- operator,
- case hb_opts:get(priv_wallet, no_viable_wallet, NodeMsg) of
- no_viable_wallet -> unclaimed;
- Wallet -> ar_wallet:to_address(Wallet)
- end,
- NodeMsg
- ),
- EncOperator =
- case Operator of
- unclaimed -> unclaimed;
- NativeAddress -> hb_util:human_id(NativeAddress)
- end,
- EncOperator == unclaimed orelse lists:member(EncOperator, RequestSigners).
+%% @doc Emits the version number and commit hash of the HyperBEAM node source,
+%% if available.
+%%
+%% We include the short hash separately, as the length of this hash may change in
+%% the future, depending on the git version/config used to build the node.
+%% Subsequently, rather than embedding the `git-short-hash-length', for the
+%% avoidance of doubt, we include the short hash separately, as well as its long
+%% hash.
+build(_, _, _NodeMsg) ->
+ {ok,
+ #{
+ <<"node">> => <<"HyperBEAM">>,
+ <<"version">> => ?HYPERBEAM_VERSION,
+ <<"source">> => ?HB_BUILD_SOURCE,
+ <<"source-short">> => ?HB_BUILD_SOURCE_SHORT,
+ <<"build-time">> => ?HB_BUILD_TIME
+ }
+ }.
%% @doc Normalize and route messages downstream based on their path. Messages
%% with a `Meta' key are routed to the `handle_meta/2' function, while all
@@ -64,9 +63,9 @@ handle(NodeMsg, RawRequest) ->
_ -> handle_resolve(RawRequest, NormRequest, NodeMsg)
end.
-handle_initialize([Base = #{ <<"device">> := Device}, Req = #{ <<"path">> := Path }|_], NodeMsg) ->
- ?event({got, {device, Device}, {path, Path}}),
- case {Device, Path} of
+handle_initialize([Base = #{ <<"device">> := Dev}, Req = #{ <<"path">> := Path }|_], NodeMsg) ->
+ ?event({got, {device, Dev}, {path, Path}}),
+ case {Dev, Path} of
{<<"meta@1.0">>, <<"info">>} -> info(Base, Req, NodeMsg);
_ -> {error, <<"Node must be initialized before use.">>}
end;
@@ -84,8 +83,7 @@ info(_, Request, NodeMsg) ->
case hb_ao:get(<<"method">>, Request, NodeMsg) of
<<"GET">> ->
?event({get_config_req, Request, NodeMsg}),
- DynamicKeys = add_dynamic_keys(NodeMsg),
- ?event(green_zone, {get_config, DynamicKeys}),
+ DynamicKeys = add_dynamic_keys(NodeMsg),
embed_status({ok, filter_node_msg(DynamicKeys)});
<<"POST">> ->
case hb_ao:get(<<"initialized">>, NodeMsg, not_found, NodeMsg) of
@@ -120,29 +118,14 @@ add_dynamic_keys(NodeMsg) ->
NodeMsg;
Wallet ->
%% Create a new map with address and merge it (overwriting existing)
- Address = hb_util:id(ar_wallet:to_address(Wallet)),
+ Address = hb_util:id(ar_wallet:to_address(Wallet)),
NodeMsg#{ address => Address, <<"address">> => Address }
end.
%% @doc Validate that the request is signed by the operator of the node, then
%% allow them to update the node message.
update_node_message(Request, NodeMsg) ->
- RequestSigners = hb_message:signers(Request),
- Operator =
- hb_opts:get(
- operator,
- case hb_opts:get(priv_wallet, no_viable_wallet, NodeMsg) of
- no_viable_wallet -> unclaimed;
- Wallet -> ar_wallet:to_address(Wallet)
- end,
- NodeMsg
- ),
- EncOperator =
- case Operator of
- unclaimed -> unclaimed;
- NativeAddress -> hb_util:human_id(NativeAddress)
- end,
- case EncOperator == unclaimed orelse lists:member(EncOperator, RequestSigners) of
+ case is(admin, Request, NodeMsg) of
false ->
?event({set_node_message_fail, Request}),
embed_status({error, <<"Unauthorized">>});
@@ -156,7 +139,8 @@ update_node_message(Request, NodeMsg) ->
<<"body">> =>
iolist_to_binary(
io_lib:format(
- "Node message updated. History: ~p updates.",
+                            "Node message updated. History: ~p "
+                            "updates.",
[length(NewH)]
)
),
@@ -173,35 +157,24 @@ update_node_message(Request, NodeMsg) ->
%% @doc Attempt to adopt changes to a node message.
adopt_node_message(Request, NodeMsg) ->
?event({set_node_message_success, Request}),
- MergedOpts =
- maps:merge(
- NodeMsg,
- hb_opts:mimic_default_types(hb_message:uncommitted(Request), new_atoms)
- ),
% Ensure that the node history is updated and the http_server ID is
% not overridden.
case hb_opts:get(initialized, permanent, NodeMsg) of
permanent ->
{error, <<"Node message is already permanent.">>};
_ ->
- hb_http_server:set_opts(
- MergedOpts#{
- http_server => hb_opts:get(http_server, no_server, NodeMsg),
- node_history => [Request|hb_opts:get(node_history, [], NodeMsg)]
- }
- ),
- {ok, MergedOpts}
+ hb_http_server:set_opts(Request, NodeMsg)
end.
%% @doc Handle an AO-Core request, which is a list of messages. We apply
%% the node's pre-processor to the request first, and then resolve the request
%% using the node's AO-Core implementation if its response was `ok'.
-%% After execution, we run the node's `postprocessor' message on the result of
+%% After execution, we run the node's `response' hook on the result of
%% the request before returning the result it grants back to the user.
handle_resolve(Req, Msgs, NodeMsg) ->
- TracePID = maps:get(trace, NodeMsg),
+ TracePID = hb_opts:get(trace, no_tracer_set, NodeMsg),
% Apply the pre-processor to the request.
- case resolve_processor(<<"preprocess">>, preprocessor, Req, Msgs, NodeMsg) of
+ case resolve_hook(<<"request">>, Req, Msgs, NodeMsg) of
{ok, PreProcessedMsg} ->
?event(
{result_after_preprocessing,
@@ -209,10 +182,11 @@ handle_resolve(Req, Msgs, NodeMsg) ->
),
AfterPreprocOpts = hb_http_server:get_opts(NodeMsg),
% Resolve the request message.
- HTTPOpts = maps:merge(
- AfterPreprocOpts,
- hb_opts:get(http_extra_opts, #{}, NodeMsg)
- ),
+ HTTPOpts =
+ maps:merge(
+ AfterPreprocOpts,
+ hb_opts:get(http_extra_opts, #{}, NodeMsg)
+ ),
Res =
try
hb_ao:resolve_many(
@@ -224,10 +198,13 @@ handle_resolve(Req, Msgs, NodeMsg) ->
ID = hb_util:human_id(MsgID),
{error, #{
<<"status">> => 404,
- <<"unavilable">> => ID,
+ <<"unavailable">> => ID,
<<"body">> =>
- <<"Message necessary to resolve request not found: ",
- ID/binary>>
+ <<
+ "Message necessary to resolve request ",
+ "not found: ",
+ ID/binary
+ >>
}}
end,
{ok, StatusEmbeddedRes} =
@@ -239,9 +216,8 @@ handle_resolve(Req, Msgs, NodeMsg) ->
% Apply the post-processor to the result.
Output = maybe_sign(
embed_status(
- resolve_processor(
- <<"postprocess">>,
- postprocessor,
+ resolve_hook(
+ <<"response">>,
Req,
StatusEmbeddedRes,
AfterResolveOpts
@@ -254,29 +230,27 @@ handle_resolve(Req, Msgs, NodeMsg) ->
Res -> embed_status(hb_ao:force_message(Res, NodeMsg))
end.
-%% @doc Execute a message from the node message upon the user's request. The
-%% invocation of the processor provides a request of the following form:
+%% @doc Execute a hook from the node message upon the user's request. The
+%% invocation of the hook provides a request of the following form:
%%
-%% /path => preprocess | postprocess
+%% /path => request | response
%% /request => the original request singleton
-%% /body => list of messages the user wishes to process
+%% /body => parsed sequence of messages to process | the execution result
%%
-resolve_processor(PathKey, Processor, Req, Query, NodeMsg) ->
- case hb_opts:get(Processor, undefined, NodeMsg) of
- undefined -> {ok, Query};
- ProcessorMsg ->
- ?event(processor, {processor_resolving, PathKey, ProcessorMsg}),
- Res = hb_ao:resolve(
- ProcessorMsg,
- #{
- <<"path">> => PathKey,
- <<"body">> => Query,
- <<"request">> => Req
- },
- NodeMsg#{ hashpath => ignore }
- ),
- ?event(processor, {processor_result, {type, PathKey}, {res, Res}}),
- Res
+resolve_hook(HookName, InitiatingRequest, Body, NodeMsg) ->
+ HookReq =
+ #{
+ <<"request">> => InitiatingRequest,
+ <<"body">> => Body
+ },
+ ?event(hook, {resolve_hook, HookName, HookReq}),
+ case dev_hook:on(HookName, HookReq, NodeMsg) of
+ {ok, #{ <<"body">> := ResponseBody }} ->
+ {ok, ResponseBody};
+ {error, _} = Error ->
+ Error;
+ Other ->
+ {error, Other}
end.
%% @doc Wrap the result of a device call in a status.
@@ -306,6 +280,7 @@ status_code(ok) -> 200;
status_code(error) -> 400;
status_code(created) -> 201;
status_code(not_found) -> 404;
+status_code(failure) -> 500;
status_code(unavailable) -> 503.
%% @doc Get the HTTP status code from a transaction (if it exists).
@@ -340,6 +315,76 @@ maybe_sign(Res, NodeMsg) ->
false -> Res
end.
+%% @doc Check if the request in question is signed by a given `role' on the node.
+%% The `role' can be one of `admin', `operator', or `initiator'.
+is(Request, NodeMsg) ->
+ is(operator, Request, NodeMsg).
+is(admin, Request, NodeMsg) ->
+ % Does the caller have the right to change the node message?
+ RequestSigners = hb_message:signers(Request),
+ ValidOperator =
+ hb_util:bin(
+ hb_opts:get(
+ operator,
+ case hb_opts:get(priv_wallet, no_viable_wallet, NodeMsg) of
+ no_viable_wallet -> unclaimed;
+ Wallet -> ar_wallet:to_address(Wallet)
+ end,
+ NodeMsg
+ )
+ ),
+ EncOperator =
+ case ValidOperator of
+ <<"unclaimed">> -> unclaimed;
+ NativeAddress -> hb_util:human_id(NativeAddress)
+ end,
+ ?event({is,
+ {operator,
+ {valid_operator, ValidOperator},
+ {encoded_operator, EncOperator},
+ {request_signers, RequestSigners}
+ }
+ }),
+ EncOperator == unclaimed orelse lists:member(EncOperator, RequestSigners);
+is(operator, Req, NodeMsg) ->
+ % Is the caller explicitly set to be the operator?
+ % Get the operator from the node message
+ Operator = hb_opts:get(operator, unclaimed, NodeMsg),
+ % Get the request signers
+ RequestSigners = hb_message:signers(Req),
+ % Ensure the operator is present in the request
+ lists:member(Operator, RequestSigners);
+is(initiator, Request, NodeMsg) ->
+ % Is the caller the first identity that configured the node message?
+ NodeHistory = hb_opts:get(node_history, [], NodeMsg),
+ % Check if node_history exists and is not empty
+ case NodeHistory of
+ [] ->
+ ?event(green_zone, {init, node_history, empty}),
+ false;
+ [InitializationRequest | _] ->
+            % Extract the signers of the first (initialization) history entry.
+ InitializationRequestSigners = hb_message:signers(InitializationRequest),
+ % Get request signers
+ RequestSigners = hb_message:signers(Request),
+            % Ensure all signers of the initialization request are present in the
+ % request.
+ AllSignersPresent =
+ lists:all(
+ fun(Signer) -> lists:member(Signer, RequestSigners) end,
+ InitializationRequestSigners
+ ),
+ case AllSignersPresent of
+ true ->
+ {ok, true};
+ false ->
+ {error, #{
+ <<"status">> => 401,
+ <<"message">> => <<"Invalid request signature.">>
+ }}
+ end
+ end.
+
%%% Tests
%% @doc Test that we can get the node message.
@@ -422,7 +467,7 @@ permanent_node_message_test() ->
Owner = ar_wallet:new(),
Node = hb_http_server:start_node(
#{
- operator => unclaimed,
+ operator => <<"unclaimed">>,
initialized => false,
test_config_item => <<"test">>
}
@@ -506,69 +551,122 @@ claim_node_test() ->
?assertEqual(<<"test2">>, hb_ao:get(<<"test_config_item">>, Res2, #{})),
?assertEqual(2, length(hb_ao:get(<<"node_history">>, Res2, [], #{}))).
-%% Test that we can use a preprocessor upon a request.
-% preprocessor_test() ->
-% Parent = self(),
-% Node = hb_http_server:start_node(
-% #{
-% preprocessor =>
-% #{
-% <<"device">> => #{
-% <<"preprocess">> =>
-% fun(_, #{ <<"body">> := Msgs }, _) ->
-% Parent ! ok,
-% {ok, Msgs}
-% end
-% }
-% }
-% }),
-% hb_http:get(Node, <<"/~meta@1.0/info">>, #{}),
-% ?assert(receive ok -> true after 1000 -> false end).
+%% Test that we can use a hook upon a request.
+request_response_hooks_test() ->
+ Parent = self(),
+ Node = hb_http_server:start_node(
+ #{
+ on =>
+ #{
+ <<"request">> =>
+ #{
+ <<"device">> => #{
+ <<"request">> =>
+ fun(_, #{ <<"body">> := Msgs }, _) ->
+ Parent ! {hook, request},
+ {ok, #{ <<"body">> => Msgs} }
+ end
+ }
+ },
+ <<"response">> =>
+ #{
+ <<"device">> => #{
+ <<"response">> =>
+ fun(_, #{ <<"body">> := Msgs }, _) ->
+ Parent ! {hook, response},
+ {ok, #{ <<"body">> => Msgs} }
+ end
+ }
+ }
+ },
+ http_extra_opts => #{
+ <<"cache-control">> => [<<"no-store">>, <<"no-cache">>]
+ }
+ }),
+ hb_http:get(Node, <<"/~meta@1.0/info">>, #{}),
+ % Receive both of the responses from the hooks, if possible.
+ Res =
+ receive
+ {hook, request} ->
+ receive {hook, response} -> true after 100 -> false end
+ after 100 ->
+ false
+ end,
+ ?assert(Res).
-%% @doc Test that we can halt a request if the preprocessor returns an error.
+%% @doc Test that we can halt a request if the hook returns an error.
halt_request_test() ->
Node = hb_http_server:start_node(
#{
- preprocessor =>
+ on =>
#{
- <<"device">> => #{
- <<"preprocess">> =>
- fun(_, _, _) ->
- {error, <<"Bad">>}
- end
- }
+ <<"request">> =>
+ #{
+ <<"device">> => #{
+ <<"request">> =>
+ fun(_, _, _) ->
+ {error, <<"Bad">>}
+ end
+ }
+ }
}
}),
{error, Res} = hb_http:get(Node, <<"/~meta@1.0/info">>, #{}),
?assertEqual(<<"Bad">>, Res).
-%% @doc Test that a preprocessor can modify a request.
+%% @doc Test that a hook can modify a request.
modify_request_test() ->
Node = hb_http_server:start_node(
#{
- preprocessor =>
+ on =>
#{
- <<"device">> => #{
- <<"preprocess">> =>
- fun(_, #{ <<"body">> := [M|Ms] }, _) ->
- {ok, [M#{ <<"added">> => <<"value">> }|Ms]}
- end
- }
+ <<"request">> =>
+ #{
+ <<"device">> => #{
+ <<"request">> =>
+ fun(_, #{ <<"body">> := [M|Ms] }, _) ->
+ {
+ ok,
+ #{
+ <<"body">> =>
+ [
+ M#{
+ <<"added">> =>
+ <<"value">>
+ }
+ |
+ Ms
+ ]
+ }
+ }
+ end
+ }
+ }
}
}),
{ok, Res} = hb_http:get(Node, <<"/added">>, #{}),
?assertEqual(<<"value">>, Res).
-%% Test that we can use a postprocessor upon a request. Calls the `test@1.0'
-%% device's postprocessor, which sets the `postprocessor-called' key to true in
-%% the HTTP server.
-% postprocessor_test() ->
-% Node = hb_http_server:start_node(
-% #{
-% postprocessor => <<"test-device@1.0">>
-% }),
-% hb_http:get(Node, <<"/~meta@1.0/info">>, #{}),
-% timer:sleep(100),
-% {ok, Res} = hb_http:get(Node, <<"/~meta@1.0/info/postprocessor-called">>, #{}),
-% ?event({res, Res}),
-% ?assertEqual(true, Res).
\ No newline at end of file
+%% @doc Test that version information is available and returned correctly.
+buildinfo_test() ->
+ Node = hb_http_server:start_node(#{}),
+ ?assertEqual(
+ {ok, <<"HyperBEAM">>},
+ hb_http:get(Node, <<"/~meta@1.0/build/node">>, #{})
+ ),
+ ?assertEqual(
+ {ok, ?HYPERBEAM_VERSION},
+ hb_http:get(Node, <<"/~meta@1.0/build/version">>, #{})
+ ),
+ ?assertEqual(
+ {ok, ?HB_BUILD_SOURCE},
+ hb_http:get(Node, <<"/~meta@1.0/build/source">>, #{})
+ ),
+ ?assertEqual(
+ {ok, ?HB_BUILD_SOURCE_SHORT},
+ hb_http:get(Node, <<"/~meta@1.0/build/source-short">>, #{})
+ ),
+ ?assertEqual(
+ {ok, ?HB_BUILD_TIME},
+ hb_http:get(Node, <<"/~meta@1.0/build/build-time">>, #{})
+ ).
diff --git a/src/dev_node_process.erl b/src/dev_node_process.erl
index 67dbbf774..e213c915f 100644
--- a/src/dev_node_process.erl
+++ b/src/dev_node_process.erl
@@ -49,7 +49,7 @@ spawn_register(Name, Opts) ->
Signed = hb_message:commit(augment_definition(BaseDef, Opts), Opts),
ID = hb_message:id(Signed, signed, Opts),
?event(node_process, {spawned, {name, Name}, {process, Signed}}),
- % `POST` to the schedule device for the process to start its sequence.
+ % `POST' to the schedule device for the process to start its sequence.
{ok, Assignment} =
hb_ao:resolve(
Signed,
@@ -105,13 +105,16 @@ augment_definition(BaseDef, Opts) ->
%% @doc Helper function to generate a test environment and its options.
generate_test_opts() ->
- {ok, Script} = file:read_file(<<"test/test.lua">>),
+ {ok, Module} = file:read_file(<<"test/test.lua">>),
generate_test_opts(#{
?TEST_NAME => #{
<<"device">> => <<"process@1.0">>,
<<"execution-device">> => <<"lua@5.3a">>,
<<"scheduler-device">> => <<"scheduler@1.0">>,
- <<"script">> => Script
+ <<"module">> => #{
+ <<"content-type">> => <<"text/x-lua">>,
+ <<"body">> => Module
+ }
}
}).
generate_test_opts(Defs) ->
diff --git a/src/dev_p4.erl b/src/dev_p4.erl
index a24af741a..badd5e23a 100644
--- a/src/dev_p4.erl
+++ b/src/dev_p4.erl
@@ -5,8 +5,8 @@
%%%
%%% The device requires the following node message settings in order to function:
%%%
-%%% - `p4_pricing_device': The device that will estimate the cost of a request.
-%%% - `p4_ledger_device': The device that will act as a payment ledger.
+%%% - `p4_pricing-device': The device that will estimate the cost of a request.
+%%% - `p4_ledger-device': The device that will act as a payment ledger.
%%%
%%% The pricing device should implement the following keys:
%%%
@@ -22,17 +22,18 @@
%%% circumstances. Else, the value returned by the `price' key will be passed to
%%% the ledger device as the `amount' key.
%%%
-%%% The ledger device should implement the following keys:
+%%% A ledger device should implement the following keys:
%%%
%%%
%%% The `type' key is optional and defaults to `pre'. If `type' is set to `post',
%%% the debit must be applied to the ledger, whereas the `pre' type is used to
%%% check whether the debit would succeed before execution.
-module(dev_p4).
--export([preprocess/3, postprocess/3, balance/3]).
+-export([request/3, response/3, balance/3]).
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
@@ -43,24 +44,33 @@
]).
%% @doc Estimate the cost of a transaction and decide whether to proceed with
-%% a request. The default behavior if `pricing_device' or `p4_balances' are
+%% a request. The default behavior if `pricing-device' or `p4_balances' are
%% not set is to proceed, so it is important that a user initialize them.
-preprocess(State, Raw, NodeMsg) ->
- PricingDevice = hb_ao:get(<<"pricing_device">>, State, false, NodeMsg),
- LedgerDevice = hb_ao:get(<<"ledger_device">>, State, false, NodeMsg),
+request(State, Raw, NodeMsg) ->
+ PricingDevice = hb_ao:get(<<"pricing-device">>, State, false, NodeMsg),
+ LedgerDevice = hb_ao:get(<<"ledger-device">>, State, false, NodeMsg),
Messages = hb_ao:get(<<"body">>, Raw, NodeMsg#{ hashpath => ignore }),
Request = hb_ao:get(<<"request">>, Raw, NodeMsg),
IsChargable = is_chargable_req(Request, NodeMsg),
- ?event(payment, {preprocess_with_devices, PricingDevice, LedgerDevice, {chargable, IsChargable}}),
+ ?event(payment,
+ {preprocess_with_devices,
+ PricingDevice,
+ LedgerDevice,
+ {chargable, IsChargable}
+ }
+ ),
case {IsChargable, (PricingDevice =/= false) and (LedgerDevice =/= false)} of
- {false, _} -> {ok, Messages};
- {true, false} -> {ok, Messages};
+ {false, _} ->
+ ?event(payment, non_chargable_route),
+ {ok, #{ <<"body">> => Messages }};
+ {true, false} ->
+ ?event(payment, {p4_pre_pricing_response, {error, <<"infinity">>}}),
+ {ok, #{ <<"body">> => Messages }};
{true, true} ->
- PricingMsg = #{ <<"device">> => PricingDevice },
- LedgerMsg = #{ <<"device">> => LedgerDevice },
+ PricingMsg = State#{ <<"device">> => PricingDevice },
+ LedgerMsg = State#{ <<"device">> => LedgerDevice },
PricingReq = #{
<<"path">> => <<"estimate">>,
- <<"type">> => <<"pre">>,
<<"request">> => Request,
<<"body">> => Messages
},
@@ -77,45 +87,83 @@ preprocess(State, Raw, NodeMsg) ->
{error,
<<"Node will not service this request "
"under any circumstances.">>};
+ {ok, 0} ->
+ % The device has estimated the cost of the request to be
+ % zero, so we proceed.
+ {ok, #{ <<"body">> => Messages }};
{ok, Price} ->
- % The device has estimated the cost of the request. We
- % forward the request to the ledger device to check if we
- % have enough funds to service the request.
- LedgerReq =
- #{
- <<"path">> => <<"debit">>,
- <<"amount">> => Price,
- <<"type">> => <<"pre">>,
- <<"request">> => Request
- },
+ % The device has estimated the cost of the request. We check
+ % the user's balance to see if they have enough funds to
+ % service the request.
+ LedgerReq = #{
+ <<"path">> => <<"balance">>,
+ <<"target">> =>
+ case hb_message:signers(Request) of
+ [Signer] -> Signer;
+ [] -> <<"unknown">>;
+ Multiple -> Multiple
+ end,
+ <<"request">> => Request
+ },
?event(payment, {p4_pre_pricing_estimate, Price}),
case hb_ao:resolve(LedgerMsg, LedgerReq, NodeMsg) of
- {ok, true} ->
+ {ok, Sufficient} when
+ Sufficient =:= true orelse
+ Sufficient =:= <<"infinity">> ->
% The ledger device has confirmed that the user has
% enough funds for the request, so we proceed.
- {ok, Messages};
- {ok, false} ->
- ?event(payment, {pre_ledger_validation, false}),
- {error,
- #{
- <<"status">> => 429,
- <<"body">> => <<"Insufficient funds">>,
- <<"price">> => Price
+ ?event(payment,
+ {p4_pre_ledger_response,
+ {balance_check, guaranteed}
+ }
+ ),
+ {ok, #{ <<"body">> => Messages }};
+ {ok, Balance} when Balance >= Price ->
+ % The user has enough funds to service the request,
+ % so we proceed.
+ ?event(payment,
+ {p4_pre_ledger_response,
+ {balance_check, sufficient}
}
- };
+ ),
+ {ok, #{ <<"body">> => Messages }};
+ {ok, Balance} ->
+ % The user does not have enough funds to service
+ % the request, so we don't proceed.
+ ?event(payment,
+ {insufficient_funds,
+ {balance, Balance},
+ {price, Price}
+ }
+ ),
+ {error, #{
+ <<"status">> => 429,
+ <<"body">> => <<"Insufficient funds">>,
+ <<"price">> => Price,
+ <<"balance">> => Balance
+ }};
{error, Error} ->
% The ledger device is unable to process the request,
% so we don't proceed.
- ?event(payment, {pre_ledger_validation, {error, Error}}),
- {error, {error_checking_ledger, Error}}
+ ?event(payment,
+ {pre_ledger_validation,
+ {error, Error},
+ {base, LedgerMsg},
+ {req, LedgerReq}
+ }
+ ),
+ {error, #{
+ <<"status">> => 500,
+ <<"body">> => <<"Error checking ledger balance.">>
+ }}
end
end
end.
%% @doc Postprocess the request after it has been fulfilled.
-postprocess(State, RawResponse, NodeMsg) ->
- PricingDevice = hb_ao:get(<<"pricing_device">>, State, false, NodeMsg),
- LedgerDevice = hb_ao:get(<<"ledger_device">>, State, false, NodeMsg),
+response(State, RawResponse, NodeMsg) ->
+ PricingDevice = hb_ao:get(<<"pricing-device">>, State, false, NodeMsg),
+ LedgerDevice = hb_ao:get(<<"ledger-device">>, State, false, NodeMsg),
Response =
hb_ao:get(
<<"body">>,
@@ -124,14 +172,16 @@ postprocess(State, RawResponse, NodeMsg) ->
),
Request = hb_ao:get(<<"request">>, RawResponse, NodeMsg),
?event(payment, {post_processing_with_devices, PricingDevice, LedgerDevice}),
- case (PricingDevice =/= false) and (LedgerDevice =/= false) of
- false -> {ok, Response};
+ ?event({response_hook, {request, Request}, {response, Response}}),
+ case ((PricingDevice =/= false) and (LedgerDevice =/= false)) andalso
+ is_chargable_req(Request, NodeMsg) of
+ false ->
+ {ok, #{ <<"body">> => Response }};
true ->
- PricingMsg = #{ <<"device">> => PricingDevice },
- LedgerMsg = #{ <<"device">> => LedgerDevice },
+ PricingMsg = State#{ <<"device">> => PricingDevice },
+ LedgerMsg = State#{ <<"device">> => LedgerDevice },
PricingReq = #{
<<"path">> => <<"price">>,
- <<"type">> => <<"post">>,
<<"request">> => Request,
<<"body">> => Response
},
@@ -148,25 +198,37 @@ postprocess(State, RawResponse, NodeMsg) ->
?event(payment, {p4_post_pricing_response, PricingRes}),
case PricingRes of
{ok, Price} ->
- % We have successfully estimated the cost of the request,
- % so we proceed to debit the user's account.
+ % We have successfully determined the cost of the request,
+ % so we proceed to debit the user's account. We sign the
+ % request with the node's private key, as it is the node
+ % that is performing the debit, not the user.
LedgerReq =
- #{
- <<"path">> => <<"debit">>,
- <<"type">> => <<"post">>,
- <<"amount">> => Price,
- <<"request">> => Request
- },
- ?event({p4_ledger_request, LedgerReq}),
- {ok, Resp} =
- hb_ao:resolve(
- LedgerMsg,
- LedgerReq,
- NodeMsg
+ hb_message:commit(
+ #{
+ <<"path">> => <<"debit">>,
+ <<"quantity">> => Price,
+ <<"account">> =>
+ case hb_message:signers(Request) of
+ [Signer] -> Signer;
+ [] -> <<"unknown">>;
+ Multiple -> Multiple
+ end,
+ <<"request">> => Request
+ },
+ hb_opts:get(priv_wallet, no_viable_wallet, NodeMsg)
),
- ?event(payment, {p4_post_ledger_response, Resp}),
- % Return the original request.
- {ok, Response};
+ ?event({p4_ledger_request, LedgerReq}),
+ case hb_ao:resolve(LedgerMsg, LedgerReq, NodeMsg) of
+ {ok, _} ->
+ ?event(payment, {p4_post_ledger_response, {ok, Price}}),
+ % Return the original response.
+ {ok, #{ <<"body">> => Response }};
+ {error, Error} ->
+ ?event(payment, {p4_post_ledger_response, {error, Error}}),
+ % The debit failed, so we return the error from the
+ % ledger device.
+ {error, Error}
+ end;
{error, PricingError} ->
% The pricing device is unable to process the request,
% so we don't proceed.
@@ -176,24 +238,24 @@ postprocess(State, RawResponse, NodeMsg) ->
%% @doc Get the balance of a user in the ledger.
balance(_, Req, NodeMsg) ->
- Preprocessor =
- hb_opts:get(
- <<"preprocessor">>,
- preprocessor_not_set,
- NodeMsg
- ),
- LedgerDevice = hb_ao:get(<<"ledger_device">>, Preprocessor, false, NodeMsg),
- LedgerMsg = #{ <<"device">> => LedgerDevice },
- LedgerReq = #{
- <<"path">> => <<"balance">>,
- <<"request">> => Req
- },
- ?event({ledger_message, {ledger_msg, LedgerMsg}}),
- case hb_ao:resolve(LedgerMsg, LedgerReq, NodeMsg) of
- {ok, Balance} ->
- {ok, Balance};
- {error, Error} ->
- {error, Error}
+ case dev_hook:find(<<"request">>, NodeMsg) of
+ [] ->
+ {error, <<"No request hook found.">>};
+ [Handler] ->
+ LedgerDevice =
+ hb_ao:get(<<"ledger-device">>, Handler, false, NodeMsg),
+ LedgerMsg = Handler#{ <<"device">> => LedgerDevice },
+ LedgerReq = #{
+ <<"path">> => <<"balance">>,
+ <<"request">> => Req
+ },
+ ?event(debug, {ledger_message, {ledger_msg, LedgerMsg}}),
+ case hb_ao:resolve(LedgerMsg, LedgerReq, NodeMsg) of
+ {ok, Balance} ->
+ {ok, Balance};
+ {error, Error} ->
+ {error, Error}
+ end
end.
%% @doc The node operator may elect to make certain routes non-chargable, using
@@ -205,7 +267,12 @@ is_chargable_req(Req, NodeMsg) ->
?DEFAULT_NON_CHARGABLE_ROUTES,
NodeMsg
),
- Matches = dev_router:match_routes(Req, NonChargableRoutes, NodeMsg),
+ Matches =
+ dev_router:match(
+ #{ <<"routes">> => NonChargableRoutes },
+ Req,
+ NodeMsg
+ ),
?event(
{
is_chargable,
@@ -215,7 +282,7 @@ is_chargable_req(Req, NodeMsg) ->
}
),
case Matches of
- no_matches -> true;
+ {error, no_matching_route} -> true;
_ -> false
end.
@@ -229,12 +296,14 @@ test_opts(Opts, PricingDev, LedgerDev) ->
ProcessorMsg =
#{
<<"device">> => <<"p4@1.0">>,
- <<"pricing_device">> => PricingDev,
- <<"ledger_device">> => LedgerDev
+ <<"pricing-device">> => PricingDev,
+ <<"ledger-device">> => LedgerDev
},
Opts#{
- preprocessor => ProcessorMsg,
- postprocessor => ProcessorMsg
+ on => #{
+ <<"request">> => ProcessorMsg,
+ <<"response">> => ProcessorMsg
+ }
}.
%% @doc Simple test of p4's capabilities with the `faff@1.0' device.
@@ -268,18 +337,20 @@ non_chargable_route_test() ->
Processor =
#{
<<"device">> => <<"p4@1.0">>,
- <<"ledger_device">> => <<"simple-pay@1.0">>,
- <<"pricing_device">> => <<"simple-pay@1.0">>
+ <<"ledger-device">> => <<"simple-pay@1.0">>,
+ <<"pricing-device">> => <<"simple-pay@1.0">>
},
Node = hb_http_server:start_node(
#{
p4_non_chargable_routes =>
[
#{ <<"template">> => <<"/~p4@1.0/balance">> },
- #{ <<"template">> => <<"/~meta@1.0/*">> }
+ #{ <<"template">> => <<"/~meta@1.0/*/*">> }
],
- preprocessor => Processor,
- postprocessor => Processor,
+ on => #{
+ <<"request">> => Processor,
+ <<"response">> => Processor
+ },
operator => hb:address()
}
),
@@ -290,13 +361,116 @@ non_chargable_route_test() ->
Res = hb_http:get(Node, GoodSignedReq, #{}),
?event({res1, Res}),
?assertMatch({ok, 0}, Res),
- Req2 = #{ <<"path">> => <<"/~meta@1.0/info">> },
+ Req2 = #{ <<"path">> => <<"/~meta@1.0/info/operator">> },
GoodSignedReq2 = hb_message:commit(Req2, Wallet),
Res2 = hb_http:get(Node, GoodSignedReq2, #{}),
?event({res2, Res2}),
- ?assertMatch({ok, #{ <<"operator">> := _ }}, Res2),
+ OperatorAddress = hb_util:human_id(hb:address()),
+ ?assertEqual({ok, OperatorAddress}, Res2),
Req3 = #{ <<"path">> => <<"/~scheduler@1.0">> },
BadSignedReq3 = hb_message:commit(Req3, Wallet),
Res3 = hb_http:get(Node, BadSignedReq3, #{}),
?event({res3, Res3}),
- ?assertMatch({error, _}, Res3).
\ No newline at end of file
+ ?assertMatch({error, _}, Res3).
+
+%% @doc Ensure that Lua modules can be used as pricing and ledger devices. Our
+%% modules come in two parts:
+%% - A `process' module which is executed as a persistent `local-process' on the
+%% node, and which maintains the state of the ledger.
+%% - A `client' module, which is executed as a `p4@1.0' device, marshalling
+%% requests to the `process' module.
+lua_pricing_test() ->
+ HostWallet = ar_wallet:new(),
+ ClientWallet = ar_wallet:new(),
+ {ok, ProcessScript} = file:read_file("scripts/p4-payment-process.lua"),
+ {ok, ClientScript} = file:read_file("scripts/p4-payment-client.lua"),
+ Processor =
+ #{
+ <<"device">> => <<"p4@1.0">>,
+ <<"ledger-device">> => <<"lua@5.3a">>,
+ <<"pricing-device">> => <<"simple-pay@1.0">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"text/x-lua">>,
+ <<"name">> => <<"scripts/p4-payment-client.lua">>,
+ <<"body">> => ClientScript
+ },
+ <<"ledger-path">> => <<"/ledger~node-process@1.0">>
+ },
+ Node =
+ hb_http_server:start_node(
+ #{
+ priv_wallet => HostWallet,
+ p4_non_chargable_routes =>
+ [
+ #{
+ <<"template">> =>
+ <<"/*~node-process@1.0/*">>
+ }
+ ],
+ on => #{
+ <<"request">> => Processor,
+ <<"response">> => Processor
+ },
+ operator => ar_wallet:to_address(HostWallet),
+ node_processes => #{
+ <<"ledger">> => #{
+ <<"device">> => <<"process@1.0">>,
+ <<"execution-device">> => <<"lua@5.3a">>,
+ <<"scheduler-device">> => <<"scheduler@1.0">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"text/x-lua">>,
+ <<"name">> => <<"scripts/p4-payment-process.lua">>,
+ <<"body">> => ProcessScript
+ },
+ <<"operator">> =>
+ hb_util:human_id(ar_wallet:to_address(HostWallet))
+ }
+ }
+ }
+ ),
+ Req = #{
+ <<"path">> => <<"/greeting">>,
+ <<"greeting">> => <<"Hello, world!">>
+ },
+ SignedReq = hb_message:commit(Req, ClientWallet),
+ Res = hb_http:get(Node, SignedReq, #{}),
+ ?event({expected_failure, Res}),
+ ?assertMatch({error, _}, Res),
+ {ok, TopupRes} =
+ hb_http:post(
+ Node,
+ hb_message:commit(
+ #{
+ <<"path">> => <<"/ledger~node-process@1.0/schedule">>,
+ <<"body">> =>
+ hb_message:commit(
+ #{
+ <<"path">> => <<"credit-notice">>,
+ <<"quantity">> => 100,
+ <<"recipient">> =>
+ hb_util:human_id(
+ ar_wallet:to_address(ClientWallet)
+ )
+ },
+ HostWallet
+ )
+ },
+ HostWallet
+ ),
+ #{}
+ ),
+ ?event({topup_res, TopupRes}),
+ ResAfterTopup = hb_http:get(Node, SignedReq, #{}),
+ ?event({res_after_topup, ResAfterTopup}),
+ ?assertMatch({ok, <<"Hello, world!">>}, ResAfterTopup),
+ {ok, Balance} =
+ hb_http:get(
+ Node,
+ <<
+ "/ledger~node-process@1.0/now/balance/",
+ (hb_util:human_id(ar_wallet:to_address(ClientWallet)))/binary
+ >>,
+ #{}
+ ),
+ ?event({balance, Balance}),
+ ?assertMatch(#{ <<"body">> := <<"98">> }, Balance).
\ No newline at end of file
diff --git a/src/dev_patch.erl b/src/dev_patch.erl
index 306d8f716..f7ef33e26 100644
--- a/src/dev_patch.erl
+++ b/src/dev_patch.erl
@@ -1,83 +1,177 @@
-%%% @doc A device that finds `PATCH' requests in the `results/outbox'
-%%% of its message, and applies them to it. This can be useful for processes
-%%% whose computation would like to manipulate data outside of the `results' key
-%%% of its message.
+%%% @doc A device that can be used to reorganize a message: Moving data from
+%%% one path inside it to another. This device's function runs in two modes:
+%%%
+%%% 1. When using `all' to move all data at the path given in `from' to the
+%%% path given in `to'.
+%%% 2. When using `patches' to move all submessages in the source to the target,
+%%% _if_ they have a `method' key of `PATCH' or a `device' key of `patch@1.0'.
+%%%
+%%% Source and destination paths may be prefixed with `base:' or `req:' keys to
+%%% indicate that they are relative to either of the messages that the
+%%% computation is being performed on.
+%%%
+%%% The search order for finding the source and destination keys is as follows,
+%%% where `X' is either `from' or `to':
+%%%
+%%% 1. The `patch-X' key of the execution message.
+%%% 2. The `X' key of the execution message.
+%%% 3. The `patch-X' key of the request message.
+%%% 4. The `X' key of the request message.
+%%%
+%%% Additionally, this device implements the standard computation device keys,
+%%% allowing it to be used as an element of an execution stack pipeline, etc.
-module(dev_patch).
+-export([all/3, patches/3]).
+%%% `execution-device` standard hooks:
-export([init/3, compute/3, normalize/3, snapshot/3]).
-include_lib("eunit/include/eunit.hrl").
-include_lib("include/hb.hrl").
-%% @doc Default process device hooks.
+%% @doc Necessary hooks for compliance with the `execution-device' standard.
init(Msg1, _Msg2, _Opts) -> {ok, Msg1}.
normalize(Msg1, _Msg2, _Opts) -> {ok, Msg1}.
snapshot(Msg1, _Msg2, _Opts) -> {ok, Msg1}.
+compute(Msg1, Msg2, Opts) -> patches(Msg1, Msg2, Opts).
-%% @doc Find `PATCH' requests in the `results/outbox' of the message, and apply
-%% them to the state.
-compute(Msg1, Msg2, Opts) ->
- % Find the input keys.
- PatchFrom = hb_ao:get_first(
- [
- {Msg2, <<"patch-from">>},
- {Msg1, <<"patch-from">>}
- ],
- <<"/results/outbox">>,
- Opts
- ),
- PatchTo = hb_ao:get_first(
- [
- {Msg2, <<"patch-to">>},
- {Msg1, <<"patch-to">>}
- ],
- <<"/">>,
- Opts
- ),
- ?event({patch_from, PatchFrom}),
- ?event({patch_to, PatchTo}),
- % Get the outbox from the message.
- Outbox = hb_ao:get(PatchFrom, Msg1, #{}, Opts),
- % Find all messages with the PATCH request.
- Patches =
- maps:filter(
- fun(_, Msg) ->
- (hb_ao:get(<<"method">>, Msg, Opts) == <<"PATCH">>) orelse
- (hb_ao:get(<<"device">>, Msg, Opts) == <<"patch@1.0">>)
+%% @doc Get the value found at the `patch-from' key of the message, or the
+%% `from' key if the former is not present. Remove it from the message and set
+%% the new source to the value found.
+all(Msg1, Msg2, Opts) ->
+ move(all, Msg1, Msg2, Opts).
+
+%% @doc Find relevant `PATCH' messages in the given source key of the execution
+%% and request messages, and apply them to the given destination key of the
+%% request.
+patches(Msg1, Msg2, Opts) ->
+ move(patches, Msg1, Msg2, Opts).
+
+%% @doc Unified executor for the `all' and `patches' modes.
+move(Mode, Msg1, Msg2, Opts) ->
+ maybe
+ % Find the input paths.
+ % For `from' we parse the path to see if it is relative to the request
+ % or the base message. This is not needed for `to' because it is
+ % always relative to the request.
+ RawPatchFrom =
+ hb_ao:get_first(
+ [
+ {Msg2, <<"patch-from">>},
+ {Msg1, <<"patch-from">>},
+ {Msg2, <<"from">>},
+ {Msg1, <<"from">>}
+ ],
+ <<"/">>,
+ Opts
+ ),
+ {FromMsg, PatchFromParts} =
+ case hb_path:term_to_path_parts(RawPatchFrom) of
+ [BinKey|RestKeys] ->
+ case binary:split(BinKey, <<":">>) of
+ [<<"base">>, RestKey] ->
+ {Msg1, [RestKey|RestKeys]};
+ [<<"req">>, RestKey] ->
+ {Msg2, [RestKey|RestKeys]};
+ _ ->
+ {Msg1, RawPatchFrom}
+ end;
+ _ ->
+ {Msg1, RawPatchFrom}
end,
- Outbox
- ),
- OutboxWithoutPatches = maps:without(maps:keys(Patches), Outbox),
- % Remove the outbox from the message.
- Msg1WithoutOutbox = hb_ao:set(Msg1, PatchFrom, should_never_happen, Opts),
- % Set the new outbox.
- Msg1WithNewOutbox = hb_ao:set(Msg1WithoutOutbox, PatchFrom, OutboxWithoutPatches, Opts),
- % Find the state to apply the patches to.
- % Apply the patches to the state.
- PatchedSubmessage =
- maps:fold(
- fun(_, Patch, MsgN) ->
- ?event({patching, {patch, Patch}, {before, MsgN}}),
- Res = hb_ao:set(
- MsgN,
- maps:without([<<"method">>], Patch),
- Opts
- ),
- ?event({patched, {'after', Res}}),
- Res
+ ?event({patch_from_parts, {explicit, PatchFromParts}}),
+ PatchFrom =
+ case hb_path:to_binary(PatchFromParts) of
+ <<"">> -> <<"/">>;
+ Path -> Path
end,
- case PatchTo of
- not_found -> Msg1WithNewOutbox;
- PatchTo -> hb_ao:get(PatchTo, Msg1WithNewOutbox, Opts)
+ ?event({patch_from, PatchFrom}),
+ PatchTo =
+ hb_ao:get_first(
+ [
+ {Msg2, <<"patch-to">>},
+ {Msg1, <<"patch-to">>},
+ {Msg2, <<"to">>},
+ {Msg1, <<"to">>}
+ ],
+ <<"/">>,
+ Opts
+ ),
+ ?event({patch_from, PatchFrom}),
+ ?event({patch_to, PatchTo}),
+ % Get the source of the patches from the message. Makes the `maybe'
+ % statement return `{error, not_found}' if the source is not found.
+ {ok, Source} ?= hb_ao:resolve(FromMsg, PatchFrom, Opts),
+ % Find all messages with the PATCH request.
+ {ToWrite, NewSourceValue} =
+ case Mode of
+ patches ->
+ maps:fold(
+ fun(Key, Msg, {PatchAcc, NewSourceAcc}) ->
+ Method = hb_ao:get(<<"method">>, Msg, Opts)
+ == <<"PATCH">>,
+ Device = hb_ao:get(<<"device">>, Msg, Opts)
+ == <<"patch@1.0">>,
+ if Method orelse Device ->
+ {PatchAcc#{Key => Msg}, NewSourceAcc};
+ true ->
+ {PatchAcc, NewSourceAcc#{ Key => Msg }}
+ end
+ end,
+ {#{}, #{}},
+ Source
+ );
+ all ->
+ {Source, unset}
end,
- Patches
- ),
- PatchedState =
- case PatchTo of
- <<"/">> -> PatchedSubmessage;
- _ -> hb_ao:set(Msg1WithNewOutbox, PatchTo, PatchedSubmessage, Opts)
- end,
- % Return the patched message and the source, less the patches.
- ?event({patch_result, PatchedState}),
- {ok, PatchedState}.
+ ?event({source_data, ToWrite}),
+ ?event({new_data_for_source_path, NewSourceValue}),
+ % Remove the source from the message and set the new source.
+ FromMsgWithoutSource =
+ hb_ao:set(
+ FromMsg,
+ PatchFrom,
+ <<"patch-error">>,
+ Opts
+ ),
+ FromMsgWithNewSource =
+ hb_ao:set(
+ FromMsgWithoutSource,
+ #{ PatchFrom => NewSourceValue },
+ Opts
+ ),
+    % If the `mode' is `patches', we need to remove the `method' key from
+    % each matched patch message, if present.
+ ToWriteMod =
+ case Mode of
+ all -> ToWrite;
+ patches ->
+ maps:fold(
+ fun(_, Patch, MsgN) ->
+ ?event({patching, {patch, Patch}, {before, MsgN}}),
+ Res =
+ hb_ao:set(
+ MsgN,
+ maps:without([<<"method">>], Patch),
+ Opts
+ ),
+ ?event({patched, {'after', Res}}),
+ Res
+ end,
+ #{},
+ ToWrite
+ )
+ end,
+ % Find the target to apply the patches to, and apply them.
+ PatchedResult =
+ hb_ao:set(
+ FromMsgWithNewSource,
+ PatchTo,
+ ToWriteMod,
+ Opts
+ ),
+ % Return the patched message and the source, less the patches.
+ ?event({patch_result, PatchedResult}),
+ {ok, PatchedResult}
+ end.
%%% Tests
@@ -157,4 +251,94 @@ patch_to_submessage_test() ->
?assertEqual(
100,
hb_ao:get(<<"state/prices/apple">>, ResolvedState, #{})
+ ).
+
+all_mode_test() ->
+ InitState = #{
+ <<"device">> => <<"patch@1.0">>,
+ <<"input">> => #{
+ <<"zones">> => #{
+ <<"1">> => #{
+ <<"method">> => <<"PATCH">>,
+ <<"prices">> => #{
+ <<"apple">> => 100,
+ <<"banana">> => 200
+ }
+ },
+ <<"2">> => #{
+ <<"method">> => <<"GET">>,
+ <<"prices">> => #{
+ <<"orange">> => 300
+ }
+ }
+ }
+ },
+ <<"state">> => #{
+ <<"prices">> => #{
+ <<"apple">> => 1000
+ }
+ }
+ },
+ {ok, ResolvedState} =
+ hb_ao:resolve(
+ InitState,
+ #{
+ <<"path">> => <<"all">>,
+ <<"patch-to">> => <<"/state">>,
+ <<"patch-from">> => <<"/input/zones">>
+ },
+ #{}
+ ),
+ ?event({resolved_state, ResolvedState}),
+ ?assertEqual(
+ 100,
+ hb_ao:get(<<"state/1/prices/apple">>, ResolvedState, #{})
+ ),
+ ?assertEqual(
+ 300,
+ hb_ao:get(<<"state/2/prices/orange">>, ResolvedState, #{})
+ ),
+ ?assertEqual(
+ not_found,
+ hb_ao:get(<<"input/zones">>, ResolvedState, #{})
+ ).
+
+req_prefix_test() ->
+ BaseMsg = #{
+ <<"device">> => <<"patch@1.0">>,
+ <<"state">> => #{
+ <<"prices">> => #{
+ <<"apple">> => 1000
+ }
+ }
+ },
+ ReqMsg = #{
+ <<"path">> => <<"all">>,
+ <<"patch-from">> => <<"req:/results/outbox/1">>,
+ <<"patch-to">> => <<"/state">>,
+ <<"results">> => #{
+ <<"outbox">> => #{
+ <<"1">> => #{
+ <<"method">> => <<"PATCH">>,
+ <<"prices">> => #{
+ <<"apple">> => 100,
+ <<"banana">> => 200
+ }
+ }
+ }
+ }
+ },
+ {ok, ResolvedState} = hb_ao:resolve(BaseMsg, ReqMsg, #{}),
+ ?event({resolved_state, ResolvedState}),
+ ?assertEqual(
+ 100,
+ hb_ao:get(<<"state/prices/apple">>, ResolvedState, #{})
+ ),
+ ?assertEqual(
+ 200,
+ hb_ao:get(<<"state/prices/banana">>, ResolvedState, #{})
+ ),
+ ?assertEqual(
+ not_found,
+ hb_ao:get(<<"results/outbox/1">>, ResolvedState, #{})
).
\ No newline at end of file
diff --git a/src/dev_poda.erl b/src/dev_poda.erl
index bb92eb8f9..46960f285 100644
--- a/src/dev_poda.erl
+++ b/src/dev_poda.erl
@@ -1,11 +1,4 @@
--module(dev_poda).
--export([init/2, execute/3]).
--export([is_user_signed/1]).
--export([push/2]).
--include("include/hb.hrl").
--hb_debug(print).
-
-%%% A simple exemplar decentralized proof of authority consensus algorithm
+%%% @doc A simple exemplar decentralized proof of authority consensus algorithm
%%% for AO processes. This device is split into two flows, spanning three
%%% actions.
%%%
@@ -14,6 +7,12 @@
%%% 2. Validation of incoming messages before execution.
%%% Commitment flow:
%%% 1. Adding commitments to results, either on a CU or MU.
+-module(dev_poda).
+-export([init/2, execute/3]).
+-export([is_user_signed/1]).
+-export([push/2]).
+-include("include/hb.hrl").
+-hb_debug(print).
%%% Execution flow: Initialization.
@@ -168,6 +167,7 @@ return_error(S = #{ <<"wallet">> := Wallet }, Reason) ->
}
}}.
+%%% @doc Determines if a message was committed by a user (no `from-process' tag).
is_user_signed(#tx { data = #{ <<"body">> := Msg } }) ->
?no_prod(use_real_commitment_detection),
lists:keyfind(<<"from-process">>, 1, Msg#tx.tags) == false;
diff --git a/src/dev_process.erl b/src/dev_process.erl
index 963a0939e..1799e8a50 100644
--- a/src/dev_process.erl
+++ b/src/dev_process.erl
@@ -256,8 +256,13 @@ compute_to_slot(ProcID, Msg1, Msg2, TargetSlot, Opts) ->
% If the compute_slot function returns an error,
% we return the error details, along with the current
% slot.
+ ErrMsg =
+ if is_map(Error) ->
+ Error;
+ true -> #{ <<"error">> => Error }
+ end,
{error,
- Error#{
+ ErrMsg#{
<<"phase">> => <<"compute">>,
<<"attempted-slot">> => NextSlot
}
@@ -618,6 +623,7 @@ test_aos_process(Opts, Stack) ->
<<"execution-device">> => <<"stack@1.0">>,
<<"scheduler-device">> => <<"scheduler@1.0">>,
<<"output-prefix">> => <<"wasm">>,
+ <<"patch-from">> => <<"/results/outbox">>,
<<"passes">> => 2,
<<"stack-keys">> =>
[
@@ -728,7 +734,7 @@ get_scheduler_slot_test() ->
schedule_test_message(Msg1, <<"TEST TEXT 1">>),
schedule_test_message(Msg1, <<"TEST TEXT 2">>),
Msg2 = #{
- <<"path">> => <<"Slot">>,
+ <<"path">> => <<"slot">>,
<<"method">> => <<"GET">>
},
?assertMatch(
diff --git a/src/dev_push.erl b/src/dev_push.erl
index 2ed5b6edf..c555eefe9 100644
--- a/src/dev_push.erl
+++ b/src/dev_push.erl
@@ -7,28 +7,39 @@
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
-%% @doc Push either a message or an assigned slot number.
+%% @doc Push either a message or an assigned slot number. If a `Process' is
+%% provided in the `body' of the request, it will be scheduled (initializing
+%% it if it does not exist). Otherwise, the message specified by the given
+%% `slot' key will be pushed.
+%%
+%% Optional parameters:
+%% `/result-depth': The depth to which the full contents of the result
+%% will be included in the response. Default: 1, returning
+%% the full result of the first message, but only the 'tree'
+%% of downstream messages.
+%% `/push-mode': Whether or not the push should be done asynchronously.
+%% Default: `sync', pushing synchronously.
push(Base, Req, Opts) ->
- ModBase = dev_process:as_process(Base, Opts),
- ?event(push, {push_base, {base, ModBase}, {req, Req}}, Opts),
+ Process = dev_process:as_process(Base, Opts),
+ ?event(push, {push_base, {base, Process}, {req, Req}}, Opts),
case hb_ao:get(<<"slot">>, {as, <<"message@1.0">>, Req}, no_slot, Opts) of
no_slot ->
- case schedule_initial_message(ModBase, Req, Opts) of
+ case schedule_initial_message(Process, Req, Opts) of
{ok, Assignment} ->
case find_type(hb_ao:get(<<"body">>, Assignment, Opts), Opts) of
<<"Message">> ->
?event(push,
{pushing_message,
- {base, ModBase},
+ {base, Process},
{assignment, Assignment}
},
Opts
),
- push_with_mode(ModBase, Assignment, Opts);
+ push_with_mode(Process, Assignment, Opts);
<<"Process">> ->
?event(push,
{initializing_process,
- {base, ModBase},
+ {base, Process},
{assignment, Assignment}},
Opts
),
@@ -36,55 +47,55 @@ push(Base, Req, Opts) ->
end;
{error, Res} -> {error, Res}
end;
- _ -> push_with_mode(ModBase, Req, Opts)
+ _ -> push_with_mode(Process, Req, Opts)
end.
-push_with_mode(Base, Req, Opts) ->
- Mode = is_async(Base, Req, Opts),
+push_with_mode(Process, Req, Opts) ->
+ Mode = is_async(Process, Req, Opts),
case Mode of
<<"sync">> ->
- do_push(Base, Req, Opts);
+ do_push(Process, Req, Opts);
<<"async">> ->
- spawn(fun() -> do_push(Base, Req, Opts) end)
+ spawn(fun() -> do_push(Process, Req, Opts) end)
end.
%% @doc Determine if the push is asynchronous.
-is_async(Base, Req, Opts) ->
+is_async(Process, Req, Opts) ->
hb_ao:get_first(
[
{Req, <<"push-mode">>},
- {Base, <<"push-mode">>},
- {Base, <<"process/push-mode">>}
+ {Process, <<"push-mode">>},
+ {Process, <<"process/push-mode">>}
],
<<"sync">>,
Opts
).
-%% @doc Push a message or slot number.
-do_push(Base, Assignment, Opts) ->
+%% @doc Push a message or slot number, including its downstream results.
+do_push(Process, Assignment, Opts) ->
Slot = hb_ao:get(<<"slot">>, Assignment, Opts),
- ID = dev_process:process_id(Base, #{}, Opts),
+ ID = dev_process:process_id(Process, #{}, Opts),
?event(push, {push_computing_outbox, {process_id, ID}, {slot, Slot}}),
{Status, Result} = hb_ao:resolve(
- {as, <<"process@1.0">>, Base},
+ {as, <<"process@1.0">>, Process},
#{ <<"path">> => <<"compute/results">>, <<"slot">> => Slot },
Opts#{ hashpath => ignore }
),
+ % Determine if we should include the full compute result in our response.
+ IncludeDepth = hb_ao:get(<<"result-depth">>, Assignment, 1, Opts),
AdditionalRes =
- case IncludeDepth = hb_opts:get(push_include_result, 1, Opts) of
+ case IncludeDepth of
X when X > 0 -> Result;
_ -> #{}
end,
- NextOpts =
- Opts#{
- push_include_result => IncludeDepth - 1
- },
+ ?event(push_depth, {depth, IncludeDepth, {assignment, Assignment}}),
?event(push, {push_computed, {process, ID}, {slot, Slot}}),
case {Status, hb_ao:get(<<"outbox">>, Result, #{}, Opts)} of
{ok, NoResults} when ?IS_EMPTY_MESSAGE(NoResults) ->
- ?event(push_short, {push_complete, {process, {string, ID}}, {slot, Slot}}),
+ ?event(push_short, {done, {process, {string, ID}}, {slot, Slot}}),
{ok, AdditionalRes#{ <<"slot">> => Slot, <<"process">> => ID }};
{ok, Outbox} ->
+ ?event(push, {push_found_outbox, {outbox, Outbox}}),
Downstream =
maps:map(
fun(Key, MsgToPush = #{ <<"target">> := Target }) ->
@@ -92,17 +103,22 @@ do_push(Base, Assignment, Opts) ->
{ok, PushBase} ->
push_result_message(
PushBase,
- Slot,
- Key,
MsgToPush,
- NextOpts
+ #{
+ <<"process">> => ID,
+ <<"slot">> => Slot,
+ <<"outbox-key">> => Key,
+ <<"result-depth">> => IncludeDepth
+ },
+ Opts
);
not_found ->
#{
<<"response">> => <<"error">>,
<<"status">> => 404,
<<"target">> => Target,
- <<"reason">> => <<"Could not access target process!">>
+ <<"reason">> =>
+ <<"Could not access target process!">>
}
end;
(Key, Msg) ->
@@ -114,33 +130,45 @@ do_push(Base, Assignment, Opts) ->
<<"message">> => Msg
}
end,
- Outbox
+ hb_ao:normalize_keys(Outbox)
),
{ok, maps:merge(Downstream, AdditionalRes#{
<<"slot">> => Slot,
<<"process">> => ID
})};
- {Err, Error} when Err == error; Err == failure -> {error, Error}
+ {Err, Error} when Err == error; Err == failure ->
+ ?event(push, {push_failed_to_find_outbox, {error, Error}}, Opts),
+ {error, Error}
end.
-push_result_message(Base, FromSlot, Key, MsgToPush, Opts) ->
+%% @doc Push a downstream message result. The `Origin' map contains information
+%% about the origin of the message: The process that originated the message,
+%% the slot number from which it was sent, and the outbox key of the message,
+%% and the depth to which downstream results should be included in the message.
+push_result_message(TargetProcess, MsgToPush, Origin, Opts) ->
case hb_ao:get(<<"target">>, MsgToPush, undefined, Opts) of
undefined ->
- ?event(push, {skip_no_target, {key, Key}, MsgToPush}, Opts),
+ ?event(push,
+ {skip_no_target, {msg, MsgToPush}, {origin, Origin}},
+ Opts
+ ),
#{};
TargetID ->
?event(push,
{pushing_child,
- {originates_from_slot, FromSlot},
- {outbox_key, Key},
- {target_id, TargetID}
+ {target, TargetID},
+ {msg, MsgToPush},
+ {origin, Origin}
},
Opts
),
- case schedule_result(Base, MsgToPush, Opts) of
+ case schedule_result(TargetProcess, MsgToPush, Origin, Opts) of
{ok, Assignment} ->
+ % Analyze the result of the message push.
NextSlotOnProc = hb_ao:get(<<"slot">>, Assignment, Opts),
PushedMsg = hb_ao:get(<<"body">>, Assignment, Opts),
+ % Get the ID of the message that was pushed. We already have
+ % the 'origin' message, but we need the signed ID.
PushedMsgID = hb_message:id(PushedMsg, all, Opts),
?event(push_short,
{pushed_message_to,
@@ -152,11 +180,23 @@ push_result_message(Base, FromSlot, Key, MsgToPush, Opts) ->
TargetAsProcess = dev_process:ensure_process_key(TargetBase, Opts),
RecvdID = hb_message:id(TargetBase, all),
?event(push, {recvd_id, {id, RecvdID}, {msg, TargetAsProcess}}),
- Resurse = hb_ao:resolve(
- {as, <<"process@1.0">>, TargetAsProcess},
- #{ <<"path">> => <<"push">>, <<"slot">> => NextSlotOnProc },
- Opts#{ cache_control => <<"always">> }
- ),
+ % Push the message downstream. We decrease the result-depth.
+ Resurse =
+ hb_ao:resolve(
+ {as, <<"process@1.0">>, TargetAsProcess},
+ #{
+ <<"path">> => <<"push">>,
+ <<"slot">> => NextSlotOnProc,
+ <<"result-depth">> =>
+ hb_ao:get(
+ <<"result-depth">>,
+ Origin,
+ 1,
+ Opts
+ ) - 1
+ },
+ Opts#{ cache_control => <<"always">> }
+ ),
case Resurse of
{ok, Downstream} ->
#{
@@ -208,22 +248,27 @@ extract(target, Raw) ->
{Target, _} = split_target(Raw),
Target.
+%% @doc Split the target into the process ID and the optional query string.
split_target(RawTarget) ->
case binary:split(RawTarget, [<<"?">>, <<"&">>]) of
[Target, QStr] -> {Target, QStr};
_ -> {RawTarget, <<>>}
end.
-schedule_result(Base, MsgToPush, Opts) ->
- schedule_result(Base, MsgToPush, <<"httpsig@1.0">>, Opts).
-schedule_result(Base, MsgToPush, Codec, Opts) ->
+%% @doc Add the necessary keys to the message to be scheduled, then schedule it.
+%% If the remote scheduler does not support the given codec, it will be
+%% downgraded and re-signed.
+schedule_result(TargetProcess, MsgToPush, Origin, Opts) ->
+ schedule_result(TargetProcess, MsgToPush, <<"httpsig@1.0">>, Origin, Opts).
+schedule_result(TargetProcess, MsgToPush, Codec, Origin, Opts) ->
Target = hb_ao:get(<<"target">>, MsgToPush, Opts),
?event(push,
{push_scheduling_result,
{target, {string, Target}},
- {target_process, Base},
+ {target_process, TargetProcess},
{msg, MsgToPush},
- {codec, Codec}
+ {codec, Codec},
+ {origin, Origin}
},
Opts
),
@@ -233,7 +278,7 @@ schedule_result(Base, MsgToPush, Codec, Opts) ->
<<"path">> => <<"schedule">>,
<<"body">> =>
SignedMsg = hb_message:commit(
- additional_keys(Base, MsgToPush, Opts),
+ additional_keys(Origin, MsgToPush, Opts),
Opts,
Codec
)
@@ -246,11 +291,11 @@ schedule_result(Base, MsgToPush, Codec, Opts) ->
),
{ErlStatus, Res} =
hb_ao:resolve(
- {as, <<"process@1.0">>, Base},
+ {as, <<"process@1.0">>, TargetProcess},
SignedReq,
Opts#{ cache_control => <<"always">> }
),
- ?event(push, {push_scheduling_result, {status, ErlStatus}, {response, Res}}, Opts),
+ ?event(push, {push_sched_result, {status, ErlStatus}, {response, Res}}, Opts),
case {ErlStatus, hb_ao:get(<<"status">>, Res, 200, Opts)} of
{ok, 200} ->
{ok, Res};
@@ -261,13 +306,26 @@ schedule_result(Base, MsgToPush, Codec, Opts) ->
SignedNormMsg = hb_message:commit(NormMsg, Opts),
remote_schedule_result(Location, SignedNormMsg, Opts);
{error, 422} ->
- ?event(push, {received_wrong_format, {422, Res}, {codec, Codec}}, Opts),
+ ?event(push, {wrong_format, {422, Res}, {codec, Codec}}, Opts),
case Codec of
<<"ans104@1.0">> ->
{error, Res};
<<"httpsig@1.0">> ->
- ?event(push, {downgrading_to_ans104, {422, Res}, {codec, Codec}}, Opts),
- schedule_result(Base, MsgToPush, <<"ans104@1.0">>, Opts)
+ ?event(push,
+ {downgrading_to_ans104,
+ {422, Res},
+ {codec, Codec},
+ {origin, Origin}
+ },
+ Opts
+ ),
+ schedule_result(
+ TargetProcess,
+ MsgToPush,
+ <<"ans104@1.0">>,
+ Origin,
+ Opts
+ )
end;
{error, _} ->
{error, Res}
@@ -275,14 +333,15 @@ schedule_result(Base, MsgToPush, Codec, Opts) ->
%% @doc Set the necessary keys in order for the recipient to know where the
%% message came from.
-additional_keys(FromMsg, ToSched, Opts) ->
+additional_keys(Origin, ToSched, Opts) ->
+ ?event(push, {adding_keys, {origin, Origin}, {to, ToSched}}, Opts),
hb_ao:set(
ToSched,
#{
<<"data-protocol">> => <<"ao">>,
<<"variant">> => <<"ao.N.1">>,
<<"type">> => <<"Message">>,
- <<"from-process">> => hb_message:id(FromMsg, all, Opts)
+ <<"from-process">> => maps:get(<<"process">>, Origin)
},
Opts#{ hashpath => ignore }
).
@@ -398,12 +457,18 @@ full_push_test_() ->
)
end}.
-multi_process_push_test_disabled() ->
+multi_process_push_test_() ->
{timeout, 30, fun() ->
dev_process:init(),
Opts = #{
priv_wallet => hb:wallet(),
- cache_control => <<"always">>
+ cache_control => <<"always">>,
+ store => [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-TEST">>
+ }
+ ]
},
Proc1 = dev_process:test_aos_process(Opts),
hb_cache:write(Proc1, Opts),
@@ -426,18 +491,8 @@ multi_process_push_test_disabled() ->
},
Opts
),
- ProcID1 =
- hb_ao:get(
- <<"process/id">>,
- dev_process:ensure_process_key(Proc1, Opts),
- Opts
- ),
- ProcID2 =
- hb_ao:get(
- <<"process/id">>,
- dev_process:ensure_process_key(Proc2, Opts),
- Opts
- ),
+ ProcID1 = hb_message:id(Proc1, all, Opts),
+ ProcID2 = hb_message:id(Proc2, all, Opts),
?event(push, {testing_with, {proc1_id, ProcID1}, {proc2_id, ProcID2}}),
{ok, ToPush} = dev_process:schedule_aos_call(
Proc2,
@@ -448,7 +503,7 @@ multi_process_push_test_disabled() ->
" print(\"GOT PONG\")\n"
" end\n"
")\n"
- "Send({ Target = \"", (ProcID1)/binary, "\", Action = \"Ping\" })\n"
+ "Send({ Target = \"", (ProcID1)/binary, "\", Action = \"Ping\" })"
>>
),
SlotToPush = hb_ao:get(<<"slot">>, ToPush, Opts),
@@ -456,7 +511,8 @@ multi_process_push_test_disabled() ->
Msg3 =
#{
<<"path">> => <<"push">>,
- <<"slot">> => SlotToPush
+ <<"slot">> => SlotToPush,
+ <<"result-depth">> => 1
},
{ok, PushResult} = hb_ao:resolve(Proc2, Msg3, Opts),
?event(push, {push_result_proc2, PushResult}),
@@ -468,7 +524,13 @@ multi_process_push_test_disabled() ->
push_with_redirect_hint_test_disabled() ->
{timeout, 30, fun() ->
dev_process:init(),
- Stores = [#{ <<"store-module">> => hb_store_fs, <<"prefix">> => <<"cache-TEST">> }],
+ Stores =
+ [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-TEST">>
+ }
+ ],
ExtOpts = #{ priv_wallet => ar_wallet:new(), store => Stores },
LocalOpts = #{ priv_wallet => hb:wallet(), store => Stores },
ExtScheduler = hb_http_server:start_node(ExtOpts),
@@ -477,7 +539,13 @@ push_with_redirect_hint_test_disabled() ->
Client = dev_process:test_aos_process(),
PongServer = dev_process:test_aos_process(ExtOpts),
% Push the new process that runs on the external scheduler
- {ok, ServerSchedResp} = hb_http:post(ExtScheduler, <<"/push">>, PongServer, ExtOpts),
+ {ok, ServerSchedResp} =
+ hb_http:post(
+ ExtScheduler,
+ <<"/push">>,
+ PongServer,
+ ExtOpts
+ ),
?event(push, {pong_server_sched_resp, ServerSchedResp}),
% Get the IDs of the server process
PongServerID =
@@ -594,13 +662,15 @@ ping_pong_script(Limit) ->
reply_script() ->
<<
- "Handlers.add(\"Reply\",\n"
- " function (test) return true end,\n"
- " function(m)\n"
- " print(\"Replying to...\")\n"
- " print(m.From)\n"
- " Send({ Target = m.From, Action = \"Reply\", Message = \"Pong!\" })\n"
- " print(\"Done.\")\n"
- " end\n"
- ")\n"
+ """
+ Handlers.add("Reply",
+ { Action = "Ping" },
+ function(m)
+ print("Replying to...")
+ print(m.From)
+ Send({ Target = m.From, Action = "Reply", Message = "Pong!" })
+ print("Done.")
+ end
+ )
+ """
>>.
\ No newline at end of file
diff --git a/src/dev_relay.erl b/src/dev_relay.erl
index d0077cb33..cfe4b322c 100644
--- a/src/dev_relay.erl
+++ b/src/dev_relay.erl
@@ -17,7 +17,7 @@
-export([call/3, cast/3]).
%%% Re-route requests that would be executed locally to other peers, according
%%% to the node's routing table.
--export([preprocess/3]).
+-export([request/3]).
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
@@ -89,19 +89,23 @@ cast(M1, M2, Opts) ->
{ok, <<"OK">>}.
%% @doc Preprocess a request to check if it should be relayed to a different node.
-preprocess(_M1, M2, Opts) ->
+request(_Msg1, Msg2, Opts) ->
{ok,
- [
- #{ <<"device">> => <<"relay@1.0">> },
- #{
- <<"path">> => <<"call">>,
- <<"target">> => <<"body">>,
- <<"body">> =>
- hb_ao:get(<<"request">>, M2, Opts#{ hashpath => ignore })
- }
- ]
+ #{
+ <<"body">> =>
+ [
+ #{ <<"device">> => <<"relay@1.0">> },
+ #{
+ <<"path">> => <<"call">>,
+ <<"target">> => <<"body">>,
+ <<"body">> =>
+ hb_ao:get(<<"request">>, Msg2, Opts#{ hashpath => ignore })
+ }
+ ]
+ }
}.
+
%%% Tests
call_get_test() ->
@@ -120,7 +124,7 @@ call_get_test() ->
%% @doc Test that the `preprocess/3' function re-routes a request to remote
%% peers, according to the node's routing table.
-preprocessor_reroute_to_nearest_test() ->
+request_hook_reroute_to_nearest_test() ->
Peer1 = <<"https://compute-1.forward.computer">>,
Peer2 = <<"https://compute-2.forward.computer">>,
HTTPSOpts = #{ http_client => httpc },
@@ -147,7 +151,7 @@ preprocessor_reroute_to_nearest_test() ->
]
}
],
- preprocessor => #{ <<"device">> => <<"relay@1.0">> }
+ on => #{ <<"request">> => #{ <<"device">> => <<"relay@1.0">> } }
}),
{ok, Res} =
hb_http:get(
@@ -162,4 +166,4 @@ preprocessor_reroute_to_nearest_test() ->
end,
Peers
),
- ?assert(HasValidSigner).
+ ?assert(HasValidSigner).
\ No newline at end of file
diff --git a/src/dev_router.erl b/src/dev_router.erl
index 72dc38cd8..a7ff1c9b3 100644
--- a/src/dev_router.erl
+++ b/src/dev_router.erl
@@ -24,13 +24,123 @@
%%% map or a path regex.
%%%
-module(dev_router).
-%%% Device API:
--export([routes/3, route/2, route/3]).
-%%% Public utilities:
--export([match_routes/3]).
+-export([info/1, info/3, routes/3, route/2, route/3, preprocess/3]).
+-export([match/3, register/3]).
-include_lib("eunit/include/eunit.hrl").
-include("include/hb.hrl").
+%% @doc Exported function for getting device info, controls which functions are
+%% exposed via the device API.
+info(_) ->
+ #{ exports => [info, routes, route, match, register, preprocess] }.
+
+%% @doc HTTP info response providing information about this device
+info(_Msg1, _Msg2, _Opts) ->
+ InfoBody = #{
+ <<"description">> => <<"Router device for handling outbound message routing">>,
+ <<"version">> => <<"1.0">>,
+ <<"api">> => #{
+ <<"info">> => #{
+ <<"description">> => <<"Get device info">>
+ },
+ <<"routes">> => #{
+ <<"description">> => <<"Get or add routes">>,
+ <<"method">> => <<"GET or POST">>
+ },
+ <<"route">> => #{
+ <<"description">> => <<"Find a route for a message">>,
+ <<"required_params">> => #{
+ <<"route-path">> => <<"Path to route">>
+ }
+ },
+ <<"match">> => #{
+ <<"description">> => <<"Match a message against available routes">>
+ },
+ <<"register">> => #{
+ <<"description">> => <<"Register a route with a remote router node">>,
+ <<"required_node_opts">> => #{
+ <<"router_peer_location">> => <<"Location of the router peer">>,
+ <<"router_prefix">> => <<"Prefix for the route">>,
+ <<"router_price">> => <<"Price for the route">>,
+ <<"router_template">> => <<"Template to match the route">>
+ }
+ },
+ <<"preprocess">> => #{
+ <<"description">> => <<"Preprocess a request to check if it should be relayed">>
+ }
+ }
+ },
+ {ok, #{<<"status">> => 200, <<"body">> => InfoBody}}.
+
+%% @doc An exposed register function that allows telling the current node to
+%% register a new route with a remote router node. Registration happens at most
+%% once: subsequent calls return an error rather than re-registering.
+register(_M1, _M2, Opts) ->
+ Registered = hb_opts:get(router_registered, false, Opts),
+ % Check if the route is already registered
+ case Registered of
+ true ->
+ {error, <<"Route already registered.">>};
+ false ->
+ % Validate node history
+ case hb_opts:validate_node_history(Opts) of
+ {ok, _} ->
+ RouterNode = hb_opts:get(<<"router_peer_location">>, not_found, Opts),
+ Prefix = hb_opts:get(<<"router_prefix">>, not_found, Opts),
+ Price = hb_opts:get(<<"router_price">>, not_found, Opts),
+ Template = hb_opts:get(<<"router_template">>, not_found, Opts),
+ {ok, Attestion} = dev_snp:generate(
+ #{},
+ #{},
+ #{
+ priv_wallet => hb:wallet(),
+ snp_trusted => hb_opts:get(snp_trusted, [#{}], Opts)
+ }
+ ),
+ ?event(debug_register, {attestion, Attestion}),
+ % Check if any required parameters are missing
+ case hb_opts:check_required_opts([
+ {<<"router_peer_location">>, RouterNode},
+ {<<"router_prefix">>, Prefix},
+ {<<"router_price">>, Price},
+ {<<"router_template">>, Template}
+ ], Opts) of
+ {ok, _} ->
+ case hb_http:post(RouterNode, #{
+ <<"path">> => <<"/router~node-process@1.0/schedule">>,
+ <<"method">> => <<"POST">>,
+ <<"body">> =>
+ hb_message:commit(
+ #{
+ <<"path">> => <<"register">>,
+ <<"route">> =>
+ #{
+ <<"prefix">> => Prefix,
+ <<"template">> => Template,
+ <<"price">> => Price
+ },
+ <<"body">> => Attestion
+ },
+ Opts
+ )
+ }, Opts) of
+ {ok, _} ->
+ hb_http_server:set_opts(
+ Opts#{ router_registered => true }
+ ),
+ {ok, <<"Route registered.">>};
+ {error, _} ->
+ {error, <<"Failed to register route.">>}
+ end;
+ {error, ErrorMsg} ->
+ {error, ErrorMsg}
+ end;
+ {error, Reason} ->
+ % Node history validation failed
+ {error, Reason}
+ end
+ end.
+
%% @doc Device function that returns all known routes.
routes(M1, M2, Opts) ->
?event({routes_msg, M1, M2}),
@@ -121,7 +231,7 @@ route(_, Msg, Opts) ->
[Node] when is_map(Node) ->
apply_route(Msg, Node);
[NodeURI] -> {ok, NodeURI};
- ChosenNodes ->
+ _ChosenNodes ->
{ok,
hb_ao:set(
<<"nodes">>,
@@ -143,7 +253,7 @@ find_target_path(Msg, Opts) ->
case hb_ao:get(<<"route-path">>, Msg, not_found, Opts) of
not_found ->
?event({find_target_path, {msg, Msg}, {opts, Opts}, not_found}),
- hb_path:from_message(request, Msg);
+ hb_ao:get(<<"path">>, Msg, no_path, Opts);
RoutePath -> RoutePath
end.
@@ -171,7 +281,7 @@ extract_base(RawPath, Opts) when is_binary(RawPath) ->
case ?IS_ID(BasePath) of
true -> BasePath;
false ->
- case binary:split(BasePath, [<<"~">>, <<"?">>, <<"&">>], [global]) of
+ case binary:split(BasePath, [<<"\~">>, <<"?">>, <<"&">>], [global]) of
[BaseMsgID|_] when ?IS_ID(BaseMsgID) -> BaseMsgID;
_ -> hb_crypto:sha256(BasePath)
end
@@ -183,15 +293,16 @@ apply_routes(Msg, R, Opts) ->
NodesWithRouteApplied =
lists:map(
fun(N) ->
- ?event({apply_route, {msg, Msg}, {node, N}}),
+ ?event(debug, {apply_route, {msg, Msg}, {node, N}}),
case apply_route(Msg, N) of
{ok, URI} when is_binary(URI) -> N#{ <<"uri">> => URI };
- {ok, Map} -> Map;
+ {ok, RMsg} -> maps:merge(N, RMsg);
{error, _} -> N
end
end,
hb_util:message_to_ordered_list(Nodes)
),
+ ?event(debug, {nodes_after_apply, NodesWithRouteApplied}),
R#{ <<"nodes">> => NodesWithRouteApplied }.
%% @doc Apply a node map's rules for transforming the path of the message.
@@ -219,7 +330,26 @@ apply_route(#{ <<"path">> := Path }, #{ <<"match">> := Match, <<"with">> := With
_ -> {error, invalid_replace_args}
end.
-%% @doc Find the first matching template in a list of known routes.
+%% @doc Find the first matching template in a list of known routes. Allows the
+%% path to be specified by either the explicit `path' (for internal use by this
+%% module), or `route-path' for use by external devices and users.
+match(Base, Req, Opts) ->
+ ?event(debug_preprocess, {routeReq, Req}),
+ ?event(debug_preprocess,
+ {routes,
+ hb_ao:get(<<"routes">>, {as, <<"message@1.0">>, Base}, [], Opts)}
+ ),
+ Match =
+ match_routes(
+ Req#{ <<"path">> => find_target_path(Req, Opts) },
+ hb_ao:get(<<"routes">>, {as, <<"message@1.0">>, Base}, [], Opts),
+ Opts
+ ),
+ case Match of
+ no_matches -> {error, no_matching_route};
+ _ -> {ok, Match}
+ end.
+
match_routes(ToMatch, Routes, Opts) ->
match_routes(
ToMatch,
@@ -229,9 +359,9 @@ match_routes(ToMatch, Routes, Opts) ->
).
match_routes(#{ <<"path">> := Explicit = <<"http://", _/binary>> }, _, _, _) ->
% If the route is an explicit HTTP URL, we can match it directly.
- #{ <<"node">> => Explicit };
+ #{ <<"node">> => Explicit, <<"reference">> => <<"explicit">> };
match_routes(#{ <<"path">> := Explicit = <<"https://", _/binary>> }, _, _, _) ->
- #{ <<"node">> => Explicit };
+ #{ <<"node">> => Explicit, <<"reference">> => <<"explicit">> };
match_routes(_, _, [], _) -> no_matches;
match_routes(ToMatch, Routes, [XKey|Keys], Opts) ->
XM = hb_ao:get(XKey, Routes, Opts),
@@ -243,7 +373,7 @@ match_routes(ToMatch, Routes, [XKey|Keys], Opts) ->
Opts#{ hashpath => ignore }
),
case template_matches(ToMatch, Template, Opts) of
- true -> XM;
+ true -> XM#{ <<"reference">> => hb_path:to_binary([<<"routes">>, XKey]) };
false -> match_routes(ToMatch, Routes, Keys, Opts)
end.
@@ -252,7 +382,9 @@ template_matches(ToMatch, Template, _Opts) when is_map(Template) ->
hb_message:match(Template, ToMatch, primary);
template_matches(ToMatch, Regex, Opts) when is_binary(Regex) ->
MsgPath = find_target_path(ToMatch, Opts),
- hb_path:regex_matches(MsgPath, Regex).
+ Matches = hb_path:regex_matches(MsgPath, Regex),
+ ?event(debug_template_matches, {matches, Matches, msg_path, MsgPath, regex, Regex}),
+ Matches.
%% @doc Implements the load distribution strategies if given a cluster.
choose(0, _, _, _, _) -> [];
@@ -260,9 +392,10 @@ choose(N, <<"Random">>, _, Nodes, _Opts) ->
Node = lists:nth(rand:uniform(length(Nodes)), Nodes),
[Node | choose(N - 1, <<"Random">>, nop, lists:delete(Node, Nodes), _Opts)];
choose(N, <<"By-Weight">>, _, Nodes, Opts) ->
+ ?event(debug, {nodes, Nodes}),
NodesWithWeight =
[
- { Node, hb_util:int(hb_ao:get(<<"weight">>, Node, Opts)) }
+ { Node, hb_util:float(hb_ao:get(<<"weight">>, Node, Opts)) }
||
Node <- Nodes
],
@@ -342,6 +475,57 @@ binary_to_bignum(Bin) when ?IS_ID(Bin) ->
<< Num:256/unsigned-integer >> = hb_util:native_id(Bin),
Num.
+%% @doc Preprocess a request to check if it should be relayed to a different node.
+preprocess(_Msg1, Msg2, Opts) ->
+ Req = hb_ao:get(<<"request">>, Msg2, Opts),
+ ?event(debug_preprocess, {called_preprocess,Req}),
+ TemplateRoutes = load_routes(Opts),
+ ?event(debug_preprocess, {template_routes, TemplateRoutes}),
+ {_, Match} = match(#{ <<"routes">> => TemplateRoutes }, Req, Opts),
+ ?event(debug_preprocess, {match, Match}),
+ case Match of
+ no_matching_route ->
+ ?event(debug_preprocess, preprocessor_did_not_match),
+ case hb_opts:get(router_preprocess_default, <<"local">>, Opts) of
+ <<"local">> ->
+ ?event(debug_preprocess, executing_locally),
+ {ok, #{
+ <<"body">> =>
+ hb_ao:get(<<"body">>, Msg2, Opts#{ hashpath => ignore })
+ }};
+ <<"error">> ->
+ ?event(debug_preprocess, preprocessor_returning_error),
+ {ok, #{
+ <<"body">> =>
+ [#{
+ <<"status">> => 404,
+ <<"message">> =>
+ <<"No matching template found in the given routes.">>
+ }]
+ }}
+ end;
+ _ ->
+ ?event(debug_preprocess, {matched_route, Match}),
+ {ok,
+ #{
+ <<"body">> =>
+ [
+ #{ <<"device">> => <<"relay@1.0">> },
+ #{
+ <<"path">> => <<"call">>,
+ <<"target">> => <<"body">>,
+ <<"body">> =>
+ hb_ao:get(
+ <<"request">>,
+ Msg2,
+ Opts#{ hashpath => ignore }
+ )
+ }
+ ]
+ }
+ }
+ end.
+
%%% Tests
route_provider_test() ->
@@ -369,7 +553,10 @@ dynamic_route_provider_test() ->
route_provider => #{
<<"device">> => <<"lua@5.3a">>,
<<"path">> => <<"route_provider">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Script
+ },
<<"node">> => <<"test-dynamic-node">>
},
priv_wallet => ar_wallet:new()
@@ -391,7 +578,10 @@ local_process_route_provider_test() ->
<<"device">> => <<"process@1.0">>,
<<"execution-device">> => <<"lua@5.3a">>,
<<"scheduler-device">> => <<"scheduler@1.0">>,
- <<"script">> => Script,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"body">> => Script
+ },
<<"node">> => <<"router-node">>,
<<"function">> => <<"compute_routes">>
}
@@ -421,6 +611,405 @@ local_process_route_provider_test() ->
?event({responses, Responses}),
?assertEqual(2, sets:size(sets:from_list(Responses))).
+%% @doc Example of a Lua module being used as the `route_provider' for a
+%% HyperBEAM node. The module utilized in this example dynamically adjusts the
+%% likelihood of routing to a given node, depending upon price and performance.
+local_dynamic_router_test() ->
+ BenchRoutes = 50,
+ {ok, Module} = file:read_file(<<"scripts/dynamic-router.lua">>),
+ Run = hb_util:bin(rand:uniform(1337)),
+ Node = hb_http_server:start_node(Opts = #{
+ store => [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-TEST/dynrouter-", Run/binary>>
+ }
+ ],
+ priv_wallet => ar_wallet:new(),
+ route_provider => #{
+ <<"path">> =>
+ RouteProvider =
+ <<"/router~node-process@1.0/compute/routes~message@1.0">>
+ },
+ node_processes => #{
+ <<"router">> => #{
+ <<"device">> => <<"process@1.0">>,
+ <<"execution-device">> => <<"lua@5.3a">>,
+ <<"scheduler-device">> => <<"scheduler@1.0">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"module">> => <<"dynamic-router">>,
+ <<"body">> => Module
+ },
+ % Set module-specific factors for the test
+ <<"pricing-weight">> => 9,
+ <<"performance-weight">> => 1,
+ <<"score-preference">> => 4
+ }
+ }
+ }),
+ Store = hb_opts:get(store, no_store, Opts),
+ ?event(debug_dynrouter, {store, Store}),
+ % Register workers with the dynamic router with varied prices.
+ lists:foreach(fun(X) ->
+ hb_http:post(
+ Node,
+ #{
+ <<"path">> => <<"/router~node-process@1.0/schedule">>,
+ <<"method">> => <<"POST">>,
+ <<"body">> =>
+ hb_message:commit(
+ #{
+ <<"path">> => <<"register">>,
+ <<"route">> =>
+ #{
+ <<"prefix">> =>
+ <<
+ "https://test-node-",
+ (hb_util:bin(X))/binary,
+ ".com"
+ >>,
+ <<"template">> => <<"/.*~process@1.0/.*">>,
+ <<"price">> => X * 250
+ }
+ },
+ Opts
+ )
+ },
+ Opts
+ )
+ end, lists:seq(1, 5)),
+ % Force computation of the current state. This should be done with a
+ % background worker (ex: a `~cron@1.0/every' task).
+ hb_http:get(Node, <<"/router~node-process@1.0/now">>, #{}),
+ {ok, Routes} = hb_http:get(Node, RouteProvider, Opts),
+ ?event(debug_dynrouter, {got_routes, Routes}),
+ % Query the route 10 times with the same path. This should yield 2 different
+ % results, as the route provider should choose 1 node of a set of 2 at random.
+ BeforeExec = os:system_time(millisecond),
+ Responses =
+ lists:map(
+ fun(_) ->
+ hb_util:ok(
+ hb_http:get(
+ Node,
+ <<"/~router@1.0/route/uri?route-path=/procID~process@1.0/now">>,
+ #{}
+ )
+ )
+ end,
+ lists:seq(1, BenchRoutes)
+ ),
+ AfterExec = os:system_time(millisecond),
+ hb_util:eunit_print(
+ "Calculated ~p routes in ~ps (~.2f routes/s)",
+ [
+ BenchRoutes,
+ (AfterExec - BeforeExec) / 1000,
+ BenchRoutes / ((AfterExec - BeforeExec) / 1000)
+ ]
+ ),
+ % Calculate the distribution of the responses.
+ UniqueResponses = sets:to_list(sets:from_list(Responses)),
+ Dist =
+ [
+ {
+ Resp,
+ hb_util:count(Resp, Responses) / length(Responses)
+ }
+ ||
+ Resp <- UniqueResponses
+ ],
+ ?event(debug_distribution, {distribution_of_responses, Dist}),
+ ?assert(length(UniqueResponses) > 1).
+
+%% @doc Example of a Lua module being used as the `route_provider' for a
+%% HyperBEAM node. The module utilized in this example dynamically adjusts the
+%% likelihood of routing to a given node, depending upon price and performance.
+%% This test also exercises preprocessing support for routing.
+dynamic_router_test() ->
+ {ok, Module} = file:read_file(<<"scripts/dynamic-router.lua">>),
+ Run = hb_util:bin(rand:uniform(1337)),
+ ExecWallet = hb:wallet(<<"test/admissible-report-wallet.json">>),
+ ProxyWallet = ar_wallet:new(),
+ ExecNode =
+ hb_http_server:start_node(
+ ExecOpts = #{ priv_wallet => ExecWallet }
+ ),
+ Node = hb_http_server:start_node(ProxyOpts = #{
+ snp_trusted => [
+ #{
+ <<"vcpus">> => 32,
+ <<"vcpu_type">> => 5,
+ <<"vmm_type">> => 1,
+ <<"guest_features">> => 1,
+ <<"firmware">> =>
+ <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>,
+ <<"kernel">> =>
+ <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
+ <<"initrd">> =>
+ <<"544045560322dbcd2c454bdc50f35edf0147829ec440e6cb487b4a1503f923c1">>,
+ <<"append">> =>
+ <<"95a34faced5e487991f9cc2253a41cbd26b708bf00328f98dddbbf6b3ea2892e">>
+ }
+ ],
+ store => [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-TEST/dynrouter-", Run/binary>>
+ }
+ ],
+ priv_wallet => ProxyWallet,
+ on =>
+ #{
+ <<"request">> => #{
+ <<"device">> => <<"router@1.0">>,
+ <<"path">> => <<"preprocess">>
+ }
+ },
+ route_provider => #{
+ <<"path">> => <<"/router~node-process@1.0/compute/routes~message@1.0">>
+ },
+ node_processes => #{
+ <<"router">> => #{
+ <<"type">> => <<"Process">>,
+ <<"device">> => <<"process@1.0">>,
+ <<"execution-device">> => <<"lua@5.3a">>,
+ <<"scheduler-device">> => <<"scheduler@1.0">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"module">> => <<"dynamic-router">>,
+ <<"body">> => Module
+ },
+ % Set module-specific factors for the test
+ <<"pricing-weight">> => 9,
+ <<"performance-weight">> => 1,
+ <<"score-preference">> => 4,
+ <<"is-admissible">> => #{
+ <<"device">> => <<"snp@1.0">>,
+ <<"path">> => <<"verify">>
+ }
+ }
+ }
+ }), % mergeRight this takes our defined Opts and merges them into the
+ % node opts configs.
+ Store = hb_opts:get(store, no_store, ProxyOpts),
+ ?event(debug_dynrouter, {store, Store}),
+ % Register workers with the dynamic router with varied prices.
+ {ok, [Req]} = file:consult(<<"test/admissible-report.eterm">>),
+ lists:foreach(fun(X) ->
+ {ok, Res} =
+ hb_http:post(
+ Node,
+ #{
+ <<"path">> => <<"/router~node-process@1.0/schedule">>,
+ <<"method">> => <<"POST">>,
+ <<"body">> =>
+ hb_message:commit(
+ #{
+ <<"path">> => <<"register">>,
+ <<"route">> =>
+ #{
+ <<"prefix">> => ExecNode,
+ <<"template">> => <<"/c">>,
+ <<"price">> => X * 250
+ },
+ <<"body">> => hb_message:commit(Req, ExecOpts)
+ },
+ ExecOpts
+ )
+ },
+ ExecOpts
+ ),
+ Res
+ end, lists:seq(1, 1)),
+ % Force computation of the current state. This should be done with a
+ % background worker (ex: a `~cron@1.0/every' task).
+ {Status, NodeRoutes} = hb_http:get(Node, <<"/router~node-process@1.0/now">>, #{}),
+ ?event(debug_dynrouter, {got_node_routes, NodeRoutes}),
+ ?assertEqual(ok, Status),
+ ProxyWalletAddr = hb_util:human_id(ar_wallet:to_address(ProxyWallet)),
+ ExecNodeAddr = hb_util:human_id(ar_wallet:to_address(ExecWallet)),
+ % Ensure that the `~meta@1.0/info/address' response is produced by the
+ % proxy wallet.
+ ?event(debug_dynrouter,
+ {addresses,
+ {proxy_wallet_addr, ProxyWalletAddr},
+ {exec_node_addr, ExecNodeAddr}
+ }
+ ),
+ ?assertEqual(
+ {ok, ProxyWalletAddr},
+ hb_http:get(Node, <<"/~meta@1.0/info/address">>, ProxyOpts)
+ ),
+ % Ensure that computation is done by the exec node.
+ {ok, ResMsg} = hb_http:get(Node, <<"/c?c+list=1">>, ExecOpts),
+ ?assertEqual([ExecNodeAddr], hb_message:signers(ResMsg)).
+
+%% @doc Demonstrates routing tables being dynamically created and adjusted
+%% according to the real-time performance of nodes. This test utilizes the
+%% `dynamic-router' script to manage routes and recalculate weights based on the
+%% reported performance.
+dynamic_routing_by_performance_test_() ->
+ {timeout, 30, fun dynamic_routing_by_performance/0}.
+dynamic_routing_by_performance() ->
+ % Setup test parameters
+ TestNodes = 4,
+ BenchRoutes = 16,
+ TestPath = <<"/worker">>,
+ % Start the main node for the test, loading the `dynamic-router' script and
+ % the http_monitor to generate performance messages.
+ {ok, Script} = file:read_file(<<"scripts/dynamic-router.lua">>),
+ Run = hb_util:bin(rand:uniform(1337_000)),
+ Node = hb_http_server:start_node(Opts = #{
+ relay_http_client => gun,
+ store => [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-TEST/dynrouter-", Run/binary>>
+ }
+ ],
+ priv_wallet => ar_wallet:new(),
+ route_provider => #{
+ <<"path">> =>
+ <<"/perf-router~node-process@1.0/compute/routes~message@1.0">>
+ },
+ node_processes => #{
+ <<"perf-router">> => #{
+ <<"device">> => <<"process@1.0">>,
+ <<"execution-device">> => <<"lua@5.3a">>,
+ <<"scheduler-device">> => <<"scheduler@1.0">>,
+ <<"module">> => #{
+ <<"content-type">> => <<"application/lua">>,
+ <<"name">> => <<"dynamic-router">>,
+ <<"body">> => Script
+ },
+ % Set module-specific factors for the test
+ <<"pricing-weight">> => 1,
+ <<"performance-weight">> => 99,
+ <<"score-preference">> => 4,
+ <<"performance-period">> => 2, % Adjust quickly
+ <<"initial-performance">> => 1000
+ }
+ },
+ % Define the request that should be called in order to record performance
+ % information into the process. The `body' of the `http_monitor' message
+ % is filled with the signed performance report.
+ http_monitor => #{
+ <<"method">> => <<"POST">>,
+ <<"path">> => <<"/perf-router~node-process@1.0/schedule">>
+ }
+ }),
+ % Start and add a series of nodes with decreasing performance, via lag
+ % introduced with a hook set to `~test@1.0/delay'.
+ _XNodes =
+ lists:map(
+ fun(X) ->
+ % Start the node, applying a delay that increases for each additional
+ % node.
+ XNode =
+ hb_http_server:start_node(
+ #{
+ on =>
+ #{
+ <<"request">> => #{
+ <<"device">> => <<"test-device@1.0">>,
+ <<"path">> => <<"delay">>,
+ <<"duration">> => (X - 1) * 100,
+ <<"return">> => #{
+ <<"body">> => [
+ #{ <<"worker">> => X },
+ <<"worker">>
+ ]
+ }
+ }
+ }
+ }
+ ),
+ % Register the node with the router.
+ hb_http:post(
+ Node,
+ #{
+ <<"path">> => <<"/perf-router~node-process@1.0/schedule">>,
+ <<"method">> => <<"POST">>,
+ <<"body">> =>
+ hb_message:commit(
+ #{
+ <<"path">> => <<"register">>,
+ <<"route">> =>
+ #{
+ <<"prefix">> => XNode,
+ <<"template">> => TestPath,
+ <<"price">> => 1000 + X
+ }
+ },
+ Opts
+ )
+ },
+ Opts
+ ),
+ XNode
+ end,
+ lists:seq(1, TestNodes)
+ ),
+ % Force calculation of the process state.
+ {ok, ResBefore} =
+ hb_http:get(
+ Node,
+ PerfPath =
+ <<"/perf-router~node-process@1.0/now/routes~message@1.0/1/nodes">>,
+ Opts
+ ),
+ ?event(debug_dynrouter, {nodes_before, ResBefore}),
+ % Send `BenchRoutes' request messages to the nodes.
+ lists:foreach(
+ fun(_XNode) ->
+ % We send the requests to the main node's `relay@1.0' device, which
+ % will then apply the routes and the request to the test node set.
+ Res = hb_http:get(
+ Node,
+ << "/~relay@1.0/call?relay-path=/worker" >>,
+ Opts
+ ),
+ ?event(debug_dynrouter, {recvd, Res})
+ end,
+ lists:seq(1, BenchRoutes)
+ ),
+ % Call `recalculate' on the router process and get the resulting weight
+ % table.
+ hb_http:post(
+ Node,
+ #{
+ <<"path">> => <<"/perf-router~node-process@1.0/schedule">>,
+ <<"method">> => <<"POST">>,
+ <<"body">> =>
+ hb_message:commit(#{ <<"path">> => <<"recalculate">> }, Opts)
+ },
+ Opts
+ ),
+ % Get the new weights
+ {ok, After} = hb_http:get(Node, PerfPath, Opts),
+ WeightsByWorker =
+ maps:from_list(
+ lists:map(
+ fun(N) ->
+ {
+ N,
+ hb_ao:get(
+ <<(integer_to_binary(N))/binary, "/weight">>,
+ After,
+ Opts
+ )
+ }
+ end,
+ lists:seq(1, TestNodes)
+ )
+ ),
+ ?event(debug_dynrouter, {worker_weights, {explicit, WeightsByWorker}}),
+ ?assert(maps:get(1, WeightsByWorker) > 0.4),
+ ?assert(maps:get(TestNodes, WeightsByWorker) < 0.3),
+ ok.
+
weighted_random_strategy_test() ->
Nodes =
[
@@ -576,6 +1165,19 @@ explicit_route_test() ->
#{ <<"path">> => <<"http://google.com">> },
#{ routes => Routes }
)
+ ),
+ % Test that `route-path' can also be used to specify the path, via an AO
+ % call.
+ ?assertMatch(
+ {ok, #{ <<"node">> := <<"http://google.com">> }},
+ hb_ao:resolve(
+ #{ <<"device">> => <<"router@1.0">>, routes => Routes },
+ #{
+ <<"path">> => <<"match">>,
+ <<"route-path">> => <<"http://google.com">>
+ },
+ #{}
+ )
).
device_call_from_singleton_test() ->
diff --git a/src/dev_scheduler.erl b/src/dev_scheduler.erl
index cb5d4e1a0..51f55f7c2 100644
--- a/src/dev_scheduler.erl
+++ b/src/dev_scheduler.erl
@@ -18,7 +18,7 @@
%%% AO-Core API functions:
-export([info/0]).
%%% Local scheduling functions:
--export([schedule/3, router/4, register/3]).
+-export([schedule/3, router/4, location/3]).
%%% CU-flow functions:
-export([slot/3, status/3, next/3]).
-export([start/0, checkpoint/1]).
@@ -46,7 +46,7 @@ info() ->
#{
exports =>
[
- register,
+ location,
status,
next,
schedule,
@@ -287,52 +287,123 @@ status(_M1, _M2, _Opts) ->
}
}.
+%% @doc Router for `location' requests. Expects either a `POST' or `GET' request.
+location(Msg1, Msg2, Opts) ->
+ case hb_ao:get(<<"method">>, Msg2, <<"GET">>, Opts) of
+ <<"POST">> -> post_location(Msg1, Msg2, Opts);
+ <<"GET">> -> get_location(Msg1, Msg2, Opts)
+ end.
+
+%% @doc Search for the location of the scheduler in the scheduler-location
+%% cache. If an address is provided, we search for the location of that
+%% specific scheduler. Otherwise, we return the location record for the current
+%% node's scheduler, if it has been established.
+get_location(_Msg1, Req, Opts) ->
+ % Get the address of the scheduler from the request.
+ Address =
+ hb_ao:get(
+ <<"address">>,
+ Req,
+ hb_util:human_id(ar_wallet:to_address(
+ hb_opts:get(priv_wallet, hb:wallet(), Opts)
+ )),
+ Opts
+ ),
+ % Search for the location of the scheduler in the scheduler-location cache.
+ case dev_scheduler_cache:read_location(Address, Opts) of
+ not_found ->
+ {ok,
+ #{
+ <<"status">> => 404,
+ <<"body">> =>
+ <<"No location found for address: ", Address/binary>>
+ }
+ };
+ {ok, Location} -> {ok, #{ <<"body">> => Location }}
+ end.
+
%% @doc Generate a new scheduler location record and register it. We both send
%% the new scheduler-location to the given registry, and return it to the caller.
-register(_Msg1, Req, Opts) ->
+post_location(Msg1, RawReq, Opts) ->
% Ensure that the request is signed by the operator.
- ?event({registering_scheduler, {msg1, _Msg1}, {req, Req}, {opts, Opts}}),
+ Req =
+ case hb_ao:get(<<"target">>, RawReq, not_found, Opts) of
+ not_found -> RawReq;
+ Target -> hb_ao:get(Target, RawReq, not_found, Opts)
+ end,
{ok, OnlyCommitted} = hb_message:with_only_committed(Req),
- ?event({only_committed, OnlyCommitted}),
+ ?event(scheduler_location,
+ {scheduler_location_registration_request, OnlyCommitted}
+ ),
+ % Gather metadata for request validation.
Signers = hb_message:signers(OnlyCommitted),
- Operator =
+ Self =
hb_util:human_id(
ar_wallet:to_address(
hb_opts:get(priv_wallet, hb:wallet(), Opts)
)
),
ExistingNonce =
- case hb_gateway_client:scheduler_location(Operator, Opts) of
+ case hb_gateway_client:scheduler_location(Self, Opts) of
{ok, SchedulerLocation} ->
hb_ao:get(<<"nonce">>, SchedulerLocation, 0, Opts);
{error, _} -> -1
end,
- NewNonce = hb_ao:get(<<"nonce">>, OnlyCommitted, 0, Opts),
- case lists:member(Operator, Signers) andalso NewNonce > ExistingNonce of
- false ->
+ NewNonce = hb_ao:get(<<"nonce">>, OnlyCommitted, ExistingNonce + 1, Opts),
+ case {NewNonce > ExistingNonce, lists:member(Self, Signers)} of
+ {false, _} ->
+ % Invalid request: Known nonce is already higher than requested nonce
+ % for the given operator.
{ok,
#{
<<"status">> => 400,
- <<"body">> => <<"Invalid request.">>,
+ <<"body">> => <<"Known nonce higher than requested nonce.">>,
<<"requested-nonce">> => NewNonce,
<<"existing-nonce">> => ExistingNonce,
<<"signers">> => Signers
}
};
- true ->
+ {true, false} ->
+ % Received request to store a new scheduler location from a peer
+ % that is not the operator.
+ case dev_scheduler_cache:write_location(OnlyCommitted, Opts) of
+ ok ->
+ ?event(scheduler_location,
+ {cached_foreign_peer_location, OnlyCommitted}
+ ),
+ {ok, OnlyCommitted};
+ {error, Reason} ->
+ {error,
+ #{
+ <<"status">> => 400,
+ <<"body">> =>
+ <<"Failed to store new scheduler location.">>,
+ <<"reason">> => Reason
+ }
+ }
+ end;
+ {true, true} ->
% The operator has asked to replace the scheduler location. Get the
- % details and register the new location.
- DefaultTTL = hb_opts:get(scheduler_location_ttl, 1000 * 60 * 60, Opts),
- TimeToLive = hb_ao:get(
- <<"time-to-live">>,
- OnlyCommitted,
- DefaultTTL,
+ % details and register the new location. Registration occurs in the
+ % following steps:
+ % 1. Generate a new scheduler location message.
+ % 2. Sign the message.
+ % 3. Upload the message to Arweave.
+ % 4. Post the message to the peers specified in the
+ % `scheduler_location_notify_peers' option.
+ TimeToLive =
+ hb_ao:get_first(
+ [
+ {Msg1, <<"time-to-live">>},
+ {OnlyCommitted, <<"time-to-live">>}
+ ],
+ hb_opts:get(scheduler_location_ttl, 1000 * 60 * 60, Opts),
Opts
),
URL =
case hb_ao:get(<<"url">>, OnlyCommitted, Opts) of
not_found ->
- Port = hb_opts:get(port, 8734, Opts),
+ Port = hb_util:bin(hb_opts:get(port, 8734, Opts)),
Host = hb_opts:get(host, <<"localhost">>, Opts),
Protocol = hb_opts:get(protocol, http1, Opts),
ProtoStr =
@@ -344,7 +415,15 @@ register(_Msg1, Req, Opts) ->
GivenURL -> GivenURL
end,
% Construct the new scheduler location message.
- Codec = hb_ao:get(<<"accept-codec">>, OnlyCommitted, <<"httpsig@1.0">>, Opts),
+ Codec =
+ hb_ao:get_first(
+ [
+ {Msg1, <<"accept-codec">>},
+ {OnlyCommitted, <<"accept-codec">>}
+ ],
+ <<"httpsig@1.0">>,
+ Opts
+ ),
NewSchedulerLocation =
#{
<<"data-protocol">> => <<"ao">>,
@@ -356,9 +435,34 @@ register(_Msg1, Req, Opts) ->
<<"codec-device">> => Codec
},
Signed = hb_message:commit(NewSchedulerLocation, Opts, Codec),
- ?event({uploading_signed_scheduler_location, Signed}),
- Res = hb_client:upload(Signed, Opts),
- ?event({upload_response, Res}),
+ dev_scheduler_cache:write_location(Signed, Opts),
+ ?event(scheduler_location,
+ {uploading_signed_scheduler_location, Signed}
+ ),
+ {UploadStatus, _} = hb_client:upload(Signed, Opts),
+ % Post the new scheduler location to the peers specified in the
+ % `scheduler_location_notify_peers' option.
+ Results =
+ lists:map(
+ fun(Node) ->
+ PostRes = hb_http:post(
+ Node,
+ <<"/~scheduler@1.0/record">>,
+ Signed,
+ Opts
+ ),
+ ?event(scheduler_location,
+ {outbound_request, {res, PostRes}}
+ )
+ end,
+ hb_opts:get(scheduler_location_notify_peers, [], Opts)
+ ),
+ ?event(scheduler_location,
+ {scheduler_location_registration_success,
+ {arweave_publication_status, UploadStatus},
+ {foreign_peers_notified, length(Results)}
+ }
+ ),
{ok, Signed}
end.
@@ -755,7 +859,7 @@ get_schedule(Msg1, Msg2, Opts) ->
true ->
case get_remote_schedule(ProcID, From, To, Redirect, Opts) of
{ok, Res} ->
- case Format of
+ case uri_string:percent_decode(Format) of
<<"application/aos-2">> ->
{ok, Formatted} = dev_scheduler_formats:assignments_to_aos2(
ProcID,
@@ -888,7 +992,6 @@ do_get_remote_schedule(ProcID, LocalAssignments, From, To, Redirect, Opts) ->
?event({getting_remote_schedule, {node, {string, Node}}, {path, {string, Path}}}),
case hb_http:get(Node, Path, Opts#{ http_client => httpc, protocol => http2 }) of
{ok, Res} ->
- ?event(push, {remote_schedule_result, {res, Res}}, Opts),
case hb_util:int(hb_ao:get(<<"status">>, Res, 200, Opts)) of
200 ->
{ok, NormSched} =
@@ -1066,7 +1169,12 @@ post_legacy_schedule(ProcID, OnlyCommitted, Node, Opts) ->
<<"ans104@1.0">>,
Opts
),
- ?event({encoded_for_legacy_scheduler, {item, Item}, {exact, {explicit, Item}}}),
+ ?event(
+ {encoded_for_legacy_scheduler,
+ {item, Item},
+ {exact, {explicit, Item}}
+ }
+ ),
{ok, ar_bundles:serialize(Item)}
catch
_:_ ->
@@ -1206,7 +1314,7 @@ generate_local_schedule(Format, ProcID, From, To, Opts) ->
% Determine and apply the formatting function to use for generation
% of the response, based on the `Accept' header.
FormatterFun =
- case Format of
+ case uri_string:percent_decode(Format) of
<<"application/aos-2">> ->
fun dev_scheduler_formats:assignments_to_aos2/4;
_ ->
@@ -1311,6 +1419,57 @@ register_new_process_test() ->
)
).
+%% @doc Test that a scheduler location is registered on boot.
+register_location_on_boot_test() ->
+ NotifiedPeerWallet = ar_wallet:new(),
+ RegisteringNodeWallet = ar_wallet:new(),
+ start(),
+ NotifiedPeer =
+ hb_http_server:start_node(#{
+ priv_wallet => NotifiedPeerWallet,
+ store => [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-TEST/scheduler-location-notified">>
+ }
+ ]
+ }),
+ RegisteringNode = hb_http_server:start_node(
+ #{
+ priv_wallet => RegisteringNodeWallet,
+ on =>
+ #{
+ <<"start">> => #{
+ <<"device">> => <<"scheduler@1.0">>,
+ <<"path">> => <<"location">>,
+ <<"method">> => <<"POST">>,
+ <<"accept-codec">> => <<"ans104@1.0">>,
+ <<"hook">> =>#{
+ <<"result">> => <<"ignore">>,
+ <<"commit-request">> => true
+ }
+ }
+ },
+ scheduler_location_notify_peers => [NotifiedPeer]
+ }
+ ),
+ {ok, CurrentLocation} =
+ hb_http:get(
+ RegisteringNode,
+ <<"/~scheduler@1.0/location">>,
+ #{
+ <<"method">> => <<"GET">>,
+ <<"address">> =>
+ hb_util:human_id(ar_wallet:to_address(RegisteringNodeWallet))
+ }
+ ),
+ ?event({current_location, CurrentLocation}),
+ ?assertMatch(
+ #{ <<"url">> := Location, <<"nonce">> := 0 }
+ when is_binary(Location),
+ hb_ao:get(<<"body">>, CurrentLocation, #{})
+ ).
+
schedule_message_and_get_slot_test() ->
start(),
Msg1 = test_process(),
@@ -1444,13 +1603,13 @@ register_scheduler_test() ->
start(),
{Node, Wallet} = http_init(),
Msg1 = hb_message:commit(#{
- <<"path">> => <<"/~scheduler@1.0/register">>,
+ <<"path">> => <<"/~scheduler@1.0/location">>,
<<"url">> => <<"https://hyperbeam-test-ignore.com">>,
<<"method">> => <<"POST">>,
<<"nonce">> => 1,
<<"accept-codec">> => <<"ans104@1.0">>
}, Wallet),
- {ok, Res} = hb_http:get(Node, Msg1, #{}),
+ {ok, Res} = hb_http:post(Node, Msg1, #{}),
?assertMatch(#{ <<"url">> := Location } when is_binary(Location), Res).
http_post_schedule_sign(Node, Msg, ProcessMsg, Wallet) ->
diff --git a/src/dev_scheduler_cache.erl b/src/dev_scheduler_cache.erl
index 030773696..488aea515 100644
--- a/src/dev_scheduler_cache.erl
+++ b/src/dev_scheduler_cache.erl
@@ -120,13 +120,13 @@ read_location(Address, Opts) ->
Res.
%% @doc Write the latest known scheduler location for an address.
-write_location(LocationMsg, Opts) ->
- Signers = hb_message:signers(LocationMsg),
+write_location(LocMsg, Opts) ->
+ Signers = hb_message:signers(LocMsg),
?event({writing_location_msg,
{signers, Signers},
- {location_msg, LocationMsg}
+ {location_msg, LocMsg}
}),
- case hb_cache:write(LocationMsg, Opts) of
+ case hb_message:verify(LocMsg, all) andalso hb_cache:write(LocMsg, Opts) of
{ok, RootPath} ->
lists:foreach(
fun(Signer) ->
@@ -145,6 +145,9 @@ write_location(LocationMsg, Opts) ->
Signers
),
ok;
+ false ->
+ % The message is not valid, so we don't cache it.
+ {error, <<"Invalid scheduler location message. Not caching.">>};
{error, Reason} ->
?event(warning, {failed_to_cache_location_msg, {reason, Reason}}),
{error, Reason}
diff --git a/src/dev_scheduler_registry.erl b/src/dev_scheduler_registry.erl
index 484e3e60a..5d209586c 100644
--- a/src/dev_scheduler_registry.erl
+++ b/src/dev_scheduler_registry.erl
@@ -14,15 +14,23 @@ get_wallet() ->
% TODO: We might want to use a different wallet per SU later.
hb:wallet().
+%%% @doc Find a process associated with the processor ID in the local registry
+%%% If the process is not found, it will not create a new one
find(ProcID) -> find(ProcID, false).
+
+%%% @doc Find a process associated with the processor ID in the local registry
+%%% If the process is not found and `GenIfNotHosted' is true, it attempts to create a new one
find(ProcID, GenIfNotHosted) ->
find(ProcID, GenIfNotHosted, #{ priv_wallet => hb:wallet() }).
+
+%%% @doc Same as `find/2' but with additional options passed when spawning a new process (if needed)
find(ProcID, GenIfNotHosted, Opts) ->
case hb_name:lookup({dev_scheduler, ProcID}) of
undefined -> maybe_new_proc(ProcID, GenIfNotHosted, Opts);
Pid -> Pid
end.
+%%% @doc Return a list of all currently registered ProcID.
get_processes() ->
?event({getting_processes, hb_name:all()}),
[ ProcID || {{dev_scheduler, ProcID}, _} <- hb_name:all() ].
diff --git a/src/dev_simple_pay.erl b/src/dev_simple_pay.erl
index ef0eb5d41..6c68653c6 100644
--- a/src/dev_simple_pay.erl
+++ b/src/dev_simple_pay.erl
@@ -15,15 +15,17 @@
%% not pay for their own requests.
estimate(_, EstimateReq, NodeMsg) ->
Req = hb_ao:get(<<"request">>, EstimateReq, NodeMsg#{ hashpath => ignore }),
- ReqType = hb_ao:get(<<"type">>, EstimateReq, undefined, NodeMsg),
- case {is_operator(Req, NodeMsg), ReqType} of
- {true, _} -> {ok, 0};
- {_, <<"post">>} ->
- % We do not charge after the request has been processed, as balances
- % in the ledger are updated in the pre-processing step.
+ case is_operator(Req, NodeMsg) of
+ true ->
+ ?event(payment,
+ {estimate_preprocessing, caller_is_operator}
+ ),
{ok, 0};
- {_, <<"pre">>} ->
- Messages = hb_ao:get(<<"body">>, EstimateReq, NodeMsg#{ hashpath => ignore }),
+ false ->
+ Messages =
+ hb_singleton:from(
+ hb_ao:get(<<"request">>, EstimateReq, NodeMsg)
+ ),
{ok, length(Messages) * hb_opts:get(simple_pay_price, 1, NodeMsg)}
end.
@@ -31,28 +33,49 @@ estimate(_, EstimateReq, NodeMsg) ->
%% can charge the user at this stage because we know statically what the price
%% will be
debit(_, RawReq, NodeMsg) ->
- case hb_ao:get(<<"type">>, RawReq, undefined, NodeMsg) of
- <<"post">> -> {ok, true};
- <<"pre">> ->
- ?event(payment, {debit_preprocessing, RawReq}),
- Req = hb_ao:get(<<"request">>, RawReq, NodeMsg#{ hashpath => ignore }),
- case hb_message:signers(Req) of
- [] -> {ok, false};
- [Signer] ->
- UserBalance = get_balance(Signer, NodeMsg),
- Price = hb_ao:get(<<"amount">>, RawReq, 0, NodeMsg),
+ ?event(payment, {debit, RawReq}),
+ Req = hb_ao:get(<<"request">>, RawReq, NodeMsg#{ hashpath => ignore }),
+ case hb_message:signers(Req) of
+ [] ->
+ ?event(payment, {debit, {error, <<"No signers">>}}),
+ {ok, false};
+ [Signer] ->
+ UserBalance = get_balance(Signer, NodeMsg),
+ Price = hb_ao:get(<<"quantity">>, RawReq, 0, NodeMsg),
+ ?event(payment,
+ {debit,
+ {user, Signer},
+ {balance, UserBalance},
+ {price, Price}
+ }),
+ {ok, _} =
+ set_balance(
+ Signer,
+ NewBalance = UserBalance - Price,
+ NodeMsg
+ ),
+ case NewBalance >= 0 of
+ true ->
+ {ok, true};
+ false ->
?event(payment,
{debit,
{user, Signer},
{balance, UserBalance},
{price, Price}
- }),
- case UserBalance >= Price of
- true ->
- set_balance(Signer, UserBalance - Price, NodeMsg),
- {ok, true};
- false -> {ok, false}
- end
+ }
+ ),
+ {error, #{
+ <<"status">> => 429,
+ <<"body">> => <<"Insufficient funds. "
+ "User balance before debit: ",
+ (hb_util:bin(UserBalance))/binary,
+ ". Price of request: ",
+ (hb_util:bin(Price))/binary,
+ ". New balance: ",
+ (hb_util:bin(NewBalance))/binary,
+ ".">>
+ }}
end
end.
@@ -60,7 +83,11 @@ debit(_, RawReq, NodeMsg) ->
balance(_, RawReq, NodeMsg) ->
Target =
case hb_ao:get(<<"request">>, RawReq, NodeMsg#{ hashpath => ignore }) of
- not_found -> hd(hb_message:signers(RawReq));
+ not_found ->
+ case hb_message:signers(RawReq) of
+ [] -> hb_ao:get(<<"target">>, RawReq, undefined, NodeMsg);
+ [Signer] -> Signer
+ end;
Req -> hd(hb_message:signers(Req))
end,
{ok, get_balance(Target, NodeMsg)}.
@@ -77,6 +104,7 @@ set_balance(Signer, Amount, NodeMsg) ->
}
),
hb_http_server:set_opts(
+ #{},
NewMsg = NodeMsg#{
simple_pay_ledger =>
hb_ao:set(
@@ -135,15 +163,14 @@ is_operator(Req, NodeMsg) ->
%%% Tests
-test_opts() -> test_opts(#{}).
test_opts(Ledger) ->
Wallet = ar_wallet:new(),
Address = hb_util:human_id(ar_wallet:to_address(Wallet)),
ProcessorMsg =
#{
<<"device">> => <<"p4@1.0">>,
- <<"ledger_device">> => <<"simple-pay@1.0">>,
- <<"pricing_device">> => <<"simple-pay@1.0">>
+ <<"ledger-device">> => <<"simple-pay@1.0">>,
+ <<"pricing-device">> => <<"simple-pay@1.0">>
},
{
Address,
@@ -152,26 +179,34 @@ test_opts(Ledger) ->
simple_pay_ledger => Ledger,
simple_pay_price => 10,
operator => Address,
- preprocessor => ProcessorMsg,
- postprocessor => ProcessorMsg
+ on => #{
+ <<"request">> => ProcessorMsg,
+ <<"response">> => ProcessorMsg
+ }
}
}.
get_balance_and_top_up_test() ->
ClientWallet = ar_wallet:new(),
ClientAddress = hb_util:human_id(ar_wallet:to_address(ClientWallet)),
- {_HostAddress, HostWallet, Opts} = test_opts(#{ClientAddress => 100}),
+ {HostAddress, HostWallet, Opts} = test_opts(#{ ClientAddress => 100 }),
Node = hb_http_server:start_node(Opts),
+ ?event({host_address, HostAddress}),
+ ?event({client_address, ClientAddress}),
{ok, Res} =
hb_http:get(
Node,
- hb_message:commit(
+ Req = hb_message:commit(
#{<<"path">> => <<"/~simple-pay@1.0/balance">>},
ClientWallet
),
#{}
),
- ?assertEqual(80, Res),
+ ?event({req_signers, hb_message:signers(Req)}),
+ % Balance is given during the request, before the charge is made, so we
+ % should expect to see the original balance.
+ ?assertEqual(100, Res),
+ % The balance should now be 80, as the check will have charged us 20.
{ok, NewBalance} =
hb_http:post(
Node,
@@ -185,6 +220,9 @@ get_balance_and_top_up_test() ->
),
#{}
),
+ % The balance should now be 180, as the topup will have been added and will
+ % not have generated a charge in itself. The top-up did not generate a charge
+ % because it is the operator that performed it, and not a user.
?assertEqual(180, NewBalance),
{ok, Res2} =
hb_http:get(
@@ -195,4 +233,4 @@ get_balance_and_top_up_test() ->
),
#{}
),
- ?assertEqual(160, Res2).
+ ?assertEqual(180, Res2).
diff --git a/src/dev_snp.erl b/src/dev_snp.erl
index a234ed591..96a68f48a 100644
--- a/src/dev_snp.erl
+++ b/src/dev_snp.erl
@@ -2,11 +2,10 @@
%%% as well as generating them, if called in an appropriate environment.
-module(dev_snp).
-export([generate/3, verify/3, trusted/3]).
--export([init/3]).
-include("include/hb.hrl").
-include_lib("eunit/include/eunit.hrl").
-define(COMMITTED_PARAMETERS, [vcpus, vcpu_type, vmm_type, guest_features,
- firmware, kernel, initrd, append]).
+ firmware, kernel, initrd, append]).
%%% Test constants
%% Matching commitment report is found in `test/snp-commitment' in
@@ -18,10 +17,17 @@
vcpu_type => 5,
vmm_type => 1,
guest_features => 1,
- firmware => <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>,
- kernel => <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
- initrd => <<"853ebf56bc6ba5f08bd5583055a457898ffa3545897bee00103d3066b8766f5c">>,
- append => <<"6cb8a0082b483849054f93b203aa7d98439736e44163d614f79380ca368cc77e">>
+ firmware =>
+ <<
+ "b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e"
+ "7e408c582ee48a74b289f3acec78510"
+ >>,
+ kernel =>
+ <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
+ initrd =>
+ <<"853ebf56bc6ba5f08bd5583055a457898ffa3545897bee00103d3066b8766f5c">>,
+ append =>
+ <<"6cb8a0082b483849054f93b203aa7d98439736e44163d614f79380ca368cc77e">>
}).
real_node_test() ->
@@ -43,34 +49,12 @@ real_node_test() ->
verify(
Report,
#{ <<"target">> => <<"self">> },
- #{ trusted => ?TEST_TRUSTED_SOFTWARE }
+ #{ snp_trusted => [?TEST_TRUSTED_SOFTWARE] }
),
?event({snp_validation_res, Result}),
?assertEqual({ok, true}, Result)
end.
-%% @doc Should take in options to set for the device such as kernel, initrd, firmware,
-%% and append hashes and make them available to the device. Only runnable once,
-%% and only if the operator is not set to an address (and thus, the node has not
-%% had any priviledged access).
-init(M1, _M2, Opts) ->
- case {hb_opts:get(trusted, #{}, Opts), hb_opts:get(operator, undefined, Opts)} of
- {#{snp_hashes := _}, _} ->
- {error, <<"Already initialized.">>};
- {_, Addr} when is_binary(Addr) ->
- {error, <<"Cannot enable SNP if operator is already set.">>};
- _ ->
- SnpHashes = hb_ao:get(<<"body">>, M1, Opts),
- SNPDecoded = hb_json:decode(SnpHashes),
- Hashes = maps:get(<<"snp_hashes">>, SNPDecoded),
- ok = hb_http_server:set_opts(Opts#{
- % Add our trusted hashes to the device's trusted software list
- trusted => maps:merge(hb_opts:get(trusted, #{}, Opts), Hashes),
- % Set our hashes to the given hashes
- snp_hashes => Hashes
- }),
- {ok, <<"SNP node initialized successfully.">>}
- end.
%% @doc Verify an commitment report message; validating the identity of a
%% remote node, its ephemeral private address, and the integrity of the report.
@@ -85,16 +69,24 @@ init(M1, _M2, Opts) ->
%% 5. Verify the measurement is valid.
%% 6. Verify the report's certificate chain to hardware root of trust.
verify(M1, M2, NodeOpts) ->
- {ok, MsgWithJSONReport} = hb_message:find_target(M1, M2, NodeOpts),
- % Normalize the request message
- ReportJSON = hb_ao:get(<<"report">>, MsgWithJSONReport, NodeOpts),
- Report = hb_json:decode(ReportJSON),
- Msg =
- maps:merge(
- maps:without([<<"report">>], MsgWithJSONReport),
- Report
- ),
- ?event({verify, Msg}),
+ ?event(snp_verify, verify_called),
+ % Search for a `body' key in the message, and if found use it as the source
+ % of the report. If not found, use the message itself as the source.
+ MsgWithJSONReport =
+ hb_util:ok(
+ hb_message:with_only_committed(
+ hb_ao:get(<<"body">>, M2, M2, NodeOpts#{ hashpath => ignore }),
+ NodeOpts
+ )
+ ),
+ % Normalize the request message
+ ReportJSON = hb_ao:get(<<"report">>, MsgWithJSONReport, NodeOpts),
+ Report = hb_json:decode(ReportJSON),
+ Msg =
+ maps:merge(
+ maps:without([<<"report">>], MsgWithJSONReport),
+ Report
+ ),
% Step 1: Verify the nonce.
Address = hb_ao:get(<<"address">>, Msg, NodeOpts),
?event({snp_address, Address}),
@@ -115,7 +107,6 @@ verify(M1, M2, NodeOpts) ->
% Step 2: Verify the address and the signature.
Signers = hb_message:signers(MsgWithJSONReport),
?event({snp_signers, {explicit, Signers}}),
- ?event({msg_with_json_report, {explicit, MsgWithJSONReport}}),
SigIsValid = hb_message:verify(MsgWithJSONReport, Signers),
?event({snp_sig_is_valid, SigIsValid}),
AddressIsValid = lists:member(Address, Signers),
@@ -127,29 +118,43 @@ verify(M1, M2, NodeOpts) ->
IsTrustedSoftware = execute_is_trusted(M1, Msg, NodeOpts),
?event({trusted_software, IsTrustedSoftware}),
% Step 5: Verify the measurement against the report's measurement.
- Args =
- maps:from_list(
- lists:map(
- fun({Key, Val}) -> {binary_to_existing_atom(Key), Val} end,
- maps:to_list(maps:with(lists:map(fun atom_to_binary/1, ?COMMITTED_PARAMETERS), Msg))
- )
- ),
- ?event({args, Args}),
+ Args =
+ maps:from_list(
+ lists:map(
+ fun({Key, Val}) -> {binary_to_existing_atom(Key), Val} end,
+ maps:to_list(
+ maps:with(
+ lists:map(
+ fun atom_to_binary/1,
+ ?COMMITTED_PARAMETERS
+ ),
+ hb_ao:get(<<"local-hashes">>, Msg, NodeOpts)
+ )
+ )
+ )
+ ),
+ ?event({args, { explicit, Args}}),
{ok,Expected} = dev_snp_nif:compute_launch_digest(Args),
- ?event({expected_measurement, Expected}),
+ ExpectedBin = list_to_binary(Expected),
+ ?event({expected_measurement, ExpectedBin}),
Measurement = hb_ao:get(<<"measurement">>, Msg, NodeOpts),
?event({measurement, {explicit,Measurement}}),
- {ok, MeasurementIsValid} = dev_snp_nif:verify_measurement(ReportJSON, list_to_binary(Expected)),
+ {Status, MeasurementIsValid} =
+ dev_snp_nif:verify_measurement(
+ ReportJSON,
+ ExpectedBin
+ ),
+ ?event({status, Status}),
?event({measurement_is_valid, MeasurementIsValid}),
% Step 6: Check the report's integrity.
{ok, ReportIsValid} = dev_snp_nif:verify_signature(ReportJSON),
- ?event({report_is_valid, ReportIsValid}),
+ ?event({report_is_valid, ReportIsValid}),
Valid =
lists:all(
fun({ok, Bool}) -> Bool; (Bool) -> Bool end,
[
NonceMatches,
- SigIsValid,
+ SigIsValid,
AddressIsValid,
DebugDisabled,
IsTrustedSoftware,
@@ -158,49 +163,55 @@ verify(M1, M2, NodeOpts) ->
]
),
?event({final_validation_result, Valid}),
- {ok, Valid}.
+ {ok, hb_util:bin(Valid)}.
%% @doc Generate an commitment report and emit it as a message, including all of
%% the necessary data to generate the nonce (ephemeral node address + node
%% message ID), as well as the expected measurement (firmware, kernel, and VMSAs
%% hashes).
generate(_M1, _M2, Opts) ->
- ?event({generate_opts, {explicit, Opts}}),
+ ?event({generate_opts, {explicit, Opts}}),
Wallet = hb_opts:get(priv_wallet, no_viable_wallet, Opts),
Address = hb_util:human_id(ar_wallet:to_address(Wallet)),
% ?event({snp_wallet, Wallet}),
% Remove the `priv*' keys from the options.
{ok, PublicNodeMsgID} =
dev_message:id(
- NodeMsg = hb_private:reset(Opts),
+ NodeMsg = hb_private:reset(Opts),
#{ <<"committers">> => <<"none">> },
Opts
),
- RawPublicNodeMsgID = hb_util:native_id(PublicNodeMsgID),
- ?event({snp_node_msg, NodeMsg}),
+ RawPublicNodeMsgID = hb_util:native_id(PublicNodeMsgID),
+ ?event({snp_node_msg, NodeMsg}),
?event({snp_node_msg_id, byte_size(RawPublicNodeMsgID)}),
+ ?event({snp_node_msg_id_bin, {explicit, io:format("~p", [RawPublicNodeMsgID])}}),
% Generate the commitment report.
- ?event({snp_address, byte_size(Address)}),
+ ?event({snp_address, byte_size(Address)}),
ReportData = generate_nonce(Address, RawPublicNodeMsgID),
- ?event({snp_report_data, byte_size(ReportData)}),
+ ?event({snp_report_data, byte_size(ReportData)}),
+
+ LocalHashes = hd(hb_opts:get(snp_trusted, [#{}], Opts)),
+ ?event(snp_local_hashes, {explicit, LocalHashes}),
+
{ok, ReportJSON} = dev_snp_nif:generate_attestation_report(ReportData, 1),
- ?event({snp_report_json, ReportJSON}),
- LocalHashes = hb_opts:get(snp_hashes, {error, not_configured}, Opts),
+ ?event({snp_report_json, ReportJSON}),
+
?event(
{snp_report_generated,
{nonce, ReportData},
{report, ReportJSON}
}
),
- ReportMsg = hb_message:commit(LocalHashes#{
+ ReportMsg = hb_message:commit(#{
+ <<"local-hashes">> => LocalHashes,
<<"nonce">> => hb_util:encode(ReportData),
<<"address">> => Address,
<<"node-message">> => NodeMsg,
- <<"report">> => ReportJSON
+ <<"report">> => ReportJSON
}, Wallet),
-
- ?event({verify_res, hb_message:verify(ReportMsg)}),
- ?event({snp_report_msg, ReportMsg}),
+
+ ?event({verify_res, hb_message:verify(ReportMsg)}),
+ ?event({snp_report_msg, ReportMsg}),
{ok, ReportMsg}.
%% @doc Ensure that the node's debug policy is disabled.
@@ -220,15 +231,18 @@ execute_is_trusted(M1, Msg, NodeOpts) ->
not_found -> M1#{ <<"device">> => <<"snp@1.0">> };
Device -> {as, Device, M1}
end,
- %?event({starting_to_validate_software, {mod_m1, {explicit, ModM1}}, {m2, {explicit, Msg}}, {node_opts, {explicit, NodeOpts}}}),
+ LocalHashes = hb_ao:get(<<"local-hashes">>, Msg, NodeOpts),
Result = lists:all(
fun(ReportKey) ->
- ReportVal = hb_ao:get(ReportKey, Msg, NodeOpts),
+ ?event(trusted, {report_key, {explicit, ReportKey}}),
+ ReportVal = hb_ao:get(ReportKey, LocalHashes, NodeOpts),
+ ?event(trusted, {report_val, {explicit, ReportVal}}),
QueryMsg = #{
<<"path">> => <<"trusted">>,
<<"key">> => ReportKey,
<<"body">> => ReportVal
},
+ ?event(trusted, {query_msg, {explicit, QueryMsg}}),
% ?event({is_trusted_query, {base, ModM1}, {query, QueryMsg}}),
% Resolve the query message against the modified base message.
{ok, KeyIsTrusted} = hb_ao:resolve(ModM1, QueryMsg, NodeOpts),
@@ -241,34 +255,63 @@ execute_is_trusted(M1, Msg, NodeOpts) ->
% ),
KeyIsTrusted
end,
- ?COMMITTED_PARAMETERS
+ ?COMMITTED_PARAMETERS
),
?event({is_all_software_trusted, Result}),
{ok, Result}.
-%% @doc Default implementation of a resolver for trusted software. Searches the
-%% `trusted' key in the base message for a list of trusted values, and checks
-%% if the value in the request message is a member of that list.
+%% @doc Validates if a given message parameter matches a trusted value from the SNP trusted list
+%% Returns {ok, true} if the message is trusted, {ok, false} otherwise
trusted(_Msg1, Msg2, NodeOpts) ->
+ % Extract the key name to check and the expected value from the message
Key = hb_ao:get(<<"key">>, Msg2, NodeOpts),
Body = hb_ao:get(<<"body">>, Msg2, not_found, NodeOpts),
- %% Ensure Trusted is always a map
- TrustedSoftware = hb_opts:get(trusted, #{}, NodeOpts),
- PropertyName = hb_ao:get(Key, TrustedSoftware, not_found, NodeOpts),
- % ?event({trust_key, PropertyName, maps:is_key(Key, TrustedSoftware)}),
- %% Final trust validation
- {ok, PropertyName == Body}.
+ ?event(trusted, {key, {explicit, Key}}),
+ ?event(trusted, {body, {explicit, Body}}),
+ %% Get trusted software list from node options
+ % This is the set of approved configurations for attestation
+ TrustedSoftware = hb_opts:get(snp_trusted, [#{}], NodeOpts),
+ %% Check if the value exists in any of the trusted maps in the list
+ IsTrusted =
+ case TrustedSoftware of
+ % Handle empty trusted software list
+ [] ->
+ false;
+ % Process list of trusted configurations
+ [_|_] when is_list(TrustedSoftware) ->
+ % Check if any trusted configuration matches
+ lists:any(
+ fun(TrustedMap) ->
+ % Check if this entry is a valid map
+ is_map(TrustedMap) andalso
+ % Get the value for the specified key from the trusted entry
+ case hb_ao:get(Key, TrustedMap, not_found, NodeOpts) of
+ not_found -> false;
+ PropertyName ->
+ ?event(trusted, {property_name, { explicit, PropertyName}}),
+ % Compare to see if it matches the expected value
+ PropertyName == Body
+ end
+ end,
+ TrustedSoftware
+ );
+
+ % Handle other cases (should not normally happen)
+ _ -> false
+ end,
+ %% Return the trust validation result
+ {ok, IsTrusted}.
%% @doc Ensure that the report data matches the expected report data.
report_data_matches(Address, NodeMsgID, ReportData) ->
- ?event({generated_nonce, binary_to_list(generate_nonce(Address, NodeMsgID))}),
- ?event({expected_nonce, binary_to_list(ReportData)}),
+ ?event({generated_nonce, {explicit, generate_nonce(Address, NodeMsgID)}}),
+ ?event({expected_nonce, {explicit, ReportData}}),
generate_nonce(Address, NodeMsgID) == ReportData.
%% @doc Generate the nonce to use in the commitment report.
generate_nonce(RawAddress, RawNodeMsgID) ->
- Address = hb_util:native_id(RawAddress),
- NodeMsgID = hb_util:native_id(RawNodeMsgID),
+ Address = hb_util:native_id(RawAddress),
+ NodeMsgID = hb_util:native_id(RawNodeMsgID),
<< Address/binary, NodeMsgID/binary >>.
%% Generate an commitment report and emit it via HTTP.
@@ -279,10 +322,15 @@ generate_nonce(RawAddress, RawNodeMsgID) ->
% vcpu_type => 5,
% vmm_type => 1,
% guest_features => 16#1,
-% firmware => "b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510",
-% kernel => "69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576",
-% initrd => "02e28b6c718bf0a5260d6f34d3c8fe0d71bf5f02af13e1bc695c6bc162120da1",
-% append => "56e1e5190622c8c6b9daa4fe3ad83f3831c305bb736735bf795b284cb462c9e7"
+% firmware =>
+% "b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b7"
+% "88e7e408c582ee48a74b289f3acec78510",
+% kernel =>
+% "69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576",
+% initrd =>
+% "02e28b6c718bf0a5260d6f34d3c8fe0d71bf5f02af13e1bc695c6bc162120da1",
+% append =>
+% "56e1e5190622c8c6b9daa4fe3ad83f3831c305bb736735bf795b284cb462c9e7"
% },
% Wallet = ar_wallet:new(),
% Addr = hb_util:human_id(ar_wallet:to_address(Wallet)),
diff --git a/src/dev_snp_nif.erl b/src/dev_snp_nif.erl
index 88a2ef136..bacf338c5 100644
--- a/src/dev_snp_nif.erl
+++ b/src/dev_snp_nif.erl
@@ -49,7 +49,7 @@ generate_attestation_report_test() ->
compute_launch_digest_test() ->
%% Define the data structure
ArgsMap = #{
- vcpus => 1,
+ vcpus => 32,
vcpu_type => 5,
vmm_type => 1,
guest_features => 16#1,
@@ -63,10 +63,9 @@ compute_launch_digest_test() ->
%% Call the NIF
{ok, Result} = dev_snp_nif:compute_launch_digest(ArgsMap),
-
%% Expected result
EncTestVector =
- <<"Lhgbg_pneEf5Ebaj1ru3lIFu7RXHY4jBVnjSd-Yk7D0jIryZ3aLdks4YOWfjajKW">>,
+ <<"wmSDSQYuzE2M3rQcourJnDJHgalADM8TBev3gyjM5ObRNOn8oglvVznFbaWhajU_">>,
?assertMatch(EncTestVector, hb_util:encode(Result)).
verify_measurement_test() ->
diff --git a/src/dev_test.erl b/src/dev_test.erl
index cd8933ce2..e1afec706 100644
--- a/src/dev_test.erl
+++ b/src/dev_test.erl
@@ -1,9 +1,7 @@
-module(dev_test).
-export([info/1, test_func/1, compute/3, init/3, restore/3, snapshot/3, mul/2]).
--export([update_state/3, increment_counter/3]).
--export([postprocess/3]).
+-export([update_state/3, increment_counter/3, delay/3]).
-export([info/3]).
--export([long_task/3]).
-include_lib("eunit/include/eunit.hrl").
-include("include/hb.hrl").
@@ -41,7 +39,7 @@ info(_Msg1, _Msg2, _Opts) ->
<<"restore">> => <<"Restore function">>,
<<"mul">> => <<"Multiply function">>,
<<"snapshot">> => <<"Snapshot function">>,
- <<"postprocess">> => <<"Postprocess function">>,
+ <<"response">> => <<"Response function">>,
<<"update_state">> => <<"Update state function">>
}
},
@@ -108,12 +106,6 @@ mul(Msg1, Msg2) ->
snapshot(_Msg1, _Msg2, _Opts) ->
{ok, #{}}.
-%% @doc Set the `postprocessor-called' key to true in the HTTP server.
-postprocess(_Msg, #{ <<"body">> := Msgs }, Opts) ->
- ?event({postprocess_called, Opts}),
- hb_http_server:set_opts(Opts#{ <<"postprocessor-called">> => true }),
- {ok, Msgs}.
-
%% @doc Find a test worker's PID and send it an update message.
update_state(_Msg, Msg2, _Opts) ->
case hb_ao:get(<<"test-id">>, Msg2) of
@@ -148,12 +140,30 @@ increment_counter(_Msg1, Msg2, _Opts) ->
end
end.
-%% @doc Does nothing, just sleeps for 3 seconds to simulate a long-running task.
-long_task(_Msg1, _Msg2, _Opts) ->
- ?event({'dev_test:long_task:sleeping'}),
- timer:sleep(750),
- ?event({'dev_test:long_task:waking'}),
- {ok, #{<<"result">> => <<"slept">>}}.
+%% @doc Does nothing, just sleeps `Req/duration or 750' ms and returns the
+%% appropriate form in order to be used as a hook.
+delay(Msg1, Req, Opts) ->
+ Duration =
+ hb_ao:get_first(
+ [
+ {Msg1, <<"duration">>},
+ {Req, <<"duration">>}
+ ],
+ 750,
+ Opts
+ ),
+ ?event(delay, {delay, {sleeping, Duration}}),
+ timer:sleep(Duration),
+ ?event({delay, waking}),
+ Return =
+ case hb_ao:get(<<"return">>, Msg1, Opts) of
+ not_found ->
+ hb_ao:get(<<"body">>, Req, #{ <<"result">> => <<"slept">> }, Opts);
+ ReturnMsgs ->
+ ReturnMsgs
+ end,
+ ?event(delay, {returning, Return}),
+ {ok, Return}.
%%% Tests
diff --git a/src/dev_volume.erl b/src/dev_volume.erl
new file mode 100644
index 000000000..62a0a4705
--- /dev/null
+++ b/src/dev_volume.erl
@@ -0,0 +1,409 @@
+%%% @doc Secure Volume Management for HyperBEAM Nodes
+%%%
+%%% This module handles encrypted storage operations for HyperBEAM, providing
+%%% a robust and secure approach to data persistence. It manages the complete
+%%% lifecycle of encrypted volumes from detection to creation, formatting, and
+%%% mounting.
+%%%
+%%% Key responsibilities:
+%%% - Volume detection and initialization
+%%% - Encrypted partition creation and formatting
+%%% - Secure mounting using cryptographic keys
+%%% - Store path reconfiguration to use mounted volumes
+%%% - Automatic handling of various system states
+%%% (new device, existing partition, etc.)
+%%%
+%%% The primary entry point is the `mount/3' function, which orchestrates the
+%%% entire process based on the provided configuration parameters. This module
+%%% works alongside `hb_volume' which provides the low-level operations for
+%%% device manipulation.
+%%%
+%%% Security considerations:
+%%% - Ensures data at rest is protected through LUKS encryption
+%%% - Provides proper volume sanitization and secure mounting
+%%% - IMPORTANT: This module only applies configuration set in node options and
+%%% does NOT accept disk operations via HTTP requests. It cannot format arbitrary
+%%% disks as all operations are safeguarded by host operating system permissions
+%%% enforced upon the HyperBEAM environment.
+-module(dev_volume).
+-export([info/1, info/3, mount/3, public_key/3]).
+-include("include/hb.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("public_key/include/public_key.hrl").
+
+%% @doc Exported function for getting device info, controls which functions are
+%% exposed via the device API.
+info(_) ->
+ #{ exports => [info, mount, public_key] }.
+
+%% @doc HTTP info response providing information about this device
+info(_Msg1, _Msg2, _Opts) ->
+ InfoBody = #{
+ <<"description">> => <<"Secure Volume Management for HyperBEAM Nodes">>,
+ <<"version">> => <<"1.0">>,
+ <<"api">> => #{
+ <<"info">> => #{
+ <<"description">> => <<"Get device info">>
+ },
+ <<"mount">> => #{
+ <<"description">> => <<"Mount an encrypted volume">>,
+ <<"required_node_opts">> => #{
+ <<"volume_key">> => <<"The encryption key">>,
+ <<"volume_device">> => <<"The base device path">>,
+ <<"volume_partition">> => <<"The partition path">>,
+ <<"volume_partition_type">> => <<"The partition type">>,
+ <<"volume_name">> => <<"The name for the encrypted volume">>,
+ <<"volume_mount_point">> => <<"Where to mount the volume">>,
+ <<"volume_store_path">> => <<"The store path on the volume">>
+ }
+ },
+ <<"public_key">> => #{
+ <<"description">> => <<"Get the node's public key for encrypted key exchange">>
+ }
+ }
+ },
+ {ok, #{<<"status">> => 200, <<"body">> => InfoBody}}.
+
+%% @doc Handles the complete process of secure encrypted volume mounting.
+%%
+%% This function performs the following operations depending on the state:
+%% 1. Validates the encryption key is present
+%% 2. Checks if the base device exists
+%% 3. Checks if the partition exists on the device
+%% 4. If the partition exists, attempts to mount it
+%% 5. If the partition doesn't exist, creates it, formats it with encryption
+%% and mounts it
+%% 6. Updates the node's store configuration to use the mounted volume
+%%
+%% Config options in Opts map:
+%% - volume_key: (Required) The encryption key
+%% - volume_device: Base device path
+%% - volume_partition: Partition path
+%% - volume_partition_type: Filesystem type
+%% - volume_name: Name for encrypted volume
+%% - volume_mount_point: Where to mount
+%% - volume_store_path: Store path on volume
+%%
+%% @param M1 Base message for context.
+%% @param M2 Request message with operation details.
+%% @param Opts A map of configuration options for volume operations.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec mount(term(), term(), map()) -> {ok, binary()} | {error, binary()}.
+mount(_M1, _M2, Opts) ->
+ % Check if an encrypted key was sent in the request
+ EncryptedKey = hb_opts:get(volume_key, not_found, Opts),
+ % Determine if we need to decrypt a key or use one from config
+ ?event(debug_mount, {mount, encrypted_key, EncryptedKey}),
+ Key = case decrypt_volume_key(EncryptedKey, Opts) of
+ {ok, DecryptedKey} ->
+ ?event(debug_mount, {mount, decrypted_key, DecryptedKey}),
+ DecryptedKey;
+ {error, DecryptError} ->
+ ?event(debug_mount, {mount, key_decrypt_error, DecryptError}),
+ not_found
+ end,
+ Device = hb_opts:get(volume_device, not_found, Opts),
+ Partition = hb_opts:get(volume_partition, not_found, Opts),
+ PartitionType = hb_opts:get(volume_partition_type, not_found, Opts),
+ VolumeName = hb_opts:get(volume_name, not_found, Opts),
+ MountPoint = hb_opts:get(volume_mount_point, not_found, Opts),
+ StorePath = hb_opts:get(volume_store_path, not_found, Opts),
+ % Check for missing required node options
+ case hb_opts:check_required_opts([
+ {<<"volume_key">>, Key},
+ {<<"volume_device">>, Device},
+ {<<"volume_partition">>, Partition},
+ {<<"volume_partition_type">>, PartitionType},
+ {<<"volume_name">>, VolumeName},
+ {<<"volume_mount_point">>, MountPoint},
+ {<<"volume_store_path">>, StorePath}
+ ], Opts) of
+ {ok, _} ->
+ ?event(debug_mount, {mount, device, Device}),
+ ?event(debug_mount, {mount, partition, Partition}),
+ ?event(debug_mount, {mount, partition_type, PartitionType}),
+ ?event(debug_mount, {mount, mount_point, MountPoint}),
+ check_base_device(
+ Device, Partition, PartitionType, VolumeName,
+ MountPoint, StorePath, Key, Opts
+ );
+ {error, ErrorMsg} ->
+ ?event(mount, {error, ErrorMsg}),
+ {error, ErrorMsg}
+ end.
+
+%% @doc Returns the node's public key for secure key exchange.
+%%
+%% This function retrieves the node's wallet and extracts the public key
+%% for encryption purposes. It allows users to securely exchange encryption keys
+%% by first encrypting their volume key with the node's public key.
+%%
+%% The process ensures that sensitive keys are never transmitted in plaintext.
+%% The encrypted key can then be securely sent to the node, which will decrypt it
+%% using its private key before using it for volume encryption.
+%%
+%% @param _M1 Ignored parameter.
+%% @param _M2 Ignored parameter.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Map}' containing the node's public key on success, or
+%% `{error, Binary}' if the node's wallet is not available.
+-spec public_key(term(), term(), map()) -> {ok, map()} | {error, binary()}.
+public_key(_M1, _M2, Opts) ->
+ ?event(volume, {public_key, start}),
+ % Retrieve the node's wallet
+ case hb_opts:get(priv_wallet, undefined, Opts) of
+ undefined ->
+ % Node doesn't have a wallet yet
+ ?event(volume, {public_key, error, <<"no wallet found">>}),
+ {error, <<"Node wallet not available">>};
+ {{_KeyType, _Priv, Pub}, _PubKey} ->
+ % Convert to a standard RSA format (PKCS#1 or X.509)
+ RsaPubKey = #'RSAPublicKey'{
+ publicExponent = 65537, % Common RSA exponent
+ modulus = crypto:bytes_to_integer(Pub)
+ },
+ % Convert to DER format
+ DerEncoded = public_key:der_encode('RSAPublicKey', RsaPubKey),
+ % Base64 encode for transmission
+ Base64Key = base64:encode(DerEncoded),
+ {ok, #{
+ <<"status">> => 200,
+ <<"public_key">> => Base64Key,
+ <<"message">> => <<"Use this public key to encrypt your volume key">>
+ }}
+ end.
+
+%% @doc Decrypts an encrypted volume key using the node's private key.
+%%
+%% This function takes an encrypted key (typically sent by a client who encrypted
+%% it with the node's public key) and decrypts it using the node's private RSA key.
+%%
+%% @param EncryptedKey The encrypted volume key (Base64 encoded).
+%% @param Opts A map of configuration options.
+%% @returns `{ok, DecryptedKey}' on successful decryption, or
+%% `{error, Binary}' if decryption fails.
+-spec decrypt_volume_key(binary(), map()) -> {ok, binary()} | {error, binary()}.
+decrypt_volume_key(EncryptedKeyBase64, Opts) ->
+ % Decode the encrypted key
+ try
+ EncryptedKey = base64:decode(EncryptedKeyBase64),
+ ?event(debug_mount, {decrypt_volume_key, encrypted_key, EncryptedKey}),
+ % Retrieve the node's wallet with private key
+ case hb_opts:get(priv_wallet, undefined, Opts) of
+ undefined ->
+ {error, <<"Node wallet not available for decryption">>};
+ {{_KeyType = {rsa, E}, Priv, Pub}, _PubKey} ->
+ % Create RSA private key record for decryption
+ RsaPrivKey = #'RSAPrivateKey'{
+ publicExponent = E,
+ modulus = crypto:bytes_to_integer(Pub),
+ privateExponent = crypto:bytes_to_integer(Priv)
+ },
+ % Decrypt the key
+ DecryptedKey = public_key:decrypt_private(EncryptedKey, RsaPrivKey),
+ {ok, DecryptedKey}
+ end
+ catch
+ _:Error ->
+ ?event(debug_mount, {decrypt_volume_key, error, Error}),
+ {error, <<"Failed to decrypt volume key">>}
+ end.
+
+%% @doc Check if the base device exists and if it does, check if the partition exists.
+%% @param Device The base device to check.
+%% @param Partition The partition to check.
+%% @param PartitionType The type of partition to check.
+%% @param VolumeName The name of the volume to check.
+%% @param MountPoint The mount point for the encrypted volume.
+%% @param StorePath The store path to use on the mounted volume.
+%% @param Key The encryption key for the volume.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec check_base_device(
+ term(), term(), term(), term(), term(), term(), term(), map()
+) -> {ok, binary()} | {error, binary()}.
+check_base_device(
+ Device, Partition, PartitionType, VolumeName, MountPoint, StorePath,
+ Key, Opts
+) ->
+ case hb_volume:check_for_device(Device) of
+ false ->
+ % Base device doesn't exist
+ ?event(debug_mount,
+ {device_check, error, <<"Base device not found">>}
+ ),
+ {error, <<"Base device not found">>};
+ true ->
+ check_partition(
+ Device, Partition, PartitionType, VolumeName,
+ MountPoint, StorePath, Key, Opts
+ )
+ end.
+
+%% @doc Check if the partition exists. If it does, attempt to mount it.
+%% If it doesn't exist, create it, format it with encryption and mount it.
+%% @param Device The base device to check.
+%% @param Partition The partition to check.
+%% @param PartitionType The type of partition to check.
+%% @param VolumeName The name of the volume to check.
+%% @param MountPoint The mount point for the encrypted volume.
+%% @param StorePath The store path to use on the mounted volume.
+%% @param Key The encryption key for the volume.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec check_partition(
+ term(), term(), term(), term(), term(), term(), term(), map()
+) -> {ok, binary()} | {error, binary()}.
+check_partition(
+ Device, Partition, PartitionType, VolumeName, MountPoint, StorePath,
+ Key, Opts
+) ->
+ case hb_volume:check_for_device(Partition) of
+ true ->
+ % Partition exists, try mounting it
+ mount_existing_partition(
+ Partition, Key, MountPoint, VolumeName, StorePath, Opts
+ );
+ false ->
+ % Partition doesn't exist, create it
+ create_and_mount_partition(
+ Device, Partition, PartitionType, Key,
+ MountPoint, VolumeName, StorePath, Opts
+ )
+ end.
+
+%% @doc Mount an existing partition.
+%% @param Partition The partition to mount.
+%% @param Key The encryption key used to unlock the partition.
+%% @param MountPoint The mount point for the encrypted volume.
+%% @param VolumeName The name to assign to the mounted volume.
+%% @param StorePath The store path to use on the mounted volume.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec mount_existing_partition(
+ term(), term(), term(), term(), term(), map()
+) -> {ok, binary()} | {error, binary()}.
+mount_existing_partition(
+ Partition, Key, MountPoint, VolumeName, StorePath, Opts
+) ->
+ ?event(debug_mount, {mount_volume, attempt, Partition}),
+ case hb_volume:mount_disk(Partition, Key, MountPoint, VolumeName) of
+ {ok, MountResult} ->
+ ?event(debug_mount, {mount_volume, success, MountResult}),
+ update_store_path(StorePath, Opts);
+ {error, MountError} ->
+ ?event(debug_mount, {mount_volume, error, MountError}),
+ {error, <<"Failed to mount volume">>}
+ end.
+
+%% @doc Create, format and mount a new partition.
+%% @param Device The device to create the partition on.
+%% @param Partition The partition to create.
+%% @param PartitionType The type of partition to create.
+%% @param Key The key to create the partition with.
+%% @param MountPoint The mount point to mount the partition to.
+%% @param VolumeName The name of the volume to mount.
+%% @param StorePath The store path to use on the mounted volume.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec create_and_mount_partition(
+ term(), term(), term(), term(), term(), term(), term(), map()
+) -> {ok, binary()} | {error, binary()}.
+create_and_mount_partition(
+ Device, Partition, PartitionType, Key,
+ MountPoint, VolumeName, StorePath, Opts
+) ->
+ ?event(debug_mount, {create_partition, attempt, Device}),
+ case hb_volume:create_partition(Device, PartitionType) of
+ {ok, PartitionResult} ->
+ ?event(debug_mount, {partition_create, success, PartitionResult}),
+ format_and_mount(
+ Partition, Key, MountPoint, VolumeName, StorePath, Opts
+ );
+ {error, PartitionError} ->
+ ?event(debug_mount, {partition_create, error, PartitionError}),
+ {error, <<"Failed to create partition">>}
+ end.
+
+%% @doc Format and mount a newly created partition.
+%% @param Partition The partition to format and mount.
+%% @param Key The key to format and mount the partition with.
+%% @param MountPoint The mount point to mount the partition to.
+%% @param VolumeName The name of the volume to mount.
+%% @param StorePath The store path to use on the mounted volume.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec format_and_mount(
+ term(), term(), term(), term(), term(), map()
+) -> {ok, binary()} | {error, binary()}.
+format_and_mount(
+ Partition, Key, MountPoint, VolumeName, StorePath, Opts
+) ->
+ case hb_volume:format_disk(Partition, Key) of
+ {ok, FormatResult} ->
+ ?event(debug_mount, {format_disk, success, FormatResult}),
+ mount_formatted_partition(
+ Partition, Key, MountPoint, VolumeName, StorePath, Opts
+ );
+ {error, FormatError} ->
+ ?event(debug_mount, {format_disk, error, FormatError}),
+ {error, <<"Failed to format disk">>}
+ end.
+
+%% @doc Mount a newly formatted partition.
+%% @param Partition The partition to mount.
+%% @param Key The key to mount the partition with.
+%% @param MountPoint The mount point to mount the partition to.
+%% @param VolumeName The name of the volume to mount.
+%% @param StorePath The store path to use on the mounted volume.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec mount_formatted_partition(
+ term(), term(), term(), term(), term(), map()
+) -> {ok, binary()} | {error, binary()}.
+mount_formatted_partition(
+ Partition, Key, MountPoint, VolumeName, StorePath, Opts
+) ->
+ case hb_volume:mount_disk(Partition, Key, MountPoint, VolumeName) of
+ {ok, RetryMountResult} ->
+ ?event(debug_mount, {mount_volume, success, RetryMountResult}),
+ update_store_path(StorePath, Opts);
+ {error, RetryMountError} ->
+ ?event(debug_mount, {mount_volume, error, RetryMountError}),
+ {error, <<"Failed to mount newly formatted volume">>}
+ end.
+
+%% @doc Update the store path to use the mounted volume.
+%% @param StorePath The store path to update.
+%% @param Opts A map of configuration options.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec update_store_path(term(), map()) -> {ok, binary()} | {error, binary()}.
+update_store_path(StorePath, Opts) ->
+ CurrentStore = hb_opts:get(store, [], Opts),
+ case hb_volume:change_node_store(StorePath, CurrentStore) of
+ {ok, #{<<"store">> := NewStore} = StoreResult} ->
+ ?event(debug_mount, {store_update, success, StoreResult}),
+ update_node_config(NewStore, Opts);
+ {error, StoreError} ->
+ ?event(debug_mount, {store_update, error, StoreError}),
+ {error, <<"Failed to update store">>}
+ end.
+
+%% @doc Update the node's configuration with the new store.
+%% @param NewStore The new store to update the node's configuration with.
+%% @param Opts A map of configuration options used as the base node config.
+%% @returns `{ok, Binary}' on success with operation result message, or
+%% `{error, Binary}' on failure with error message.
+-spec update_node_config(term(), map()) -> {ok, binary()} | {error, binary()}.
+update_node_config(NewStore, Opts) ->
+ ok = hb_http_server:set_opts(Opts#{store => NewStore}),
+ ?event(debug_mount, {store_update, config_updated}),
+ {ok, <<"Volume mounted and store updated successfully">>}.
diff --git a/src/hb.erl b/src/hb.erl
index 9296c97d7..9525cc4d5 100644
--- a/src/hb.erl
+++ b/src/hb.erl
@@ -163,13 +163,15 @@ do_start_simple_pay(Opts) ->
Processor =
#{
<<"device">> => <<"p4@1.0">>,
- <<"ledger_device">> => <<"simple-pay@1.0">>,
- <<"pricing_device">> => <<"simple-pay@1.0">>
+ <<"ledger-device">> => <<"simple-pay@1.0">>,
+ <<"pricing-device">> => <<"simple-pay@1.0">>
},
hb_http_server:start_node(
Opts#{
- preprocessor => Processor,
- postprocessor => Processor
+ on => #{
+ <<"request">> => Processor,
+ <<"response">> => Processor
+ }
}
),
io:format(
diff --git a/src/hb_ao.erl b/src/hb_ao.erl
index 2b8226ee5..dfff9ce30 100644
--- a/src/hb_ao.erl
+++ b/src/hb_ao.erl
@@ -116,13 +116,14 @@
%% 4: Persistent-resolver lookup.
%% 5: Device lookup.
%% 6: Execution.
-%% 7: Cryptographic linking.
-%% 8: Result caching.
-%% 9: Notify waiters.
-%% 10: Fork worker.
-%% 11: Recurse or terminate.
-
-resolve(SingletonMsg, Opts) when is_map(SingletonMsg) ->
+%% 7: Execution of the `step' hook.
+%% 8: Subresolution.
+%% 9: Cryptographic linking.
+%% 10: Result caching.
+%% 11: Notify waiters.
+%% 12: Fork worker.
+%% 13: Recurse or terminate.
+resolve(SingletonMsg, Opts) ->
resolve_many(hb_singleton:from(SingletonMsg), Opts).
resolve(Msg1, Path, Opts) when not is_map(Path) ->
@@ -170,13 +171,15 @@ resolve_many(ListMsg, Opts) when is_map(ListMsg) ->
resolve_many(ListOfMessages, Opts);
resolve_many({as, DevID, Msg}, Opts) ->
subresolve(#{}, DevID, Msg, Opts);
+resolve_many([{resolve, Subres}], Opts) ->
+ resolve_many(Subres, Opts);
resolve_many(MsgList, Opts) ->
?event(ao_core, {resolve_many, MsgList}, Opts),
Res = do_resolve_many(MsgList, Opts),
?event(ao_core, {resolve_many_complete, {res, Res}, {req, MsgList}}, Opts),
Res.
do_resolve_many([Msg3], _Opts) ->
- ?event(ao_core, {stage, 11, resolve_complete, Msg3}),
+ ?event(ao_core, {stage, 13, resolve_complete, Msg3}),
{ok, Msg3};
do_resolve_many([Msg1, Msg2 | MsgList], Opts) ->
?event(ao_core, {stage, 0, resolve_many, {msg1, Msg1}, {msg2, Msg2}, {opts, Opts}}),
@@ -185,8 +188,8 @@ do_resolve_many([Msg1, Msg2 | MsgList], Opts) ->
?event(ao_core,
{
stage,
- 11,
- resolved_message_of_many,
+ 13,
+ resolved_step,
{msg3, Msg3},
{opts, Opts}
},
@@ -194,7 +197,8 @@ do_resolve_many([Msg1, Msg2 | MsgList], Opts) ->
),
do_resolve_many([Msg3 | MsgList], Opts);
Res ->
- ?event(ao_core, {stage, 11, resolve_many_terminating_early, Res}),
+ % The result is not a resolvable message. Return it.
+ ?event(ao_core, {stage, 13, resolve_many_terminating_early, Res}),
Res
end.
@@ -247,6 +251,59 @@ resolve_stage(1, RawMsg1, Msg2Outer = #{ <<"path">> := {as, DevID, Msg2Inner} },
if is_map(Msg2Inner) -> Msg2Inner; true -> #{ <<"path">> => Msg2Inner } end
),
subresolve(RawMsg1, DevID, Msg2, Opts);
+resolve_stage(1, {resolve, Subres}, Msg2, Opts) ->
+ % If the first message is a `{resolve, Subres}' tuple, we should execute it
+ % directly, then apply the request to the result.
+ ?event(ao_core, {stage, 1, subresolving_base_message, {subres, Subres}}, Opts),
+ % Unlike the `request' case for pre-subresolutions, we do not need to unset
+    % the `force_message' option, because the result should already be a message.
+    % If it is not, it is more helpful to have the value placed into the `body'
+    % of a result message, which can then be resolved against further requests.
+ case resolve_many(Subres, Opts) of
+ {ok, Msg1} ->
+ ?event(ao_core, {stage, 1, subresolve_success, {new_base, Msg1}}, Opts),
+ resolve_stage(1, Msg1, Msg2, Opts);
+ OtherRes ->
+ ?event(ao_core,
+ {stage,
+ 1,
+ subresolve_failed,
+ {subres, Subres},
+ {res, OtherRes}},
+ Opts
+ ),
+ OtherRes
+ end;
+resolve_stage(1, Msg1, {resolve, Subres}, Opts) ->
+ % If the second message is a `{resolve, Subresolution}' tuple, we should
+ % execute the subresolution directly to gain the underlying `Msg2' for
+ % our execution. We assume that the subresolution is already in a normalized,
+ % executable form, so we pass it to `resolve_many' for execution.
+ ?event(ao_core, {stage, 1, subresolving_request_message, {subres, Subres}}, Opts),
+ % We make sure to unset the `force_message' option so that if the subresolution
+ % returns a literal, the rest of `resolve' will normalize it to a path.
+ case resolve_many(Subres, maps:without([force_message], Opts)) of
+ {ok, Msg2} ->
+ ?event(
+ ao_core,
+ {stage, 1, request_subresolve_success, {msg2, Msg2}},
+ Opts
+ ),
+ resolve_stage(1, Msg1, Msg2, Opts);
+ OtherRes ->
+ ?event(
+ ao_core,
+ {
+ stage,
+ 1,
+ request_subresolve_failed,
+ {subres, Subres},
+ {res, OtherRes}
+ },
+ Opts
+ ),
+ OtherRes
+ end;
resolve_stage(1, Msg1, Msg2, Opts) when is_list(Msg1) ->
% Normalize lists to numbered maps (base=1) if necessary.
?event(ao_core, {stage, 1, list_normalize}, Opts),
@@ -495,11 +552,48 @@ resolve_stage(6, Func, Msg1, Msg2, ExecName, Opts) ->
)
end,
resolve_stage(7, Msg1, Msg2, Res, ExecName, Opts);
-resolve_stage(7, Msg1, Msg2, {ok, Msg3}, ExecName, Opts) when is_map(Msg3) ->
- ?event(ao_core, {stage, 7, ExecName, generate_hashpath}, Opts),
+resolve_stage(7, Msg1, Msg2, {St, Res}, ExecName, Opts = #{ on := On = #{ <<"step">> := _ }}) ->
+ ?event(ao_core, {stage, 7, ExecName, executing_step_hook, {on, On}}, Opts),
+ % If the `step' hook is defined, we execute it. Note: This function clause
+ % matches directly on the `on' key of the `Opts' map. This is in order to
+ % remove the expensive lookup check that would otherwise be performed on every
+ % execution.
+ HookReq = #{
+ <<"base">> => Msg1,
+ <<"request">> => Msg2,
+ <<"status">> => St,
+ <<"body">> => Res
+ },
+ case dev_hook:on(<<"step">>, HookReq, Opts) of
+ {ok, #{ <<"status">> := NewStatus, <<"body">> := NewRes }} ->
+ resolve_stage(8, Msg1, Msg2, {NewStatus, NewRes}, ExecName, Opts);
+ Error ->
+ ?event(
+ ao_core,
+ {step_hook_error,
+ {error, Error},
+ {hook_req, HookReq}
+ },
+ Opts
+ ),
+ Error
+ end;
+resolve_stage(7, Msg1, Msg2, Res, ExecName, Opts) ->
+ ?event(ao_core, {stage, 7, ExecName, no_step_hook}, Opts),
+ resolve_stage(8, Msg1, Msg2, Res, ExecName, Opts);
+resolve_stage(8, Msg1, Msg2, {ok, {resolve, Sublist}}, ExecName, Opts) ->
+ ?event(ao_core, {stage, 8, ExecName, subresolve_result}, Opts),
+ % If the result is a `{resolve, Sublist}' tuple, we need to execute it
+ % as a sub-resolution.
+ resolve_stage(9, Msg1, Msg2, resolve_many(Sublist, Opts), ExecName, Opts);
+resolve_stage(8, Msg1, Msg2, Res, ExecName, Opts) ->
+ ?event(ao_core, {stage, 8, ExecName, no_subresolution_necessary}, Opts),
+ resolve_stage(9, Msg1, Msg2, Res, ExecName, Opts);
+resolve_stage(9, Msg1, Msg2, {ok, Msg3}, ExecName, Opts) when is_map(Msg3) ->
+ ?event(ao_core, {stage, 9, ExecName, generate_hashpath}, Opts),
% Cryptographic linking. Now that we have generated the result, we
% need to cryptographically link the output to its input via a hashpath.
- resolve_stage(8, Msg1, Msg2,
+ resolve_stage(10, Msg1, Msg2,
case hb_opts:get(hashpath, update, Opts#{ only => local }) of
update ->
Priv = hb_private:from_message(Msg3),
@@ -523,37 +617,37 @@ resolve_stage(7, Msg1, Msg2, {ok, Msg3}, ExecName, Opts) when is_map(Msg3) ->
ExecName,
Opts
);
-resolve_stage(7, Msg1, Msg2, {Status, Msg3}, ExecName, Opts) when is_map(Msg3) ->
- ?event(ao_core, {stage, 7, ExecName, abnormal_status_reset_hashpath}, Opts),
+resolve_stage(9, Msg1, Msg2, {Status, Msg3}, ExecName, Opts) when is_map(Msg3) ->
+ ?event(ao_core, {stage, 9, ExecName, abnormal_status_reset_hashpath}, Opts),
?event(hashpath, {resetting_hashpath_msg3, {msg1, Msg1}, {msg2, Msg2}, {opts, Opts}}),
% Skip cryptographic linking and reset the hashpath if the result is abnormal.
Priv = hb_private:from_message(Msg3),
resolve_stage(
- 8, Msg1, Msg2,
+ 10, Msg1, Msg2,
{Status, Msg3#{ <<"priv">> => maps:without([<<"hashpath">>], Priv) }},
ExecName, Opts);
-resolve_stage(7, Msg1, Msg2, Res, ExecName, Opts) ->
- ?event(ao_core, {stage, 7, ExecName, non_map_result_skipping_hash_path}, Opts),
+resolve_stage(9, Msg1, Msg2, Res, ExecName, Opts) ->
+ ?event(ao_core, {stage, 9, ExecName, non_map_result_skipping_hash_path}, Opts),
% Skip cryptographic linking and continue if we don't have a map that can have
% a hashpath at all.
- resolve_stage(8, Msg1, Msg2, Res, ExecName, Opts);
-resolve_stage(8, Msg1, Msg2, {ok, Msg3}, ExecName, Opts) ->
- ?event(ao_core, {stage, 8, ExecName, result_caching}, Opts),
+ resolve_stage(10, Msg1, Msg2, Res, ExecName, Opts);
+resolve_stage(10, Msg1, Msg2, {ok, Msg3}, ExecName, Opts) ->
+ ?event(ao_core, {stage, 10, ExecName, result_caching}, Opts),
% Result caching: Optionally, cache the result of the computation locally.
hb_cache_control:maybe_store(Msg1, Msg2, Msg3, Opts),
- resolve_stage(9, Msg1, Msg2, {ok, Msg3}, ExecName, Opts);
-resolve_stage(8, Msg1, Msg2, Res, ExecName, Opts) ->
- ?event(ao_core, {stage, 8, ExecName, abnormal_status_skip_caching}, Opts),
+ resolve_stage(11, Msg1, Msg2, {ok, Msg3}, ExecName, Opts);
+resolve_stage(10, Msg1, Msg2, Res, ExecName, Opts) ->
+ ?event(ao_core, {stage, 10, ExecName, abnormal_status_skip_caching}, Opts),
% Skip result caching if the result is abnormal.
- resolve_stage(9, Msg1, Msg2, Res, ExecName, Opts);
-resolve_stage(9, Msg1, Msg2, Res, ExecName, Opts) ->
- ?event(ao_core, {stage, 9, ExecName}, Opts),
+ resolve_stage(11, Msg1, Msg2, Res, ExecName, Opts);
+resolve_stage(11, Msg1, Msg2, Res, ExecName, Opts) ->
+ ?event(ao_core, {stage, 11, ExecName}, Opts),
% Notify processes that requested the resolution while we were executing and
% unregister ourselves from the group.
hb_persistent:unregister_notify(ExecName, Msg2, Res, Opts),
- resolve_stage(10, Msg1, Msg2, Res, ExecName, Opts);
-resolve_stage(10, _Msg1, _Msg2, {ok, Msg3} = Res, ExecName, Opts) ->
- ?event(ao_core, {stage, 10, ExecName, maybe_spawn_worker}, Opts),
+ resolve_stage(12, Msg1, Msg2, Res, ExecName, Opts);
+resolve_stage(12, _Msg1, _Msg2, {ok, Msg3} = Res, ExecName, Opts) ->
+ ?event(ao_core, {stage, 12, ExecName, maybe_spawn_worker}, Opts),
% Check if we should spawn a worker for the current execution
case {is_map(Msg3), hb_opts:get(spawn_worker, false, Opts#{ prefer => local })} of
{A, B} when (A == false) or (B == false) ->
@@ -564,8 +658,8 @@ resolve_stage(10, _Msg1, _Msg2, {ok, Msg3} = Res, ExecName, Opts) ->
hb_persistent:forward_work(WorkerPID, Opts),
Res
end;
-resolve_stage(10, _Msg1, _Msg2, OtherRes, ExecName, Opts) ->
- ?event(ao_core, {stage, 10, ExecName, abnormal_status_skip_spawning}, Opts),
+resolve_stage(12, _Msg1, _Msg2, OtherRes, ExecName, Opts) ->
+ ?event(ao_core, {stage, 12, ExecName, abnormal_status_skip_spawning}, Opts),
OtherRes.
%% @doc Execute a sub-resolution.
@@ -594,7 +688,11 @@ subresolve(RawMsg1, DevID, Req, Opts) ->
case map_size(maps:without([<<"path">>], Req)) of
0 -> Msg1b;
_ ->
- set(Msg1b, maps:without([<<"path">>], Req), Opts#{ force_message => false })
+ set(
+ Msg1b,
+ maps:without([<<"path">>], Req),
+ Opts#{ force_message => false }
+ )
end,
?event(subresolution,
{subresolve_modified_base, Msg1c},
@@ -712,6 +810,8 @@ maybe_force_message({Status, Res}, Opts) ->
force_message({Status, Res}, Opts) when is_list(Res) ->
force_message({Status, normalize_keys(Res)}, Opts);
+force_message({Status, Subres = {resolve, _}}, _Opts) ->
+ {Status, Subres};
force_message({Status, Literal}, _Opts) when not is_map(Literal) ->
?event({force_message_from_literal, Literal}),
{Status, #{ <<"ao-result">> => <<"body">>, <<"body">> => Literal }};
@@ -851,22 +951,33 @@ set(Msg1, Key, Value, Opts) ->
deep_set(Msg1, Path, Value, Opts).
%% @doc Recursively search a map, resolving keys, and set the value of the key
-%% at the given path.
+%% at the given path. This function has special cases for handling `set' calls
+%% where the path is an empty list (`/'). In this case, if the value is an
+%% immediate, non-complex term, we can set it directly. Otherwise, we use the
+%% device's `set' function to set the value.
+deep_set(Msg, [], Value, Opts) when is_map(Msg) or is_list(Msg) ->
+ device_set(Msg, <<"/">>, Value, Opts);
+deep_set(_Msg, [], Value, _Opts) ->
+ Value;
deep_set(Msg, [Key], Value, Opts) ->
- device_set(Msg, Key, Value, Opts);
+ DevRes = device_set(Msg, Key, Value, Opts),
+ ?event(debug, {deep_device_set_result, {msg, Msg}, {key, Key}, {res, DevRes}}),
+ DevRes;
deep_set(Msg, [Key|Rest], Value, Opts) ->
case resolve(Msg, Key, Opts) of
{ok, SubMsg} ->
- ?event(
+ ?event(debug,
{traversing_deeper_to_set,
{current_key, Key},
{current_value, SubMsg},
{rest, Rest}
}
),
- device_set(Msg, Key, deep_set(SubMsg, Rest, Value, Opts), Opts);
+ Res = device_set(Msg, Key, deep_set(SubMsg, Rest, Value, Opts), <<"explicit">>, Opts),
+ ?event(debug, {deep_set_result, {msg, Msg}, {key, Key}, {res, Res}}),
+ Res;
_ ->
- ?event(
+ ?event(debug,
{creating_new_map,
{current_key, Key},
{rest, Rest}
@@ -877,30 +988,43 @@ deep_set(Msg, [Key|Rest], Value, Opts) ->
%% @doc Call the device's `set' function.
device_set(Msg, Key, Value, Opts) ->
- Req =
+ device_set(Msg, Key, Value, <<"deep">>, Opts).
+device_set(Msg, Key, Value, Mode, Opts) ->
+ ReqWithoutMode =
case Key of
<<"path">> ->
#{ <<"path">> => <<"set_path">>, <<"value">> => Value };
+ <<"/">> when is_map(Value) ->
+ % The value is a map and it is to be `set' at the root of the
+ % message. Subsequently, we call the device's `set' function
+ % with all of the keys found in the message, leading it to be
+ % merged into the message.
+ Value#{ <<"path">> => <<"set">> };
_ ->
#{ <<"path">> => <<"set">>, Key => Value }
end,
+ Req =
+ case Mode of
+ <<"deep">> -> ReqWithoutMode;
+ <<"explicit">> -> ReqWithoutMode#{ <<"set-mode">> => Mode }
+ end,
?event(
- ao_internal,
+ debug,
{
calling_device_set,
{msg, Msg},
{applying_set, Req}
- },
- Opts
+ }
),
- Res = hb_util:ok(
- resolve(
- Msg,
- Req,
+ Res =
+ hb_util:ok(
+ resolve(
+ Msg,
+ Req,
+ internal_opts(Opts)
+ ),
internal_opts(Opts)
),
- internal_opts(Opts)
- ),
?event(
ao_internal,
{device_set_result, Res},
@@ -1079,7 +1203,7 @@ find_exported_function(Msg, Dev, Key, MaxArity, Opts) when is_map(Dev) ->
find_exported_function(_Msg, _Mod, _Key, Arity, _Opts) when Arity < 0 ->
not_found;
find_exported_function(Msg, Mod, Key, Arity, Opts) when not is_atom(Key) ->
- try binary_to_existing_atom(normalize_key(Key), latin1) of
+ try hb_util:key_to_atom(Key, false) of
KeyAtom -> find_exported_function(Msg, Mod, KeyAtom, Arity, Opts)
catch _:_ -> not_found
end;
diff --git a/src/hb_ao_test_vectors.erl b/src/hb_ao_test_vectors.erl
index d739b3dec..fd62382a9 100644
--- a/src/hb_ao_test_vectors.erl
+++ b/src/hb_ao_test_vectors.erl
@@ -8,7 +8,7 @@
%% `rebar3 eunit --test hb_ao_test_vectors:run_test'
%% Comment/uncomment out as necessary.
run_test() ->
- hb_test_utils:run(start_as, normal, test_suite(), test_opts()).
+ hb_test_utils:run(step_hook, normal, test_suite(), test_opts()).
%% @doc Run each test in the file with each set of options. Start and reset
%% the store for each test.
@@ -73,7 +73,9 @@ test_suite() ->
{denormalized_device_key, "denormalized device key",
fun denormalized_device_key_test/1},
{list_transform, "list transform",
- fun list_transform_test/1}
+ fun list_transform_test/1},
+ {step_hook, "step hook",
+ fun step_hook_test/1}
].
test_opts() ->
@@ -263,8 +265,9 @@ resolve_binary_key_test(Opts) ->
{ok, <<"1">>},
hb_ao:resolve(
#{
- <<"Test-Header">> => <<"1">> },
- <<"Test-Header">>,
+ <<"Test-Header">> => <<"1">>
+ },
+ <<"Test-Header">>,
Opts
)
).
@@ -767,4 +770,50 @@ continue_as_test(Opts) ->
],
Opts
)
- ).
\ No newline at end of file
+ ).
+
+step_hook_test(InitOpts) ->
+ % Test that the step hook is called correctly. We do this by sending ourselves
+    % a message each time the hook is called. We also include a `reference' so
+    % that this test is uniquely identified and earlier or later tests cannot
+    % interfere with it.
+ Self = self(),
+ Ref = make_ref(),
+ Opts =
+ InitOpts#{
+ on =>
+ #{
+ <<"step">> =>
+ #{
+ <<"device">> =>
+ #{
+ <<"step">> =>
+ fun(_, Req, _) ->
+ ?event(ao_core, {step_hook, {self(), Ref}}),
+ Self ! {step, Ref},
+ {ok, Req}
+ end
+ }
+ }
+ }
+ },
+ Msg = #{
+ <<"a">> =>
+ #{
+ <<"b">> =>
+ #{
+ <<"c">> => <<"1">>
+ }
+ }
+ },
+ % Test that the response has completed and is correct.
+ ?assertMatch(
+ {ok, <<"1">>},
+ hb_ao:resolve(
+ Msg,
+ #{ <<"path">> => <<"a/b/c">> },
+ Opts
+ )
+ ),
+ % Test that the step hook was called.
+ ?assert(receive {step, Ref} -> true after 100 -> false end).
\ No newline at end of file
diff --git a/src/hb_cache.erl b/src/hb_cache.erl
index 6bc315537..6665343c4 100644
--- a/src/hb_cache.erl
+++ b/src/hb_cache.erl
@@ -33,7 +33,7 @@ list_numbered(Path, Opts) ->
[ to_integer(Name) || Name <- list(SlotDir, Opts) ].
%% @doc List all items under a given path.
-list(Path, Opts) when is_map(Opts)->
+list(Path, Opts) when is_map(Opts) and not is_map_key(<<"store-module">>, Opts) ->
case hb_opts:get(store, no_viable_store, Opts) of
no_viable_store -> [];
Store ->
diff --git a/src/hb_cache_render.erl b/src/hb_cache_render.erl
index 800d80115..43f5f99f9 100644
--- a/src/hb_cache_render.erl
+++ b/src/hb_cache_render.erl
@@ -4,6 +4,7 @@
% Preparing data for testing
-export([prepare_unsigned_data/0, prepare_signed_data/0,
prepare_deeply_nested_complex_message/0]).
+-export([cache_path_to_graph/3, get_graph_data/1]).
-include("include/hb.hrl").
%% @doc Render the given Key into svg
@@ -200,6 +201,100 @@ collect_output(Port, Acc) ->
{error, timeout}
end.
+%% @doc Get graph data for the Three.js visualization
+get_graph_data(Opts) ->
+ % Get the store from options
+ Store = hb_opts:get(store, no_viable_store, Opts),
+
+ % Try to generate graph using hb_cache_render
+ Graph = try
+ % Use hb_cache_render to build the graph
+ {ok, Keys} = hb_store:list(Store, "/"),
+ cache_path_to_graph(Store, #{}, Keys)
+ catch
+ Error:Reason:Stack ->
+ ?event({hyperbuddy_graph_error, Error, Reason, Stack}),
+ #{nodes => #{}, arcs => #{}, visited => #{}}
+ end,
+
+ % Extract nodes and links for the visualization
+ NodesMap = maps:get(nodes, Graph, #{}),
+ ArcsMap = maps:get(arcs, Graph, #{}),
+
+    % Limit to 500 nodes when the map has more than 50,000 entries.
+    % NOTE(review): the 50000 gate vs the 500 split looks inconsistent — confirm
+    % whether the intended threshold is 500.
+ NodesList =
+ case maps:size(NodesMap) > 50000 of
+ true ->
+ % Take a subset of nodes
+ {ReducedNodes, _} = lists:split(
+ 500,
+ maps:to_list(NodesMap)
+ ),
+ ReducedNodes;
+ false ->
+ maps:to_list(NodesMap)
+ end,
+
+ % Get node IDs for filtering links
+ NodeIds = [ID || {ID, _} <- NodesList],
+
+ % Convert to JSON format for web visualization
+ Nodes = [
+ #{
+ <<"id">> => ID,
+ <<"label">> => get_label(hb_util:bin(ID)),
+ <<"type">> => get_node_type(Color)
+ }
+ || {ID, {_, Color}} <- NodesList
+ ],
+
+ % Filter links to only include those between nodes we're showing
+ FilteredLinks = [
+ {From, To, Label}
+ || {From, To, Label} <- maps:keys(ArcsMap),
+ lists:member(From, NodeIds) andalso lists:member(To, NodeIds)
+ ],
+
+ Links = [
+ #{
+ <<"source">> => From,
+ <<"target">> => To,
+ <<"label">> => Label
+ }
+ || {From, To, Label} <- FilteredLinks
+ ],
+
+ % Return the JSON data
+ JsonData = hb_json:encode(#{
+ <<"nodes">> => Nodes,
+ <<"links">> => Links
+ }),
+
+ {ok, #{
+ <<"body">> => JsonData,
+ <<"content-type">> => <<"application/json">>
+ }}.
+
+%% @doc Convert node color from hb_cache_render to node type for visualization
+get_node_type(Color) ->
+ case Color of
+ "lightblue" -> <<"simple">>;
+ "lightcoral" -> <<"composite">>;
+ _ -> <<"unknown">>
+ end.
+
+%% @doc Extract a readable label from a path
+get_label(Path) ->
+ case binary:split(Path, <<"/">>, [global]) of
+ [] -> Path;
+ Parts ->
+ FilteredParts = [P || P <- Parts, P /= <<>>],
+ case FilteredParts of
+ [] -> Path;
+ _ -> lists:last(FilteredParts)
+ end
+ end.
+
% Test data preparation functions
prepare_unsigned_data() ->
Opts = #{
diff --git a/src/hb_event.erl b/src/hb_event.erl
index ec9c07c85..f7cf99283 100644
--- a/src/hb_event.erl
+++ b/src/hb_event.erl
@@ -16,7 +16,7 @@ log(Topic, X, Mod, undefined, Line, Opts) -> log(Topic, X, Mod, "", Line, Opts);
log(Topic, X, Mod, Func, undefined, Opts) -> log(Topic, X, Mod, Func, "", Opts);
log(Topic, X, ModAtom, Func, Line, Opts) when is_atom(ModAtom) ->
% Increment by message adding Topic as label
- increment(Topic, X, Opts),
+ try increment(Topic, X, Opts) catch _:_ -> ignore_error end,
% Check if the module has the `hb_debug' attribute set to `print'.
case lists:member({hb_debug, [print]}, ModAtom:module_info(attributes)) of
true -> hb_util:debug_print(X, atom_to_list(ModAtom), Func, Line);
@@ -24,38 +24,43 @@ log(Topic, X, ModAtom, Func, Line, Opts) when is_atom(ModAtom) ->
% Check if the module has the `hb_debug' attribute set to `no_print'.
case lists:keyfind(hb_debug, 1, ModAtom:module_info(attributes)) of
{hb_debug, [no_print]} -> X;
- _ -> log(Topic, X, atom_to_list(ModAtom), Func, Line, Opts)
+ _ -> log(Topic, X, hb_util:bin(ModAtom), Func, Line, Opts)
end
end;
-log(Topic, X, ModStr, Func, Line, Opts) ->
+log(Topic, X, Mod, Func, Line, Opts) ->
% Check if the debug_print option has the topic in it if set.
- case hb_opts:get(debug_print, false, Opts) of
- ModList when is_list(ModList) ->
- case lists:member(ModStr, ModList)
- orelse lists:member(atom_to_list(Topic), ModList)
+ case Printable = hb_opts:get(debug_print, false, Opts) of
+ EventList when is_list(EventList) ->
+ case lists:member(Mod, EventList)
+ orelse lists:member(hb_util:bin(Topic), EventList)
of
- true -> hb_util:debug_print(X, ModStr, Func, Line);
+ true -> hb_util:debug_print(X, Mod, Func, Line);
false -> X
end;
- true -> hb_util:debug_print(X, ModStr, Func, Line);
+ true -> hb_util:debug_print(X, Mod, Func, Line);
false -> X
end,
- handle_tracer(Topic, X, Opts).
+ handle_tracer(Topic, X, Opts),
+ % Return the logged value to the caller. This allows callers to insert
+ % `?event(...)' macros into the flow of other executions, without having to
+ % break functional style.
+ X.
handle_tracer(Topic, X, Opts) ->
AllowedTopics = [http, ao_core, ao_result],
case lists:member(Topic, AllowedTopics) of
true ->
- case maps:get(trace, Opts, undefined) of
+ case hb_opts:get(trace, undefined, Opts) of
undefined ->
case tuple_to_list(X) of
[_ | Rest] ->
try
Map = maps:from_list(Rest),
- TopicOpts = maps:get(opts, Map, #{}),
- case maps:get(trace, TopicOpts, undefined) of
+ TopicOpts = hb_opts:get(opts, #{}, Map),
+ case hb_opts:get(trace, undefined, TopicOpts) of
undefined -> ok;
- TracePID -> hb_tracer:record_step(TracePID, {Topic, X})
+ TracePID ->
+ hb_tracer:record_step(TracePID, {Topic, X})
end
catch
_:_ -> ok
@@ -140,4 +145,5 @@ parse_name(Name) when is_atom(Name) ->
parse_name(Name) when is_binary(Name) ->
Name;
parse_name(Name) when is_list(Name) ->
- iolist_to_binary(Name).
+ iolist_to_binary(Name);
+parse_name(_) -> no_event_name.
\ No newline at end of file
diff --git a/src/hb_examples.erl b/src/hb_examples.erl
index 933021fcb..93fd2680f 100644
--- a/src/hb_examples.erl
+++ b/src/hb_examples.erl
@@ -21,15 +21,17 @@ relay_with_payments_test() ->
ProcessorMsg =
#{
<<"device">> => <<"p4@1.0">>,
- <<"ledger_device">> => <<"simple-pay@1.0">>,
- <<"pricing_device">> => <<"simple-pay@1.0">>
+ <<"ledger-device">> => <<"simple-pay@1.0">>,
+ <<"pricing-device">> => <<"simple-pay@1.0">>
},
HostNode =
hb_http_server:start_node(
#{
operator => ar_wallet:to_address(HostWallet),
- preprocessor => ProcessorMsg,
- postprocessor => ProcessorMsg
+ on => #{
+ <<"request">> => ProcessorMsg,
+ <<"response">> => ProcessorMsg
+ }
}
),
% Create a message for the client to relay.
@@ -72,8 +74,8 @@ paid_wasm_test() ->
ProcessorMsg =
#{
<<"device">> => <<"p4@1.0">>,
- <<"ledger_device">> => <<"simple-pay@1.0">>,
- <<"pricing_device">> => <<"simple-pay@1.0">>
+ <<"ledger-device">> => <<"simple-pay@1.0">>,
+ <<"pricing-device">> => <<"simple-pay@1.0">>
},
HostNode =
hb_http_server:start_node(
@@ -81,8 +83,10 @@ paid_wasm_test() ->
simple_pay_ledger => #{ ClientAddress => 100 },
simple_pay_price => 10,
operator => ar_wallet:to_address(HostWallet),
- preprocessor => ProcessorMsg,
- postprocessor => ProcessorMsg
+ on => #{
+ <<"request">> => ProcessorMsg,
+ <<"response">> => ProcessorMsg
+ }
}
),
% Read the WASM file from disk, post it to the host and execute it.
@@ -110,7 +114,7 @@ paid_wasm_test() ->
ClientWallet
),
{ok, Res2} = hb_http:get(HostNode, ClientMessage2, #{}),
- ?assertMatch(40, Res2).
+ ?assertMatch(60, Res2).
create_schedule_aos2_test_disabled() ->
% The legacy process format, according to the ao.tn.1 spec:
diff --git a/src/hb_gateway_client.erl b/src/hb_gateway_client.erl
index 354d3abde..8a8660bbc 100644
--- a/src/hb_gateway_client.erl
+++ b/src/hb_gateway_client.erl
@@ -33,7 +33,26 @@
%% ar: String!
%% }
read(ID, Opts) ->
- Query =
+ Query = case maps:is_key(<<"subindex">>, Opts) of
+ true ->
+ Tags = subindex_to_tags(maps:get(<<"subindex">>, Opts)),
+ #{
+ <<"query">> =>
+ <<
+ "query($transactionIds: [ID!]!) { ",
+ "transactions(ids: $transactionIds,",
+ "tags: ", (Tags)/binary , ",",
+ "first: 1){ ",
+ "edges { ", (item_spec())/binary , " } ",
+ "} ",
+ "} "
+ >>,
+ <<"variables">> =>
+ #{
+ <<"transactionIds">> => [hb_util:human_id(ID)]
+ }
+ };
+ false ->
#{
<<"query">> =>
<<
@@ -47,7 +66,8 @@ read(ID, Opts) ->
#{
<<"transactionIds">> => [hb_util:human_id(ID)]
}
- },
+ }
+ end,
case query(Query, Opts) of
{error, Reason} -> {error, Reason};
{ok, GqlMsg} ->
@@ -176,13 +196,19 @@ result_to_message(ExpectedID, Item, Opts) ->
?event(gateway, {data, {id, ExpectedID}, {data, Data}, {item, Item}}, Opts),
% Convert the response to an ANS-104 message.
Tags = hb_ao:get(<<"tags">>, Item, GQLOpts),
+ Signature = hb_util:decode(hb_ao:get(<<"signature">>, Item, GQLOpts)),
+ SignatureType = case byte_size(Signature) of
+ 65 -> {ecdsa, 256};
+ 512 -> {rsa, 65537};
+ _ -> unsupported_tx_signature_type
+ end,
TX =
#tx {
format = ans104,
id = hb_util:decode(ExpectedID),
last_tx = normalize_null(hb_ao:get(<<"anchor">>, Item, GQLOpts)),
- signature =
- hb_util:decode(hb_ao:get(<<"signature">>, Item, GQLOpts)),
+ signature = Signature,
+ signature_type = SignatureType,
target =
decode_or_null(
hb_ao:get_first(
@@ -267,6 +293,28 @@ decode_or_null(Bin) when is_binary(Bin) ->
decode_or_null(_) ->
<<>>.
+%% @doc Takes a list of messages with `name' and `value' fields, and formats
+%% them as a GraphQL `tags' argument.
+subindex_to_tags(Subindex) ->
+ Formatted =
+ lists:map(
+ fun(Spec) ->
+ io_lib:format(
+ "{ name: \"~s\", values: [\"~s\"]}",
+ [
+ hb_ao:get(<<"name">>, Spec),
+ hb_ao:get(<<"value">>, Spec)
+ ]
+ )
+ end,
+ hb_util:message_to_ordered_list(Subindex)
+ ),
+ ListInner =
+ hb_util:bin(
+ string:join([lists:flatten(E) || E <- Formatted], ", ")
+ ),
+ <<"[", ListInner/binary, "]">>.
+
%%% Tests
ans104_no_data_item_test() ->
% Start a random node so that all of the services come up.
@@ -286,3 +334,27 @@ scheduler_location_test() ->
?event(gateway, {scheduler_location, {explicit, hb_ao:get(<<"url">>, Res, #{})}}),
% Will need updating when Legacynet terminates.
?assertEqual(<<"https://su-router.ao-testnet.xyz">>, hb_ao:get(<<"url">>, Res, #{})).
+
+%% @doc Test l1 message from graphql
+l1_transaction_test() ->
+ _Node = hb_http_server:start_node(#{}),
+ {ok, Res} = read(<<"uJBApOt4ma3pTfY6Z4xmknz5vAasup4KcGX7FJ0Of8w">>, #{}),
+ ?event(gateway, {l1_transaction, Res}),
+ Data = maps:get(<<"data">>, Res),
+ ?assertEqual(<<"Hello World">>, Data).
+
+%% @doc Test l2 message from graphql
+l2_dataitem_test() ->
+ _Node = hb_http_server:start_node(#{}),
+ {ok, Res} = read(<<"oyo3_hCczcU7uYhfByFZ3h0ELfeMMzNacT-KpRoJK6g">>, #{}),
+ ?event(gateway, {l2_dataitem, Res}),
+ Data = maps:get(<<"data">>, Res),
+ ?assertEqual(<<"Hello World">>, Data).
+
+%% @doc Test optimistic index
+ao_dataitem_test() ->
+ _Node = hb_http_server:start_node(#{}),
+ {ok, Res} = read(<<"oyo3_hCczcU7uYhfByFZ3h0ELfeMMzNacT-KpRoJK6g">>, #{ }),
+ ?event(gateway, {l2_dataitem, Res}),
+ Data = maps:get(<<"data">>, Res),
+ ?assertEqual(<<"Hello World">>, Data).
\ No newline at end of file
diff --git a/src/hb_http.erl b/src/hb_http.erl
index 42cbda2c0..a6f1edf1f 100644
--- a/src/hb_http.erl
+++ b/src/hb_http.erl
@@ -137,7 +137,19 @@ request(Method, Peer, Path, RawMessage, Opts) ->
Msg = http_response_to_httpsig(Status, NormHeaderMap, Body, Opts),
?event(http_outbound, {result_is_single_key, {key, Key}, {msg, Msg}}, Opts),
case maps:get(Key, Msg, undefined) of
- undefined -> {failure, result_key_not_found};
+ undefined ->
+ {failure,
+ <<
+ "Result key '",
+ Key/binary,
+ "' not found in response from '",
+ Peer/binary,
+ "' for path '",
+ Path/binary,
+ "': ",
+ Body/binary
+ >>
+ };
Value -> {BaseStatus, Value}
end;
undefined ->
@@ -264,6 +276,11 @@ prepare_request(Format, Method, Peer, Path, RawMessage, Opts) ->
ar_bundles:serialize(
hb_message:convert(Message, <<"ans104@1.0">>, Opts)
)
+ };
+ _ ->
+ ReqBase#{
+ headers => maps:without([<<"body">>], Message),
+ body => maps:get(<<"body">>, Message, <<>>)
}
end.
@@ -661,6 +678,27 @@ req_to_tabm_singleton(Req, Body, Opts) ->
maybe_add_unsigned(Req, ANS104, Opts);
false ->
throw({invalid_ans104_signature, Item})
+ end;
+ Codec ->
+ % Assume that the codec stores the encoded message in the `body' field.
+ Decoded =
+ hb_message:convert(
+ Body,
+ <<"structured@1.0">>,
+ Codec,
+ Opts
+ ),
+ ?event(debug,
+ {verifying_encoded_message,
+ {body, {string, Body}},
+ {decoded, Decoded}
+ }
+ ),
+ case hb_message:verify(Decoded, all) of
+ true ->
+ maybe_add_unsigned(Req, Decoded, Opts);
+ false ->
+ throw({invalid_signature, Decoded})
end
end.
@@ -845,4 +883,72 @@ ans104_wasm_test() ->
?event({msg, Msg}),
{ok, Res} = post(URL, Msg, #{}),
?event({res, Res}),
- ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, #{})).
\ No newline at end of file
+ ?assertEqual(6.0, hb_ao:get(<<"output/1">>, Res, #{})).
+
+send_large_signed_request_test() ->
+ % Note: If the signature scheme ever changes, we will need to do
+ % `hb_message:commit(hb_message:uncommitted(Req), #{})' to get a freshly
+ % signed request.
+ {ok, [Req]} = file:consult(<<"test/large-message.eterm">>),
+ % Get the short trace length from the node message in the large, stored
+ % request.
+ ?event({request_message, Req}),
+ ?assertMatch(
+ {ok, 5},
+ post(
+ hb_http_server:start_node(),
+ <<"/node-message/short_trace_len">>,
+ Req,
+ #{ http_client => httpc }
+ )
+ ).
+
+send_encoded_node_message_test(Config, Codec) ->
+ NodeURL = hb_http_server:start_node(
+ #{
+ priv_wallet => ar_wallet:new(),
+ operator => <<"unclaimed">>
+ }
+ ),
+ {ok, Res} =
+ post(
+ NodeURL,
+ <<"/~meta@1.0/info">>,
+ #{
+ <<"codec-device">> => Codec,
+ <<"body">> => Config
+ },
+ #{}
+ ),
+ ?event(debug, {res, Res}),
+ ?assertEqual(
+ {ok, <<"b">>},
+ hb_http:get(
+ NodeURL,
+ <<"/~meta@1.0/info/test_optionb">>,
+ #{}
+ )
+ ),
+ ?assertEqual(
+ {ok, <<"c">>},
+ hb_http:get(
+ NodeURL,
+ <<"/~meta@1.0/info/test_deep/c">>,
+ #{}
+ )
+ ).
+
+send_flat_encoded_node_message_test() ->
+ send_encoded_node_message_test(
+ <<"test_option: a\ntest_optionb: b\ntest_deep/c: c">>,
+ <<"flat@1.0">>
+ ).
+
+send_json_encoded_node_message_test() ->
+ send_encoded_node_message_test(
+ <<
+ "{\"test_option\": \"a\", \"test_optionb\": \"b\", \"test_deep\": "
+ "{\"c\": \"c\"}}"
+ >>,
+ <<"json@1.0">>
+ ).
\ No newline at end of file
diff --git a/src/hb_http_client.erl b/src/hb_http_client.erl
index b79b35724..eda7482ed 100644
--- a/src/hb_http_client.erl
+++ b/src/hb_http_client.erl
@@ -64,8 +64,10 @@ httpc_req(Args, _, Opts) ->
end,
?event(http, {httpc_req, Method, URL, Request}),
HTTPCOpts = [{full_result, true}, {body_format, binary}],
+ StartTime = os:system_time(millisecond),
case httpc:request(Method, Request, [], HTTPCOpts) of
{ok, {{_, Status, _}, RawRespHeaders, RespBody}} ->
+ EndTime = os:system_time(millisecond),
RespHeaders =
[
{list_to_binary(Key), list_to_binary(Value)}
@@ -73,6 +75,14 @@ httpc_req(Args, _, Opts) ->
{Key, Value} <- RawRespHeaders
],
?event(http, {httpc_resp, Status, RespHeaders, RespBody}),
+ record_duration(#{
+ <<"request-method">> => method_to_bin(Method),
+ <<"request-path">> => hb_util:bin(Path),
+ <<"status-class">> => get_status_class(Status),
+ <<"duration">> => EndTime - StartTime
+ },
+ Opts
+ ),
{ok, Status, RespHeaders, RespBody};
{error, Reason} ->
?event(http, {httpc_error, Reason}),
@@ -80,7 +90,7 @@ httpc_req(Args, _, Opts) ->
end.
gun_req(Args, ReestablishedConnection, Opts) ->
- StartTime = erlang:monotonic_time(),
+ StartTime = os:system_time(millisecond),
#{ peer := Peer, path := Path, method := Method } = Args,
Response =
case catch gen_server:call(?MODULE, {get_connection, Args, Opts}, infinity) of
@@ -103,23 +113,96 @@ gun_req(Args, ReestablishedConnection, Opts) ->
Error ->
Error
end,
- EndTime = erlang:monotonic_time(),
+ EndTime = os:system_time(millisecond),
%% Only log the metric for the top-level call to req/2 - not the recursive call
%% that happens when the connection is reestablished.
case ReestablishedConnection of
true ->
ok;
false ->
- case application:get_application(prometheus) of
- undefined -> ok;
- _ -> prometheus_histogram:observe(http_request_duration_seconds, [
- method_to_list(Method),
- Path,
- get_status_class(Response)
- ], EndTime - StartTime)
- end
+ record_duration(#{
+ <<"request-method">> => method_to_bin(Method),
+ <<"request-path">> => hb_util:bin(Path),
+ <<"status-class">> => get_status_class(Response),
+ <<"duration">> => EndTime - StartTime
+ },
+ Opts
+ )
end,
Response.
+
+%% @doc Record the duration of the request in an async process. We write the
+%% data to prometheus if the application is enabled, as well as invoking the
+%% `http_monitor' if appropriate.
+record_duration(Details, Opts) ->
+ spawn(
+ fun() ->
+            % First, write to prometheus if it is enabled. Prometheus works
+            % only with strings as lists, so we encode the data before passing
+            % it.
+ GetFormat = fun(Key) -> hb_util:list(maps:get(Key, Details)) end,
+ case application:get_application(prometheus) of
+ undefined -> ok;
+ _ ->
+ prometheus_histogram:observe(
+ http_request_duration_seconds,
+ lists:map(
+ GetFormat,
+ [
+ <<"request-method">>,
+ <<"request-path">>,
+ <<"status-class">>
+ ]
+ ),
+ maps:get(<<"duration">>, Details)
+ )
+ end,
+ maybe_invoke_monitor(
+ Details#{ <<"path">> => <<"duration">> },
+ Opts
+ )
+ end
+ ).
+
+%% @doc Invoke the HTTP monitor message with AO-Core, if it is set in the
+%% node message key. We invoke the given message with the `body' set to a signed
+%% version of the details. This allows node operators to configure their machine
+%% to record duration statistics into customized data stores, computations, or
+%% processes etc. Additionally, we include the `http_reference' value, if set in
+%% the given `opts'.
+%%
+%% We use `hb_ao:get' rather than `hb_opts:get', as settings configured
+%% by the `~router@1.0' route `opts' key are unable to generate atoms.
+maybe_invoke_monitor(Details, Opts) ->
+ case hb_ao:get(<<"http_monitor">>, Opts, Opts) of
+ not_found -> ok;
+ Monitor ->
+ % We have a monitor message. Place the `details' into the body, set
+ % the `method' to "POST", add the `http_reference' (if applicable)
+ % and sign the request. We use the node message's wallet as the
+ % source of the key.
+ MaybeWithReference =
+ case hb_ao:get(<<"http_reference">>, Opts, Opts) of
+ not_found -> Details;
+ Ref -> Details#{ <<"reference">> => Ref }
+ end,
+ Req =
+ Monitor#{
+ <<"body">> =>
+ hb_message:commit(
+ MaybeWithReference#{
+ <<"method">> => <<"POST">>
+ },
+ Opts
+ )
+ },
+ % Use the singleton parse to generate the message sequence to
+ % execute.
+ ReqMsgs = hb_singleton:from(Req),
+ Res = hb_ao:resolve_many(ReqMsgs, Opts),
+ ?event(http_monitor, {resolved_monitor, Res})
+ end.
+
%%% ==================================================================
%%% gen_server callbacks.
%%% ==================================================================
@@ -448,33 +531,33 @@ reply_error([PendingRequest | PendingRequests], Reason) ->
record_response_status(Method, Path, Response) ->
inc_prometheus_counter(gun_requests_total,
[
- method_to_list(Method),
+ hb_util:list(method_to_bin(Method)),
Path,
- get_status_class(Response)
+ hb_util:list(get_status_class(Response))
],
1
).
-method_to_list(get) ->
- "GET";
-method_to_list(post) ->
- "POST";
-method_to_list(put) ->
- "PUT";
-method_to_list(head) ->
- "HEAD";
-method_to_list(delete) ->
- "DELETE";
-method_to_list(connect) ->
- "CONNECT";
-method_to_list(options) ->
- "OPTIONS";
-method_to_list(trace) ->
- "TRACE";
-method_to_list(patch) ->
- "PATCH";
-method_to_list(_) ->
- "unknown".
+method_to_bin(get) ->
+ <<"GET">>;
+method_to_bin(post) ->
+ <<"POST">>;
+method_to_bin(put) ->
+ <<"PUT">>;
+method_to_bin(head) ->
+ <<"HEAD">>;
+method_to_bin(delete) ->
+ <<"DELETE">>;
+method_to_bin(connect) ->
+ <<"CONNECT">>;
+method_to_bin(options) ->
+ <<"OPTIONS">>;
+method_to_bin(trace) ->
+ <<"TRACE">>;
+method_to_bin(patch) ->
+ <<"PATCH">>;
+method_to_bin(_) ->
+ <<"unknown">>.
request(PID, Args, Opts) ->
Timer =
@@ -590,37 +673,37 @@ upload_metric(_) ->
get_status_class({ok, {{Status, _}, _, _, _, _}}) ->
get_status_class(Status);
get_status_class({error, connection_closed}) ->
- "connection_closed";
+ <<"connection_closed">>;
get_status_class({error, connect_timeout}) ->
- "connect_timeout";
+ <<"connect_timeout">>;
get_status_class({error, timeout}) ->
- "timeout";
+ <<"timeout">>;
get_status_class({error,{shutdown,timeout}}) ->
- "shutdown_timeout";
+ <<"shutdown_timeout">>;
get_status_class({error, econnrefused}) ->
- "econnrefused";
+ <<"econnrefused">>;
get_status_class({error, {shutdown,econnrefused}}) ->
- "shutdown_econnrefused";
+ <<"shutdown_econnrefused">>;
get_status_class({error, {shutdown,ehostunreach}}) ->
- "shutdown_ehostunreach";
+ <<"shutdown_ehostunreach">>;
get_status_class({error, {shutdown,normal}}) ->
- "shutdown_normal";
+ <<"shutdown_normal">>;
get_status_class({error, {closed,_}}) ->
- "closed";
+ <<"closed">>;
get_status_class({error, noproc}) ->
- "noproc";
+ <<"noproc">>;
get_status_class(208) ->
- "already_processed";
+ <<"already_processed">>;
get_status_class(Data) when is_integer(Data), Data > 0 ->
- prometheus_http:status_class(Data);
+ hb_util:bin(prometheus_http:status_class(Data));
get_status_class(Data) when is_binary(Data) ->
case catch binary_to_integer(Data) of
{_, _} ->
- "unknown";
+ <<"unknown">>;
Status ->
get_status_class(Status)
end;
get_status_class(Data) when is_atom(Data) ->
- atom_to_list(Data);
+ atom_to_binary(Data);
get_status_class(_) ->
- "unknown".
\ No newline at end of file
+ <<"unknown">>.
\ No newline at end of file
diff --git a/src/hb_http_server.erl b/src/hb_http_server.erl
index b9fb36606..51f26979b 100644
--- a/src/hb_http_server.erl
+++ b/src/hb_http_server.erl
@@ -10,7 +10,7 @@
%%% such that changing it on start of the router server allows for
%%% the execution parameters of all downstream requests to be controlled.
-module(hb_http_server).
--export([start/0, start/1, allowed_methods/2, init/2, set_opts/1, get_opts/1]).
+-export([start/0, start/1, allowed_methods/2, init/2, set_opts/1, set_opts/2, get_opts/1]).
-export([start_node/0, start_node/1, set_default_opts/1]).
-include_lib("eunit/include/eunit.hrl").
-include("include/hb.hrl").
@@ -86,7 +86,6 @@ start() ->
FormattedConfig
]
),
-
start(
Loaded#{
priv_wallet => PrivWallet,
@@ -111,12 +110,34 @@ start(Opts) ->
{ok, Listener, _Port} = new_server(BaseOpts),
{ok, Listener}.
+%% @doc Trigger the creation of a new HTTP server node. Accepts a `NodeMsg'
+%% message, which is used to configure the server. This function executes the
+%% `start' hook on the node, giving it the opportunity to modify the `NodeMsg'
+%% before it is used to configure the server. The `start' hook receives and
+%% returns the node message in the `body' key.
new_server(RawNodeMsg) ->
- NodeMsg =
+ RawNodeMsgWithDefaults =
maps:merge(
hb_opts:default_message(),
RawNodeMsg#{ only => local }
),
+ HookMsg = #{ <<"body">> => RawNodeMsgWithDefaults },
+ NodeMsg =
+ case dev_hook:on(<<"start">>, HookMsg, RawNodeMsgWithDefaults) of
+ {ok, #{ <<"body">> := NodeMsgAfterHook }} -> NodeMsgAfterHook;
+ Unexpected ->
+ ?event(http,
+ {failed_to_start_server,
+ {unexpected_hook_result, Unexpected}
+ }
+ ),
+ throw(
+ {failed_to_start_server,
+ {unexpected_hook_result, Unexpected}
+ }
+ )
+ end,
+ % Put server ID into node message so it's possible to update current server
hb_http:start(),
ServerID =
hb_util:human_id(
@@ -291,8 +312,8 @@ handle_request(RawReq, Body, ServerID) ->
StartTime = os:system_time(millisecond),
Req = RawReq#{ start_time => StartTime },
NodeMsg = get_opts(#{ http_server => ServerID }),
- case cowboy_req:path(RawReq) of
- <<"/">> ->
+ case {cowboy_req:path(RawReq), cowboy_req:qs(RawReq)} of
+ {<<"/">>, <<>>} ->
% If the request is for the root path, serve a redirect to the default
% request of the node.
cowboy_req:reply(
@@ -317,54 +338,50 @@ handle_request(RawReq, Body, ServerID) ->
% The request is of normal AO-Core form, so we parse it and invoke
% the meta@1.0 device to handle it.
?event(http, {http_inbound, {cowboy_req, Req}, {body, {string, Body}}}),
- TracePID = hb_tracer:start_trace(),
+ TracePID = hb_tracer:start_trace(),
% Parse the HTTP request into HyerBEAM's message format.
- try
- ReqSingleton = hb_http:req_to_tabm_singleton(Req, Body, NodeMsg),
- CommitmentCodec = hb_http:accept_to_codec(ReqSingleton, NodeMsg),
- ?event(http,
+ try
+ ReqSingleton = hb_http:req_to_tabm_singleton(Req, Body, NodeMsg),
+ CommitmentCodec = hb_http:accept_to_codec(ReqSingleton, NodeMsg),
+ ?event(http,
{parsed_singleton,
{req_singleton, ReqSingleton},
{accept_codec, CommitmentCodec}},
#{trace => TracePID}
),
- % hb_tracer:record_step(TracePID, request_parsing),
- % Invoke the meta@1.0 device to handle the request.
- {ok, Res} =
- dev_meta:handle(
- NodeMsg#{
+ % hb_tracer:record_step(TracePID, request_parsing),
+ % Invoke the meta@1.0 device to handle the request.
+ {ok, Res} =
+ dev_meta:handle(
+ NodeMsg#{
commitment_device => CommitmentCodec,
trace => TracePID
},
- ReqSingleton
- ),
- hb_http:reply(Req, ReqSingleton, Res, NodeMsg)
- catch
- throw:_ ->
- Trace = hb_tracer:get_trace(TracePID),
- TraceString = hb_tracer:format_error_trace(Trace),
- hb_http:reply(
- Req,
- #{},
- #{
- <<"status">> => 500,
- <<"body">> => list_to_binary(TraceString)
- },
- NodeMsg
- );
- error:_ ->
- Trace = hb_tracer:get_trace(TracePID),
- TraceString = hb_tracer:format_error_trace(Trace),
- hb_http:reply(
+ ReqSingleton
+ ),
+ hb_http:reply(Req, ReqSingleton, Res, NodeMsg)
+ catch
+ Type:Details:Stacktrace ->
+ Trace = hb_tracer:get_trace(TracePID),
+ TraceString = hb_tracer:format_error_trace(Trace),
+ ?event(
+ http_error,
+ {http_error,
+ {type, Type},
+ {details, Details},
+ {stacktrace, Stacktrace}
+ }
+ ),
+ hb_http:reply(
Req,
#{},
#{
<<"status">> => 500,
- <<"body">> => list_to_binary(TraceString)
+ <<"body">> => TraceString
},
NodeMsg
)
- end
+ end
end.
handle_logs_request(Req, QS, _NodeMsg) ->
@@ -485,8 +502,10 @@ allowed_methods(Req, State) ->
State
}.
-%% @doc Update the `Opts' map that the HTTP server uses for all future
-%% requests.
+%% @doc Merges the provided `Opts' with uncommitted values from `Request',
+%% preserves the http_server value, and updates node_history by prepending
+%% the `Request'. If a server reference exists, updates the Cowboy environment
+%% variable 'node_msg' with the resulting options map.
set_opts(Opts) ->
case hb_opts:get(http_server, no_server_ref, Opts) of
no_server_ref ->
@@ -494,6 +513,20 @@ set_opts(Opts) ->
ServerRef ->
ok = cowboy:set_env(ServerRef, node_msg, Opts)
end.
+set_opts(Request, Opts) ->
+ MergedOpts =
+ maps:merge(
+ Opts,
+ hb_opts:mimic_default_types(
+ hb_message:uncommitted(Request),
+ new_atoms
+ )
+ ),
+ FinalOpts = MergedOpts#{
+ http_server => hb_opts:get(http_server, no_server, Opts),
+ node_history => [Request | hb_opts:get(node_history, [], Opts)]
+ },
+ {set_opts(FinalOpts), FinalOpts}.
get_opts(NodeMsg) ->
ServerRef = hb_opts:get(http_server, no_server_ref, NodeMsg),
@@ -557,5 +590,34 @@ start_node(Opts) ->
hb_sup:start_link(Opts),
ServerOpts = set_default_opts(Opts),
{ok, _Listener, Port} = new_server(ServerOpts),
- Host = hb_opts:get(host, <<"localhost">>, ServerOpts),
- <<"http://", Host/binary, ":", (integer_to_binary(Port))/binary, "/">>.
\ No newline at end of file
+ <<"http://localhost:", (integer_to_binary(Port))/binary, "/">>.
+
+%%% Tests
+%%% The following tests only cover the HTTP server initialization process. For
+%%% tests of HTTP server requests/responses, see `hb_http.erl'.
+
+%% @doc Ensure that the `start' hook can be used to modify the node options. We
+%% do this by creating a message with a device that has a `start' key. This
+%% key takes the message's body (the anticipated node options) and returns a
+%% modified version of that body, which will be used to configure the node. We
+%% then check that the node options were modified as we expected.
+set_node_opts_test() ->
+ Node =
+ start_node(#{
+ on => #{
+ <<"start">> => #{
+ <<"device">> =>
+ #{
+ <<"start">> =>
+ fun(_, #{ <<"body">> := NodeMsg }, _) ->
+ {ok, #{
+ <<"body">> =>
+ NodeMsg#{ <<"test-success">> => true }
+ }}
+ end
+ }
+ }
+ }
+ }),
+ {ok, LiveOpts} = hb_http:get(Node, <<"/~meta@1.0/info">>, #{}),
+ ?assert(hb_ao:get(<<"test-success">>, LiveOpts, false, #{})).
diff --git a/src/hb_keccak.erl b/src/hb_keccak.erl
new file mode 100644
index 000000000..8d4048c1f
--- /dev/null
+++ b/src/hb_keccak.erl
@@ -0,0 +1,86 @@
+-module(hb_keccak).
+-export([sha3_256/1]).
+-export([keccak_256/1]).
+-export([key_to_ethereum_address/1]).
+-include_lib("eunit/include/eunit.hrl").
+
+-on_load(init/0).
+
+-define(APPNAME, keccak).
+-define(LIBNAME, keccak_nif).
+
+%% NIF Initialization
+init() ->
+ SoName = filename:join([code:priv_dir(hb), "hb_keccak"]),
+ erlang:load_nif(SoName, 0).
+
+sha3_256(_Bin) ->
+ erlang:nif_error(not_loaded).
+
+keccak_256(_Bin) ->
+ erlang:nif_error(not_loaded).
+
+to_hex(Bin) when is_binary(Bin) ->
+ binary:encode_hex(Bin).
+
+key_to_ethereum_address(Key) when is_binary(Key) ->
+ <<_Prefix: 1/binary, NoCompressionByte/binary>> = Key,
+ Prefix = hb_util:to_hex(hb_keccak:keccak_256(NoCompressionByte)),
+ Last40 = binary:part(Prefix, byte_size(Prefix) - 40, 40),
+
+ Hash = hb_keccak:keccak_256(Last40),
+ HashHex = hb_util:to_hex(Hash),
+
+ ChecksumAddress = hash_to_checksum_address(Last40, HashHex),
+ ChecksumAddress.
+
+hash_to_checksum_address(Last40, Hash) when
+ is_binary(Last40),
+ is_binary(Hash),
+ byte_size(Last40) =:= 40 ->
+
+ Checksummed = lists:zip(binary:bin_to_list(Last40), binary:bin_to_list(binary:part(Hash, 0, 40))),
+ Formatted = lists:map(fun({Char, H}) ->
+ case H >= $8 of
+ true -> string:to_upper([Char]);
+ false -> [Char]
+ end
+ end, Checksummed),
+ <<"0x", (list_to_binary(lists:append(Formatted)))/binary>>.
+
+%% Test functions
+keccak_256_test() ->
+ Input = <<"testing">>,
+ Expected = <<"5F16F4C7F149AC4F9510D9CF8CF384038AD348B3BCDC01915F95DE12DF9D1B02">>,
+ Actual = to_hex(hb_keccak:keccak_256(Input)),
+ ?assertEqual(Expected, Actual).
+
+keccak_256_key_test() ->
+ Input = <<"BAoixXds4JhW42pzlLb83B3-I21lX78j3Q7cPaoFiCjMgjYwYLDj-xL132J147ifZFwRBmzmEMC8eYAXzbRNWuA">>,
+ BinaryInput = hb_util:decode(Input),
+ <<_Prefix: 1/binary, NoCompressionByte/binary>> = BinaryInput,
+
+ Prefix = hb_keccak:keccak_256(NoCompressionByte),
+ PrefixHex = hb_util:to_hex(Prefix),
+ ?assertEqual(PrefixHex, <<"12f9afe6abd38444cab38e8cb7b4360f7f6298de2e7a11009270f35f189bd77e">>),
+
+ Last40 = binary:part(PrefixHex, byte_size(PrefixHex) - 40, 40),
+ ?assertEqual(Last40, <<"b7b4360f7f6298de2e7a11009270f35f189bd77e">>),
+
+ Hash = hb_keccak:keccak_256(Last40),
+ HashHex = hb_util:to_hex(Hash),
+
+ ChecksumAddress = hash_to_checksum_address(Last40, HashHex),
+ ?assertEqual(ChecksumAddress, <<"0xb7B4360F7F6298dE2e7a11009270F35F189Bd77E">>).
+
+keccak_256_key_to_address_test() ->
+ Input = <<"BAoixXds4JhW42pzlLb83B3-I21lX78j3Q7cPaoFiCjMgjYwYLDj-xL132J147ifZFwRBmzmEMC8eYAXzbRNWuA">>,
+ ChecksumAddress = key_to_ethereum_address(hb_util:decode(Input)),
+ ?assertEqual(ChecksumAddress, <<"0xb7B4360F7F6298dE2e7a11009270F35F189Bd77E">>).
+
+sha3_256_test() ->
+ %% "abc" => known SHA3-256 hash from NIST
+ Input = <<"testing">>,
+ Expected = <<"7F5979FB78F082E8B1C676635DB8795C4AC6FABA03525FB708CB5FD68FD40C5E">>,
+ Actual = to_hex(hb_keccak:sha3_256(Input)),
+ ?assertEqual(Expected, Actual).
diff --git a/src/hb_message.erl b/src/hb_message.erl
index 71d54ae41..71f6f2052 100644
--- a/src/hb_message.erl
+++ b/src/hb_message.erl
@@ -787,6 +787,7 @@ match_test(Codec) ->
Decoded = convert(Encoded, <<"structured@1.0">>, Codec, #{}),
?assert(match(Msg, Decoded)).
+binary_to_binary_test(<<"flat@1.0">>) -> ok;
binary_to_binary_test(Codec) ->
% Serialization must be able to turn a raw binary into a TX, then turn
% that TX back into a binary and have the result match the original.
diff --git a/src/hb_opts.erl b/src/hb_opts.erl
index c08e03036..0a9890c27 100644
--- a/src/hb_opts.erl
+++ b/src/hb_opts.erl
@@ -13,7 +13,9 @@
%%% deterministic behavior impossible, the caller should fail the execution
%%% with a refusal to execute.
-module(hb_opts).
--export([get/1, get/2, get/3, load/1, default_message/0, mimic_default_types/2]).
+-export([get/1, get/2, get/3, load/1, load_bin/1]).
+-export([default_message/0, mimic_default_types/2, validate_node_history/1, validate_node_history/3]).
+-export([check_required_opts/2]).
-include("include/hb.hrl").
%% @doc The default configuration options of the hyperbeam node.
@@ -85,6 +87,7 @@ default_message() ->
#{<<"name">> => <<"stack@1.0">>, <<"module">> => dev_stack},
#{<<"name">> => <<"structured@1.0">>, <<"module">> => dev_codec_structured},
#{<<"name">> => <<"test-device@1.0">>, <<"module">> => dev_test},
+ #{<<"name">> => <<"volume@1.0">>, <<"module">> => dev_volume},
#{<<"name">> => <<"wasi@1.0">>, <<"module">> => dev_wasi},
#{<<"name">> => <<"wasm-64@1.0">>, <<"module">> => dev_wasm},
% temporal hack, for the future should load it from config
@@ -121,7 +124,7 @@ default_message() ->
mode => debug,
% Every modification to `Opts' called directly by the node operator
% should be recorded here.
- node_history => [],
+ node_history => [],
debug_stack_depth => 40,
debug_print_map_line_threshold => 30,
debug_print_binary_max => 60,
@@ -131,10 +134,10 @@ default_message() ->
debug_print_trace => short, % `short' | `false'. Has performance impact.
short_trace_len => 5,
debug_metadata => true,
- debug_ids => false,
+ debug_ids => true,
debug_committers => false,
debug_show_priv => false,
- trusted => #{},
+ snp_trusted => [],
routes => [
#{
% Routes for the genesis-wasm device to use a local CU, if requested.
@@ -168,8 +171,28 @@ default_message() ->
],
store =>
[
- #{ <<"store-module">> => hb_store_fs, <<"prefix">> => <<"cache-mainnet">> },
- #{ <<"store-module">> => hb_store_gateway,
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-mainnet">>
+ },
+ #{
+ <<"store-module">> => hb_store_gateway,
+ <<"subindex">> => [
+ #{
+ <<"name">> => <<"Data-Protocol">>,
+ <<"value">> => <<"ao">>
+ }
+ ],
+ <<"store">> =>
+ [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-mainnet">>
+ }
+ ]
+ },
+ #{
+ <<"store-module">> => hb_store_gateway,
<<"store">> =>
[
#{
@@ -248,9 +271,9 @@ get(Key, Default, Opts) ->
?MODULE:get(Key, Default, Opts#{ prefer => local }).
-ifdef(TEST).
--define(DEFAULT_PRINT_OPTS, "error").
+-define(DEFAULT_PRINT_OPTS, "error,http_error").
-else.
--define(DEFAULT_PRINT_OPTS, "error,http_short,compute_short,push_short").
+-define(DEFAULT_PRINT_OPTS, "error,http_error,http_short,compute_short,push_short").
-endif.
-define(ENV_KEYS,
@@ -264,10 +287,13 @@ get(Key, Default, Opts) ->
fun
(Str) when Str == "1" -> true;
(Str) when Str == "true" -> true;
- (Str) -> string:tokens(Str, ",")
+ (Str) ->
+ lists:map(fun hb_util:bin/1, string:tokens(Str, ","))
end,
?DEFAULT_PRINT_OPTS
- }
+ },
+ lua_scripts => {"LUA_SCRIPTS", "scripts"},
+ lua_tests => {"LUA_TESTS", fun dev_lua_test:parse_spec/1, tests}
}
).
@@ -319,20 +345,22 @@ config_lookup(Key, Default) -> maps:get(Key, default_message(), Default).
load(Path) ->
case file:read_file(Path) of
{ok, Bin} ->
- try dev_codec_flat:deserialize(Bin) of
- {ok, Map} -> {ok, mimic_default_types(Map, new_atoms)}
- catch
- error:B -> {error, B}
- end;
+ load_bin(Bin);
_ -> {error, not_found}
end.
+load_bin(Bin) ->
+ try dev_codec_flat:deserialize(Bin) of
+ {ok, Map} -> {ok, mimic_default_types(Map, new_atoms)}
+ catch
+ error:B -> {error, B}
+ end.
%% @doc Mimic the types of the default message for a given map.
mimic_default_types(Map, Mode) ->
Default = default_message(),
maps:from_list(lists:map(
fun({Key, Value}) ->
- NewKey = hb_util:key_to_atom(Key, Mode),
+ NewKey = try hb_util:key_to_atom(Key, Mode) catch _:_ -> Key end,
NewValue =
case maps:get(NewKey, Default, not_found) of
not_found ->
@@ -350,6 +378,75 @@ mimic_default_types(Map, Mode) ->
maps:to_list(Map)
)).
+%% @doc Validate that the node_history length is within an acceptable range.
+%% @param Opts The options map containing node_history
+%% @param MinLength The minimum acceptable length of node_history
+%% @param MaxLength The maximum acceptable length of node_history
+%% @returns `{ok, Length}' if `MinLength =< Length =< MaxLength',
+%% or `{error, Reason}' if the length is outside the range.
+validate_node_history(Opts) ->
+ validate_node_history(Opts, 1, 1).
+validate_node_history(Opts, MinLength, MaxLength) ->
+ Length = length(hb_opts:get(node_history, [], Opts)),
+ if
+ Length >= MinLength, Length =< MaxLength ->
+ {ok, Length};
+ Length < MinLength ->
+ {
+ error,
+ <<
+ "Node history too short. Expected at least ",
+ (integer_to_binary(MinLength))/binary,
+ " entries, got ",
+ (integer_to_binary(Length))/binary,
+ "."
+ >>
+ };
+ true ->
+ {
+ error,
+ <<
+ "Node history too long. Expected at most ",
+ (integer_to_binary(MaxLength))/binary,
+ " entries, got ",
+ (integer_to_binary(Length))/binary,
+ "."
+ >>
+ }
+ end.
+
+%% @doc Utility function to check for required options in a list.
+%% Takes a list of {Name, Value} pairs and returns:
+%% - {ok, Opts} when all required options are present (Value =/= not_found)
+%% - {error, ErrorMsg} with a message listing all missing options when any are not_found
+%% @param KeyValuePairs A list of {Name, Value} pairs to check.
+%% @param Opts The original options map to return if validation succeeds.
+%% @returns `{ok, Opts}' if all required options are present, or
+%% `{error, <<"Missing required opts: ", MissingOptsStr/binary>>}'
+%% where `MissingOptsStr' is a comma-separated list of missing option names.
+-spec check_required_opts(list({binary(), term()}), map()) ->
+ {ok, map()} | {error, binary()}.
+check_required_opts(KeyValuePairs, Opts) ->
+ MissingOpts = lists:filtermap(
+ fun({Name, Value}) ->
+ case Value of
+ not_found -> {true, Name};
+ _ -> false
+ end
+ end,
+ KeyValuePairs
+ ),
+ case MissingOpts of
+ [] ->
+ {ok, Opts};
+ _ ->
+ MissingOptsStr = binary:list_to_bin(
+ lists:join(<<", ">>, MissingOpts)
+ ),
+ ErrorMsg = <<"Missing required opts: ", MissingOptsStr/binary>>,
+ {error, ErrorMsg}
+ end.
+
%%% Tests
-ifdef(TEST).
@@ -397,4 +494,24 @@ load_test() ->
?assertEqual(<<"https://ao.computer">>, maps:get(host, Conf)),
% An atom, where the key contained a header-key `-' rather than a `_'.
?assertEqual(false, maps:get(await_inprogress, Conf)).
--endif.
+
+validate_node_history_test() ->
+ % Test default values (min=1, max=1)
+ ?assertEqual({ok, 1}, validate_node_history(#{node_history => [entry1]})),
+ ?assertEqual({error, <<"Node history too short. Expected at least 1 entries, got 0.">>},
+ validate_node_history(#{})),
+ ?assertEqual({error, <<"Node history too long. Expected at most 1 entries, got 2.">>},
+ validate_node_history(#{node_history => [entry1, entry2]})),
+ % Test with custom range
+ ?assertEqual({ok, 0}, validate_node_history(#{}, 0, 2)),
+ ?assertEqual({ok, 1}, validate_node_history(#{node_history => [entry1]}, 0, 2)),
+ ?assertEqual({ok, 2}, validate_node_history(#{node_history => [entry1, entry2]}, 0, 2)),
+ % Test range validations
+ ?assertEqual({error, <<"Node history too short. Expected at least 2 entries, got 1.">>},
+ validate_node_history(#{node_history => [entry1]}, 2, 4)),
+ ?assertEqual({error, <<"Node history too long. Expected at most 2 entries, got 3.">>},
+ validate_node_history(#{node_history => [entry1, entry2, entry3]}, 1, 2)),
+ % Test edge cases
+ ?assertEqual({ok, 3}, validate_node_history(#{node_history => [entry1, entry2, entry3]}, 3, 3)),
+ ?assertEqual({ok, 0}, validate_node_history(#{}, 0, 0)).
+-endif.
\ No newline at end of file
diff --git a/src/hb_singleton.erl b/src/hb_singleton.erl
index b4c621111..3b824a25a 100644
--- a/src/hb_singleton.erl
+++ b/src/hb_singleton.erl
@@ -101,7 +101,6 @@ to(Messages) ->
end,
{#{}, 0, #{}},
Messages),
-
MessageWithTypeAndScopes =
maps:fold(
fun
@@ -135,6 +134,8 @@ type(_Value) -> unknown.
%% @doc Normalize a singleton TABM message into a list of executable AO-Core
%% messages.
+from(Path) when is_binary(Path) ->
+ from(#{ <<"path">> => Path });
from(RawMsg) ->
RawPath = maps:get(<<"path">>, RawMsg, <<>>),
?event(parsing, {raw_path, RawPath}),
@@ -162,15 +163,16 @@ from(RawMsg) ->
%% @doc Parse the relative reference into path, query, and fragment.
parse_full_path(RelativeRef) ->
- {Path, QKVList} =
- case binary:split(RelativeRef, <<"?">>) of
- [P, QStr] -> {P, cowboy_req:parse_qs(#{ qs => QStr })};
- [P] -> {P, []}
+ {Path, QueryMap} =
+ case part([$?], RelativeRef) of
+ {$?, Base, Query} ->
+ {Base, parse_inlined_keys(Query, #{})};
+ {no_match, Base, <<>>} -> {Base, #{}}
end,
{
ok,
lists:map(fun(Part) -> decode_string(Part) end, path_parts($/, Path)),
- maps:from_list(QKVList)
+ QueryMap
}.
%% @doc Step 2: Decode, split and sanitize the path. Split by `/' but avoid
@@ -183,6 +185,7 @@ path_messages(RawBin) when is_binary(RawBin) ->
normalize_base([]) -> [];
normalize_base([First|Rest]) when ?IS_ID(First) -> [First|Rest];
normalize_base([{as, DevID, First}|Rest]) -> [{as, DevID, First}|Rest];
+normalize_base([Subres = {resolve, _}|Rest]) -> [Subres|Rest];
normalize_base(Rest) -> [#{}|Rest].
%% @doc Split the path into segments, filtering out empty segments and
@@ -276,28 +279,28 @@ parse_scope(KeyBin) ->
%% @doc Step 5: Merge the base message with the scoped messages.
build_messages(Msgs, ScopedModifications) ->
- do_build(1, Msgs, ScopedModifications).
-
-do_build(_, [], _ScopedKeys) -> [];
-do_build(I, [{as, DevID, Msg = #{ <<"path">> := <<"">> }}|Rest], ScopedKeys) ->
+ build(1, Msgs, ScopedModifications).
+build(_, [], _ScopedKeys) -> [];
+build(I, [{as, DevID, Msg = #{ <<"path">> := <<"">> }}|Rest], ScopedKeys) ->
ScopedKey = lists:nth(I, ScopedKeys),
- StepMsg = hb_message:convert(
- Merged = maps:merge(Msg, ScopedKey),
- <<"structured@1.0">>,
- #{ topic => ao_internal }
- ),
+ StepMsg =
+ hb_message:convert(
+ Merged = maps:merge(Msg, ScopedKey),
+ <<"structured@1.0">>,
+ #{ topic => ao_internal }
+ ),
?event({merged, {dev, DevID}, {input, Msg}, {merged, Merged}, {output, StepMsg}}),
- [{as, DevID, StepMsg} | do_build(I + 1, Rest, ScopedKeys)];
-do_build(I, [Msg|Rest], ScopedKeys) when not is_map(Msg) ->
- [Msg | do_build(I + 1, Rest, ScopedKeys)];
-do_build(I, [Msg | Rest], ScopedKeys) ->
- ScopedKey = lists:nth(I, ScopedKeys),
- StepMsg = hb_message:convert(
- maps:merge(Msg, ScopedKey),
- <<"structured@1.0">>,
- #{ topic => ao_internal }
- ),
- [StepMsg | do_build(I + 1, Rest, ScopedKeys)].
+ [{as, DevID, StepMsg} | build(I + 1, Rest, ScopedKeys)];
+build(I, [Msg|Rest], ScopedKeys) when not is_map(Msg) ->
+ [Msg | build(I + 1, Rest, ScopedKeys)];
+build(I, [Msg | Rest], ScopedKeys) ->
+ StepMsg =
+ hb_message:convert(
+ maps:merge(Msg, lists:nth(I, ScopedKeys)),
+ <<"structured@1.0">>,
+ #{ topic => ao_internal }
+ ),
+ [StepMsg | build(I + 1, Rest, ScopedKeys)].
%% @doc Parse a path part into a message or an ID.
%% Applies the syntax rules outlined in the module doc, in the following order:
@@ -334,13 +337,18 @@ parse_part_mods(<<"~", PartMods/binary>>, Msg) ->
% Apply the device specifier
{as, maybe_subpath(DeviceBin), MsgWithInlines};
parse_part_mods(<< "&", InlinedMsgBin/binary >>, Msg) ->
+ parse_inlined_keys(InlinedMsgBin, Msg).
+
+%% @doc Parse inlined key-value pairs from a path segment. Each key-value pair
+%% is separated by `&' and is of the form `K=V'.
+parse_inlined_keys(InlinedMsgBin, Msg) ->
InlinedKeys = path_parts($&, InlinedMsgBin),
MsgWithInlined =
lists:foldl(
fun(InlinedKey, Acc) ->
{Key, Val} = parse_inlined_key_val(InlinedKey),
?event({inlined_key, {explicit, Key}, {explicit, Val}}),
- maps:put(Key, Val, Acc)
+ Acc#{ Key => Val }
end,
Msg,
InlinedKeys
diff --git a/src/hb_store_fs.erl b/src/hb_store_fs.erl
index deabad2af..45251d0ba 100644
--- a/src/hb_store_fs.erl
+++ b/src/hb_store_fs.erl
@@ -133,7 +133,30 @@ make_link(Opts, Existing, New) ->
%% @doc Add the directory prefix to a path.
add_prefix(#{ <<"prefix">> := Prefix }, Path) ->
- hb_store:join([Prefix, Path]).
+ ?event({add_prefix, Prefix, Path}),
+ % Check if the prefix is an absolute path
+ IsAbsolute = is_binary(Prefix) andalso binary:first(Prefix) =:= $/ orelse
+ is_list(Prefix) andalso hd(Prefix) =:= $/,
+ % Join the paths
+ JoinedPath = hb_store:join([Prefix, Path]),
+ % If the prefix was absolute, ensure the joined path is also absolute
+ case IsAbsolute of
+ true ->
+ case is_binary(JoinedPath) of
+ true ->
+ case binary:first(JoinedPath) of
+ $/ -> JoinedPath;
+ _ -> <<"/", JoinedPath/binary>>
+ end;
+ false ->
+ case JoinedPath of
+ [$/ | _] -> JoinedPath;
+ _ -> [$/ | JoinedPath]
+ end
+ end;
+ false ->
+ JoinedPath
+ end.
%% @doc Remove the directory prefix from a path.
remove_prefix(#{ <<"prefix">> := Prefix }, Path) ->
diff --git a/src/hb_store_gateway.erl b/src/hb_store_gateway.erl
index 55870306c..5495470c0 100644
--- a/src/hb_store_gateway.erl
+++ b/src/hb_store_gateway.erl
@@ -235,3 +235,34 @@ resolve_on_gateway_test_() ->
),
?assertMatch(#{ <<"assignments">> := _ }, X)
end}.
+
+%% @doc Test to verify store opts is being set for Data-Protocol ao
+store_opts_test() ->
+ Opts = #{
+ cache_control => <<"cache">>,
+ store =>
+ [
+ #{
+ <<"store-module">> => hb_store_fs,
+ <<"prefix">> => <<"cache-TEST">>
+ },
+ #{ <<"store-module">> => hb_store_gateway,
+ <<"store">> => false,
+ <<"subindex">> => [
+ #{
+ <<"name">> => <<"Data-Protocol">>,
+ <<"value">> => <<"ao">>
+ }
+ ]
+ }
+ ]
+ },
+ Node = hb_http_server:start_node(Opts),
+ {ok, Res} =
+ hb_http:get(
+ Node,
+ <<"myb2p8_TSM0KSgBMoG-nu6TLuqWwPmdZM5V2QSUeNmM">>,
+ #{}
+ ),
+ ?event(debug_gateway, {res, Res}),
+ ?assertEqual(<<"Hello World">>,hb_ao:get(<<"data">>, Res)).
\ No newline at end of file
diff --git a/src/hb_tracer.erl b/src/hb_tracer.erl
index 654b9dea1..4b4521ecb 100644
--- a/src/hb_tracer.erl
+++ b/src/hb_tracer.erl
@@ -2,102 +2,130 @@
%%% This allows for tracking the lifecycle of a request from HTTP receipt through processing and response.
-module(hb_tracer).
+
-export([start_trace/0, record_step/2, get_trace/1, format_error_trace/1]).
-include("include/hb.hrl").
+%%% @doc Start a new tracer acting as queue of events registered.
start_trace() ->
- Trace = #{ steps => queue:new() },
- TracePID = spawn(fun() -> trace_loop(Trace) end),
- ?event(trace, {trace_started, TracePID}),
- TracePID.
+ Trace = #{steps => queue:new()},
+ TracePID = spawn(fun() -> trace_loop(Trace) end),
+ ?event(trace, {trace_started, TracePID}),
+ TracePID.
trace_loop(Trace) ->
- receive
- {record_step, Step} ->
- Steps = maps:get(steps, Trace),
- NewTrace = Trace#{steps => queue:in(Step, Steps)},
- ?event(trace, {step_recorded, Step}),
- trace_loop(NewTrace);
- {get_trace, From} ->
- % Convert queue to list for the response
- TraceWithList = Trace#{steps => queue:to_list(maps:get(steps, Trace))},
- From ! {trace, TraceWithList},
- trace_loop(Trace)
- end.
-
+ receive
+ {record_step, Step} ->
+ Steps = maps:get(steps, Trace),
+ NewTrace = Trace#{steps => queue:in(Step, Steps)},
+ ?event(trace, {step_recorded, Step}),
+ trace_loop(NewTrace);
+ {get_trace, From} ->
+ % Convert queue to list for the response
+ TraceWithList =
+ Trace#{steps =>
+ queue:to_list(
+ maps:get(steps, Trace))},
+ From ! {trace, TraceWithList},
+ trace_loop(Trace)
+ end.
+
+%%% @doc Register a new step into a tracer
record_step(TracePID, Step) ->
- TracePID! {record_step, Step}.
+ TracePID ! {record_step, Step}.
+%%% @doc Exports the complete queue of events
get_trace(TracePID) ->
- TracePID! {get_trace, self()},
- receive
- {trace, Trace} ->
- Trace
- after 5000 ->
- ?event(trace, {trace_timeout, TracePID}),
- {trace, #{}}
- end.
-
+ TracePID ! {get_trace, self()},
+ receive
+ {trace, Trace} ->
+ Trace
+ after 5000 ->
+ ?event(trace, {trace_timeout, TracePID}),
+ {trace, #{}}
+ end.
+
+%%% @doc Format a trace for error in a user-friendly emoji oriented output
format_error_trace(Trace) ->
- Steps = maps:get(steps, Trace, []),
-
- TraceMap = lists:foldl(fun (TraceItem, Acc) ->
- case TraceItem of
- {http, {parsed_singleton, _ReqSingleton, _}} ->
- maps:put(request_parsing, true, Acc);
- {ao_core, {stage, Stage, _Task}} ->
- maps:put(resolve_stage, Stage, Acc);
- {ao_result, {load_device_failed, _, _, _, _, {exec_exception, Exception}, _, _}} ->
- maps:put(error, Exception, Acc);
- {ao_result, {exec_failed, _, _, _, {func, Fun}, _, {exec_exception, Error}, _, _}} ->
- maps:put(error, {Fun, Error}, Acc);
- _ -> Acc
- end
- end, #{}, Steps),
-
- % Build the trace message
- TraceStrings = ["Oops! Something went wrong. Here's the rundown:"],
-
- % Add parsing status
- ParsingTrace = case maps:get(request_parsing, TraceMap, false) of
- false ->
- TraceStrings ++ [[failure_emoji(), "Parsing your request"]];
- true ->
- TraceStrings ++ [[checkmark_emoji(), "Parsing your request"]]
- end,
-
- % Add stage information
- StageTrace = case maps:get(resolve_stage, TraceMap, undefined) of
- undefined ->
- ParsingTrace;
- Stage ->
- ParsingTrace ++ [[stage_to_emoji(Stage), " Resolved steps of your execution"]]
- end,
-
- % Add error information
- ErrorTrace = case maps:get(error, TraceMap, undefined) of
- undefined ->
- StageTrace;
- {Fun, Reason} ->
- StageTrace ++ [[failure_emoji(), io_lib:format("Error: ~p -> ~p", [Fun, Reason])]];
- Error ->
- StageTrace ++ [[failure_emoji(), io_lib:format("Error: ~p", [Error])]]
- end,
- string:join(ErrorTrace, "\n").
-
+ Steps = maps:get(steps, Trace, []),
+ TraceMap =
+ lists:foldl(fun(TraceItem, Acc) ->
+ case TraceItem of
+ {http, {parsed_singleton, _ReqSingleton, _}} ->
+ maps:put(request_parsing, true, Acc);
+ {ao_core, {stage, Stage, _Task}} ->
+ maps:put(resolve_stage, Stage, Acc);
+ {ao_result,
+ {load_device_failed, _, _, _, _, {exec_exception, Exception}, _, _}} ->
+ maps:put(error, Exception, Acc);
+ {ao_result,
+ {exec_failed,
+ _,
+ _,
+ _,
+ {func, Fun},
+ _,
+ {exec_exception, Error},
+ _,
+ _}} ->
+ maps:put(error, {Fun, Error}, Acc);
+ _ -> Acc
+ end
+ end,
+ #{},
+ Steps),
+ % Build the trace message
+ TraceStrings = <<"Oops! Something went wrong. Here's the rundown:">>,
+ % Add parsing status
+ ParsingTrace =
+ case maps:get(request_parsing, TraceMap, false) of
+ false ->
+ Emoji = failure_emoji(),
+ <<TraceStrings/binary, "\n", Emoji/binary, " Parsing your request">>;
+ true ->
+ Emoji = checkmark_emoji(),
+ <<TraceStrings/binary, "\n", Emoji/binary, " Parsing your request">>
+ end,
+ % Add stage information
+ StageTrace =
+ case maps:get(resolve_stage, TraceMap, undefined) of
+ undefined ->
+ ParsingTrace;
+ Stage ->
+ StageEmoji = stage_to_emoji(Stage),
+ try << ParsingTrace/binary, "\n", StageEmoji/binary,
+ " Resolved steps of your execution" >>
+ catch
+ error:badarg ->
+ iolist_to_binary(io_lib:format("~p", [ParsingTrace]))
+ end
+ end,
+ % Add error information
+ case maps:get(error, TraceMap, undefined) of
+ undefined ->
+ StageTrace;
+ {Fun, Reason} ->
+ FailureEmoji = failure_emoji(),
+ ErrMsg = list_to_binary(io_lib:format("~p -> ~p", [Fun, Reason])),
+ <<StageTrace/binary, "\n", FailureEmoji/binary, "Error: ", ErrMsg/binary>>;
+ Error ->
+ FailureEmoji = failure_emoji(),
+ <<StageTrace/binary, "\n", FailureEmoji/binary, "Error: ", (iolist_to_binary(io_lib:format("~p", [Error])))/binary>>
+ end.
checkmark_emoji() ->
- % Unicode for checkmark
- "\xE2\x9C\x85". % \xE2\x9C\x85 is the checkmark emoji in UTF-8
+ % Unicode for checkmark
+ <<"\xE2\x9C\x85">>. % \xE2\x9C\x85 is the checkmark emoji in UTF-8
failure_emoji() ->
- % Unicode for failure emoji
- "\xE2\x9D\x8C". % \xE2\x9D\x8C is the failure emoji in UTF-8
+ % Unicode for failure emoji
+ <<"\xE2\x9D\x8C">>. % \xE2\x9D\x8C is the failure emoji in UTF-8
% Helper function to convert stage number to emoji
stage_to_emoji(Stage) when Stage >= 1, Stage =< 9 ->
- % Unicode for circled numbers 1-9
- [Stage + 48, 16#E2, 16#83, 16#A3];
+ % Unicode for circled numbers 1-9
+ StageEmoji = Stage + 48,
+ <<StageEmoji, 16#E2, 16#83, 16#A3>>;
stage_to_emoji(_) ->
- "".
\ No newline at end of file
+ "".
diff --git a/src/hb_util.erl b/src/hb_util.erl
index d37a33a09..f37244521 100644
--- a/src/hb_util.erl
+++ b/src/hb_util.erl
@@ -6,7 +6,7 @@
-export([encode/1, decode/1, safe_encode/1, safe_decode/1]).
-export([find_value/2, find_value/3]).
-export([deep_merge/2, number/1, list_to_numbered_map/1]).
--export([message_to_ordered_list/1, message_to_ordered_list/2]).
+-export([is_ordered_list/1, message_to_ordered_list/1, message_to_ordered_list/2]).
-export([is_string_list/1, to_sorted_list/1, to_sorted_keys/1]).
-export([hd/1, hd/2, hd/3]).
-export([remove_common/2, to_lower/1]).
@@ -15,8 +15,9 @@
-export([format_maybe_multiline/2, remove_trailing_noise/2]).
-export([debug_print/4, debug_fmt/1, debug_fmt/2, eunit_print/2]).
-export([print_trace/4, trace_macro_helper/5, print_trace_short/4]).
+-export([format_trace/1, format_trace_short/1]).
+-export([is_hb_module/1, is_hb_module/2, all_hb_modules/0]).
-export([ok/1, ok/2, until/1, until/2, until/3]).
--export([format_trace_short/1, is_hb_module/1, is_hb_module/2, all_hb_modules/0]).
-export([count/2, mean/1, stddev/1, variance/1, weighted_random/1]).
-include("include/hb.hrl").
@@ -38,7 +39,9 @@ float(Str) when is_binary(Str) ->
float(Str) when is_list(Str) ->
list_to_float(Str);
float(Float) when is_float(Float) ->
- Float.
+ Float;
+float(Int) when is_integer(Int) ->
+ Int / 1.
%% @doc Coerce a string to an atom.
atom(Str) when is_binary(Str) ->
@@ -63,7 +66,8 @@ bin(Value) when is_binary(Value) ->
%% @doc Coerce a value to a list.
list(Value) when is_binary(Value) ->
binary_to_list(Value);
-list(Value) when is_list(Value) -> Value.
+list(Value) when is_list(Value) -> Value;
+list(Value) when is_atom(Value) -> atom_to_list(Value).
%% @doc Unwrap a tuple of the form `{ok, Value}', or throw/return, depending on
%% the value of the `error_strategy' option.
@@ -136,11 +140,7 @@ key_to_atom(Key, Mode) ->
WithoutDashes = binary:replace(Key, <<"-">>, <<"_">>, [global]),
case Mode of
new_atoms -> binary_to_atom(WithoutDashes, utf8);
- _ ->
- try binary_to_existing_atom(WithoutDashes, utf8)
- catch
- error:badarg -> WithoutDashes
- end
+ _ -> binary_to_existing_atom(WithoutDashes, utf8)
end.
%% @doc Convert a human readable ID to a native binary ID. If the ID is already
@@ -148,13 +148,18 @@ key_to_atom(Key, Mode) ->
native_id(Bin) when is_binary(Bin) andalso byte_size(Bin) == 43 ->
decode(Bin);
native_id(Bin) when is_binary(Bin) andalso byte_size(Bin) == 32 ->
+ Bin;
+native_id(Bin) when is_binary(Bin) andalso byte_size(Bin) == 42 ->
Bin.
%% @doc Convert a native binary ID to a human readable ID. If the ID is already
-%% a human readable ID, it is returned as is.
+%% a human readable ID, it is returned as is. If it is an ethereum address, it
+%% is returned as is.
human_id(Bin) when is_binary(Bin) andalso byte_size(Bin) == 32 ->
encode(Bin);
human_id(Bin) when is_binary(Bin) andalso byte_size(Bin) == 43 ->
+ Bin;
+human_id(Bin) when is_binary(Bin) andalso byte_size(Bin) == 42 ->
Bin.
%% @doc Return a short ID for the different types of IDs used in AO-Core.
@@ -255,6 +260,21 @@ number(List) ->
list_to_numbered_map(List) ->
maps:from_list(number(List)).
+%% @doc Determine if the message given is an ordered list, starting from 1.
+is_ordered_list(Msg) when is_list(Msg) -> true;
+is_ordered_list(Msg) ->
+ is_ordered_list(1, hb_ao:normalize_keys(Msg)).
+is_ordered_list(_, Msg) when map_size(Msg) == 0 -> true;
+is_ordered_list(N, Msg) ->
+ case maps:get(NormKey = hb_ao:normalize_key(N), Msg, not_found) of
+ not_found -> false;
+ _ ->
+ is_ordered_list(
+ N + 1,
+ maps:without([NormKey], Msg)
+ )
+ end.
+
%% @doc Take a message with numbered keys and convert it to a list of tuples
%% with the associated key as an integer and a value. Optionally, it takes a
%% standard map of HyperBEAM runtime options.
@@ -422,7 +442,11 @@ do_debug_fmt({X, Y}, Indent) when is_record(Y, tx) ->
do_debug_fmt({X, Y}, Indent) when is_map(Y) ->
Formatted = format_maybe_multiline(Y, Indent + 1),
HasNewline = lists:member($\n, Formatted),
- format_indented("~p~s",
+ format_indented(
+ case is_binary(X) of
+ true -> "~s";
+ false -> "~p"
+ end ++ "~s",
[
X,
case HasNewline of
@@ -581,18 +605,15 @@ print_trace(Stack, Label, CallerInfo) ->
io:format(standard_error, "=== ~s ===~s==>~n~s",
[
Label, CallerInfo,
- lists:flatten(
- format_trace(
- Stack,
- hb_opts:get(stack_print_prefixes, [], #{})
- )
- )
+ lists:flatten(format_trace(Stack))
]).
%% @doc Format a stack trace as a list of strings, one for each stack frame.
%% Each stack frame is formatted if it matches the `stack_print_prefixes'
%% option. At the first frame that does not match a prefix in the
%% `stack_print_prefixes' option, the rest of the stack is not formatted.
+format_trace(Stack) ->
+ format_trace(Stack, hb_opts:get(stack_print_prefixes, [], #{})).
format_trace([], _) -> [];
format_trace([Item|Rest], Prefixes) ->
case element(1, Item) of
diff --git a/src/hb_volume.erl b/src/hb_volume.erl
new file mode 100644
index 000000000..d4e1a3171
--- /dev/null
+++ b/src/hb_volume.erl
@@ -0,0 +1,445 @@
+-module(hb_volume).
+-moduledoc """
+Module for managing physical disks and volumes, providing operations
+for partitioning, formatting, mounting, and managing encrypted volumes.
+""".
+-export([list_partitions/0, create_partition/2]).
+-export([format_disk/2, mount_disk/4, change_node_store/2]).
+-export([check_for_device/1]).
+-include("include/hb.hrl").
+
+-doc """
+List available partitions in the system.
+@returns {ok, Map} where Map contains the partition information,
+ or {error, Reason} if the operation fails.
+""".
+-spec list_partitions() -> {ok, map()} | {error, binary()}.
+list_partitions() ->
+ ?event(disk, {list_partitions, start}),
+
+ % Get the partition information using fdisk -l
+ case os:cmd("sudo fdisk -l") of
+ [] ->
+ % Empty output indicates an error
+ Reason = <<"Failed to list partitions: no output">>,
+ ?event(disk, {list_partitions, error, Reason}),
+ {error, Reason};
+ Output ->
+ ?event(disk, {list_partitions, complete}),
+
+ % Split output into lines
+ Lines = string:split(Output, "\n", all),
+
+ % Process the output to group information by disk
+ {_, DiskData} = lists:foldl(
+ fun process_disk_line/2,
+ {undefined, []},
+ Lines
+ ),
+
+ % Process each disk's data to extract all information
+ DiskObjects = lists:filtermap(
+ fun(DiskEntry) ->
+ Device = maps:get(<<"device">>, DiskEntry),
+ DiskLines = lists:reverse(maps:get(<<"data">>, DiskEntry)),
+ DiskInfo = parse_disk_info(Device, DiskLines),
+ {true, DiskInfo}
+ end,
+ DiskData
+ ),
+
+ % Return the partition information
+ {ok, #{
+ <<"status">> => 200,
+ <<"content-type">> => <<"application/json">>,
+ <<"body">> => hb_json:encode(#{<<"disks">> => DiskObjects})
+ }}
+ end.
+
+%%% Helper functions for list_partitions
+
+% Process a line of fdisk output to group by disk
+process_disk_line(Line, {CurrentDisk, Acc}) ->
+ % Match for a new disk entry
+ DiskPattern = "^Disk (/dev/(?!ram)\\S+):",
+ case re:run(Line, DiskPattern, [{capture, [1], binary}]) of
+ {match, [Device]} ->
+ % Start a new disk entry
+ NewDisk = #{
+ <<"device">> => Device,
+ <<"data">> => [Line]
+ },
+ {NewDisk, [NewDisk | Acc]};
+ _ when CurrentDisk =:= undefined ->
+ % Not a disk line and no current disk
+ {undefined, Acc};
+ _ ->
+ % Add line to current disk's data
+ CurrentData = maps:get(<<"data">>, CurrentDisk),
+ UpdatedDisk = CurrentDisk#{
+ <<"data">> => [Line | CurrentData]
+ },
+ % Update the list with the modified disk entry
+ UpdatedAcc = [UpdatedDisk | lists:delete(CurrentDisk, Acc)],
+ {UpdatedDisk, UpdatedAcc}
+ end.
+
+% Parse detailed disk information from fdisk output lines
+parse_disk_info(Device, Lines) ->
+ % Initialize with device ID
+ DiskInfo = #{<<"device">> => Device},
+
+ % Process each line to extract information
+ lists:foldl(
+ fun parse_disk_line/2,
+ DiskInfo,
+ Lines
+ ).
+
+% Parse a single line of disk information
+parse_disk_line(Line, Info) ->
+ % Extract disk size and bytes
+ SizePattern = "^Disk .+: ([0-9.]+ [KMGT]iB), ([0-9]+) bytes, ([0-9]+) sectors",
+ case re:run(Line, SizePattern, [{capture, [1, 2, 3], binary}]) of
+ {match, [Size, Bytes, Sectors]} ->
+ Info#{
+ <<"size">> => Size,
+ <<"bytes">> => binary_to_integer(Bytes),
+ <<"sectors">> => binary_to_integer(Sectors)
+ };
+ _ ->
+ parse_disk_model_line(Line, Info)
+ end.
+
+% Parse disk model information
+parse_disk_model_line(Line, Info) ->
+ % Extract disk model
+ ModelPattern = "^Disk model: (.+)\\s*$",
+ case re:run(Line, ModelPattern, [{capture, [1], binary}]) of
+ {match, [Model]} ->
+ Info#{<<"model">> => string:trim(Model)};
+ _ ->
+ parse_disk_units_line(Line, Info)
+ end.
+
+% Parse disk units information
+parse_disk_units_line(Line, Info) ->
+ % Extract units information
+ UnitsPattern = "^Units: (.+)$",
+ case re:run(Line, UnitsPattern, [{capture, [1], binary}]) of
+ {match, [Units]} ->
+ Info#{<<"units">> => Units};
+ _ ->
+ parse_sector_size_line(Line, Info)
+ end.
+
+% Parse sector size information
+parse_sector_size_line(Line, Info) ->
+ % Extract sector size
+ SectorPattern = "^Sector size \\(logical/physical\\): ([^/]+)/(.+)$",
+ case re:run(Line, SectorPattern, [{capture, [1, 2], binary}]) of
+ {match, [LogicalSize, PhysicalSize]} ->
+ Info#{
+ <<"sector_size">> => #{
+ <<"logical">> => string:trim(LogicalSize),
+ <<"physical">> => string:trim(PhysicalSize)
+ }
+ };
+ _ ->
+ parse_io_size_line(Line, Info)
+ end.
+
+% Parse I/O size information
+parse_io_size_line(Line, Info) ->
+ % Extract I/O size
+ IOPattern = "^I/O size \\(minimum/optimal\\): ([^/]+)/(.+)$",
+ case re:run(Line, IOPattern, [{capture, [1, 2], binary}]) of
+ {match, [MinSize, OptSize]} ->
+ Info#{
+ <<"io_size">> => #{
+ <<"minimum">> => string:trim(MinSize),
+ <<"optimal">> => string:trim(OptSize)
+ }
+ };
+ _ ->
+ Info
+ end.
+
+-doc """
+Create a partition on a disk device.
+@param Device The path to the device, e.g. "/dev/sdb".
+@param PartType The partition type to create, defaults to "ext4".
+@returns {ok, Map} on success where Map includes status and partition information,
+ or {error, Reason} if the operation fails.
+""".
+-spec create_partition(Device :: binary(), PartType :: binary()) ->
+ {ok, map()} | {error, binary()}.
+create_partition(undefined, _PartType) ->
+ {error, <<"Device path not specified">>};
+create_partition(Device, PartType) ->
+ ?event(disk, {create_partition, start}),
+ ?event(disk, {create_partition, device, Device}),
+ ?event(disk, {create_partition, part_type, PartType}),
+
+ % Create a GPT partition table
+ DeviceStr = binary_to_list(Device),
+ MklabelCmd = "sudo parted " ++ DeviceStr ++ " mklabel gpt",
+ MklabelResult = os:cmd(MklabelCmd),
+
+ % Check if creating the partition table succeeded
+ case string:find(MklabelResult, "Error") of
+ nomatch ->
+ create_actual_partition(Device, PartType);
+ _ ->
+ ?event(disk, {create_partition, error, list_to_binary(MklabelResult)}),
+ {error, list_to_binary(MklabelResult)}
+ end.
+
+% Create the actual partition after making the GPT label
+create_actual_partition(Device, PartType) ->
+ DeviceStr = binary_to_list(Device),
+ PartTypeStr = binary_to_list(PartType),
+
+ % Build the parted command to create the partition
+ MkpartCmd = "sudo parted -a optimal " ++ DeviceStr ++
+ " mkpart primary " ++ PartTypeStr ++ " 0% 100%",
+ MkpartResult = os:cmd(MkpartCmd),
+
+ % Check if creating the partition succeeded
+ case string:find(MkpartResult, "Error") of
+ nomatch ->
+ get_partition_info(Device);
+ _ ->
+ ?event(disk, {create_partition, error, list_to_binary(MkpartResult)}),
+ {error, list_to_binary(MkpartResult)}
+ end.
+
+% Get the partition information after creating a partition
+get_partition_info(Device) ->
+ DeviceStr = binary_to_list(Device),
+
+ % Print partition information
+ PrintCmd = "sudo parted " ++ DeviceStr ++ " print",
+ PartitionInfo = os:cmd(PrintCmd),
+
+ ?event(disk, {create_partition, complete}),
+ {ok, #{
+ <<"status">> => 200,
+ <<"message">> => <<"Partition created successfully.">>,
+ <<"device_path">> => Device,
+ <<"partition_info">> => list_to_binary(PartitionInfo)
+ }}.
+
+-doc """
+Format a disk or partition with LUKS encryption.
+@param Partition The path to the partition, e.g. "/dev/sdc1".
+@param EncKey The encryption key to use for LUKS.
+@returns {ok, Map} on success where Map includes the status and confirmation message,
+ or {error, Reason} if the operation fails.
+""".
+-spec format_disk(Partition :: binary(), EncKey :: binary()) ->
+ {ok, map()} | {error, binary()}.
+format_disk(undefined, _EncKey) ->
+ {error, <<"Partition path not specified">>};
+format_disk(_Partition, undefined) ->
+ {error, <<"Encryption key not specified">>};
+format_disk(Partition, EncKey) ->
+ ?event(disk, {format, start}),
+ ?event(disk, {format, partition, Partition}),
+
+ % Ensure tmp directory exists
+ os:cmd("sudo mkdir -p /root/tmp"),
+ KeyFile = "/root/tmp/luks_key_" ++ os:getpid(),
+ file:write_file(KeyFile, EncKey, [raw]),
+
+ % Format with LUKS
+ PartitionStr = binary_to_list(Partition),
+ FormatCmd = "sudo cryptsetup luksFormat --batch-mode --key-file " ++
+ KeyFile ++ " " ++ PartitionStr,
+ FormatResult = os:cmd(FormatCmd),
+
+ % Remove the temporary key file
+ os:cmd("sudo shred -u " ++ KeyFile),
+
+ % Check if the command succeeded
+ case string:find(FormatResult, "failed") of
+ nomatch ->
+ ?event(disk, {format, complete}),
+ {ok, #{
+ <<"status">> => 200,
+ <<"message">> =>
+ <<"Partition formatted with LUKS encryption successfully.">>
+ }};
+ _ ->
+ ?event(disk, {format, error, list_to_binary(FormatResult)}),
+ {error, list_to_binary(FormatResult)}
+ end.
+
+-doc """
+Mount a LUKS-encrypted disk.
+@param Partition The path to the partition, e.g. "/dev/sdc1".
+@param EncKey The encryption key for LUKS.
+@param MountPoint The directory where the disk should be mounted.
+@param VolumeName The name to use for the decrypted LUKS volume.
+@returns {ok, Map} on success where Map includes the status and confirmation message,
+ or {error, Reason} if the operation fails.
+""".
+-spec mount_disk(
+ Partition :: binary(),
+ EncKey :: binary(),
+ MountPoint :: binary(),
+ VolumeName :: binary()
+) -> {ok, map()} | {error, binary()}.
+mount_disk(undefined, _EncKey, _MountPoint, _VolumeName) ->
+ {error, <<"Partition path not specified">>};
+mount_disk(_Partition, undefined, _MountPoint, _VolumeName) ->
+ {error, <<"Encryption key not specified">>};
+mount_disk(_Partition, _EncKey, undefined, _VolumeName) ->
+ {error, <<"Mount point not specified">>};
+mount_disk(Partition, EncKey, MountPoint, VolumeName) ->
+ ?event(disk, {mount, start}),
+ ?event(disk, {mount, partition, Partition}),
+ ?event(disk, {mount, mount_point, MountPoint}),
+ ?event(disk, {mount, volume_name, VolumeName}),
+
+ % Ensure tmp directory exists
+ os:cmd("sudo mkdir -p /root/tmp"),
+ KeyFile = "/root/tmp/luks_key_" ++ os:getpid(),
+ file:write_file(KeyFile, EncKey, [raw]),
+
+ % Open the LUKS volume
+ PartitionStr = binary_to_list(Partition),
+ VolumeNameStr = binary_to_list(VolumeName),
+ OpenCmd = "sudo cryptsetup luksOpen --key-file " ++ KeyFile ++ " " ++
+ PartitionStr ++ " " ++ VolumeNameStr,
+ OpenResult = os:cmd(OpenCmd),
+
+ % Remove the temporary key file
+ os:cmd("sudo shred -u " ++ KeyFile),
+
+ % Check if opening the LUKS volume succeeded
+ case string:find(OpenResult, "failed") of
+ nomatch ->
+ mount_opened_volume(Partition, MountPoint, VolumeName);
+ _ ->
+ ?event(disk, {mount, error, list_to_binary(OpenResult)}),
+ {error, list_to_binary(OpenResult)}
+ end.
+
+% Mount an already opened LUKS volume
+mount_opened_volume(Partition, MountPoint, VolumeName) ->
+ % Create mount point if it doesn't exist
+ MountPointStr = binary_to_list(MountPoint),
+ os:cmd("sudo mkdir -p " ++ MountPointStr),
+
+ % Mount the unlocked LUKS volume
+ VolumeNameStr = binary_to_list(VolumeName),
+ MountCmd = "sudo mount /dev/mapper/" ++ VolumeNameStr ++ " " ++
+ MountPointStr,
+ MountResult = os:cmd(MountCmd),
+
+ % Check if mounting succeeded
+ case string:find(MountResult, "failed") of
+ nomatch ->
+ create_mount_info(Partition, MountPoint, VolumeName);
+ _ ->
+ % Close the LUKS volume if mounting failed
+ VolumeNameStr = binary_to_list(VolumeName),
+ os:cmd("sudo cryptsetup luksClose " ++ VolumeNameStr),
+ ?event(disk, {mount, error, list_to_binary(MountResult)}),
+ {error, list_to_binary(MountResult)}
+ end.
+
+% Create mount info response
+create_mount_info(Partition, MountPoint, VolumeName) ->
+ ?event(disk, {mount, complete}),
+ {ok, #{
+ <<"status">> => 200,
+ <<"message">> => <<"Encrypted partition mounted successfully.">>,
+ <<"mount_point">> => MountPoint,
+ <<"mount_info">> => #{
+ partition => Partition,
+ mount_point => MountPoint,
+ volume_name => VolumeName
+ }
+ }}.
+
+-doc """
+Change the node's data store location to the mounted encrypted disk.
+@param StorePath The new path for the store directory.
+@param CurrentStore The current store configuration.
+@returns {ok, Map} on success where Map includes the status and confirmation message,
+ or {error, Reason} if the operation fails.
+""".
+-spec change_node_store(StorePath :: binary(), CurrentStore :: list()) ->
+ {ok, map()} | {error, binary()}.
+change_node_store(undefined, _CurrentStore) ->
+ {error, <<"Store path not specified">>};
+change_node_store(StorePath, CurrentStore) ->
+ ?event(disk, {change_store, start}),
+ ?event(disk, {change_store, store_path, StorePath}),
+
+ % Create the store directory if it doesn't exist
+ StorePathStr = binary_to_list(StorePath),
+ os:cmd("sudo mkdir -p " ++ StorePathStr),
+
+ % Update the store configuration with the new path
+ NewStore = update_store_config(CurrentStore, StorePath),
+
+ % Return the result
+ ?event(disk, {change_store, complete}),
+ {ok, #{
+ <<"status">> => 200,
+ <<"message">> => <<"Node store updated to use encrypted disk.">>,
+ <<"store_path">> => StorePath,
+ <<"store">> => NewStore
+ }}.
+
+%%% Helper functions
+
+% Update the store configuration with a new base path
+-spec update_store_config(StoreConfig :: term(), NewPath :: binary()) -> term().
+update_store_config(StoreConfig, NewPath) when is_list(StoreConfig) ->
+ % For a list, update each element
+ [update_store_config(Item, NewPath) || Item <- StoreConfig];
+update_store_config(#{<<"store-module">> := Module} = StoreConfig, NewPath)
+ when is_map(StoreConfig) ->
+ % Handle various store module types differently
+ case Module of
+ hb_store_fs ->
+ % For filesystem store, replace prefix with the new path
+ StoreConfig#{<<"prefix">> => NewPath};
+ hb_store_rocksdb ->
+ % For RocksDB store, replace prefix with the new path
+ StoreConfig#{<<"prefix">> => NewPath};
+ hb_store_gateway ->
+ % For gateway store, recursively update nested store configurations
+ NestedStore = maps:get(<<"store">>, StoreConfig, []),
+ StoreConfig#{
+ <<"store">> => update_store_config(NestedStore, NewPath)
+ };
+ _ ->
+ % For any other store type, update the prefix
+ StoreConfig#{<<"prefix">> => NewPath}
+ end;
+update_store_config({Type, _OldPath, Opts}, NewPath) ->
+ % For tuple format with options
+ {Type, NewPath, Opts};
+update_store_config({Type, _OldPath}, NewPath) ->
+ % For tuple format without options
+ {Type, NewPath};
+update_store_config(StoreConfig, _NewPath) ->
+ % Return unchanged for any other format
+ StoreConfig.
+
+-doc """
+Check if a device exists on the system.
+@param Device The path to the device to check (binary).
+@returns true if the device exists, false otherwise.
+""".
+-spec check_for_device(Device :: binary()) -> boolean().
+check_for_device(Device) ->
+ Command = io_lib:format("ls -l ~s 2>/dev/null || echo 'not_found'", [binary_to_list(Device)]),
+ ?event(disk, {check_for_device, command, Command}),
+ Result = os:cmd(Command),
+ string:find(Result, "not_found") =:= nomatch.
\ No newline at end of file
diff --git a/src/html/hyperbuddy@1.0/graph.html b/src/html/hyperbuddy@1.0/graph.html
new file mode 100644
index 000000000..079054e58
--- /dev/null
+++ b/src/html/hyperbuddy@1.0/graph.html
@@ -0,0 +1,321 @@
+
+
+
+
+
+ HyperBEAM Cache Graph
+
+
+
+
+
+
+
+
+
+ HyperBEAM Cache Graph
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Nodes:
+ 0
+
+
+ Links:
+ 0
+
+
+
+ Simple
+
+
+
+ Composite
+
+
+
+
+
+
+
Loading graph data...
+
FPS: 0
+
+
+
+
Debug Info
+
Nodes: 0/0
+
Links: 0
+
Grid cells: 0
+
Objects in grid: 0
+
Avg per cell: 0
+
Camera Position:
+
X: 0
+
Y: 0
+
Z: 0
+
Press 1-3 to toggle visualizations
+
+
+
+
+
0 FPS
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/html/hyperbuddy@1.0/graph.js b/src/html/hyperbuddy@1.0/graph.js
new file mode 100644
index 000000000..2500e795c
--- /dev/null
+++ b/src/html/hyperbuddy@1.0/graph.js
@@ -0,0 +1,3751 @@
+/**
+ * HyperBEAM Cache Graph Renderer - Modular Version
+ * A 2D force-directed graph visualization for the HyperBEAM cache system
+ */
+
/**
 * Utility function to create a circular texture for node rendering.
 * Draws a filled circle (optionally stroked) on an offscreen canvas and
 * wraps it in a THREE.CanvasTexture.
 * @param {number} size - Size of the texture in pixels
 * @param {number|string} color - Color of the circle (hex number or CSS string)
 * @param {boolean} border - Whether to add a border
 * @param {number|string} borderColor - Color of the border (hex number or CSS string)
 * @returns {THREE.Texture} The generated texture
 */
function createCircleTexture(size = 64, color = 0xffffff, border = false, borderColor = 0x000000) {
    // Normalize a numeric hex color to a CSS "#rrggbb" string.
    const toCss = (value) =>
        typeof value === 'number' ? '#' + value.toString(16).padStart(6, '0') : value;

    // Offscreen canvas, cleared to transparent.
    const canvas = document.createElement('canvas');
    canvas.width = size;
    canvas.height = size;
    const ctx = canvas.getContext('2d');
    ctx.clearRect(0, 0, size, size);

    // Circle inset by 2px so an optional border is not clipped at the edge.
    ctx.beginPath();
    ctx.arc(size / 2, size / 2, size / 2 - 2, 0, 2 * Math.PI, false);
    ctx.fillStyle = toCss(color);
    ctx.fill();

    if (border) {
        ctx.lineWidth = 1;
        ctx.strokeStyle = toCss(borderColor);
        ctx.stroke();
    }

    // Wrap the canvas in a texture and mark it for upload.
    const texture = new THREE.CanvasTexture(canvas);
    texture.needsUpdate = true;

    return texture;
}
+
/**
 * ThemeManager - Handles configuration and visual styling.
 * Central source of truth for node sizes, colors, display toggles,
 * physics settings, and camera/layering constants.
 */
class ThemeManager {
    constructor() {
        this.config = {
            // Node styling
            nodeSize: { simple: 6, composite: 8 },
            // Color scheme
            colors: {
                background: 0xf9f9f9,
                simpleNode: 0x6495ED,    // Light blue
                compositeNode: 0xF08080, // Light coral
                highlight: 0xFFA500,     // Orange for highlighting
                selectedNode: 0xFF5500,  // Orange-red for selected node
                neighborNode: 0x4CAF50,  // Green for neighbor nodes
                link: 0xcccccc,          // Light gray for links
                activeLink: 0x333333,    // Dark gray for active links
                hover: 0xfafa33          // Warm orange/yellow for hover
            },
            // Display options
            showLabels: true,
            physicsEnabled: true,
            // Physics settings
            defaultDistance: 150,
            highConnectionThreshold: 10,
            // Camera settings
            zoomLevel: { default: 1.0, focused: 2.5 },
            // Z-positions for layering (lines under nodes under labels)
            zPos: { line: 0, node: 5, label: 10 }
        };
    }

    /**
     * Get the color for a node based on its type and state.
     * @param {string} nodeType - The type of node ('simple' or 'composite')
     * @param {string} state - 'default', 'selected', 'neighbor', or 'hover'
     * @returns {number} The color as a hex number
     */
    getNodeColor(nodeType, state = 'default') {
        const { colors } = this.config;
        // State colors take precedence over the per-type default.
        const stateColors = {
            selected: colors.selectedNode,
            neighbor: colors.neighborNode,
            hover: colors.hover
        };
        if (state in stateColors) {
            return stateColors[state];
        }
        return nodeType === 'simple' ? colors.simpleNode : colors.compositeNode;
    }

    /**
     * Get the color for a link based on its state.
     * @param {string} state - 'default' or 'active'
     * @returns {number} The color as a hex number
     */
    getLinkColor(state = 'default') {
        if (state === 'active') {
            return this.config.colors.activeLink;
        }
        return this.config.colors.link;
    }

    /**
     * Get the size for a node based on its type.
     * @param {string} nodeType - The type of node ('simple' or 'composite')
     * @returns {number} The node size
     */
    getNodeSize(nodeType) {
        const sizes = this.config.nodeSize;
        return nodeType === 'simple' ? sizes.simple : sizes.composite;
    }

    /**
     * Toggle label visibility.
     * @returns {boolean} The new label visibility state
     */
    toggleLabels() {
        const next = !this.config.showLabels;
        this.config.showLabels = next;
        return next;
    }

    /**
     * Toggle physics simulation.
     * @returns {boolean} The new physics enabled state
     */
    togglePhysics() {
        const next = !this.config.physicsEnabled;
        this.config.physicsEnabled = next;
        return next;
    }
}
+
+/**
+ * SceneManager - Handles Three.js scene, camera, and rendering.
+ * Owns the WebGL renderer, the perspective camera (locked to 2D panning),
+ * mouse raycasting, and distance-based frustum culling of graph objects.
+ */
+class SceneManager {
+ constructor(container, themeManager) {
+ this.container = container;
+ this.themeManager = themeManager;
+ // NOTE(review): this.graphController is read in updateFrustumCulling()
+ // but never assigned in this class -- presumably injected externally by
+ // the controller after construction; confirm.
+
+ // Three.js components
+ this.scene = null;
+ this.camera = null;
+ this.renderer = null;
+ this.controls = null;
+ this.raycaster = new THREE.Raycaster();
+ this.mouse = new THREE.Vector2();
+
+ // Performance optimization
+ this.frustum = new THREE.Frustum();
+ this.projScreenMatrix = new THREE.Matrix4();
+ this.tmpVector = new THREE.Vector3();
+ this.enableFrustumCulling = true;
+ this.frustumCullingDistance = 1250; // Beyond this distance, apply visibility culling
+
+ // Initialize the scene
+ this.initScene();
+ }
+
+ /**
+ * Initialize the Three.js scene and renderer
+ */
+ initScene() {
+ const width = this.container.clientWidth;
+ const height = this.container.clientHeight;
+
+ // Create scene with background color
+ this.scene = new THREE.Scene();
+ this.scene.background = new THREE.Color(this.themeManager.config.colors.background);
+
+ // Create perspective camera with large clipping plane to prevent culling
+ const aspectRatio = width / height;
+ this.camera = new THREE.PerspectiveCamera(
+ 40, // Narrower field of view for less distortion
+ aspectRatio,
+ 0.1,
+ 15000 // Increased far clipping plane
+ );
+ this.camera.position.z = 1000;
+
+ // Create renderer
+ this.renderer = new THREE.WebGLRenderer({
+ antialias: true,
+ alpha: true
+ });
+ this.renderer.setSize(width, height);
+ this.renderer.setClearColor(this.themeManager.config.colors.background, 1);
+ this.renderer.sortObjects = true; // Enable sorting for proper z-ordering
+ this.container.appendChild(this.renderer.domElement);
+
+ // Configure raycaster for better point detection
+ // (replaces the raycaster created in the constructor)
+ this.raycaster = new THREE.Raycaster();
+ this.raycaster.params.Points.threshold = 10; // Increase threshold for easier point selection
+
+ // Add orbit controls limited to 2D movement with perspective camera
+ this.controls = new THREE.OrbitControls(this.camera, this.renderer.domElement);
+ this.controls.enableDamping = true;
+ this.controls.dampingFactor = 0.1;
+ this.controls.enableRotate = false; // Disable 3D rotation
+ this.controls.screenSpacePanning = true;
+
+ // Set zoom limits - constrain camera between 750 and 15000 on z-axis
+ this.controls.minDistance = 750;
+ this.controls.maxDistance = 15000;
+
+ // Handle window resize
+ window.addEventListener('resize', () => this.onWindowResize());
+ }
+
+ /**
+ * Handle window resize events
+ */
+ onWindowResize() {
+ const width = this.container.clientWidth;
+ const height = this.container.clientHeight;
+
+ // Update perspective camera aspect ratio
+ this.camera.aspect = width / height;
+ this.camera.updateProjectionMatrix();
+
+ // Update renderer
+ this.renderer.setSize(width, height);
+ }
+
+ /**
+ * Reset the camera view
+ */
+ resetView() {
+ // Reset camera position for perspective camera
+ this.camera.position.set(0, 0, 1000);
+ this.controls.target.set(0, 0, 0);
+ this.camera.updateProjectionMatrix();
+ this.controls.update();
+ }
+
+ /**
+ * Focus camera on a specific position with smooth animation
+ * (500ms ease-out-cubic tween of target x/y and camera x/y/z).
+ * @param {THREE.Vector3} position - The position to focus on
+ */
+ focusCamera(position) {
+ const duration = 500; // milliseconds
+ const startTime = Date.now();
+
+ // Save starting values
+ const startPosition = this.camera.position.clone();
+ const startTarget = this.controls.target.clone();
+
+ // Define a fixed Z-offset for viewing the target
+ const zOffset = 600;
+
+ // Animation function
+ const animateCamera = () => {
+ const elapsed = Date.now() - startTime;
+ const progress = Math.min(elapsed / duration, 1);
+
+ // Ease function - ease out cubic
+ const easeProgress = 1 - Math.pow(1 - progress, 3);
+
+ // Only update the target x and y position, keeping rotation consistent
+ this.controls.target.x = startTarget.x + (position.x - startTarget.x) * easeProgress;
+ this.controls.target.y = startTarget.y + (position.y - startTarget.y) * easeProgress;
+ // Keep z at the same value to maintain default camera angle
+
+ // Move camera x and y to match target
+ this.camera.position.x = startPosition.x + (position.x - startPosition.x) * easeProgress;
+ this.camera.position.y = startPosition.y + (position.y - startPosition.y) * easeProgress;
+
+ // Adjust Z with fixed offset
+ const targetZ = position.z + zOffset;
+ this.camera.position.z = startPosition.z + (targetZ - startPosition.z) * easeProgress;
+
+ this.camera.updateProjectionMatrix();
+ this.controls.update();
+
+ if (progress < 1) {
+ requestAnimationFrame(animateCamera);
+ }
+ };
+
+ animateCamera();
+ }
+
+ /**
+ * Update the mouse position for raycasting.
+ * Converts client coordinates to normalized device coordinates (-1..1).
+ * @param {MouseEvent} event - The mouse event
+ */
+ updateMousePosition(event) {
+ const rect = this.renderer.domElement.getBoundingClientRect();
+ this.mouse.x = ((event.clientX - rect.left) / rect.width) * 2 - 1;
+ this.mouse.y = -((event.clientY - rect.top) / rect.height) * 2 + 1;
+ }
+
+ /**
+ * Get objects intersecting with the current mouse position
+ * @returns {Array} Array of intersected objects
+ */
+ getIntersectedObjects() {
+ this.raycaster.setFromCamera(this.mouse, this.camera);
+ return this.raycaster.intersectObjects(this.scene.children, true);
+ }
+
+ /**
+ * Update the scene (called in animation loop)
+ */
+ update() {
+ // Update controls
+ if (this.controls) {
+ this.controls.update();
+
+ // Enforce camera z-position limits
+ // (hard clamp in addition to controls.min/maxDistance set in initScene)
+ if (this.camera.position.z < 750) {
+ this.camera.position.z = 750;
+ } else if (this.camera.position.z > 15000) {
+ this.camera.position.z = 15000;
+ }
+ }
+
+ // Apply frustum culling for distant objects
+ if (this.enableFrustumCulling) {
+ this.updateFrustumCulling();
+ }
+
+ // Render the scene
+ this.renderer.render(this.scene, this.camera);
+ }
+
+ /**
+ * Update frustum and apply visibility culling for better performance.
+ * Nodes farther than frustumCullingDistance are hidden when outside the
+ * view frustum; links are hidden only when both endpoints are distant
+ * and at least one is hidden.
+ */
+ updateFrustumCulling() {
+ // Update the frustum
+ this.projScreenMatrix.multiplyMatrices(
+ this.camera.projectionMatrix,
+ this.camera.matrixWorldInverse
+ );
+ this.frustum.setFromProjectionMatrix(this.projScreenMatrix);
+
+ // If we have a reference to the controller, access the dataManager
+ if (this.graphController && this.graphController.dataManager) {
+ const dataManager = this.graphController.dataManager;
+
+ // Process all nodes
+ dataManager.graphObjects.nodes.forEach(node => {
+ if (!node.object) return;
+
+ // Get distance from camera
+ this.tmpVector.copy(node.object.position);
+ const distance = this.tmpVector.distanceTo(this.camera.position);
+
+ // If the node is beyond our threshold, check if it's in the frustum
+ if (distance > this.frustumCullingDistance) {
+ // Check if the node is in the frustum
+ const isVisible = this.frustum.containsPoint(node.object.position);
+
+ // Only update visibility if necessary to avoid unnecessary matrix updates
+ if (node.object.visible !== isVisible) {
+ node.object.visible = isVisible;
+
+ // Also update label visibility if it exists
+ if (node.labelObject) {
+ node.labelObject.visible = isVisible && this.themeManager.config.showLabels;
+ }
+ }
+ } else if (!node.object.visible) {
+ // If node is within threshold distance but not visible, make visible
+ node.object.visible = true;
+ if (node.labelObject) {
+ node.labelObject.visible = this.themeManager.config.showLabels;
+ }
+ }
+ });
+
+ // Optional: Process links for better culling
+ // Only show links if both endpoints are visible
+ dataManager.graphObjects.links.forEach(link => {
+ if (!link.line) return;
+
+ const sourceNode = dataManager.graphObjects.nodes.get(link.sourceId);
+ const targetNode = dataManager.graphObjects.nodes.get(link.targetId);
+
+ if (sourceNode && targetNode && sourceNode.object && targetNode.object) {
+ const sourceDist = sourceNode.object.position.distanceTo(this.camera.position);
+ const targetDist = targetNode.object.position.distanceTo(this.camera.position);
+
+ // If both nodes are distant, check if they're visible
+ if (sourceDist > this.frustumCullingDistance && targetDist > this.frustumCullingDistance) {
+ const sourceVisible = sourceNode.object.visible;
+ const targetVisible = targetNode.object.visible;
+
+ // Only show link if both endpoints are visible
+ link.line.visible = sourceVisible && targetVisible;
+
+ // Update label visibility if needed
+ if (link.labelObject) {
+ link.labelObject.visible = sourceVisible && targetVisible &&
+ this.themeManager.config.showLabels &&
+ dataManager.activeLinks.has(`${link.sourceId}-${link.targetId}`);
+ }
+ } else if (!link.line.visible) {
+ // If at least one endpoint is close, show the link
+ link.line.visible = true;
+ }
+ }
+ });
+ }
+ }
+
+ /**
+ * Add an object to the scene
+ * @param {THREE.Object3D} object - The object to add
+ */
+ addToScene(object) {
+ this.scene.add(object);
+ }
+
+ /**
+ * Remove an object from the scene
+ * @param {THREE.Object3D} object - The object to remove
+ */
+ removeFromScene(object) {
+ this.scene.remove(object);
+ }
+}
+
/**
 * DataManager - Handles graph data loading and processing.
 * Owns the raw graph data, the registries of visual objects created for
 * nodes and links, and the selection/hover state shared by other managers.
 */
class DataManager {
    constructor() {
        // Graph data as received from the server: { nodes: [], links: [] }
        this.graphData = { nodes: [], links: [] };
        // Visual object registries keyed by node id / "sourceId-targetId"
        this.graphObjects = { nodes: new Map(), links: new Map() };

        // State tracking
        this.selectedNode = null;        // id of the selected node, or null
        this.neighborNodes = new Set();  // ids of nodes adjacent to the selection
        this.activeLinks = new Set();    // "sourceId-targetId" keys touching the selection
        this.hoveredNode = null;         // id of the node under the cursor, or null
    }

    /**
     * Load graph data from the server.
     * @returns {Promise<Object>} Resolves with the loaded data; rejects on
     *   HTTP errors or if the payload fails validation.
     */
    loadData() {
        // Return the fetch chain directly rather than wrapping it in
        // `new Promise(...)` (the explicit promise-constructor anti-pattern);
        // throwing inside .then rejects the returned promise, so the
        // resolve/reject behavior observed by callers is unchanged.
        return fetch('/~hyperbuddy@1.0/graph-data')
            .then(response => {
                if (!response.ok) {
                    throw new Error(`HTTP error ${response.status}`);
                }
                return response.json();
            })
            .then(data => {
                // Clear existing data before installing the new payload
                this.clearData();

                // Validate data
                if (!this.validateData(data)) {
                    throw new Error('Invalid data format');
                }

                this.graphData = data;
                return data;
            })
            .catch(error => {
                // Log, then re-throw so callers still see the rejection
                console.error('Error loading graph data:', error);
                throw error;
            });
    }

    /**
     * Validate the graph data structure.
     * @param {Object} data - The data to validate
     * @returns {boolean} Whether the data has node/link arrays and at least one node
     */
    validateData(data) {
        // Check if we have valid data
        if (!data || !data.nodes || !data.links ||
            !Array.isArray(data.nodes) || !Array.isArray(data.links)) {
            return false;
        }

        // Check if we have any nodes
        if (data.nodes.length === 0) {
            return false;
        }

        return true;
    }

    /**
     * Clear all graph data and reset selection/hover state.
     */
    clearData() {
        this.graphData = { nodes: [], links: [] };
        this.graphObjects.nodes.clear();
        this.graphObjects.links.clear();

        // Reset state
        this.selectedNode = null;
        this.hoveredNode = null;
        this.neighborNodes.clear();
        this.activeLinks.clear();
    }

    /**
     * Determine node type based on ID pattern: a single path segment with no
     * trailing slash is 'simple', anything else is 'composite'.
     * @param {string} nodeId - The node ID
     * @returns {string} The node type ('simple' or 'composite')
     */
    determineNodeType(nodeId) {
        const pathParts = nodeId.split('/').filter(p => p.length > 0);
        return (pathParts.length <= 1 && !nodeId.endsWith('/')) ? 'simple' : 'composite';
    }

    /**
     * Search for nodes whose id or label contains a term (case-insensitive).
     * @param {string} searchTerm - The term to search for
     * @returns {Array} Array of matching node IDs
     */
    searchNodes(searchTerm) {
        if (!searchTerm) return [];

        const searchLower = searchTerm.toLowerCase();

        // Find matching nodes
        return this.graphData.nodes
            .filter(node =>
                (node.id && node.id.toLowerCase().includes(searchLower)) ||
                (node.label && node.label.toLowerCase().includes(searchLower))
            )
            .map(node => node.id);
    }

    /**
     * Get nodes connected to a starting node up to a specified depth (BFS).
     * @param {string} startNodeId - The ID of the starting node
     * @param {number} maxDepth - Maximum depth/distance to traverse
     * @returns {Object} {nodes, links} where links have both endpoints in nodes
     */
    getConnectedSubgraph(startNodeId, maxDepth = 1) {
        const connectedNodes = new Map();
        const connectedLinks = new Set();
        const queue = [{id: startNodeId, depth: 0}];
        const visited = new Set([startNodeId]);

        // First make sure we have the start node
        const startNode = this.graphData.nodes.find(n => n.id === startNodeId);
        if (!startNode) return {nodes: [], links: []};

        connectedNodes.set(startNodeId, startNode);

        // BFS to find connected nodes up to maxDepth
        while (queue.length > 0) {
            const {id, depth} = queue.shift();

            if (depth >= maxDepth) continue;

            // Find all links connected to this node
            this.graphData.links.forEach(link => {
                const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
                const targetId = typeof link.target === 'object' ? link.target.id : link.target;

                if (sourceId === id || targetId === id) {
                    const linkId = `${sourceId}-${targetId}`;

                    // If we've already processed this link, skip it
                    if (connectedLinks.has(linkId)) return;

                    connectedLinks.add(linkId);

                    // Get the ID of the node on the other end of the link
                    const otherId = sourceId === id ? targetId : sourceId;

                    // If we haven't visited this node yet, add it to the queue
                    if (!visited.has(otherId)) {
                        visited.add(otherId);
                        const otherNode = this.graphData.nodes.find(n => n.id === otherId);
                        if (otherNode) {
                            connectedNodes.set(otherId, otherNode);
                            queue.push({id: otherId, depth: depth + 1});
                        }
                    }
                }
            });
        }

        return {
            nodes: Array.from(connectedNodes.values()),
            // Keep only links whose BOTH endpoints made it into the subgraph
            links: this.graphData.links.filter(link => {
                const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
                const targetId = typeof link.target === 'object' ? link.target.id : link.target;
                return connectedNodes.has(sourceId) && connectedNodes.has(targetId);
            })
        };
    }

    /**
     * Store a node object reference.
     * @param {string} nodeId - The node ID
     * @param {Object} nodeData - The node data
     */
    storeNodeObject(nodeId, nodeData) {
        this.graphObjects.nodes.set(nodeId, nodeData);
    }

    /**
     * Store a link object reference.
     * @param {string} linkId - The link ID (format: "sourceId-targetId")
     * @param {Object} linkData - The link data
     */
    storeLinkObject(linkId, linkData) {
        this.graphObjects.links.set(linkId, linkData);
    }

    /**
     * Get links connected to a node.
     * @param {string} nodeId - The node ID
     * @returns {Array} Array of links having the node as source or target
     */
    getConnectedLinks(nodeId) {
        return this.graphData.links.filter(link => {
            const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
            const targetId = typeof link.target === 'object' ? link.target.id : link.target;
            return sourceId === nodeId || targetId === nodeId;
        });
    }

    /**
     * Track selected node, recomputing neighborNodes and activeLinks.
     * @param {string} nodeId - The selected node ID
     */
    setSelectedNode(nodeId) {
        this.selectedNode = nodeId;

        // Find and track connected nodes and links
        if (nodeId) {
            // Clear previous
            this.neighborNodes.clear();
            this.activeLinks.clear();

            // Find connected links
            this.graphData.links.forEach(link => {
                const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
                const targetId = typeof link.target === 'object' ? link.target.id : link.target;

                if (sourceId === nodeId || targetId === nodeId) {
                    // This is a connected link
                    const otherNodeId = sourceId === nodeId ? targetId : sourceId;
                    this.neighborNodes.add(otherNodeId);

                    // Track active link
                    const linkKey = `${sourceId}-${targetId}`;
                    this.activeLinks.add(linkKey);
                }
            });
        }
    }

    /**
     * Clear selected node and the derived neighbor/active-link sets.
     */
    clearSelectedNode() {
        this.selectedNode = null;
        this.neighborNodes.clear();
        this.activeLinks.clear();
    }

    /**
     * Track hovered node.
     * @param {string} nodeId - The hovered node ID
     */
    setHoveredNode(nodeId) {
        this.hoveredNode = nodeId;
    }

    /**
     * Clear hovered node.
     */
    clearHoveredNode() {
        this.hoveredNode = null;
    }
}
+
+/**
+ * GraphObjectManager - Creates and manages visual objects for nodes and links
+ */
+class GraphObjectManager {
+ constructor(sceneManager, dataManager, themeManager) {
+ this.sceneManager = sceneManager;
+ this.dataManager = dataManager;
+ this.themeManager = themeManager;
+ // NOTE(review): this.graphController (providing nodeCloud and
+ // simulationManager) is read throughout this class but never assigned
+ // here -- presumably injected by the controller after construction;
+ // confirm.
+ }
+
+ /**
+ * Create a visual node object
+ * @param {Object} node - The node data
+ * @returns {Object} The created node object with visual elements
+ */
+ createNodeObject(node) {
+ // Determine node type if not set
+ if (!node.type) {
+ node.type = this.dataManager.determineNodeType(node.id);
+ }
+
+ // Add to NodeCloud for efficient rendering
+ if (this.graphController && this.graphController.nodeCloud) {
+ // Add to node cloud
+ const nodeIndex = this.graphController.nodeCloud.addNode(node);
+
+ // Create a virtual object for compatibility
+ // This is needed because other code expects a THREE.Object3D
+ const virtualObject = {
+ position: new THREE.Vector3(node.x || 0, node.y || 0, this.themeManager.config.zPos.node),
+ visible: true,
+ userData: { id: node.id, type: node.type, label: node.label }
+ };
+
+ // Store virtual object reference
+ node.object = virtualObject;
+ node.nodeCloudIndex = nodeIndex;
+ }
+
+ // Create label if enabled
+ let labelObject = null;
+ if (this.themeManager.config.showLabels) {
+ labelObject = this.createLabel(node);
+ }
+
+ // Store label reference
+ node.labelObject = labelObject;
+
+ // Store in dataManager
+ this.dataManager.storeNodeObject(node.id, node);
+
+ // Add to spatial grid if simulation manager is available
+ if (this.graphController && this.graphController.simulationManager) {
+ this.graphController.simulationManager.addNodeToSpatialGrid(node);
+ }
+
+ return node;
+ }
+
+ /**
+ * Create a visual link object (dashed line plus optional label).
+ * Returns null if either endpoint node has not been created yet.
+ * @param {Object} link - The link data
+ * @returns {Object} The created link object with visual elements
+ */
+ createLinkObject(link) {
+ // Get node IDs
+ const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
+ const targetId = typeof link.target === 'object' ? link.target.id : link.target;
+
+ // Get node objects
+ const sourceNode = this.dataManager.graphObjects.nodes.get(sourceId);
+ const targetNode = this.dataManager.graphObjects.nodes.get(targetId);
+
+ if (!sourceNode || !targetNode) {
+ return null;
+ }
+
+ // Create line geometry
+ const points = [
+ new THREE.Vector3(sourceNode.x, sourceNode.y, this.themeManager.config.zPos.line),
+ new THREE.Vector3(targetNode.x, targetNode.y, this.themeManager.config.zPos.line)
+ ];
+
+ // Create material and line
+ const material = new THREE.LineDashedMaterial({
+ color: this.themeManager.getLinkColor(),
+ dashSize: 2.5,
+ gapSize: 1.5,
+ transparent: true,
+ opacity: 0.6,
+ linewidth: 1,
+ depthWrite: false
+ });
+
+ const geometry = new THREE.BufferGeometry().setFromPoints(points);
+ const line = new THREE.Line(geometry, material);
+
+ // Important: Disable frustum culling to ensure lines are always visible
+ line.frustumCulled = false;
+ line.renderOrder = 0; // Ensure lines render before nodes
+
+ this.sceneManager.addToScene(line);
+
+ // Store connection info
+ link.sourceId = sourceId;
+ link.targetId = targetId;
+ link.line = line;
+
+ // Create label if needed
+ let labelObject = null;
+ if (this.themeManager.config.showLabels && link.label) {
+ labelObject = this.createLinkLabel(link, sourceNode, targetNode);
+ }
+
+ link.labelObject = labelObject;
+
+ // Store reference
+ const linkId = `${sourceId}-${targetId}`;
+ this.dataManager.storeLinkObject(linkId, link);
+
+ return link;
+ }
+
+ /**
+ * Truncate text and add ellipsis in the middle
+ * @param {string} text - The text to truncate
+ * @returns {string} Truncated text with ellipsis
+ */
+ truncateWithEllipsis(text) {
+ // Show only if longer than 15 characters (6 + 3 + 6)
+ if (!text || text.length <= 15) {
+ return text;
+ }
+
+ // Take exactly 6 chars from start and 6 from end
+ return text.substring(0, 6) + '...' + text.substring(text.length - 6);
+ }
+
+ /**
+ * Create a label for a node
+ * @param {Object} node - The node data
+ * @returns {Object} The created label object
+ */
+ createLabel(node) {
+ // Get display text and truncate it if needed
+ const displayText = this.truncateWithEllipsis(node.label || node.id);
+ const label = new SpriteText(displayText);
+
+ // Improved text rendering settings
+ label.fontFace = 'Arial, Helvetica, sans-serif';
+ label.fontSize = 32;
+ label.fontWeight = '600';
+ label.strokeWidth = 0; // No stroke for sharper text
+ label.color = '#000000';
+ label.backgroundColor = 'rgba(255,255,255,0.95)';
+ label.padding = 3;
+ label.textHeight = 5; // Increased for better resolution with larger text
+ label.borderWidth = 0; // No border for sharper edges
+
+ // Position above node with pixel-perfect positioning
+ // NOTE(review): updateNodePosition() repositions labels with an offset
+ // of 6, not 100 -- see the note there; confirm which value is intended.
+ const offset_val = 100;
+ const isSimple = node.type === 'simple';
+ const offset = isSimple ?
+ this.themeManager.config.nodeSize.simple + offset_val :
+ this.themeManager.config.nodeSize.composite + offset_val;
+
+ // Round to whole pixels to avoid subpixel rendering
+ const x = Math.round(node.x || 0);
+ const y = Math.round((node.y || 0) + offset);
+ const z = this.themeManager.config.zPos.label;
+
+ label.position.set(x, y, z);
+ label.renderOrder = 20;
+
+ this.sceneManager.addToScene(label);
+
+ return label;
+ }
+
+ /**
+ * Create a label for a link
+ * @param {Object} link - The link data
+ * @param {Object} sourceNode - The source node
+ * @param {Object} targetNode - The target node
+ * @returns {Object} The created label object
+ */
+ createLinkLabel(link, sourceNode, targetNode) {
+ const midPoint = new THREE.Vector3(
+ (sourceNode.x + targetNode.x) / 2,
+ (sourceNode.y + targetNode.y) / 2,
+ this.themeManager.config.zPos.label
+ );
+
+ // NOTE(review): only labels of exactly 43 characters are truncated --
+ // presumably the length of a base64url-encoded Arweave ID; labels of
+ // 16-42 characters pass through untruncated. Confirm this is intended.
+ let label_text = link.label;
+ if (link.label.length == 43) {
+ label_text = this.truncateWithEllipsis(link.label);
+ }
+
+ const label = new SpriteText(label_text);
+
+ // Improved text rendering settings
+ label.fontFace = 'Arial, Helvetica, sans-serif';
+ label.fontSize = 32;
+ label.fontWeight = '600';
+ label.strokeWidth = 0; // No stroke for sharper text
+ label.color = '#000000';
+ label.backgroundColor = 'rgba(255,255,255,0.95)';
+ label.padding = 3;
+ label.textHeight = 4; // Better resolution for link labels
+ label.borderWidth = 0; // No border for sharper edges
+
+ // Round to whole pixels to avoid subpixel rendering
+ midPoint.x = Math.round(midPoint.x);
+ midPoint.y = Math.round(midPoint.y);
+
+ label.position.copy(midPoint);
+ label.renderOrder = 20;
+
+ // Hide link labels by default - only show when node is selected
+ label.visible = false;
+
+ this.sceneManager.addToScene(label);
+
+ return label;
+ }
+
+ /**
+ * Update the position of a node object
+ * @param {Object} node - The node object to update
+ */
+ updateNodePosition(node) {
+ if (!node) return;
+
+ if (this.graphController && this.graphController.nodeCloud) {
+ // Update position in the NodeCloud
+ this.graphController.nodeCloud.updateNodePosition(node.id, node.x || 0, node.y || 0);
+
+ // Also update the virtual object for compatibility
+ if (node.object && node.object.position) {
+ node.object.position.x = node.x || 0;
+ node.object.position.y = node.y || 0;
+ }
+ }
+
+ // Update label position if it exists
+ if (node.labelObject) {
+ // NOTE(review): createLabel() uses offset_val = 100, so this mismatch
+ // makes labels jump closer to the node on the first reposition.
+ // Confirm the intended offset and unify the two values.
+ const offset_val = 6; // Same value as in createLabel
+ const isSimple = node.type === 'simple';
+ const offset = isSimple ?
+ this.themeManager.config.nodeSize.simple + offset_val :
+ this.themeManager.config.nodeSize.composite + offset_val;
+
+ node.labelObject.position.x = node.x || 0;
+ node.labelObject.position.y = (node.y || 0) + offset;
+ }
+ }
+
+ /**
+ * Update the position of a link object
+ * @param {Object} link - The link object to update
+ */
+ updateLinkPosition(link) {
+ if (!link.line) return;
+
+ const sourceId = link.sourceId || (typeof link.source === 'object' ? link.source.id : link.source);
+ const targetId = link.targetId || (typeof link.target === 'object' ? link.target.id : link.target);
+
+ const sourceNode = this.dataManager.graphObjects.nodes.get(sourceId);
+ const targetNode = this.dataManager.graphObjects.nodes.get(targetId);
+
+ if (sourceNode && targetNode) {
+ // Update the link line geometry
+ const points = [
+ new THREE.Vector3(sourceNode.x || 0, sourceNode.y || 0, this.themeManager.config.zPos.line),
+ new THREE.Vector3(targetNode.x || 0, targetNode.y || 0, this.themeManager.config.zPos.line)
+ ];
+
+ // Update the line geometry
+ link.line.geometry.setFromPoints(points);
+
+ // Ensure frustum culling remains disabled after updates
+ link.line.frustumCulled = false;
+
+ // Update the link label position if it exists
+ if (link.labelObject) {
+ const midPoint = new THREE.Vector3(
+ (sourceNode.x + targetNode.x) / 2,
+ (sourceNode.y + targetNode.y) / 2,
+ this.themeManager.config.zPos.label
+ );
+ link.labelObject.position.copy(midPoint);
+ }
+ }
+ }
+
+ /**
+ * Update node colors based on selection and hover state
+ * @param {string} nodeId - The ID of the node to update
+ */
+ updateNodeColors(nodeId) {
+ const node = this.dataManager.graphObjects.nodes.get(nodeId);
+ if (!node) return;
+
+ let state = 'default';
+
+ // Determine state based on selection and hover
+ // (precedence: selected > neighbor > hover)
+ if (nodeId === this.dataManager.selectedNode) {
+ state = 'selected';
+ } else if (this.dataManager.neighborNodes.has(nodeId)) {
+ state = 'neighbor';
+ } else if (nodeId === this.dataManager.hoveredNode) {
+ state = 'hover';
+ }
+
+ if (this.graphController && this.graphController.nodeCloud) {
+ // Update the color in the node cloud
+ this.graphController.nodeCloud.updateNodeColor(nodeId, node.type, state);
+ }
+ }
+
+ /**
+ * Update link colors based on selection state
+ * @param {string} linkId - The ID of the link to update (format: "sourceId-targetId")
+ */
+ updateLinkColors(linkId) {
+ const link = this.dataManager.graphObjects.links.get(linkId);
+ if (!link || !link.line) return;
+
+ // Determine if this is an active link
+ const isActive = this.dataManager.activeLinks.has(linkId);
+
+ // Set color and opacity based on active state
+ link.line.material.color.set(this.themeManager.getLinkColor(isActive ? 'active' : 'default'));
+ link.line.material.opacity = isActive ? 1.0 : 0.6;
+ }
+
+ /**
+ * Remove a node and its visual elements from the scene
+ * @param {string} nodeId - The ID of the node to remove
+ */
+ removeNode(nodeId) {
+ const node = this.dataManager.graphObjects.nodes.get(nodeId);
+ if (!node) return;
+
+ // Remove from NodeCloud if available
+ if (this.graphController && this.graphController.nodeCloud) {
+ this.graphController.nodeCloud.removeNode(nodeId);
+ }
+
+ // Remove label from scene
+ if (node.labelObject) {
+ this.sceneManager.removeFromScene(node.labelObject);
+ }
+
+ // Remove from data manager
+ this.dataManager.graphObjects.nodes.delete(nodeId);
+ }
+
+ /**
+ * Remove a link and its visual elements from the scene
+ * @param {string} linkId - The ID of the link to remove
+ */
+ removeLink(linkId) {
+ const link = this.dataManager.graphObjects.links.get(linkId);
+ if (!link) return;
+
+ // Remove line from scene
+ if (link.line) {
+ this.sceneManager.removeFromScene(link.line);
+ }
+
+ // Remove label from scene
+ if (link.labelObject) {
+ this.sceneManager.removeFromScene(link.labelObject);
+ }
+
+ // Remove from data manager
+ this.dataManager.graphObjects.links.delete(linkId);
+ }
+
+ /**
+ * Clear all visible nodes and links from the scene
+ */
+ clearVisibleObjects() {
+ // Clear NodeCloud if available
+ if (this.graphController && this.graphController.nodeCloud) {
+ this.graphController.nodeCloud.clear();
+ }
+
+ // Remove label objects from the scene
+ this.dataManager.graphObjects.nodes.forEach((node, id) => {
+ if (node.labelObject) {
+ this.sceneManager.removeFromScene(node.labelObject);
+ }
+ });
+
+ this.dataManager.graphObjects.links.forEach((link, id) => {
+ if (link.line) this.sceneManager.removeFromScene(link.line);
+ if (link.labelObject) this.sceneManager.removeFromScene(link.labelObject);
+ });
+
+ // Clear references in data manager
+ this.dataManager.graphObjects.nodes.clear();
+ this.dataManager.graphObjects.links.clear();
+ }
+
+ /**
+ * Toggle visibility of all labels
+ * @returns {boolean} The new label visibility state
+ */
+ toggleLabels() {
+ const showLabels = this.themeManager.toggleLabels();
+
+ // Update node labels
+ this.dataManager.graphObjects.nodes.forEach((node, id) => {
+ if (node.labelObject) {
+ node.labelObject.visible = showLabels;
+ } else if (showLabels) {
+ // Create label if it doesn't exist yet
+ node.labelObject = this.createLabel(node);
+ }
+ });
+
+ // Update link labels - only show for active links connected to selected node
+ this.dataManager.graphObjects.links.forEach((link, id) => {
+ if (link.labelObject) {
+ // Only show if labels are enabled AND this is an active link
+ const isActive = this.dataManager.activeLinks.has(id);
+ link.labelObject.visible = showLabels && isActive;
+ } else if (showLabels && link.label) {
+ // Create label if it doesn't exist yet
+ const sourceNode = this.dataManager.graphObjects.nodes.get(link.sourceId);
+ const targetNode = this.dataManager.graphObjects.nodes.get(link.targetId);
+
+ if (sourceNode && targetNode) {
+ link.labelObject = this.createLinkLabel(link, sourceNode, targetNode);
+ // Only show if this is an active link
+ link.labelObject.visible = this.dataManager.activeLinks.has(id);
+ }
+ }
+ });
+
+ return showLabels;
+ }
+}
+
+/**
+ * SpatialGrid - Simple spatial partitioning for efficient queries
+ */
/**
 * SpatialGrid - Uniform-grid spatial partitioning for efficient
 * radius queries over positioned objects.
 *
 * Objects are bucketed into cubic cells keyed by their integer cell
 * coordinates ("x,y,z"); radius queries only visit the cells that can
 * intersect the query sphere.
 */
class SpatialGrid {
    constructor(cellSize = 200) {
        this.cellSize = cellSize; // Edge length of each cubic cell
        this.grid = new Map();    // cell key -> Set of objects in that cell
        this.objects = new Set(); // Every object currently indexed
    }

    /**
     * Compute the cell key for a world-space position.
     * @param {THREE.Vector3} position - The position to bucket
     * @returns {string} "x,y,z" integer cell coordinates
     */
    getCellKey(position) {
        const cx = Math.floor(position.x / this.cellSize);
        const cy = Math.floor(position.y / this.cellSize);
        const cz = Math.floor(position.z / this.cellSize);
        return [cx, cy, cz].join(',');
    }

    /**
     * Index an object by its current position.
     * Objects without an `object.position` are ignored.
     * @param {Object} object - Entry wrapping a positioned scene object
     */
    addObject(object) {
        const position = object.object && object.object.position;
        if (!position) return;

        const key = this.getCellKey(position);
        let bucket = this.grid.get(key);
        if (!bucket) {
            bucket = new Set();
            this.grid.set(key, bucket);
        }

        bucket.add(object);
        this.objects.add(object);
    }

    /**
     * Remove an object from the grid.
     * @param {Object} object - The object to remove
     */
    removeObject(object) {
        if (!object.object || !object.object.position) return;

        // The object may have moved since insertion, so purge it from every
        // cell rather than only its current one.
        for (const bucket of this.grid.values()) {
            bucket.delete(object);
        }
        this.objects.delete(object);

        // Drop now-empty cells so later queries/iterations stay cheap.
        for (const [key, bucket] of this.grid.entries()) {
            if (bucket.size === 0) {
                this.grid.delete(key);
            }
        }
    }

    /**
     * Find all indexed objects within `radius` of `position`.
     * @param {THREE.Vector3} position - The center of the query sphere
     * @param {number} radius - The search radius
     * @returns {Array} Objects whose positions lie within the radius
     */
    findNearbyObjects(position, radius) {
        // How many cells the sphere can span in each axis direction.
        const span = Math.ceil(radius / this.cellSize);
        const cx = Math.floor(position.x / this.cellSize);
        const cy = Math.floor(position.y / this.cellSize);
        const cz = Math.floor(position.z / this.cellSize);

        const maxDistSq = radius * radius;
        const hits = [];

        // Visit every cell that could intersect the query sphere, then do
        // an exact squared-distance check per candidate.
        for (let x = cx - span; x <= cx + span; x++) {
            for (let y = cy - span; y <= cy + span; y++) {
                for (let z = cz - span; z <= cz + span; z++) {
                    const bucket = this.grid.get(`${x},${y},${z}`);
                    if (!bucket) continue;

                    for (const candidate of bucket) {
                        const pos = candidate.object && candidate.object.position;
                        if (pos && position.distanceToSquared(pos) <= maxDistSq) {
                            hits.push(candidate);
                        }
                    }
                }
            }
        }

        return hits;
    }

    /**
     * Drop every object and every cell from the grid.
     */
    clear() {
        this.grid.clear();
        this.objects.clear();
    }

    /**
     * Total number of objects currently indexed.
     * @returns {number} The object count
     */
    size() {
        return this.objects.size;
    }
}
+
+/**
+ * NodeCloud - Manages efficient point cloud rendering for graph nodes
+ */
/**
 * NodeCloud - Renders all graph nodes as a single THREE.Points cloud so that
 * thousands of nodes cost one draw call instead of one mesh each.
 *
 * Positions/colors/sizes live in pre-allocated typed arrays that grow on
 * demand (see ensureCapacity). Node identity is tracked in nodeIndices
 * (id -> buffer slot) so individual nodes can be updated or swap-removed
 * without rebuilding the buffers.
 *
 * Fix vs. previous revision: removeNode now also deletes the node's entry
 * from nodeTypes — previously that map leaked one entry per removed node
 * (only clear() ever purged it).
 */
class NodeCloud {
    constructor(scene, themeManager) {
        this.scene = scene;
        this.sceneManager = null; // Will be set by the EventManager
        this.themeManager = themeManager;

        // Capacity tracking: buffers start with room for maxNodes entries.
        this.maxNodes = 1000; // Initial capacity
        this.nodeCount = 0;

        // Node tracking
        this.nodeIndices = new Map(); // Maps node IDs to their slot in the arrays
        this.nodeTypes = new Map();   // Maps node IDs to their types (for re-coloring)
        this.positions = null;        // Float32Array, 3 floats per node
        this.colors = null;           // Float32Array, 3 floats per node
        this.sizes = null;            // Float32Array, 1 float per node

        // Selection tracking (buffer slot indices, refreshed by updateColors)
        this.selectedIndices = new Set();
        this.neighborIndices = new Set();
        this.hoverIndex = -1;

        // Shared circular sprite texture used by every point
        this.baseTexture = createCircleTexture(64, 0xffffff);

        // Build geometry, material and the Points object
        this.initialize();
    }

    /**
     * Initialize buffers, geometry, shader material and the THREE.Points
     * object with the initial capacity, then add it to the scene.
     */
    initialize() {
        // Create buffer attributes with initial capacity
        this.positions = new Float32Array(this.maxNodes * 3);
        this.colors = new Float32Array(this.maxNodes * 3);
        this.sizes = new Float32Array(this.maxNodes);

        // Create buffer geometry
        this.geometry = new THREE.BufferGeometry();
        this.geometry.setAttribute('position', new THREE.BufferAttribute(this.positions, 3));
        this.geometry.setAttribute('color', new THREE.BufferAttribute(this.colors, 3));
        this.geometry.setAttribute('size', new THREE.BufferAttribute(this.sizes, 1));

        // Only render the slots that actually hold nodes
        this.geometry.setDrawRange(0, 0);

        // Per-point shader: size attenuates with camera distance; fragment
        // shader discards the transparent corners of the circle sprite.
        this.material = new THREE.ShaderMaterial({
            uniforms: {
                pointTexture: { value: this.baseTexture }
            },
            vertexShader: `
                precision highp float;
                attribute float size;
                attribute vec3 color;
                varying vec3 vColor;

                void main() {
                    vColor = color;
                    vec4 mvPosition = modelViewMatrix * vec4(position, 1.0);
                    gl_PointSize = size * (1200.0 / -mvPosition.z);
                    gl_Position = projectionMatrix * mvPosition;
                }
            `,
            fragmentShader: `
                precision highp float;
                uniform sampler2D pointTexture;
                varying vec3 vColor;

                void main() {
                    vec4 texColor = texture2D(pointTexture, gl_PointCoord);
                    if (texColor.a < 0.5) discard;
                    gl_FragColor = vec4(vColor, 1.0) * texColor;
                }
            `,
            transparent: true,
            depthWrite: false,
            blending: THREE.NormalBlending
        });

        // Create points
        this.points = new THREE.Points(this.geometry, this.material);
        this.points.frustumCulled = false; // Disable frustum culling
        this.points.renderOrder = 10;

        // Add to scene
        this.scene.add(this.points);
    }

    /**
     * Grow the typed arrays (and re-bind the buffer attributes) so they can
     * hold at least `requiredNodes` entries. No-op if capacity is sufficient.
     * @param {number} requiredNodes - Minimum number of node slots needed
     */
    ensureCapacity(requiredNodes) {
        if (requiredNodes <= this.maxNodes) return;

        // Calculate new capacity (1.5x required or 2x current, whichever is larger)
        const newCapacity = Math.max(Math.ceil(requiredNodes * 1.5), this.maxNodes * 2);

        // Create new arrays
        const newPositions = new Float32Array(newCapacity * 3);
        const newColors = new Float32Array(newCapacity * 3);
        const newSizes = new Float32Array(newCapacity);

        // Copy existing data
        newPositions.set(this.positions);
        newColors.set(this.colors);
        newSizes.set(this.sizes);

        // Update references
        this.positions = newPositions;
        this.colors = newColors;
        this.sizes = newSizes;
        this.maxNodes = newCapacity;

        // Re-bind the attributes to the new arrays (new BufferAttributes are
        // uploaded to the GPU automatically on next render)
        this.geometry.setAttribute('position', new THREE.BufferAttribute(this.positions, 3));
        this.geometry.setAttribute('color', new THREE.BufferAttribute(this.colors, 3));
        this.geometry.setAttribute('size', new THREE.BufferAttribute(this.sizes, 1));
    }

    /**
     * Add a node to the point cloud (idempotent per node ID).
     * @param {Object} node - Node data with id, optional x/y, and type
     * @returns {number} Slot index of the node in the point cloud
     */
    addNode(node) {
        // Get node ID
        const nodeId = node.id;

        // Check if this node is already in the cloud
        if (this.nodeIndices.has(nodeId)) {
            return this.nodeIndices.get(nodeId);
        }

        // Ensure we have enough capacity
        this.ensureCapacity(this.nodeCount + 1);

        // Add to the end of the arrays
        const index = this.nodeCount;
        const i3 = index * 3;

        // Set position (z is a fixed layer from the theme config)
        this.positions[i3] = node.x || 0;
        this.positions[i3 + 1] = node.y || 0;
        this.positions[i3 + 2] = this.themeManager.config.zPos.node;

        // Store node type for later reference
        this.nodeTypes.set(nodeId, node.type || 'simple');

        // Set color based on node type
        const color = new THREE.Color(this.themeManager.getNodeColor(node.type));
        this.colors[i3] = color.r;
        this.colors[i3 + 1] = color.g;
        this.colors[i3 + 2] = color.b;

        // Set size based on node type - use larger sizes to make selection easier
        const baseSize = this.themeManager.getNodeSize(node.type);
        this.sizes[index] = baseSize * 4; // Increase size to improve interaction

        // Track this node
        this.nodeIndices.set(nodeId, index);
        this.nodeCount++;

        // Update draw range
        this.geometry.setDrawRange(0, this.nodeCount);

        // Mark attributes as needing update
        this.geometry.attributes.position.needsUpdate = true;
        this.geometry.attributes.color.needsUpdate = true;
        this.geometry.attributes.size.needsUpdate = true;

        return index;
    }

    /**
     * Update a node's XY position (Z stays on the node layer).
     * @param {string} nodeId - ID of the node to update
     * @param {number} x - New X position
     * @param {number} y - New Y position
     */
    updateNodePosition(nodeId, x, y) {
        if (!this.nodeIndices.has(nodeId)) return;

        const index = this.nodeIndices.get(nodeId);
        const i3 = index * 3;

        this.positions[i3] = x;
        this.positions[i3 + 1] = y;

        // Mark position attribute as needing update
        this.geometry.attributes.position.needsUpdate = true;
    }

    /**
     * Update a single node's color based on its state.
     * @param {string} nodeId - ID of the node to update
     * @param {string} nodeType - Type of the node (falls back to stored type)
     * @param {string} state - State of the node (default, selected, neighbor, hover)
     */
    updateNodeColor(nodeId, nodeType, state = 'default') {
        if (!this.nodeIndices.has(nodeId)) return;

        // Store node type if provided
        if (nodeType) {
            this.nodeTypes.set(nodeId, nodeType);
        } else {
            // Use stored type if available
            nodeType = this.nodeTypes.get(nodeId) || 'simple';
        }

        const index = this.nodeIndices.get(nodeId);
        const i3 = index * 3;

        // Get color for this node state
        const color = new THREE.Color(this.themeManager.getNodeColor(nodeType, state));

        // Update color in buffer
        this.colors[i3] = color.r;
        this.colors[i3 + 1] = color.g;
        this.colors[i3 + 2] = color.b;

        // Mark colors attribute as needing update
        this.geometry.attributes.color.needsUpdate = true;
    }

    /**
     * Recompute colors for ALL nodes from the current selection state.
     * @param {string} selectedId - ID of the selected node (may be falsy)
     * @param {Set} neighborIds - Set of neighbor node IDs
     * @param {string} hoveredId - ID of the hovered node (may be falsy)
     */
    updateColors(selectedId, neighborIds, hoveredId) {
        // Reset tracking sets
        this.selectedIndices.clear();
        this.neighborIndices.clear();
        this.hoverIndex = -1;

        // Translate IDs to slot indices for faster per-node lookups below
        if (selectedId && this.nodeIndices.has(selectedId)) {
            this.selectedIndices.add(this.nodeIndices.get(selectedId));
        }

        neighborIds.forEach(id => {
            if (this.nodeIndices.has(id)) {
                this.neighborIndices.add(this.nodeIndices.get(id));
            }
        });

        if (hoveredId && this.nodeIndices.has(hoveredId)) {
            this.hoverIndex = this.nodeIndices.get(hoveredId);
        }

        // Update all node colors based on state
        for (const [nodeId, index] of this.nodeIndices.entries()) {
            const i3 = index * 3;
            let state = 'default';

            // Precedence: hover > selected > neighbor > default
            if (index === this.hoverIndex) {
                state = 'hover';
            } else if (this.selectedIndices.has(index)) {
                state = 'selected';
            } else if (this.neighborIndices.has(index)) {
                state = 'neighbor';
            }

            // Get node type from our stored map
            const nodeType = this.nodeTypes.get(nodeId) || 'simple';

            // Get color for this state
            const color = new THREE.Color(this.themeManager.getNodeColor(nodeType, state));

            // Update color
            this.colors[i3] = color.r;
            this.colors[i3 + 1] = color.g;
            this.colors[i3 + 2] = color.b;
        }

        // Mark attributes as needing update
        this.geometry.attributes.color.needsUpdate = true;
    }

    /**
     * Find the node closest to the mouse in screen space.
     * @param {THREE.Raycaster} raycaster - The raycaster (unused directly;
     *        camera/mouse come from the scene manager — kept for API compat)
     * @param {number} threshold - Max screen-space distance to count as a hit
     * @returns {string|null} ID of the closest node or null if none found
     */
    findClosestNode(raycaster, threshold = 0.05) {
        if (this.nodeCount === 0 || !this.positions) return null;

        // Get the camera from the scene manager or from the global controller
        let camera = null;
        let mousePosition = null;

        if (this.sceneManager) {
            camera = this.sceneManager.camera;
            mousePosition = this.sceneManager.mouse; // Normalized device coords
        } else if (window.lastGraphController && window.lastGraphController.sceneManager) {
            camera = window.lastGraphController.sceneManager.camera;
            mousePosition = window.lastGraphController.sceneManager.mouse;
        }

        // If we can't get a camera or mouse position, we can't continue
        if (!camera || !mousePosition) return null;

        // Find the closest node based on screen space distance
        let closestDistance = Infinity;
        let closestNodeId = null;

        // Reusable vector to avoid per-node allocations in the scan below
        const nodePos = new THREE.Vector3();

        // Check each node
        for (const [nodeId, index] of this.nodeIndices.entries()) {
            const i3 = index * 3;

            // Get node position and project to screen space
            nodePos.set(
                this.positions[i3],
                this.positions[i3 + 1],
                this.positions[i3 + 2]
            );
            const screenPos = nodePos.project(camera);

            // Calculate 2D distance in screen space between mouse and node
            const dx = screenPos.x - mousePosition.x;
            const dy = screenPos.y - mousePosition.y;
            const distance = Math.sqrt(dx * dx + dy * dy);

            // Get node size and adjust threshold based on size
            const nodeSize = this.sizes[index];
            const adjustedThreshold = threshold * (1 + (nodeSize / 10));

            // If this node is closer than the current closest and within threshold, update
            if (distance < closestDistance && distance < adjustedThreshold) {
                closestDistance = distance;
                closestNodeId = nodeId;
            }
        }

        return closestNodeId;
    }

    /**
     * Remove a node from the point cloud using swap-with-last so the buffers
     * stay densely packed.
     * NOTE(review): selectedIndices/neighborIndices/hoverIndex may hold a
     * stale slot index after the swap until the next updateColors() call.
     * @param {string} nodeId - ID of the node to remove
     */
    removeNode(nodeId) {
        if (!this.nodeIndices.has(nodeId)) return;

        const indexToRemove = this.nodeIndices.get(nodeId);

        // Only perform the swap if we're not removing the last slot
        if (indexToRemove !== this.nodeCount - 1) {
            // Move the last node into the vacated slot
            const lastIndex = this.nodeCount - 1;
            const lastI3 = lastIndex * 3;
            const removeI3 = indexToRemove * 3;

            // Copy position
            this.positions[removeI3] = this.positions[lastI3];
            this.positions[removeI3 + 1] = this.positions[lastI3 + 1];
            this.positions[removeI3 + 2] = this.positions[lastI3 + 2];

            // Copy color
            this.colors[removeI3] = this.colors[lastI3];
            this.colors[removeI3 + 1] = this.colors[lastI3 + 1];
            this.colors[removeI3 + 2] = this.colors[lastI3 + 2];

            // Copy size
            this.sizes[indexToRemove] = this.sizes[lastIndex];

            // Find which node was at the last position (linear scan)
            let lastNodeId = null;
            for (const [id, index] of this.nodeIndices.entries()) {
                if (index === lastIndex) {
                    lastNodeId = id;
                    break;
                }
            }

            // Update the moved node's index
            if (lastNodeId) {
                this.nodeIndices.set(lastNodeId, indexToRemove);
            }
        }

        // Remove the node from tracking. Also drop its cached type — the
        // previous revision leaked a nodeTypes entry per removed node.
        this.nodeIndices.delete(nodeId);
        this.nodeTypes.delete(nodeId);
        this.nodeCount--;

        // Update draw range
        this.geometry.setDrawRange(0, this.nodeCount);

        // Mark attributes as needing update
        this.geometry.attributes.position.needsUpdate = true;
        this.geometry.attributes.color.needsUpdate = true;
        this.geometry.attributes.size.needsUpdate = true;
    }

    /**
     * Clear all nodes from the point cloud (buffers are kept allocated).
     */
    clear() {
        this.nodeIndices.clear();
        this.nodeTypes.clear();
        this.nodeCount = 0;
        this.geometry.setDrawRange(0, 0);

        // Reset state tracking
        this.selectedIndices.clear();
        this.neighborIndices.clear();
        this.hoverIndex = -1;
    }

    /**
     * Bulk-sync buffer positions from simulation node objects.
     * @param {Map} nodes - Map of nodes with current x/y positions
     */
    updateAllPositions(nodes) {
        // Update all node positions from data
        for (const [nodeId, node] of nodes.entries()) {
            if (this.nodeIndices.has(nodeId) && node.x !== undefined && node.y !== undefined) {
                const index = this.nodeIndices.get(nodeId);
                const i3 = index * 3;

                this.positions[i3] = node.x;
                this.positions[i3 + 1] = node.y;
            }
        }

        // Mark position attribute as needing update
        this.geometry.attributes.position.needsUpdate = true;
    }
}
+
+/**
+ * SimulationManager - Manages the D3 force-directed simulation
+ */
/**
 * SimulationManager - Manages the D3 force-directed simulation that lays out
 * the graph, and keeps an optional spatial grid in sync for radius queries.
 *
 * Fixes vs. previous revision:
 * - getNodesInRadius now guards against a missing/disabled spatial grid
 *   (every sibling method had the guard; this one could throw).
 * - this.graphController is explicitly initialized (it is read in
 *   updatePositions but was never assigned here).
 * - Comments around alphaDecay corrected: 0.0228 IS d3's default
 *   (~2.3% cooling per tick), not a "slower" decay.
 */
class SimulationManager {
    constructor(dataManager, graphObjectManager, themeManager) {
        this.dataManager = dataManager;
        this.graphObjectManager = graphObjectManager;
        this.themeManager = themeManager;

        // Set externally (by the graph controller) to enable NodeCloud bulk
        // updates in updatePositions(); null until wired up.
        this.graphController = null;

        this.simulation = null;
        this.isRunning = false;

        // Initialize spatial partitioning
        this.spatialGrid = null;
        this.useSpatialIndex = true;
        this.gridUpdateInterval = 30; // Rebuild grid every 30 ticks
        this.gridUpdateCounter = 0;
        this.gridCellSize = 200; // Size of each grid cell

        // Initialize the simulation
        this.initSimulation();
        this.initSpatialIndex();
    }

    /**
     * Initialize the D3 force simulation with link, charge, center,
     * collision and weak positional forces.
     */
    initSimulation() {
        // Connection counts drive per-link distance and per-node collision radius
        const connectionCounts = this.calculateConnectionCounts();

        // Link distance grows logarithmically with the busier endpoint's degree
        const linkDistance = link => {
            const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
            const targetId = typeof link.target === 'object' ? link.target.id : link.target;

            const sourceConnections = connectionCounts.get(sourceId) || 0;
            const targetConnections = connectionCounts.get(targetId) || 0;

            // Scale distance based on connection count
            const baseDistance = this.themeManager.config.defaultDistance;
            const connectionFactor = Math.max(sourceConnections, targetConnections);

            return baseDistance * (1 + Math.log(1 + connectionFactor * 0.2));
        };

        // Collision radius grows for highly connected (hub) nodes
        const collisionRadius = node => {
            const connections = connectionCounts.get(node.id) || 0;
            const baseRadius = 15;

            // Increase collision radius for highly connected nodes
            if (connections > this.themeManager.config.highConnectionThreshold) {
                return baseRadius * (1 + Math.log(connections) * 0.1);
            }
            return baseRadius;
        };

        // For monitoring simulation progress
        this.tickCounter = 0;
        this.lastLogTime = 0;

        // Create the simulation with all forces
        this.simulation = d3.forceSimulation()
            .force('link', d3.forceLink().id(d => d.id).distance(linkDistance))
            .force('charge', d3.forceManyBody().strength(-15))
            .force('center', d3.forceCenter(0, 0))
            .force('collision', d3.forceCollide().radius(collisionRadius))
            .force('x', d3.forceX().strength(0.001))
            .force('y', d3.forceY().strength(0.005))
            .on('tick', () => {
                this.tickCounter++;
                this.onSimulationTick();
                this.monitorSimulationProgress();
            })
            .on('end', () => {
                console.log('Simulation reached equilibrium!');
                console.log('Final alpha:', this.simulation.alpha());
                console.log('Alpha min:', this.simulation.alphaMin());
                console.log('Alpha decay:', this.simulation.alphaDecay());
                console.log('Node count:', this.simulation.nodes().length);
                console.log('Total ticks:', this.tickCounter);
                this.isRunning = false;
            });

        // Alpha tuning: keep d3's default decay (0.0228 ≈ 2.3% cooling per
        // tick) and default stop threshold (alphaMin 0.001), but lower the
        // velocity decay (default 0.4 -> 0.1) so nodes keep more momentum
        // and the layout spreads out further before settling.
        this.simulation.alphaDecay(0.0228);
        this.simulation.alphaMin(0.001);
        this.simulation.velocityDecay(0.1);
    }

    /**
     * Count how many links touch each node.
     * @returns {Map} Map of node IDs to connection counts
     */
    calculateConnectionCounts() {
        const connectionCounts = new Map();

        // Count connections for each node (both endpoints of every link)
        this.dataManager.graphData.links.forEach(link => {
            const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
            const targetId = typeof link.target === 'object' ? link.target.id : link.target;

            connectionCounts.set(sourceId, (connectionCounts.get(sourceId) || 0) + 1);
            connectionCounts.set(targetId, (connectionCounts.get(targetId) || 0) + 1);
        });

        return connectionCounts;
    }

    /**
     * Feed the currently visible nodes/links into the simulation.
     * @param {boolean} restart - Whether to reheat and restart the simulation
     */
    updateSimulation(restart = true) {
        if (!this.simulation) return;

        // Get visible nodes and links from data manager
        const visibleNodes = Array.from(this.dataManager.graphObjects.nodes.values());
        const visibleLinks = Array.from(this.dataManager.graphObjects.links.values());

        console.log(`Updating simulation with ${visibleNodes.length} nodes and ${visibleLinks.length} links`);

        // Update nodes and links in the simulation
        this.simulation.nodes(visibleNodes);
        this.simulation.force('link').links(visibleLinks);

        // Restart simulation if needed (and physics is enabled)
        if (restart && this.themeManager.config.physicsEnabled) {
            // Reset tick counter and timing
            this.tickCounter = 0;
            this.lastLogTime = Date.now();

            // Set a higher alpha to ensure thorough exploration of layout space
            const startingAlpha = 1.0;
            console.log(`Starting simulation with alpha=${startingAlpha}, alphaMin=${this.simulation.alphaMin()}, alphaDecay=${this.simulation.alphaDecay()}`);
            this.simulation.alpha(startingAlpha).restart();
            this.isRunning = true;
        } else {
            console.log("Simulation not restarted (either restart=false or physics is disabled)");
            this.simulation.alpha(0);
            this.isRunning = false;
        }
    }

    /**
     * Toggle physics simulation on/off.
     * @returns {boolean} The new physics state
     */
    togglePhysics() {
        const physicsEnabled = this.themeManager.togglePhysics();

        console.log(`Physics simulation ${physicsEnabled ? 'enabled' : 'disabled'}`);

        // Use updateSimulation to properly handle the physics state
        this.updateSimulation(physicsEnabled);

        return physicsEnabled;
    }

    /**
     * Handle force simulation tick events: push positions to the scene and
     * periodically refresh the spatial index.
     */
    onSimulationTick() {
        this.updatePositions();

        // Update spatial grid periodically
        if (this.useSpatialIndex) {
            this.updateSpatialGrid();
        }
    }

    /**
     * Push simulated node/link positions into the visual objects.
     * NOTE(review): nodes are only synced when a graphController with a
     * nodeCloud is attached; links are always synced — confirm that a
     * NodeCloud is guaranteed to exist before the first tick.
     */
    updatePositions() {
        // Check if we have a NodeCloud available
        if (this.graphController && this.graphController.nodeCloud) {
            // Bulk update the NodeCloud for better performance
            this.graphController.nodeCloud.updateAllPositions(this.dataManager.graphObjects.nodes);

            // Update virtual objects for compatibility with other systems
            this.dataManager.graphObjects.nodes.forEach(node => {
                if (node.object && node.object.position) {
                    node.object.position.x = node.x || 0;
                    node.object.position.y = node.y || 0;
                }

                // Update labels separately
                if (node.labelObject) {
                    this.graphObjectManager.updateNodePosition(node);
                }
            });
        }

        // Update the position of links in the scene
        this.dataManager.graphObjects.links.forEach((link) => {
            this.graphObjectManager.updateLinkPosition(link);
        });
    }

    /**
     * Log simulation progress every 100 ticks (avoids console spam).
     */
    monitorSimulationProgress() {
        if (this.tickCounter % 100 === 0) {
            const now = Date.now();
            const timeSinceLastLog = now - this.lastLogTime;
            this.lastLogTime = now;

            // Only print if we're still running
            if (this.isRunning) {
                const currentAlpha = this.simulation.alpha();
                console.log(`Simulation progress: tick=${this.tickCounter}, alpha=${currentAlpha.toFixed(6)}, ticks/second=${(100 / (timeSinceLastLog / 1000)).toFixed(1)}`);
            }
        }
    }

    /**
     * Initialize the spatial index (uniform grid).
     */
    initSpatialIndex() {
        this.spatialGrid = new SpatialGrid(this.gridCellSize);
    }

    /**
     * Add a node to the spatial grid (no-op when indexing is disabled).
     * @param {Object} node - The node to add
     */
    addNodeToSpatialGrid(node) {
        if (!this.useSpatialIndex || !this.spatialGrid || !node.object) return;

        // Add node to the grid
        this.spatialGrid.addObject(node);
    }

    /**
     * Rebuild the spatial grid from current node positions, throttled to
     * once every gridUpdateInterval ticks.
     */
    updateSpatialGrid() {
        if (!this.useSpatialIndex || !this.spatialGrid) return;

        // Only update periodically for performance
        this.gridUpdateCounter++;
        if (this.gridUpdateCounter < this.gridUpdateInterval) return;
        this.gridUpdateCounter = 0;

        // Rebuild the grid
        this.spatialGrid.clear();

        // Add all current nodes to the grid
        this.dataManager.graphObjects.nodes.forEach(node => {
            if (node.object) {
                this.spatialGrid.addObject(node);
            }
        });
    }

    /**
     * Get nodes within a specific radius of a position.
     * @param {THREE.Vector3} position - The center position
     * @param {number} radius - The radius to search within
     * @returns {Array} Array of nodes within the radius (empty when the
     *          spatial index is disabled or not yet initialized)
     */
    getNodesInRadius(position, radius) {
        // Guard like the other grid methods do — previously this could throw
        // if the grid was disabled or not yet initialized.
        if (!this.useSpatialIndex || !this.spatialGrid) return [];

        // Use spatial grid for efficient spatial query
        return this.spatialGrid.findNearbyObjects(position, radius);
    }
}
+
+/**
+ * UIManager - Handles UI elements and user interaction
+ */
+class UIManager {
+ constructor(container, dataManager, graphController) {
+ this.container = container;
+ this.dataManager = dataManager;
+ this.graphController = graphController;
+
+ // DOM elements
+ this.nodeCountEl = document.getElementById('node-count');
+ this.linkCountEl = document.getElementById('link-count');
+ this.loadingEl = document.getElementById('loading');
+ this.searchInput = document.getElementById('search-input');
+ this.initialMessageEl = null;
+ this.nodeInfoPanel = null;
+
+ // Search state
+ this.previousSearchValue = '';
+ this.isUpdatingAutocomplete = false;
+
+ // Autocomplete state
+ this.autocompleteList = null;
+ this.autocompleteSuggestions = [];
+ this.autocompleteSelectedIndex = -1;
+
+ // Initialize UI elements
+ this.createAutocompleteUI();
+ this.createNodeInfoPanel();
+ this.setupEventListeners();
+ }
+
+ /**
+ * Set up event listeners for UI controls
+ */
    /**
     * Set up event listeners for UI controls: the labels/reset/load-all
     * buttons, the debounced search input with autocomplete, and a global
     * click handler that dismisses the autocomplete dropdown.
     */
    setupEventListeners() {
        // Button event listeners
        const labelsBtn = document.getElementById('toggle-labels-btn');
        if (labelsBtn) {
            // Set initial state based on ThemeManager config
            // The initial state should be true by default
            labelsBtn.classList.add('active');

            // Add click handler
            labelsBtn.addEventListener('click', () => {
                const showLabels = this.graphController.toggleLabels();
                // Toggle active class based on the returned state
                labelsBtn.classList.toggle('active', showLabels);
            });
        }

        // Reset button: also clears the search box and any open autocomplete
        document.getElementById('reset-btn')?.addEventListener('click', () => {
            this.graphController.resetView();
            this.hideAutocomplete();
            this.previousSearchValue = '';
            this.searchInput.value = '';
        });

        // Add click handler for the Load All Nodes button
        document.getElementById('load-all-btn')?.addEventListener('click', () => {
            this.graphController.loadAllNodes();
        });

        // Set up search input with debounced handling
        if (this.searchInput) {
            let searchTimeout = null;

            // Input event for search text changes
            this.searchInput.addEventListener('input', (e) => {
                // Reentrancy guard: ignore input events we caused ourselves
                if (this.isUpdatingAutocomplete) return;

                const currentValue = e.target.value.trim();
                if (currentValue === this.previousSearchValue) return;

                this.previousSearchValue = currentValue;
                clearTimeout(searchTimeout);

                if (currentValue.length > 0) {
                    // NOTE: the guard is set now but only cleared inside the
                    // 250ms debounce callback, suppressing input events in
                    // between — confirm this window is intentional
                    this.isUpdatingAutocomplete = true;
                    searchTimeout = setTimeout(() => {
                        this.updateAutocompleteSuggestions(currentValue);
                        this.isUpdatingAutocomplete = false;
                    }, 250);
                } else {
                    this.hideAutocomplete();
                }
            });

            // Keyboard navigation in autocomplete dropdown
            this.searchInput.addEventListener('keydown', (e) => {
                if (['ArrowUp', 'ArrowDown', 'Enter', 'Escape'].includes(e.key)) {
                    this.handleAutocompleteKeydown(e);
                }
            });

            // Focus handler to show autocomplete (immediate, not debounced)
            this.searchInput.addEventListener('focus', () => {
                if (this.isUpdatingAutocomplete) return;

                const currentValue = this.searchInput.value.trim();
                if (currentValue.length > 0) {
                    this.previousSearchValue = currentValue;
                    this.isUpdatingAutocomplete = true;
                    this.updateAutocompleteSuggestions(currentValue);
                    this.isUpdatingAutocomplete = false;
                }
            });
        }

        // Hide autocomplete when clicking outside
        document.addEventListener('click', (e) => {
            if (this.autocompleteList && e.target !== this.searchInput && !this.autocompleteList.contains(e.target)) {
                this.hideAutocomplete();
            }
        });
    }
+
+ /**
+ * Create the autocomplete UI elements
+ */
+ createAutocompleteUI() {
+ if (!this.searchInput) return;
+
+ // Create autocomplete container if it doesn't exist
+ if (!this.autocompleteList) {
+ this.autocompleteList = document.createElement('div');
+ this.autocompleteList.className = 'autocomplete-items';
+ this.autocompleteList.style.display = 'none';
+ this.autocompleteList.style.position = 'absolute';
+ this.autocompleteList.style.zIndex = '999';
+ this.autocompleteList.style.maxHeight = '300px';
+ this.autocompleteList.style.overflowY = 'auto';
+ this.autocompleteList.style.width = '100%';
+ this.autocompleteList.style.background = '#fff';
+ this.autocompleteList.style.border = '1px solid #ddd';
+ this.autocompleteList.style.borderRadius = '0 0 4px 4px';
+ this.autocompleteList.style.boxShadow = '0 2px 4px rgba(0,0,0,0.2)';
+
+ // Append to parent container
+ const searchContainer = this.searchInput.parentNode;
+ searchContainer.appendChild(this.autocompleteList);
+ }
+ }
+
+ /**
+ * Update autocomplete suggestions based on search term
+ * @param {string} searchTerm - The current search term
+ */
+ updateAutocompleteSuggestions(searchTerm) {
+ if (!this.autocompleteList || !searchTerm) {
+ this.hideAutocomplete();
+ return;
+ }
+
+ // Clear previous suggestions
+ this.autocompleteList.innerHTML = '';
+ this.autocompleteSuggestions = [];
+ this.autocompleteSelectedIndex = -1;
+
+ const maxSuggestions = 10;
+ const searchLower = searchTerm.toLowerCase();
+
+ // Get all nodes that match the search term
+ const matchingNodes = this.dataManager.graphData.nodes
+ .filter(node => (
+ (node.id && node.id.toLowerCase().includes(searchLower)) ||
+ (node.label && node.label.toLowerCase().includes(searchLower))
+ ))
+ .sort((a, b) => {
+ // Prioritize exact matches and matches at the beginning
+ const aId = a.id.toLowerCase();
+ const bId = b.id.toLowerCase();
+ const aLabel = (a.label || '').toLowerCase();
+ const bLabel = (b.label || '').toLowerCase();
+
+ // Check for exact matches first
+ if (aId === searchLower || aLabel === searchLower) return -1;
+ if (bId === searchLower || bLabel === searchLower) return 1;
+
+ // Then check for starting with search term
+ if (aId.startsWith(searchLower) || aLabel.startsWith(searchLower)) return -1;
+ if (bId.startsWith(searchLower) || bLabel.startsWith(searchLower)) return 1;
+
+ // Fallback to alphabetical
+ return aId.localeCompare(bId);
+ })
+ .slice(0, maxSuggestions);
+
+ if (matchingNodes.length === 0) {
+ this.hideAutocomplete();
+ return;
+ }
+
+ // Save suggestions for keyboard navigation
+ this.autocompleteSuggestions = matchingNodes;
+
+ // Create suggestion items
+ matchingNodes.forEach((node, index) => {
+ const item = document.createElement('div');
+ item.className = 'autocomplete-item';
+ item.style.padding = '8px 12px';
+ item.style.cursor = 'pointer';
+ item.style.borderBottom = '1px solid #f4f4f4';
+
+ // Highlight matching parts
+ const displayText = node.label || node.id;
+ const parts = displayText.split(new RegExp(`(${searchTerm})`, 'i'));
+
+ parts.forEach(part => {
+ const span = document.createElement('span');
+ span.textContent = part;
+ if (part.toLowerCase() === searchTerm.toLowerCase()) {
+ span.style.fontWeight = 'bold';
+ span.style.backgroundColor = 'rgba(66, 133, 244, 0.1)';
+ }
+ item.appendChild(span);
+ });
+
+ // Add node type indicator
+ const typeIndicator = document.createElement('span');
+ typeIndicator.style.marginLeft = '8px';
+ typeIndicator.style.padding = '2px 6px';
+ typeIndicator.style.borderRadius = '10px';
+ typeIndicator.style.fontSize = '0.8em';
+
+ // Different styling for different node types
+ if (node.type === 'simple') {
+ typeIndicator.textContent = 'item';
+ typeIndicator.style.backgroundColor = 'rgba(100, 149, 237, 0.2)';
+ typeIndicator.style.color = 'rgb(50, 90, 160)';
+ } else {
+ typeIndicator.textContent = 'collection';
+ typeIndicator.style.backgroundColor = 'rgba(240, 128, 128, 0.2)';
+ typeIndicator.style.color = 'rgb(180, 70, 70)';
+ }
+
+ item.appendChild(typeIndicator);
+
+ // Add hover effect
+ item.addEventListener('mouseover', () => {
+ this.autocompleteSelectedIndex = index;
+ this.highlightSelectedSuggestion();
+ });
+
+ // Add click handler
+ item.addEventListener('click', () => {
+ this.searchInput.value = node.id;
+ this.hideAutocomplete();
+ this.graphController.searchNodes(node.id);
+ });
+
+ this.autocompleteList.appendChild(item);
+ });
+
+ // Show the autocomplete list
+ this.autocompleteList.style.display = 'block';
+ }
+
+ /**
+ * Handle keyboard navigation in autocomplete list
+ * @param {KeyboardEvent} event - The keyboard event
+ */
+ handleAutocompleteKeydown(event) {
+ // If no suggestions or hidden, do nothing special except for Enter
+ if (this.autocompleteSuggestions.length === 0 ||
+ this.autocompleteList.style.display === 'none') {
+ if (event.key === 'Enter') {
+ const searchTerm = this.searchInput.value.trim();
+ if (searchTerm) {
+ this.graphController.searchNodes(searchTerm);
+ this.hideAutocomplete();
+ }
+ }
+ return;
+ }
+
+ switch (event.key) {
+ case 'ArrowDown':
+ // Move selection down
+ event.preventDefault();
+ this.autocompleteSelectedIndex = Math.min(
+ this.autocompleteSelectedIndex + 1,
+ this.autocompleteSuggestions.length - 1
+ );
+ this.highlightSelectedSuggestion();
+ break;
+
+ case 'ArrowUp':
+ // Move selection up
+ event.preventDefault();
+ this.autocompleteSelectedIndex = Math.max(this.autocompleteSelectedIndex - 1, -1);
+ this.highlightSelectedSuggestion();
+ break;
+
+ case 'Enter':
+ // Select current suggestion or search with current text
+ event.preventDefault();
+ if (this.autocompleteSelectedIndex >= 0) {
+ const selectedNode = this.autocompleteSuggestions[this.autocompleteSelectedIndex];
+ this.searchInput.value = selectedNode.id;
+ this.graphController.searchNodes(selectedNode.id);
+ } else {
+ const searchTerm = this.searchInput.value.trim();
+ if (searchTerm) {
+ this.graphController.searchNodes(searchTerm);
+ }
+ }
+ this.hideAutocomplete();
+ break;
+
+ case 'Escape':
+ // Hide autocomplete
+ event.preventDefault();
+ this.hideAutocomplete();
+ break;
+ }
+ }
+
+ /**
+ * Highlight the currently selected suggestion item
+ */
+ highlightSelectedSuggestion() {
+ // Remove highlight from all items
+ const items = this.autocompleteList.querySelectorAll('.autocomplete-item');
+ items.forEach(item => {
+ item.style.backgroundColor = '';
+ });
+
+ // Highlight selected item if any
+ if (this.autocompleteSelectedIndex >= 0 && this.autocompleteSelectedIndex < items.length) {
+ const selectedItem = items[this.autocompleteSelectedIndex];
+ selectedItem.style.backgroundColor = 'rgba(66, 133, 244, 0.1)';
+
+ // Scroll into view if needed
+ if (selectedItem.offsetTop < this.autocompleteList.scrollTop) {
+ this.autocompleteList.scrollTop = selectedItem.offsetTop;
+ } else if (selectedItem.offsetTop + selectedItem.offsetHeight >
+ this.autocompleteList.scrollTop + this.autocompleteList.offsetHeight) {
+ this.autocompleteList.scrollTop =
+ selectedItem.offsetTop + selectedItem.offsetHeight - this.autocompleteList.offsetHeight;
+ }
+ }
+ }
+
+ /**
+ * Hide the autocomplete list
+ */
+ hideAutocomplete() {
+ if (this.autocompleteList) {
+ this.autocompleteList.style.display = 'none';
+ this.autocompleteSelectedIndex = -1;
+ }
+ }
+
+ /**
+ * Show or hide the loading indicator
+ * @param {boolean} show - Whether to show or hide the loading indicator
+ */
+ showLoading(show) {
+ if (this.loadingEl) {
+ this.loadingEl.style.display = show ? 'block' : 'none';
+ }
+ }
+
+ /**
+ * Show an initial message in the graph area
+ * @param {string} message - The message to display
+ */
+ showInitialMessage(message) {
+ // Create or update the message element
+ if (!this.initialMessageEl) {
+ this.initialMessageEl = document.createElement('div');
+ this.initialMessageEl.style.position = 'absolute';
+ this.initialMessageEl.style.top = '50%';
+ this.initialMessageEl.style.left = '50%';
+ this.initialMessageEl.style.transform = 'translate(-50%, -50%)';
+ this.initialMessageEl.style.background = 'rgba(0, 0, 0, 0.7)';
+ this.initialMessageEl.style.color = '#ffffff';
+ this.initialMessageEl.style.padding = '20px';
+ this.initialMessageEl.style.borderRadius = '8px';
+ this.initialMessageEl.style.maxWidth = '80%';
+ this.initialMessageEl.style.textAlign = 'center';
+ this.initialMessageEl.style.fontSize = '18px';
+ this.container.appendChild(this.initialMessageEl);
+ }
+
+ this.initialMessageEl.textContent = message;
+ this.initialMessageEl.style.display = 'block';
+ }
+
+ /**
+ * Hide the initial message
+ */
+ hideInitialMessage() {
+ if (this.initialMessageEl) {
+ this.initialMessageEl.style.display = 'none';
+ }
+ }
+
+ /**
+ * Show error message
+ * @param {string} message - The error message to display
+ */
+ showError(message) {
+ console.error(message);
+ this.showInitialMessage(message);
+ }
+
+ /**
+ * Update statistics display
+ */
+ updateStats() {
+ if (this.nodeCountEl) {
+ this.nodeCountEl.textContent = this.dataManager.graphData.nodes.length;
+ }
+ if (this.linkCountEl) {
+ this.linkCountEl.textContent = this.dataManager.graphData.links.length;
+ }
+ }
+
+ /**
+ * Create the node info panel
+ */
+ createNodeInfoPanel() {
+ if (!this.nodeInfoPanel) {
+ this.nodeInfoPanel = document.createElement('div');
+ this.nodeInfoPanel.className = 'node-info-panel';
+ this.nodeInfoPanel.style.display = 'none';
+ this.container.appendChild(this.nodeInfoPanel);
+ }
+ }
+
/**
 * Show node information in the info panel.
 *
 * Populates the panel with the node's label, truncated id, type and
 * connection count, an optional "Get Data" button (composite nodes whose
 * id does not start with "data/"), the node's raw `data` payload if any,
 * and a clickable list of up to 50 connected nodes.
 *
 * NOTE(review): the markup inside the template literals below appears to
 * have been stripped by the diff/extraction tooling — no HTML tags remain,
 * yet the code later queries '.connected-node-item' elements and a
 * '#get-node-data' button that these strings must have produced. Recover
 * the original template HTML from version control before editing this
 * method; the surrounding logic is preserved here byte-for-byte.
 *
 * @param {string} nodeId - The ID of the node to display info for
 */
showNodeInfo(nodeId) {
  if (!this.nodeInfoPanel) this.createNodeInfoPanel();

  const node = this.dataManager.graphObjects.nodes.get(nodeId);
  if (!node) return;

  // Store the current node ID for the Get Data button
  this.currentNodeId = nodeId;

  // Get the node's connections
  const connectedLinks = this.dataManager.getConnectedLinks(nodeId);
  const connectionCount = connectedLinks.length;

  // Get any additional properties (fall back to 'Unknown' / the raw id)
  const nodeType = node.type || 'Unknown';
  const nodeLabel = node.label || nodeId;

  // Build HTML content (header: label, truncated id, type, connections)
  let html = `

${nodeLabel}

ID: ${this.truncateWithEllipsis(nodeId)}

Type: ${nodeType}

Connections: ${connectionCount}
`;

  // Only show the Get Data button for composite nodes where the ID doesn't start with "data/"
  if (nodeType === 'composite' && !nodeId.startsWith('data/')) {
    html += `


`;
  }

  // Add any other properties that exist
  if (node.data) {
    html += `
Data: ${JSON.stringify(node.data)}
`;
  }

  // Add information about connected nodes
  if (connectionCount > 0) {
    html += `
Connected Nodes:
`;
    html += `
`;

    // Get info about connected nodes
    const connectedNodes = new Map(); // Use Map to avoid duplicates

    connectedLinks.forEach(link => {
      // Links may hold resolved node objects or plain id strings
      const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
      const targetId = typeof link.target === 'object' ? link.target.id : link.target;

      // Get the ID of the connected node (not the current node)
      const connectedNodeId = sourceId === nodeId ? targetId : sourceId;

      // Store relationship type if available
      const relationship = link.label || '';

      // Get the connected node
      const connectedNode = this.dataManager.graphObjects.nodes.get(connectedNodeId);
      if (connectedNode && !connectedNodes.has(connectedNodeId)) {
        connectedNodes.set(connectedNodeId, {
          node: connectedNode,
          relationship: relationship
        });
      }
    });

    // Display connected nodes (limited to avoid overwhelming the panel)
    const maxNodesToShow = 50;
    let nodeCount = 0;

    connectedNodes.forEach((data, connectedNodeId) => {
      if (nodeCount < maxNodesToShow) {
        const connectedNode = data.node;
        const relationship = data.relationship;

        const truncatedId = this.truncateWithEllipsis(connectedNodeId);
        // These shadow the outer nodeLabel/nodeType on purpose: they
        // describe the connected node, not the selected one.
        const nodeLabel = connectedNode.label || truncatedId;
        const nodeType = connectedNode.type || 'Unknown';

        html += `
`;
        html += `${nodeLabel}`;
        html += `
${nodeType}
`;

        if (relationship) {
          html += `
Relationship: ${relationship}
`;
        }

        html += `
`;

        nodeCount++;
      }
    });

    // If there are more nodes than we're showing
    if (connectedNodes.size > maxNodesToShow) {
      html += `
...and ${connectedNodes.size - maxNodesToShow} more
`;
    }

    html += `
`;
  }

  // Set content and show panel
  this.nodeInfoPanel.innerHTML = html;
  this.nodeInfoPanel.style.display = 'block';

  // Add event listener for the Get Data button (recreated with each
  // innerHTML assignment, so no duplicate listeners accumulate)
  const getDataBtn = document.getElementById('get-node-data');
  if (getDataBtn) {
    getDataBtn.addEventListener('click', () => this.fetchNodeData(nodeId));
  }

  // Add event listeners to connected node items
  const nodeItems = this.nodeInfoPanel.querySelectorAll('.connected-node-item');
  nodeItems.forEach(item => {
    // Hover effect
    item.addEventListener('mouseover', () => {
      item.style.backgroundColor = '#f5f5f5';
      item.style.borderLeftColor = '#4D90FE';
    });

    item.addEventListener('mouseout', () => {
      item.style.backgroundColor = '';
      item.style.borderLeftColor = '#eee';
    });

    // Click to select the node
    item.addEventListener('click', () => {
      const clickedNodeId = item.getAttribute('data-node-id');
      if (clickedNodeId && this.graphController.eventManager) {
        this.graphController.eventManager.selectNode(clickedNodeId);
        this.graphController.eventManager.focusOnNode(clickedNodeId);
      }
    });
  });
}
+
/**
 * Fetch data for a specific node from the server.
 *
 * GETs `/<nodeId>` and renders the response into the `#node-data-result`
 * container via displayNodeData: parsed JSON when the body is valid JSON,
 * raw text otherwise. Errors (including non-2xx statuses) are shown in
 * the same container. No-ops when the container is not in the DOM.
 *
 * NOTE(review): the markup inside the loading/error template literals
 * below appears to have been stripped by the diff/extraction tooling;
 * recover the original spinner/error HTML from version control before
 * editing. The logic is preserved here byte-for-byte.
 *
 * @param {string} nodeId - The ID of the node to fetch data for
 */
fetchNodeData(nodeId) {
  // Get the result container
  const resultContainer = document.getElementById('node-data-result');
  if (!resultContainer) return;

  // Show loading indicator
  resultContainer.style.display = 'block';
  resultContainer.innerHTML = `



Loading data...


`;

  // Construct the URL for the data endpoint
  const url = `/${nodeId}`;

  // Fetch the data
  fetch(url)
    .then(response => {
      if (!response.ok) {
        throw new Error(`HTTP error ${response.status}`);
      }
      return response.text(); // Using text() instead of json() to handle any type of response
    })
    .then(data => {
      // Try to parse as JSON if possible
      try {
        const jsonData = JSON.parse(data);
        this.displayNodeData(jsonData, resultContainer);
      } catch (e) {
        // If not JSON, display as text
        this.displayNodeData(data, resultContainer, false);
      }
    })
    .catch(error => {
      // Show error message
      resultContainer.innerHTML = `

Error loading data: ${error.message}

`;
    });
}
+
+ /**
+ * Display node data in the result container
+ * @param {Object|string} data - The data to display
+ * @param {HTMLElement} container - The container to display the data in
+ * @param {boolean} isJson - Whether the data is JSON
+ */
+ displayNodeData(data, container, isJson = true) {
+ if (isJson) {
+ // Format JSON for display
+ const formattedJson = JSON.stringify(data, null, 2);
+ container.innerHTML = `
+
+ ${formattedJson.replace(//g, '>')}
+
+ `;
+ } else {
+ // Display as text
+ container.innerHTML = `
+
+ ${data.toString().replace(//g, '>')}
+
+ `;
+ }
+ }
+
+ /**
+ * Hide the node info panel
+ */
+ hideNodeInfo() {
+ if (this.nodeInfoPanel) {
+ this.nodeInfoPanel.style.display = 'none';
+ }
+ }
+
+ /**
+ * Truncate text and add ellipsis in the middle
+ * @param {string} text - The text to truncate
+ * @returns {string} Truncated text with ellipsis
+ */
+ truncateWithEllipsis(text) {
+ // Show only if longer than 15 characters (6 + 3 + 6)
+ if (!text || text.length <= 15) {
+ return text;
+ }
+
+ // Take exactly 6 chars from start and 6 from end
+ return text.substring(0, 6) + '...' + text.substring(text.length - 6);
+ }
+}
+
/**
 * EventManager - Handles user interaction events
 *
 * Translates raw DOM events on the renderer canvas into graph semantics:
 * click = select/deselect, double-click = camera focus, mousemove = hover.
 * NOTE: `this.graphController` is not passed to the constructor — it is
 * assigned by the controller after construction (see GraphController).
 */
class EventManager {
  constructor(sceneManager, dataManager, graphObjectManager) {
    this.sceneManager = sceneManager;
    this.dataManager = dataManager;
    this.graphObjectManager = graphObjectManager;

    // Wire up DOM listeners immediately
    this.setupEventListeners();
  }

  /**
   * Attach click / double-click / mousemove handlers to the renderer
   * canvas. Silently does nothing when no renderer is available yet.
   */
  setupEventListeners() {
    const renderer = this.sceneManager.renderer;
    if (!renderer || !renderer.domElement) return;

    const canvas = renderer.domElement;
    canvas.addEventListener('click', this.onMouseClick.bind(this));
    canvas.addEventListener('dblclick', this.onDoubleClick.bind(this));
    canvas.addEventListener('mousemove', this.onMouseMove.bind(this));
  }

  /**
   * Shared picking helper: refresh the raycaster from the mouse event,
   * give the NodeCloud access to the scene manager (for the camera), and
   * return the id of the closest node within the screen-space threshold
   * (0-1 normalized coordinates), or a falsy value when nothing is hit.
   * @param {MouseEvent} event - The mouse event
   * @param {number} threshold - Screen-space picking threshold
   */
  _pickNodeAt(event, threshold) {
    this.sceneManager.updateMousePosition(event);
    this.graphController.nodeCloud.sceneManager = this.sceneManager;
    return this.graphController.nodeCloud.findClosestNode(
      this.sceneManager.raycaster,
      threshold
    );
  }

  /**
   * Handle mouse click events for node selection.
   * @param {MouseEvent} event - The mouse event
   */
  onMouseClick(event) {
    const nodeId = this._pickNodeAt(event, 0.08);
    if (nodeId) {
      this.selectNode(nodeId);
    } else {
      // Clicked on empty space - deselect
      this.deselectNode();
    }
  }

  /**
   * Handle double-click events: focus the camera on the hit node.
   * @param {MouseEvent} event - The mouse event
   */
  onDoubleClick(event) {
    const nodeId = this._pickNodeAt(event, 0.08);
    if (nodeId) {
      this.focusOnNode(nodeId);
    }
  }

  /**
   * Handle mouse movement for hover effects (tighter threshold than
   * clicks).
   * @param {MouseEvent} event - The mouse event
   */
  onMouseMove(event) {
    const nodeId = this._pickNodeAt(event, 0.04);
    if (nodeId) {
      this.hoverNode(nodeId);
    } else {
      this.unhoverNode();
    }
  }

  /**
   * Select a node and highlight it and its connections.
   * No-op when the node is already selected.
   * @param {string} nodeId - The ID of the node to select
   */
  selectNode(nodeId) {
    if (this.dataManager.selectedNode === nodeId) return;

    // Record selection (also derives neighborNodes / activeLinks)
    this.dataManager.setSelectedNode(nodeId);

    // Recolor nodes in bulk, then links
    this.graphController.nodeCloud.updateColors(
      nodeId,
      this.dataManager.neighborNodes,
      this.dataManager.hoveredNode
    );
    this.updateLinkColors();

    // Hide every link label first, then re-enable only the labels on
    // links that touch the new selection.
    for (const [, link] of this.dataManager.graphObjects.links) {
      if (link.labelObject) {
        link.labelObject.visible = false;
      }
    }
    for (const linkId of this.dataManager.activeLinks) {
      const link = this.dataManager.graphObjects.links.get(linkId);
      if (link && link.labelObject) {
        link.labelObject.visible = true;
      }
    }

    // Surface the node info panel via whichever path exposes the controller
    const controller = this.sceneManager.graphController ||
      (this.graphObjectManager && this.graphObjectManager.graphController);
    if (controller && controller.uiManager) {
      controller.uiManager.showNodeInfo(nodeId);
    }
  }

  /**
   * Deselect the currently selected node (no-op when nothing selected).
   */
  deselectNode() {
    if (!this.dataManager.selectedNode) return;

    // Hide the labels of the previously active links before the
    // selection state (and activeLinks) is cleared.
    for (const linkId of this.dataManager.activeLinks) {
      const link = this.dataManager.graphObjects.links.get(linkId);
      if (link && link.labelObject) {
        link.labelObject.visible = false;
      }
    }

    this.dataManager.clearSelectedNode();

    // Recolor with no selection / no neighbors, keeping any hover state
    this.graphController.nodeCloud.updateColors(
      null,
      new Set(),
      this.dataManager.hoveredNode
    );
    this.updateLinkColors();

    // Hide the node info panel
    const controller = this.sceneManager.graphController ||
      (this.graphObjectManager && this.graphObjectManager.graphController);
    if (controller && controller.uiManager) {
      controller.uiManager.hideNodeInfo();
    }
  }

  /**
   * Focus the camera on a specific node's current position.
   * @param {string} nodeId - The ID of the node to focus on
   */
  focusOnNode(nodeId) {
    const entry = this.dataManager.graphObjects.nodes.get(nodeId);
    if (!entry || !entry.object) return;

    this.sceneManager.focusCamera(entry.object.position.clone());
  }

  /**
   * Apply hover effect to a node (no-op when already hovered).
   * @param {string} nodeId - The ID of the node to hover
   */
  hoverNode(nodeId) {
    if (this.dataManager.hoveredNode === nodeId) return;

    this.dataManager.setHoveredNode(nodeId);

    this.graphController.nodeCloud.updateColors(
      this.dataManager.selectedNode,
      this.dataManager.neighborNodes,
      nodeId
    );

    // Pointer cursor signals the node is clickable
    this.sceneManager.renderer.domElement.style.cursor = 'pointer';
  }

  /**
   * Remove hover effect from the currently hovered node (no-op when
   * nothing is hovered).
   */
  unhoverNode() {
    if (!this.dataManager.hoveredNode) return;

    this.dataManager.clearHoveredNode();

    this.graphController.nodeCloud.updateColors(
      this.dataManager.selectedNode,
      this.dataManager.neighborNodes,
      null
    );

    // Restore the default cursor
    this.sceneManager.renderer.domElement.style.cursor = 'auto';
  }

  /**
   * Recompute colors for every tracked node based on selection state.
   */
  updateNodeColors() {
    for (const id of this.dataManager.graphObjects.nodes.keys()) {
      this.graphObjectManager.updateNodeColors(id);
    }
  }

  /**
   * Recompute colors for every tracked link based on selection state.
   */
  updateLinkColors() {
    for (const id of this.dataManager.graphObjects.links.keys()) {
      this.graphObjectManager.updateLinkColors(id);
    }
  }
}
+
/**
 * DebugVisualizer - Generic visualization for debugging graph components
 *
 * Draws diagnostic overlays on top of the graph: the spatial-partition
 * grid, per-node connectivity coloring, and an FPS history graph, plus a
 * stats panel backed by fixed DOM elements (#debug-info-panel etc.).
 *
 * NOTE(review): `THREE` and `SpriteText` are globals presumably provided
 * by script includes elsewhere — confirm before extracting this class.
 * Methods mutate the shared NodeCloud color buffer and save/restore it
 * via `originalColors`; the call order in toggle/update is significant.
 */
class DebugVisualizer {
  constructor(sceneManager, graphController) {
    this.sceneManager = sceneManager;
    this.graphController = graphController;
    // THREE objects this class added to the scene; removed on clear
    this.debugObjects = [];
    this.enabled = false;
    this.lastUpdateTime = 0;
    this.updateInterval = 1000; // Update debug visuals every second
    // Per-feature toggles, driven by handleKeyPress (keys 1/2/3)
    this.activeVisualizations = {
      grid: true,
      performance: true,
      nodes: true
    };
    // Snapshot of metrics, refreshed by collectStats()
    this.stats = {};
  }

  /**
   * Toggle debug visualization on or off, showing/hiding the associated
   * DOM panels and (re)building or clearing the scene overlays.
   * @param {boolean} enabled - Whether to enable or disable visualization
   */
  toggle(enabled) {
    this.enabled = enabled;

    // Show or hide debug UI elements
    const debugPanel = document.getElementById('debug-info-panel');
    const frameGraph = document.getElementById('debug-frame-graph');

    if (debugPanel) {
      debugPanel.style.display = enabled ? 'block' : 'none';
    }

    if (frameGraph) {
      frameGraph.style.display = enabled ? 'block' : 'none';
    }

    if (enabled) {
      // Initialize frame history if needed (one FPS sample per canvas
      // pixel column)
      if (!this.frameHistory) {
        const canvas = document.getElementById('debug-frame-canvas');
        if (canvas) {
          this.frameHistory = new Array(canvas.width).fill(0);
        }
      }

      this.createDebugVisualization();
    } else {
      this.clearDebugVisualization();
    }
  }

  /**
   * Toggle a specific visualization type ('grid' | 'nodes' |
   * 'performance') and rebuild the overlays if currently enabled.
   * @param {string} type - Visualization type to toggle
   */
  toggleVisualization(type) {
    if (this.activeVisualizations.hasOwnProperty(type)) {
      this.activeVisualizations[type] = !this.activeVisualizations[type];
      if (this.enabled) {
        this.createDebugVisualization();
      }
    }
  }

  /**
   * Create visual representation of debug data.
   * Clears any previous overlays first (which also restores node colors),
   * then rebuilds each active visualization and the stats panel.
   */
  createDebugVisualization() {
    this.clearDebugVisualization();

    // Collect debug stats
    this.collectStats();

    // Create visualizations based on active settings
    if (this.activeVisualizations.grid) {
      this.createSpatialGridVisualization();
    }

    if (this.activeVisualizations.nodes) {
      this.createNodeStatsVisualization();
    }

    if (this.activeVisualizations.performance) {
      this.createPerformanceVisualization();
    }

    // Create debug panel with statistics
    this.createDebugPanel();

    this.lastUpdateTime = performance.now();
  }

  /**
   * Collect statistics for debug display into this.stats.
   * Each section degrades gracefully when its source component is absent.
   * Note: avgPerCell and the camera coordinates are stored as strings
   * (toFixed output).
   */
  collectStats() {
    // Clear previous stats
    this.stats = {
      fps: this.graphController.fps || 0,
      nodeCount: 0,
      visibleNodeCount: 0,
      linkCount: 0,
      gridStats: {
        cells: 0,
        objects: 0,
        avgPerCell: 0
      },
      cameraPosition: {
        x: 0,
        y: 0,
        z: 0
      },
      performanceMode: this.graphController.performanceMode
    };

    // Collect node and link stats
    if (this.graphController.dataManager) {
      const dataManager = this.graphController.dataManager;

      this.stats.nodeCount = dataManager.graphData.nodes.length;
      this.stats.visibleNodeCount = dataManager.graphObjects.nodes.size;
      this.stats.linkCount = dataManager.graphObjects.links.size;
    }

    // Collect grid stats
    if (this.graphController.simulationManager &&
        this.graphController.simulationManager.spatialGrid) {

      const grid = this.graphController.simulationManager.spatialGrid;
      this.stats.gridStats.cells = grid.grid.size;
      this.stats.gridStats.objects = grid.objects.size;

      if (grid.grid.size > 0 && grid.objects.size > 0) {
        this.stats.gridStats.avgPerCell =
          (grid.objects.size / grid.grid.size).toFixed(1);
      }
    }

    // Collect camera position
    if (this.graphController.sceneManager && this.graphController.sceneManager.camera) {
      const camera = this.graphController.sceneManager.camera;
      this.stats.cameraPosition.x = camera.position.x.toFixed(1);
      this.stats.cameraPosition.y = camera.position.y.toFixed(1);
      this.stats.cameraPosition.z = camera.position.z.toFixed(1);
    }
  }

  /**
   * Create spatial grid visualization: one wireframe box per occupied
   * grid cell (keyed "x,y,z"), brighter for more populated cells, with a
   * sprite label showing the cell's object count.
   */
  createSpatialGridVisualization() {
    const spatialGrid = this.graphController.simulationManager?.spatialGrid;
    if (!spatialGrid) return;

    // Create wireframe boxes for each cell in the grid
    spatialGrid.grid.forEach((cell, key) => {
      const [x, y, z] = key.split(',').map(Number);
      const cellSize = spatialGrid.cellSize;

      // Create box geometry for the cell
      const geometry = new THREE.BoxGeometry(cellSize, cellSize, cellSize);
      const material = new THREE.MeshBasicMaterial({
        color: 0x00ff00,
        wireframe: true,
        transparent: true,
        opacity: 0.05 + (0.05 * Math.min(cell.size, 10)) // Brighter for more populated cells
      });

      const box = new THREE.Mesh(geometry, material);
      // Cell indices are corner-based; +0.5 centers the box in the cell
      box.position.set(
        (x + 0.5) * cellSize,
        (y + 0.5) * cellSize,
        (z + 0.5) * cellSize
      );

      this.sceneManager.addToScene(box);
      this.debugObjects.push(box);

      // Add text label showing object count in cell
      if (cell.size > 0) {
        const text = new SpriteText(`${cell.size}`, 12);
        text.color = '#ffff00';
        text.backgroundColor = 'rgba(0,0,0,0.5)';
        text.padding = 2;
        text.position.copy(box.position);
        this.sceneManager.addToScene(text);
        this.debugObjects.push(text);
      }
    });
  }

  /**
   * Create node statistics visualization: recolor every node in the
   * NodeCloud's color buffer by connection count (red >10, orange >5,
   * yellow >2, cyan otherwise). Original colors are snapshotted into
   * this.originalColors so clearDebugVisualization can restore them.
   */
  createNodeStatsVisualization() {
    // Highlight nodes with different colors based on properties
    const nodeManager = this.graphController.dataManager;
    const nodeCloud = this.graphController.nodeCloud;

    if (!nodeManager || !nodeCloud || !nodeCloud.colors) return;

    // Store original colors to restore later
    this.originalColors = new Float32Array(nodeCloud.colors.length);
    this.originalColors.set(nodeCloud.colors); // Make a copy of all colors

    // Iterate through nodes and update colors in the buffer
    nodeManager.graphObjects.nodes.forEach((node, id) => {
      if (nodeCloud.nodeIndices.has(id)) {
        // Get the node's index in the color buffer (3 floats per node)
        const index = nodeCloud.nodeIndices.get(id);
        const i3 = index * 3;

        // Get connection count
        const connectedLinks = nodeManager.getConnectedLinks(id);
        const connectionCount = connectedLinks.length;

        // Set color based on connection count
        let color;
        if (connectionCount > 10) {
          color = new THREE.Color(0xff0000); // Red for highly connected
        } else if (connectionCount > 5) {
          color = new THREE.Color(0xff8800); // Orange for medium
        } else if (connectionCount > 2) {
          color = new THREE.Color(0xffff00); // Yellow for low
        } else {
          color = new THREE.Color(0x00ffff); // Cyan for minimal
        }

        // Update the color buffer directly
        nodeCloud.colors[i3] = color.r;
        nodeCloud.colors[i3 + 1] = color.g;
        nodeCloud.colors[i3 + 2] = color.b;
      }
    });

    // Mark the color buffer as needing update
    if (nodeCloud.geometry && nodeCloud.geometry.attributes.color) {
      nodeCloud.geometry.attributes.color.needsUpdate = true;
    }
  }

  /**
   * Create performance metrics visualization (delegates to the frame
   * graph redraw).
   */
  createPerformanceVisualization() {
    // Update frame history and redraw
    this.updateFrameGraph();
  }

  /**
   * Update the frame rate graph: push the latest FPS sample into the
   * fixed-length history, then redraw the grid lines, gradient fill and
   * line plot on the #debug-frame-canvas.
   */
  updateFrameGraph() {
    const canvas = document.getElementById('debug-frame-canvas');
    const fpsLabel = document.getElementById('debug-fps-label');

    if (!canvas || !fpsLabel) return;

    // Update FPS label
    fpsLabel.textContent = `${this.stats.fps} FPS`;

    // Add current FPS to history
    if (!this.frameHistory) {
      this.frameHistory = new Array(canvas.width).fill(0);
    }

    // push + shift keeps the history the same length as the canvas width
    this.frameHistory.push(this.stats.fps);
    this.frameHistory.shift();

    // Draw frame history
    const ctx = canvas.getContext('2d');
    const width = canvas.width;
    const height = canvas.height;

    // Clear canvas
    ctx.clearRect(0, 0, width, height);

    // Calculate scale - find max FPS in history for scaling (min 60 so
    // the common grid lines stay on-canvas)
    const maxFPS = Math.max(60, ...this.frameHistory);
    const scale = height / maxFPS;

    // Draw background grid
    ctx.strokeStyle = '#333';
    ctx.lineWidth = 0.5;

    // Draw horizontal grid lines at 15, 30, 45, 60 FPS
    [15, 30, 45, 60].forEach(fps => {
      const y = height - (fps * scale);
      if (y >= 0 && y <= height) {
        ctx.beginPath();
        ctx.moveTo(0, y);
        ctx.lineTo(width, y);
        ctx.stroke();
      }
    });

    // Draw FPS graph (first pass builds a closed path for the fill)
    ctx.strokeStyle = '#4CAF50';
    ctx.lineWidth = 1.5;
    ctx.beginPath();

    // Start at bottom-left corner with 0 FPS
    ctx.moveTo(0, height);

    // Draw lines for each frame sample
    this.frameHistory.forEach((fps, x) => {
      const y = height - (fps * scale);
      ctx.lineTo(x, y);
    });

    // Finish at bottom-right corner
    ctx.lineTo(width - 1, height);
    ctx.closePath();

    // Fill gradient
    const gradient = ctx.createLinearGradient(0, 0, 0, height);
    gradient.addColorStop(0, 'rgba(76, 175, 80, 0.7)');
    gradient.addColorStop(1, 'rgba(76, 175, 80, 0.1)');
    ctx.fillStyle = gradient;
    ctx.fill();

    // Stroke the line on top of the fill (second, open path)
    ctx.strokeStyle = '#4CAF50';
    ctx.lineWidth = 1.5;
    ctx.beginPath();

    this.frameHistory.forEach((fps, x) => {
      const y = height - (fps * scale);
      if (x === 0) {
        ctx.moveTo(x, y);
      } else {
        ctx.lineTo(x, y);
      }
    });

    ctx.stroke();
  }

  /**
   * Populate the fixed debug DOM elements with the latest stats.
   * NOTE(review): assumes all #debug-* elements exist — this throws if
   * the debug panel markup is absent; verify against the page template.
   */
  createDebugPanel() {
    // Update debug panel content using the existing HTML element
    document.getElementById('debug-nodes').textContent = `${this.stats.visibleNodeCount}/${this.stats.nodeCount}`;
    document.getElementById('debug-links').textContent = `${this.stats.linkCount}`;
    document.getElementById('debug-cells').textContent = `${this.stats.gridStats.cells}`;
    document.getElementById('debug-objects').textContent = `${this.stats.gridStats.objects}`;
    document.getElementById('debug-avg-per-cell').textContent = `${this.stats.gridStats.avgPerCell}`;

    // Update camera position
    document.getElementById('debug-camera-x').textContent = `${this.stats.cameraPosition.x}`;
    document.getElementById('debug-camera-y').textContent = `${this.stats.cameraPosition.y}`;
    document.getElementById('debug-camera-z').textContent = `${this.stats.cameraPosition.z}`;
  }

  /**
   * Remove all debug visualization objects from the scene and restore
   * the NodeCloud's pre-debug colors (snapshot taken in
   * createNodeStatsVisualization).
   */
  clearDebugVisualization() {
    // Remove all debug visualization objects from scene
    this.debugObjects.forEach(obj => {
      this.sceneManager.removeFromScene(obj);
    });
    this.debugObjects = [];

    // Restore original node colors
    if (this.originalColors && this.graphController.nodeCloud) {
      const nodeCloud = this.graphController.nodeCloud;

      // Copy the original colors back to the nodeCloud color buffer
      // (length check guards against the cloud being rebuilt meanwhile)
      if (nodeCloud.colors && this.originalColors.length === nodeCloud.colors.length) {
        nodeCloud.colors.set(this.originalColors);

        // Mark the color buffer as needing update
        if (nodeCloud.geometry && nodeCloud.geometry.attributes.color) {
          nodeCloud.geometry.attributes.color.needsUpdate = true;
        }
      }

      this.originalColors = null;
    }
  }

  /**
   * Per-frame update hook. Refreshes cheap stats/FPS graph every call,
   * but only rebuilds the full scene overlays once per updateInterval.
   */
  update() {
    if (!this.enabled) return;

    // Update stats more frequently than full visualization refresh
    this.collectStats();

    // Update FPS graph and info panel more frequently
    if (this.activeVisualizations.performance && document.getElementById('debug-frame-canvas')) {
      this.updateFrameGraph();
    }

    // Update info panel if it exists
    if (document.getElementById('debug-info-panel')) {
      this.createDebugPanel(); // Updates panel content
    }

    // Only update full visualization periodically to avoid performance impact
    const now = performance.now();
    if (now - this.lastUpdateTime < this.updateInterval) return;

    // Recreate visualization
    this.createDebugVisualization();
  }

  /**
   * Handle keyboard shortcuts for debugging: keys 1/2/3 toggle the
   * grid / node-stats / performance overlays. Ignored while disabled.
   * @param {KeyboardEvent} event - Keyboard event
   */
  handleKeyPress(event) {
    if (!this.enabled) return;

    switch (event.key) {
      case '1':
        this.toggleVisualization('grid');
        break;
      case '2':
        this.toggleVisualization('nodes');
        break;
      case '3':
        this.toggleVisualization('performance');
        break;
    }
  }
}
+
+/**
+ * Main controller class that coordinates all graph components
+ */
+class GraphController {
+ constructor(containerId) {
+ // DOM container reference
+ this.container = document.getElementById(containerId);
+
+ // Initialize component managers
+ this.themeManager = new ThemeManager();
+ this.sceneManager = new SceneManager(this.container, this.themeManager);
+ this.sceneManager.graphController = this; // Add reference to this controller
+
+ this.dataManager = new DataManager();
+ this.graphObjectManager = new GraphObjectManager(this.sceneManager, this.dataManager, this.themeManager);
+ this.graphObjectManager.graphController = this; // Add reference to this controller
+
+ // Create the node cloud for efficient node rendering
+ this.nodeCloud = new NodeCloud(this.sceneManager.scene, this.themeManager);
+
+ this.simulationManager = new SimulationManager(this.dataManager, this.graphObjectManager, this.themeManager);
+ this.simulationManager.graphController = this; // Add reference to this controller
+
+ this.uiManager = new UIManager(this.container, this.dataManager, this);
+ this.eventManager = new EventManager(this.sceneManager, this.dataManager, this.graphObjectManager);
+ this.eventManager.graphController = this; // Add reference to this controller
+
+ // Performance and debug settings
+ this.performanceMode = true; // Always on
+ this.debugMode = false;
+
+ // Initialize generic debugger
+ this.debugger = new DebugVisualizer(this.sceneManager, this);
+
+ // Initialize FPS counter
+ this.fpsCounter = document.getElementById('fps-counter');
+ this.frameCount = 0;
+ this.lastTime = performance.now();
+ this.fps = 0;
+ this.fpsUpdateInterval = 500; // Update FPS display every 500ms
+
+ // Set up UI button handlers
+ this.setupButtonHandlers();
+
+ // Setup keyboard listeners for debug controls
+ document.addEventListener('keydown', this.handleKeyPress.bind(this));
+
+ // Store a global reference for convenience (used by NodeCloud)
+ window.lastGraphController = this;
+
+ // Load data
+ this.loadGraphData();
+
+ // Always enable performance optimizations
+ this.enablePerformanceMode();
+
+ // Start animation loop
+ this.animate();
+ }
+
+ /**
+ * Set up handlers for UI buttons
+ */
+ setupButtonHandlers() {
+ // Debug mode button
+ const debugBtn = document.getElementById('toggle-debug-btn');
+ if (debugBtn) {
+ debugBtn.addEventListener('click', () => {
+ this.toggleDebugMode();
+ debugBtn.classList.toggle('active', this.debugMode);
+ });
+ }
+ }
+
+ /**
+ * Enable performance optimizations
+ */
+ enablePerformanceMode() {
+ // Enable spatial grid and frustum culling
+ if (this.simulationManager) {
+ this.simulationManager.useSpatialIndex = true;
+ }
+ if (this.sceneManager) {
+ this.sceneManager.enableFrustumCulling = true;
+ }
+ }
+
+ /**
+ * Handle keyboard shortcuts
+ * @param {KeyboardEvent} event - Keyboard event
+ */
+ handleKeyPress(event) {
+ // Pass to debugger if debug mode is on
+ if (this.debugMode && this.debugger) {
+ this.debugger.handleKeyPress(event);
+ }
+ }
+
+ /**
+ * Toggle debug visualization mode
+ */
+ toggleDebugMode() {
+ this.debugMode = !this.debugMode;
+
+ if (this.debugger) {
+ this.debugger.toggle(this.debugMode);
+ }
+
+ return this.debugMode;
+ }
+
+ /**
+ * Animation loop
+ */
+ animate() {
+ requestAnimationFrame(() => this.animate());
+
+ // Update FPS calculation
+ this.frameCount++;
+ const currentTime = performance.now();
+ const elapsed = currentTime - this.lastTime;
+
+ // Update FPS counter every interval
+ if (elapsed > this.fpsUpdateInterval) {
+ this.fps = Math.round((this.frameCount * 1000) / elapsed);
+ this.fpsCounter.textContent = `FPS: ${this.fps}`;
+
+ // Reset counters
+ this.frameCount = 0;
+ this.lastTime = currentTime;
+ }
+
+ // Update debug visualization if enabled
+ if (this.debugMode && this.debugger) {
+ this.debugger.update();
+ }
+
+ // Update scene
+ this.sceneManager.update();
+ }
+
+ /**
+ * Load graph data from the server
+ */
+ loadGraphData() {
+ this.clearDisplay();
+ // Show loading indicator
+ this.uiManager.showLoading(true);
+
+ // Load data via the data manager
+ this.dataManager.loadData()
+ .then(data => {
+ // Initialize the force simulation with loaded data
+ this.simulationManager.updateSimulation(false);
+
+ // Update statistics
+ this.uiManager.updateStats();
+
+ // Show initial message
+ this.uiManager.showInitialMessage("Enter a search term to display nodes");
+
+ // Hide loading indicator
+ this.uiManager.showLoading(false);
+ })
+ .catch(error => {
+ // Show error message
+ this.uiManager.showError('Failed to load graph data: ' + error.message);
+ this.uiManager.showLoading(false);
+ });
+ }
+
+ /**
+ * Search for nodes by term and display them
+ * @param {string} searchTerm - The term to search for
+ */
+ searchNodes(searchTerm) {
+ // Clear current display
+ this.clearDisplay();
+
+ // Hide the initial message
+ this.uiManager.hideInitialMessage();
+
+ if (!searchTerm) return;
+
+ // Find matching nodes
+ const matchingNodeIds = this.dataManager.searchNodes(searchTerm);
+
+ // If no nodes found, show a message
+ if (matchingNodeIds.length === 0) {
+ console.log(`No nodes found matching "${searchTerm}"`);
+ this.uiManager.showInitialMessage(`No nodes found matching "${searchTerm}"`);
+ return;
+ }
+
+ console.log(`Found ${matchingNodeIds.length} nodes matching "${searchTerm}"`);
+
+ // Show loading indicator during simulation
+ this.uiManager.showLoading(true);
+
+ // Add each matching node and its connections with depth of 10
+ const addedNodes = new Set();
+
+ matchingNodeIds.forEach(nodeId => {
+ // Use getConnectedSubgraph to get nodes and links up to depth 10
+ const { nodes, links } = this.dataManager.getConnectedSubgraph(nodeId, 10);
+
+ // Add all nodes to the scene
+ nodes.forEach(node => {
+ if (!this.dataManager.graphObjects.nodes.has(node.id)) {
+ this.graphObjectManager.createNodeObject(node);
+ addedNodes.add(node.id);
+ }
+ });
+
+ // Add all links to the scene
+ links.forEach(link => {
+ const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
+ const targetId = typeof link.target === 'object' ? link.target.id : link.target;
+ const linkId = `${sourceId}-${targetId}`;
+
+ if (!this.dataManager.graphObjects.links.has(linkId)) {
+ this.graphObjectManager.createLinkObject(link);
+ }
+ });
+ });
+
+ // Update simulation and restart it properly
+ this.simulationManager.updateSimulation(true);
+
+ // Center the view on the found nodes
+ this.centerOnNodes(Array.from(addedNodes));
+
+ // Hide loading indicator when view is centered
+ this.uiManager.showLoading(false);
+ }
+
+ /**
+ * Clear the current display
+ */
+ clearDisplay() {
+ this.graphObjectManager.clearVisibleObjects();
+ }
+
+ /**
+ * Center the view on a set of nodes
+ * @param {Array} nodeIds - Array of node IDs to center on
+ */
+ centerOnNodes(nodeIds) {
+ if (!nodeIds || nodeIds.length === 0) return;
+
+ // Calculate the center position of the specified nodes
+ let center = { x: 0, y: 0, z: 0 };
+ let count = 0;
+
+ nodeIds.forEach(nodeId => {
+ const nodeData = this.dataManager.graphObjects.nodes.get(nodeId);
+ if (nodeData && nodeData.object) {
+ center.x += nodeData.object.position.x;
+ center.y += nodeData.object.position.y;
+ center.z += nodeData.object.position.z;
+ count++;
+ }
+ });
+
+ if (count === 0) return;
+
+ center.x /= count;
+ center.y /= count;
+ center.z /= count;
+
+ // Set the camera target for perspective camera
+ this.sceneManager.controls.target.set(center.x, center.y, 0);
+
+ // Position the perspective camera
+ const distance = 1000;
+ this.sceneManager.camera.position.set(
+ center.x,
+ center.y,
+ distance
+ );
+
+ // Update the camera and controls
+ this.sceneManager.camera.updateProjectionMatrix();
+ this.sceneManager.controls.update();
+ }
+
+ /**
+ * Toggle label visibility
+ */
+ toggleLabels() {
+ const showLabels = this.graphObjectManager.toggleLabels();
+ return showLabels;
+ }
+
+ /**
+ * Toggle physics simulation
+ */
+ togglePhysics() {
+ this.simulationManager.togglePhysics();
+ }
+
+ /**
+ * Reset the view
+ */
+ resetView() {
+ // Clear current display
+ this.clearDisplay();
+
+ // Reset camera
+ this.sceneManager.resetView();
+
+ // Show initial message
+ this.uiManager.showInitialMessage("Enter a search term to display nodes");
+ }
+
+ /**
+ * Load all nodes in the graph
+ */
+ loadAllNodes() {
+ // Clear current display
+ this.clearDisplay();
+
+ // Hide the initial message
+ this.uiManager.hideInitialMessage();
+
+ // Show loading indicator
+ this.uiManager.showLoading(true);
+
+ console.log(`Loading all ${this.dataManager.graphData.nodes.length} nodes`);
+
+ // Store all added node IDs
+ const addedNodes = new Set();
+
+ // Add all nodes to the scene
+ this.dataManager.graphData.nodes.forEach(node => {
+ if (!this.dataManager.graphObjects.nodes.has(node.id)) {
+ this.graphObjectManager.createNodeObject(node);
+ addedNodes.add(node.id);
+ }
+ });
+
+ // Add all links between the visible nodes
+ this.dataManager.graphData.links.forEach(link => {
+ const sourceId = typeof link.source === 'object' ? link.source.id : link.source;
+ const targetId = typeof link.target === 'object' ? link.target.id : link.target;
+ const linkId = `${sourceId}-${targetId}`;
+
+ // Only add links between nodes that are visible
+ if (addedNodes.has(sourceId) && addedNodes.has(targetId) &&
+ !this.dataManager.graphObjects.links.has(linkId)) {
+ this.graphObjectManager.createLinkObject(link);
+ }
+ });
+
+ // Update simulation and restart it
+ this.simulationManager.updateSimulation(true);
+
+ // Center the view on all nodes
+ this.centerOnNodes(Array.from(addedNodes));
+
+ // Hide loading indicator
+ this.uiManager.showLoading(false);
+ }
+}
+
+// Initialize the application when the page loads
+document.addEventListener('DOMContentLoaded', () => {
+ new GraphController('graph-container');
+});
\ No newline at end of file
diff --git a/src/html/hyperbuddy@1.0/index.html b/src/html/hyperbuddy@1.0/index.html
index 521570933..7f2ccf4d8 100755
--- a/src/html/hyperbuddy@1.0/index.html
+++ b/src/html/hyperbuddy@1.0/index.html
@@ -1,150 +1,145 @@
-
-
-
-
-
-
- HyperBEAM
-
-
-
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/html/hyperbuddy@1.0/styles.css b/src/html/hyperbuddy@1.0/styles.css
index 4c6ce7a71..042523c89 100644
--- a/src/html/hyperbuddy@1.0/styles.css
+++ b/src/html/hyperbuddy@1.0/styles.css
@@ -148,6 +148,10 @@ a:hover {
}
.header {
+ position: relative;
+}
+
+.header-inner {
width: 100%;
position: relative;
display: flex;
@@ -470,7 +474,7 @@ a:hover {
margin-top: -50px;
}
- .header {
+ .header-inner {
position: relative;
flex-direction: column;
align-items: flex-start;
@@ -509,7 +513,7 @@ a:hover {
}
@media (max-width: 800px) {
- .header {
+ .header-inner {
padding: 20px;
}
@@ -526,7 +530,7 @@ a:hover {
@media (max-width: 500px) {
- .header {
+ .header-inner {
padding: 20px;
}
@@ -656,4 +660,30 @@ a:hover {
.section-lines-header {
border-bottom: 1px solid var(--border-color);
/* border-radius: 5px 5px 0 0; */
-}
\ No newline at end of file
+}
+
+/* Graph visualization styles */
+.graph-link {
+ position: absolute;
+ z-index: 100;
+ top: -18px;
+ right: -18px;
+ background: var(--section-bg-color-primary);
+ border-radius: 5px;
+ border: 0.5px solid var(--border-color);
+ height: 36px;
+ padding: 0px 12px;
+ font-size: clamp(0.6rem, 1.75vw, 0.9rem);
+ font-weight: 500;
+ color: var(--text-color-primary);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+}
+
+.graph-link:hover {
+ background: var(--section-bg-color-alt1);
+ color: var(--indicator-color-active);
+ text-decoration: none;
+
+}
diff --git a/src/include/hb.hrl b/src/include/hb.hrl
index 44878a105..bbf86686d 100644
--- a/src/include/hb.hrl
+++ b/src/include/hb.hrl
@@ -6,7 +6,7 @@
-define(IS_EMPTY_MESSAGE(Msg), (map_size(Msg) == 0) orelse (map_size(Msg) == 1 andalso (is_map_key(priv, Msg) orelse is_map_key(<<"priv">>, Msg)))).
%% @doc Macro usable in guards that validates whether a term is a
%% human-readable ID encoding.
--define(IS_ID(X), (is_binary(X) andalso (byte_size(X) == 43 orelse byte_size(X) == 32))).
+-define(IS_ID(X), (is_binary(X) andalso (byte_size(X) == 42 orelse byte_size(X) == 43 orelse byte_size(X) == 32))).
%% @doc List of special keys that are used in the AO-Core protocol.
-define(AO_CORE_KEYS, [<<"path">>, <<"hashpath">>, <<"priv">>]).
%% @doc Keys that can be regenerated losslessly.
diff --git a/test/admissible-report-wallet.json b/test/admissible-report-wallet.json
new file mode 100644
index 000000000..52905a778
--- /dev/null
+++ b/test/admissible-report-wallet.json
@@ -0,0 +1 @@
+{"p":"v1JKa5JENrE5AOfbxQJKP5G_Q3miLbxobazHhAoznYVXtrklpNiYa5tbTT4yK1cH5dZpW2gNyYP6FuxfdwWN6EKXMlugH8BDCchLhcPvzuQd9WL7uyxO3CTTvJ3Rz_E87rAGfPTbVtM1kIXmK582HLZtbe1eDwvlMoEYOglobRO2YOc2_EL7P8v6spUvUW_y3UpwQnnE5zb-ZIb3W7A_3Xzb6sJB8KTVn8cjSX4eFAWA6WKZFvxyQ3yVnqLtwawon966lH-z2QOcV0fZa35K4GW8k2PX3dp4zJk4bremyG4bE0WY5xUSbqOhCOf8mjlGbdDYI1onPQQRBoi0ex-DPQ","q":"-xLVvoLT7YqbrFKg4BzxzD_MUJ-_EHcoj0K7z6S9Eb523nxoFMPFomd-kYCDCdDzGfN_d5lESI-3wv651OStG8phIBybCVWpUyGo1Kxp2d-qE8sI4JOhXX7p2oqjsgD-5iJATDwcKEXklkvCSv4PeRlsvhL55Y6fGblBzNec6fPrDrNOnk4O1mANIAzuSdO9MH1qVEKVCLOIfxyWoo3vQI6MXJdqxS-X5qjD_5PnhcyWH1rNMG9tb7RZJXd8tMRu5donJhYUdcgewI5_lEpem_9xB06wx7O6_Armd2z57yoq4xwLLVxfZDfsqJTTi6sRid2JlFpCXH4VYndCIybLfw","d":"BIOBc7rd_aah2JO7WgAa_bEO5bG2qES_-UdPe0MaECLakpm8YqnZKu-pR8yy5utGXwJiESR5XhhHWkI_tLY9WxNybAFKmqrL1LmfcITz63KySxxOSxQPIIicffOzOMQkc7vkeJ5Nyp1NveubKC_jcWSKH3F5Okf1GR-5kIWUUiEs3kRzK75VOierSLPp2PTc80Rl15oGQd6x7cfU_GEF34IQPHNekrtWY81gHuv7EyOMeWIKCxErecKJWfXwnAsIoDP9KEP_e2DynTynJQkbFs8DUTVrIrPtpkPG-lrnR9x-7tOx0DtTTZuxsCM1W5XkQQXNQH4RXRqGjAm8NoqdbEG0WKLRCH9uTXQxw7OD6v2miTTfgus2NBaH4D1yzvgpTTwtS__KMbjY2QefwWiZ-gEbEcb1XLFDCDwAIacwIeniQDb2E4JpXQqflAav6cpcB9VOMeQQxk2LUYo1mMbv5Ij-HpK2OAqAd38W8ImV_uD5rHaujyiGBSN5j8TAONl2D7_GJNHzKNy7yfEl0KSDca5wrP9CM2ri6aUe6LEBnoPkT9Xh7ZfEwPwOoMXYG0gD7WCfplVDm1hIheGi5TM5LDa8u7WqXG_jZVtYX-NnIsA_Sq-VbvaUF9CiYuii6OgUvr_5eDaJ7ck9v8lfg8yF1k0ZsBf5RxSOhXs6fc38kwU","n":"u6PCfoy-ExmoBXPZojc1oqL3wmSSywwjFGzr4ERqucFFp-ACA3KylqpgiBNGfobTokOQht4QKyMRDnf_LoO0is6idVuaVxp37L_3QRlSebn4VilPtlWcOr2rfSYbhM8TjBzwFoj1BpeQJzD4vYK9thyp2hlHVBB2ZFNAwB_22_Mr1atOYdZstv23eWo1Pb2Rwv_vwuvsK_-kffjRL6lTy7OjMwGL6UBnyWaInTSH74m4ihxS_vql8jMBySlB2ChhTpzsJi-JrrcBvCqbWhVK7ULXhVs8LvCMqCNG5w38ts8rfYtZYYjlcHOA_NER6PtYkS5nz9LVZtpZ4_QGNy7Xohqf9t0jrcYEmeSOV4EXhbLqxdwm_ITwoZbe6ZnXcSBUMOdMzaM7q0aIT8HOOI3MvLzl5uFRUdjfZ_irJOC0vb0IrsvRicC7jGQo0mBv8M2EE5l2otBO5aQFgpFqYphiCAMLhyTnsOUTvkA4yw40xXVstMzEJ2pxYOWiEvP1qPj-kjnq_7uHBa9TWzJ7bxiO_BzHMbKd7sb3r2jicsemmeMm4yeQzOT6Tf3jvNE17emV6K51SyWN6UNJUTJ9HpEhqZ0Y17nI3
oOLOPsNMMXQ9_rmbP7Tv8dCbq9sRGnjmX1Svrs6SwNVpfoUT8NmG2tqM5VGKXo4mknLOr-1g-PBekM","e":"AQAB","ext":true,"dp":"HtDmY8U_b3_EKr0tzOG9i9ex8vBYiv1Z5LB7wmzSO4EKy8eupIquokZ3wk1OT2TJRN_wQGTWM6sqUR7pkYY3gT2YlOflNrgFFEJKx9Tzf2OG38t9uHw-h373C95vuQqmQdvgb6gQ3D9Q1WJ73HLciGtp3Nbq24mS9TuN52s0gr02Fw2m8aLoTTJRwwn8gSWC_NnMkyiB6qwU9aQ3m3EcGFTQJ1P6wwQJ1J6CtIe32Im6Zd0Xw3gN_4jFoLOlkBhmwrlhXCHlmgLW38gW4RWKgfJhGWxvjLBv3KShTlQObSIvAj-njTD7sw5wFbsoGL849N86sRcIUu-gvmiuiVZeEQ","dq":"6m952bu7O1Bzb4Jv6RPdy0O--YFQHIXG_43mZEqEqG7Z-4DahpkOj0hn7GC8-ot6kz7ERN593eskQRUsW9dytEJSUnOjaCHuS0tgo8ShyeiInJa2oUv4Hp8EqSVPGETJvgU5WHXALPKmMJhowTFdLUxKN2jsoiZ79L8A685gHCu_zigrPrHQNOfXGZg5YAIv43kXsbnCAy_wQhBlrz8sqXDxKvvPnHOGOMBY0uo-Arc3beuRMKq62tThcJSTgw7wJft_FpcDX78Ox-nGwqZ2lN79oT8e3jm6XOGotNaywVj0Vr-2yBI6mA-IERl2NjHz3HFZp4Zn9IleWmTVApGU7Q","kty":"RSA","qi":"VHkl0nQEL-BDIRm_o24lBpAKnWro3xpsh3bbWJD8KQy0ybi8txxPyNuMEeo8mMSYluxO2QBjO-9ATqx5jLNuWW07_e607UX6xDPY1UrG0irTgqcv_Iabel82qoA_RJ8dOw4ng_wBd9fdyRPliOWcmLpWMEZerkfhXNYRimw1z2kh3sOQSnibHQnBPzol0lNbp_Mi8MY378ssBIvCTN7p3aiQAiC5gyGyiP9y7e4ejl32G6zyco9X98-4n1s4ZPP1LZDkN9YoY_lrrDnUFJpQ8o6RuthiBxpN8gmutK4blU_6hE9ze5OYJkLCb-eyRVPqmbXX0WWAZJFQagFYgMX_PA"}
\ No newline at end of file
diff --git a/test/admissible-report.eterm b/test/admissible-report.eterm
new file mode 100644
index 000000000..03fce5ada
--- /dev/null
+++ b/test/admissible-report.eterm
@@ -0,0 +1,41 @@
+#{
+ <<"address">> => <<"-q_bZCJGupSiZvjNg-KoD0bjx53pqnay_5Ojvo0597s">>,
+ <<"local-hashes">> =>
+ #{
+ <<"append">> =>
+ <<"95a34faced5e487991f9cc2253a41cbd26b708bf00328f98dddbbf6b3ea2892e">>,
+ <<"firmware">> =>
+ <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>,
+ <<"guest_features">> => 1,
+ <<"initrd">> =>
+ <<"544045560322dbcd2c454bdc50f35edf0147829ec440e6cb487b4a1503f923c1">>,
+ <<"kernel">> =>
+ <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
+ <<"vcpu_type">> => 5,
+ <<"vcpus">> => 32,
+ <<"vmm_type">> => 1
+ },
+ <<"node-message">> =>
+ #{
+ <<"address">> => <<"-q_bZCJGupSiZvjNg-KoD0bjx53pqnay_5Ojvo0597s">>,
+ <<"snp_trusted">> =>
+ [#{
+ <<"append">> =>
+ <<"95a34faced5e487991f9cc2253a41cbd26b708bf00328f98dddbbf6b3ea2892e">>,
+ <<"firmware">> =>
+ <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>,
+ <<"guest_features">> => 1,
+ <<"initrd">> =>
+ <<"544045560322dbcd2c454bdc50f35edf0147829ec440e6cb487b4a1503f923c1">>,
+ <<"kernel">> =>
+ <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
+ <<"vcpu_type">> => 5,
+ <<"vcpus">> => 32,
+ <<"vmm_type">> => 1
+ }]
+ },
+ <<"nonce">> =>
+ <<"+q/bZCJGupSiZvjNg+KoD0bjx53pqnay/5Ojvo0597t+kqD7iwdTjAzQ35bcHZXitlIkgX0e3NnLV3thQJdRRA==">>,
+ <<"report">> =>
+ <<"{\"version\":2,\"guest_svn\":0,\"policy\":196608,\"family_id\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"image_id\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"vmpl\":1,\"sig_algo\":1,\"current_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"plat_info\":3,\"_author_key_en\":0,\"_reserved_0\":0,\"report_data\":[250,175,219,100,34,70,186,148,162,102,248,205,131,226,168,15,70,227,199,157,233,170,118,178,255,147,163,190,141,57,247,187,181,160,210,21,56,188,91,240,50,131,40,188,234,18,130,134,180,231,217,163,72,45,87,74,139,105,40,115,207,229,115,46],\"measurement\":[135,166,103,101,166,120,21,18,52,110,203,71,81,17,101,194,107,109,163,231,41,151,61,151,16,160,197,103,199,74,166,87,130,58,240,193,98,213,22,248,67,0,84,255,163,46,194,73],\"host_data\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"id_key_digest\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"author_key_digest\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"report_id\":[241,10,197,48,71,124,10,164,25,84,217,143,57,33,170,252,188,35,183,1,18,3,169,47,254,196,204,111,197,155,181,36],\"report_id_ma\":[255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255],\"reported_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"_reserved_1\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"chip_id\":[6,154,118,21,12,115,53,47,251,19,195,100,155,12,239,102,116,131,103,189,106,202,153,3,114,175,16,182,24,166,229,214,231,126,164,15,129,52,233,142,196,43,43,8,89,238,118,246,21,144,209,16,165,197,134,105,214,250,155,148,50,78,87,203],\"committed_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"current_build\":20,\"current_minor\":55,\"current_major\":1,\"_reserved_2\":0,\"committed_build\":20,\"committed_minor\":
55,\"committed_major\":1,\"_reserved_3\":0,\"launch_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"_reserved_4\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"signature\":{\"r\":[136,143,81,241,242,123,84,48,177,36,241,167,35,181,109,42,134,193,44,88,162,240,140,195,117,252,151,27,83,156,188,69,14,114,21,107,105,163,12,20,128,144,61,65,233,31,205,111,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"s\":[64,250,227,155,71,182,238,179,71,128,109,219,182,33,119,151,202,50,123,211,22,167,104,241,222,221,82,34,138,148,86,254,190,174,191,86,202,194,207,91,110,250,3,196,127,105,133,135,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"_reserved\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}}">>
+}.
\ No newline at end of file
diff --git a/test/hyper-aos.lua b/test/hyper-aos.lua
index eb5d7c22c..b177e04b4 100644
--- a/test/hyper-aos.lua
+++ b/test/hyper-aos.lua
@@ -439,9 +439,10 @@ local json = require('.json')
-- @treturn {function} The handler function, which takes a message as an argument.
-- @see stringify
return function (ao)
- return function (msg)
+ return function (req)
+ local msg = req.body
-- exec expression
- local expr = msg.body.data
+ local expr = msg.body and msg.body.body or msg.data or ""
local func, err = load("return " .. expr, 'aos', 't', _G)
local output = ""
local e = nil
@@ -465,14 +466,14 @@ return function (ao)
or tostring(output)
)
-- print(stringify.format(HandlerPrintLogs))
- else
- -- set result in outbox.Output (Left for backwards compatibility)
- ao.outbox.Output = {
- data = type(output) == "table"
- and stringify.format(output) or tostring(output),
- prompt = Prompt()
- }
-
+ -- else
+ -- -- set result in outbox.Output (Left for backwards compatibility)
+ -- ao.outbox.Output = {
+ -- data = type(output) == "table"
+ -- and stringify.format(output) or tostring(output),
+ -- prompt = Prompt()
+ -- }
+ --
end
end
end
@@ -1637,1752 +1638,6 @@ print("loaded dump")
-local function load_bint()
- --[[--
-lua-bint - v0.5.1 - 26/Jun/2023
-Eduardo Bart - edub4rt@gmail.com
-https://github.com/edubart/lua-bint
-
-Small portable arbitrary-precision integer arithmetic library in pure Lua for
-computing with large integers.
-
-Different from most arbitrary-precision integer libraries in pure Lua out there this one
-uses an array of lua integers as underlying data-type in its implementation instead of
-using strings or large tables, this make it efficient for working with fixed width integers
-and to make bitwise operations.
-
-## Design goals
-
-The main design goal of this library is to be small, correct, self contained and use few
-resources while retaining acceptable performance and feature completeness.
-
-The library is designed to follow recent Lua integer semantics, this means that
-integer overflow warps around,
-signed integers are implemented using two-complement arithmetic rules,
-integer division operations rounds towards minus infinity,
-any mixed operations with float numbers promotes the value to a float,
-and the usual division/power operation always promotes to floats.
-
-The library is designed to be possible to work with only unsigned integer arithmetic
-when using the proper methods.
-
-All the lua arithmetic operators (+, -, *, //, /, %) and bitwise operators (&, |, ~, <<, >>)
-are implemented as metamethods.
-
-The integer size must be fixed in advance and the library is designed to be more efficient when
-working with integers of sizes between 64-4096 bits. If you need to work with really huge numbers
-without size restrictions then use another library. This choice has been made to have more efficiency
-in that specific size range.
-
-## Usage
-
-First on you should require the bint file including how many bits the bint module will work with,
-by calling the returned function from the require, for example:
-
-```lua
-local bint = require 'bint'(1024)
-```
-
-For more information about its arguments see @{newmodule}.
-Then when you need create a bint, you can use one of the following functions:
-
-* @{bint.fromuinteger} (convert from lua integers, but read as unsigned integer)
-* @{bint.frominteger} (convert from lua integers, preserving the sign)
-* @{bint.frombase} (convert from arbitrary bases, like hexadecimal)
-* @{bint.fromstring} (convert from arbitrary string, support binary/hexadecimal/decimal)
-* @{bint.trunc} (convert from lua numbers, truncating the fractional part)
-* @{bint.new} (convert from anything, asserts on invalid integers)
-* @{bint.tobint} (convert from anything, returns nil on invalid integers)
-* @{bint.parse} (convert from anything, returns a lua number as fallback)
-* @{bint.zero}
-* @{bint.one}
-* `bint`
-
-You can also call `bint` as it is an alias to `bint.new`.
-In doubt use @{bint.new} to create a new bint.
-
-Then you can use all the usual lua numeric operations on it,
-all the arithmetic metamethods are implemented.
-When you are done computing and need to get the result,
-get the output from one of the following functions:
-
-* @{bint.touinteger} (convert to a lua integer, wraps around as an unsigned integer)
-* @{bint.tointeger} (convert to a lua integer, wraps around, preserves the sign)
-* @{bint.tonumber} (convert to lua float, losing precision)
-* @{bint.tobase} (convert to a string in any base)
-* @{bint.__tostring} (convert to a string in base 10)
-
-To output a very large integer with no loss you probably want to use @{bint.tobase}
-or call `tostring` to get a string representation.
-
-## Precautions
-
-All library functions can be mixed with lua numbers,
-this makes easy to mix operations between bints and lua numbers,
-however the user should take care in some situations:
-
-* Don't mix integers and float operations if you want to work with integers only.
-* Don't use the regular equal operator ('==') to compare values from this library,
-unless you know in advance that both values are of the same primitive type,
-otherwise it will always return false, use @{bint.eq} to be safe.
-* Don't pass fractional numbers to functions that an integer is expected
-* Don't mix operations between bint classes with different sizes as this is not supported, this
-will throw assertions.
-* Remember that casting back to lua integers or numbers precision can be lost.
-* For dividing while preserving integers use the @{bint.__idiv} (the '//' operator).
-* For doing power operation preserving integers use the @{bint.ipow} function.
-* Configure the proper integer size you intend to work with, otherwise large integers may wrap around.
-
-]]
-
--- Returns number of bits of the internal lua integer type.
-local function luainteger_bitsize()
- local n, i = -1, 0
- repeat
- n, i = n >> 16, i + 16
- until n==0
- return i
-end
-
-local math_type = math.type
-local math_floor = math.floor
-local math_abs = math.abs
-local math_ceil = math.ceil
-local math_modf = math.modf
-local math_mininteger = math.mininteger
-local math_maxinteger = math.maxinteger
-local math_max = math.max
-local math_min = math.min
-local string_format = string.format
-local table_insert = table.insert
-local table_concat = table.concat
-local table_unpack = table.unpack
-
-local memo = {}
-
---- Create a new bint module representing integers of the desired bit size.
--- This is the returned function when `require 'bint'` is called.
--- @function newmodule
--- @param bits Number of bits for the integer representation, must be multiple of wordbits and
--- at least 64.
--- @param[opt] wordbits Number of the bits for the internal word,
--- defaults to half of Lua's integer size.
-local function newmodule(bits, wordbits)
-
-local intbits = luainteger_bitsize()
-bits = bits or 256
-wordbits = wordbits or (intbits // 2)
-
--- Memoize bint modules
-local memoindex = bits * 64 + wordbits
-if memo[memoindex] then
- return memo[memoindex]
-end
-
--- Validate
-assert(bits % wordbits == 0, 'bitsize is not multiple of word bitsize')
-assert(2*wordbits <= intbits, 'word bitsize must be half of the lua integer bitsize')
-assert(bits >= 64, 'bitsize must be >= 64')
-assert(wordbits >= 8, 'wordbits must be at least 8')
-assert(bits % 8 == 0, 'bitsize must be multiple of 8')
-
--- Create bint module
-local bint = {}
-bint.__index = bint
-
---- Number of bits representing a bint instance.
-bint.bits = bits
-
--- Constants used internally
-local BINT_BITS = bits
-local BINT_BYTES = bits // 8
-local BINT_WORDBITS = wordbits
-local BINT_SIZE = BINT_BITS // BINT_WORDBITS
-local BINT_WORDMAX = (1 << BINT_WORDBITS) - 1
-local BINT_WORDMSB = (1 << (BINT_WORDBITS - 1))
-local BINT_LEPACKFMT = '<'..('I'..(wordbits // 8)):rep(BINT_SIZE)
-local BINT_MATHMININTEGER, BINT_MATHMAXINTEGER
-local BINT_MININTEGER
-
---- Create a new bint with 0 value.
-function bint.zero()
- local x = setmetatable({}, bint)
- for i=1,BINT_SIZE do
- x[i] = 0
- end
- return x
-end
-local bint_zero = bint.zero
-
---- Create a new bint with 1 value.
-function bint.one()
- local x = setmetatable({}, bint)
- x[1] = 1
- for i=2,BINT_SIZE do
- x[i] = 0
- end
- return x
-end
-local bint_one = bint.one
-
--- Convert a value to a lua integer without losing precision.
-local function tointeger(x)
- x = tonumber(x)
- local ty = math_type(x)
- if ty == 'float' then
- local floorx = math_floor(x)
- if floorx == x then
- x = floorx
- ty = math_type(x)
- end
- end
- if ty == 'integer' then
- return x
- end
-end
-
---- Create a bint from an unsigned integer.
--- Treats signed integers as an unsigned integer.
--- @param x A value to initialize from convertible to a lua integer.
--- @return A new bint or nil in case the input cannot be represented by an integer.
--- @see bint.frominteger
-function bint.fromuinteger(x)
- x = tointeger(x)
- if x then
- if x == 1 then
- return bint_one()
- elseif x == 0 then
- return bint_zero()
- end
- local n = setmetatable({}, bint)
- for i=1,BINT_SIZE do
- n[i] = x & BINT_WORDMAX
- x = x >> BINT_WORDBITS
- end
- return n
- end
-end
-local bint_fromuinteger = bint.fromuinteger
-
---- Create a bint from a signed integer.
--- @param x A value to initialize from convertible to a lua integer.
--- @return A new bint or nil in case the input cannot be represented by an integer.
--- @see bint.fromuinteger
-function bint.frominteger(x)
- x = tointeger(x)
- if x then
- if x == 1 then
- return bint_one()
- elseif x == 0 then
- return bint_zero()
- end
- local neg = false
- if x < 0 then
- x = math_abs(x)
- neg = true
- end
- local n = setmetatable({}, bint)
- for i=1,BINT_SIZE do
- n[i] = x & BINT_WORDMAX
- x = x >> BINT_WORDBITS
- end
- if neg then
- n:_unm()
- end
- return n
- end
-end
-local bint_frominteger = bint.frominteger
-
-local basesteps = {}
-
--- Compute the read step for frombase function
-local function getbasestep(base)
- local step = basesteps[base]
- if step then
- return step
- end
- step = 0
- local dmax = 1
- local limit = math_maxinteger // base
- repeat
- step = step + 1
- dmax = dmax * base
- until dmax >= limit
- basesteps[base] = step
- return step
-end
-
--- Compute power with lua integers.
-local function ipow(y, x, n)
- if n == 1 then
- return y * x
- elseif n & 1 == 0 then --even
- return ipow(y, x * x, n // 2)
- end
- return ipow(x * y, x * x, (n-1) // 2)
-end
-
---- Create a bint from a string of the desired base.
--- @param s The string to be converted from,
--- must have only alphanumeric and '+-' characters.
--- @param[opt] base Base that the number is represented, defaults to 10.
--- Must be at least 2 and at most 36.
--- @return A new bint or nil in case the conversion failed.
-function bint.frombase(s, base)
- if type(s) ~= 'string' then
- return
- end
- base = base or 10
- if not (base >= 2 and base <= 36) then
- -- number base is too large
- return
- end
- local step = getbasestep(base)
- if #s < step then
- -- string is small, use tonumber (faster)
- return bint_frominteger(tonumber(s, base))
- end
- local sign, int = s:lower():match('^([+-]?)(%w+)$')
- if not (sign and int) then
- -- invalid integer string representation
- return
- end
- local n = bint_zero()
- for i=1,#int,step do
- local part = int:sub(i,i+step-1)
- local d = tonumber(part, base)
- if not d then
- -- invalid integer string representation
- return
- end
- if i > 1 then
- n = n * ipow(1, base, #part)
- end
- if d ~= 0 then
- n:_add(d)
- end
- end
- if sign == '-' then
- n:_unm()
- end
- return n
-end
-local bint_frombase = bint.frombase
-
---- Create a new bint from a string.
--- The string can by a decimal number, binary number prefixed with '0b' or hexadecimal number prefixed with '0x'.
--- @param s A string convertible to a bint.
--- @return A new bint or nil in case the conversion failed.
--- @see bint.frombase
-function bint.fromstring(s)
- if type(s) ~= 'string' then
- return
- end
- if s:find('^[+-]?[0-9]+$') then
- return bint_frombase(s, 10)
- elseif s:find('^[+-]?0[xX][0-9a-fA-F]+$') then
- return bint_frombase(s:gsub('0[xX]', '', 1), 16)
- elseif s:find('^[+-]?0[bB][01]+$') then
- return bint_frombase(s:gsub('0[bB]', '', 1), 2)
- end
-end
-local bint_fromstring = bint.fromstring
-
---- Create a new bint from a buffer of little-endian bytes.
--- @param buffer Buffer of bytes, extra bytes are trimmed from the right, missing bytes are padded to the right.
--- @raise An assert is thrown in case buffer is not an string.
--- @return A bint.
-function bint.fromle(buffer)
- assert(type(buffer) == 'string', 'buffer is not a string')
- if #buffer > BINT_BYTES then -- trim extra bytes from the right
- buffer = buffer:sub(1, BINT_BYTES)
- elseif #buffer < BINT_BYTES then -- add missing bytes to the right
- buffer = buffer..('\x00'):rep(BINT_BYTES - #buffer)
- end
- return setmetatable({BINT_LEPACKFMT:unpack(buffer)}, bint)
-end
-
---- Create a new bint from a buffer of big-endian bytes.
--- @param buffer Buffer of bytes, extra bytes are trimmed from the left, missing bytes are padded to the left.
--- @raise An assert is thrown in case buffer is not an string.
--- @return A bint.
-function bint.frombe(buffer)
- assert(type(buffer) == 'string', 'buffer is not a string')
- if #buffer > BINT_BYTES then -- trim extra bytes from the left
- buffer = buffer:sub(-BINT_BYTES, #buffer)
- elseif #buffer < BINT_BYTES then -- add missing bytes to the left
- buffer = ('\x00'):rep(BINT_BYTES - #buffer)..buffer
- end
- return setmetatable({BINT_LEPACKFMT:unpack(buffer:reverse())}, bint)
-end
-
---- Create a new bint from a value.
--- @param x A value convertible to a bint (string, number or another bint).
--- @return A new bint, guaranteed to be a new reference in case needed.
--- @raise An assert is thrown in case x is not convertible to a bint.
--- @see bint.tobint
--- @see bint.parse
-function bint.new(x)
- if getmetatable(x) ~= bint then
- local ty = type(x)
- if ty == 'number' then
- x = bint_frominteger(x)
- elseif ty == 'string' then
- x = bint_fromstring(x)
- end
- assert(x, 'value cannot be represented by a bint')
- return x
- end
- -- return a clone
- local n = setmetatable({}, bint)
- for i=1,BINT_SIZE do
- n[i] = x[i]
- end
- return n
-end
-local bint_new = bint.new
-
---- Convert a value to a bint if possible.
--- @param x A value to be converted (string, number or another bint).
--- @param[opt] clone A boolean that tells if a new bint reference should be returned.
--- Defaults to false.
--- @return A bint or nil in case the conversion failed.
--- @see bint.new
--- @see bint.parse
-function bint.tobint(x, clone)
- if getmetatable(x) == bint then
- if not clone then
- return x
- end
- -- return a clone
- local n = setmetatable({}, bint)
- for i=1,BINT_SIZE do
- n[i] = x[i]
- end
- return n
- end
- local ty = type(x)
- if ty == 'number' then
- return bint_frominteger(x)
- elseif ty == 'string' then
- return bint_fromstring(x)
- end
-end
-local tobint = bint.tobint
-
---- Convert a value to a bint if possible otherwise to a lua number.
--- Useful to prepare values that you are unsure if it's going to be an integer or float.
--- @param x A value to be converted (string, number or another bint).
--- @param[opt] clone A boolean that tells if a new bint reference should be returned.
--- Defaults to false.
--- @return A bint or a lua number or nil in case the conversion failed.
--- @see bint.new
--- @see bint.tobint
-function bint.parse(x, clone)
- local i = tobint(x, clone)
- if i then
- return i
- end
- return tonumber(x)
-end
-local bint_parse = bint.parse
-
---- Convert a bint to an unsigned integer.
--- Note that large unsigned integers may be represented as negatives in lua integers.
--- Note that lua cannot represent values larger than 64 bits,
--- in that case integer values wrap around.
--- @param x A bint or a number to be converted into an unsigned integer.
--- @return An integer or nil in case the input cannot be represented by an integer.
--- @see bint.tointeger
-function bint.touinteger(x)
- if getmetatable(x) == bint then
- local n = 0
- for i=1,BINT_SIZE do
- n = n | (x[i] << (BINT_WORDBITS * (i - 1)))
- end
- return n
- end
- return tointeger(x)
-end
-
---- Convert a bint to a signed integer.
--- It works by taking absolute values then applying the sign bit in case needed.
--- Note that lua cannot represent values larger than 64 bits,
--- in that case integer values wrap around.
--- @param x A bint or value to be converted into an unsigned integer.
--- @return An integer or nil in case the input cannot be represented by an integer.
--- @see bint.touinteger
-function bint.tointeger(x)
- if getmetatable(x) == bint then
- local n = 0
- local neg = x:isneg()
- if neg then
- x = -x
- end
- for i=1,BINT_SIZE do
- n = n | (x[i] << (BINT_WORDBITS * (i - 1)))
- end
- if neg then
- n = -n
- end
- return n
- end
- return tointeger(x)
-end
-local bint_tointeger = bint.tointeger
-
-local function bint_assert_tointeger(x)
- x = bint_tointeger(x)
- if not x then
- error('value has no integer representation')
- end
- return x
-end
-
---- Convert a bint to a lua float in case integer would wrap around or lua integer otherwise.
--- Different from @{bint.tointeger} the operation does not wrap around integers,
--- but digits precision are lost in the process of converting to a float.
--- @param x A bint or value to be converted into a lua number.
--- @return A lua number or nil in case the input cannot be represented by a number.
--- @see bint.tointeger
-function bint.tonumber(x)
- if getmetatable(x) == bint then
- if x <= BINT_MATHMAXINTEGER and x >= BINT_MATHMININTEGER then
- return x:tointeger()
- end
- return tonumber(tostring(x))
- end
- return tonumber(x)
-end
-local bint_tonumber = bint.tonumber
-
--- Compute base letters to use in bint.tobase
-local BASE_LETTERS = {}
-do
- for i=1,36 do
- BASE_LETTERS[i-1] = ('0123456789abcdefghijklmnopqrstuvwxyz'):sub(i,i)
- end
-end
-
---- Convert a bint to a string in the desired base.
--- @param x The bint to be converted from.
--- @param[opt] base Base to be represented, defaults to 10.
--- Must be at least 2 and at most 36.
--- @param[opt] unsigned Whether to output as an unsigned integer.
--- Defaults to false for base 10 and true for others.
--- When unsigned is false the symbol '-' is prepended in negative values.
--- @return A string representing the input.
--- @raise An assert is thrown in case the base is invalid.
-function bint.tobase(x, base, unsigned)
- x = tobint(x)
- if not x then
- -- x is a fractional float or something else
- return
- end
- base = base or 10
- if not (base >= 2 and base <= 36) then
- -- number base is too large
- return
- end
- if unsigned == nil then
- unsigned = base ~= 10
- end
- local isxneg = x:isneg()
- if (base == 10 and not unsigned) or (base == 16 and unsigned and not isxneg) then
- if x <= BINT_MATHMAXINTEGER and x >= BINT_MATHMININTEGER then
- -- integer is small, use tostring or string.format (faster)
- local n = x:tointeger()
- if base == 10 then
- return tostring(n)
- elseif unsigned then
- return string_format('%x', n)
- end
- end
- end
- local ss = {}
- local neg = not unsigned and isxneg
- x = neg and x:abs() or bint_new(x)
- local xiszero = x:iszero()
- if xiszero then
- return '0'
- end
- -- calculate basepow
- local step = 0
- local basepow = 1
- local limit = (BINT_WORDMSB - 1) // base
- repeat
- step = step + 1
- basepow = basepow * base
- until basepow >= limit
- -- serialize base digits
- local size = BINT_SIZE
- local xd, carry, d
- repeat
- -- single word division
- carry = 0
- xiszero = true
- for i=size,1,-1 do
- carry = carry | x[i]
- d, xd = carry // basepow, carry % basepow
- if xiszero and d ~= 0 then
- size = i
- xiszero = false
- end
- x[i] = d
- carry = xd << BINT_WORDBITS
- end
- -- digit division
- for _=1,step do
- xd, d = xd // base, xd % base
- if xiszero and xd == 0 and d == 0 then
- -- stop on leading zeros
- break
- end
- table_insert(ss, 1, BASE_LETTERS[d])
- end
- until xiszero
- if neg then
- table_insert(ss, 1, '-')
- end
- return table_concat(ss)
-end
-
-local function bint_assert_convert(x)
- return assert(tobint(x), 'value has not integer representation')
-end
-
---- Convert a bint to a buffer of little-endian bytes.
--- @param x A bint or lua integer.
--- @param[opt] trim If true, zero bytes on the right are trimmed.
--- @return A buffer of bytes representing the input.
--- @raise Asserts in case input is not convertible to an integer.
-function bint.tole(x, trim)
- x = bint_assert_convert(x)
- local s = BINT_LEPACKFMT:pack(table_unpack(x))
- if trim then
- s = s:gsub('\x00+$', '')
- if s == '' then
- s = '\x00'
- end
- end
- return s
-end
-
---- Convert a bint to a buffer of big-endian bytes.
--- @param x A bint or lua integer.
--- @param[opt] trim If true, zero bytes on the left are trimmed.
--- @return A buffer of bytes representing the input.
--- @raise Asserts in case input is not convertible to an integer.
-function bint.tobe(x, trim)
- x = bint_assert_convert(x)
- local s = BINT_LEPACKFMT:pack(table_unpack(x)):reverse()
- if trim then
- s = s:gsub('^\x00+', '')
- if s == '' then
- s = '\x00'
- end
- end
- return s
-end
-
---- Check if a number is 0 considering bints.
--- @param x A bint or a lua number.
-function bint.iszero(x)
- if getmetatable(x) == bint then
- for i=1,BINT_SIZE do
- if x[i] ~= 0 then
- return false
- end
- end
- return true
- end
- return x == 0
-end
-
---- Check if a number is 1 considering bints.
--- @param x A bint or a lua number.
-function bint.isone(x)
- if getmetatable(x) == bint then
- if x[1] ~= 1 then
- return false
- end
- for i=2,BINT_SIZE do
- if x[i] ~= 0 then
- return false
- end
- end
- return true
- end
- return x == 1
-end
-
---- Check if a number is -1 considering bints.
--- @param x A bint or a lua number.
-function bint.isminusone(x)
- if getmetatable(x) == bint then
- for i=1,BINT_SIZE do
- if x[i] ~= BINT_WORDMAX then
- return false
- end
- end
- return true
- end
- return x == -1
-end
-local bint_isminusone = bint.isminusone
-
---- Check if the input is a bint.
--- @param x Any lua value.
-function bint.isbint(x)
- return getmetatable(x) == bint
-end
-
---- Check if the input is a lua integer or a bint.
--- @param x Any lua value.
-function bint.isintegral(x)
- return getmetatable(x) == bint or math_type(x) == 'integer'
-end
-
---- Check if the input is a bint or a lua number.
--- @param x Any lua value.
-function bint.isnumeric(x)
- return getmetatable(x) == bint or type(x) == 'number'
-end
-
---- Get the number type of the input (bint, integer or float).
--- @param x Any lua value.
--- @return Returns "bint" for bints, "integer" for lua integers,
--- "float" from lua floats or nil otherwise.
-function bint.type(x)
- if getmetatable(x) == bint then
- return 'bint'
- end
- return math_type(x)
-end
-
---- Check if a number is negative considering bints.
--- Zero is guaranteed to never be negative for bints.
--- @param x A bint or a lua number.
-function bint.isneg(x)
- if getmetatable(x) == bint then
- return x[BINT_SIZE] & BINT_WORDMSB ~= 0
- end
- return x < 0
-end
-local bint_isneg = bint.isneg
-
---- Check if a number is positive considering bints.
--- @param x A bint or a lua number.
-function bint.ispos(x)
- if getmetatable(x) == bint then
- return not x:isneg() and not x:iszero()
- end
- return x > 0
-end
-
---- Check if a number is even considering bints.
--- @param x A bint or a lua number.
-function bint.iseven(x)
- if getmetatable(x) == bint then
- return x[1] & 1 == 0
- end
- return math_abs(x) % 2 == 0
-end
-
---- Check if a number is odd considering bints.
--- @param x A bint or a lua number.
-function bint.isodd(x)
- if getmetatable(x) == bint then
- return x[1] & 1 == 1
- end
- return math_abs(x) % 2 == 1
-end
-
---- Create a new bint with the maximum possible integer value.
-function bint.maxinteger()
- local x = setmetatable({}, bint)
- for i=1,BINT_SIZE-1 do
- x[i] = BINT_WORDMAX
- end
- x[BINT_SIZE] = BINT_WORDMAX ~ BINT_WORDMSB
- return x
-end
-
---- Create a new bint with the minimum possible integer value.
-function bint.mininteger()
- local x = setmetatable({}, bint)
- for i=1,BINT_SIZE-1 do
- x[i] = 0
- end
- x[BINT_SIZE] = BINT_WORDMSB
- return x
-end
-
---- Bitwise left shift a bint in one bit (in-place).
-function bint:_shlone()
- local wordbitsm1 = BINT_WORDBITS - 1
- for i=BINT_SIZE,2,-1 do
- self[i] = ((self[i] << 1) | (self[i-1] >> wordbitsm1)) & BINT_WORDMAX
- end
- self[1] = (self[1] << 1) & BINT_WORDMAX
- return self
-end
-
---- Bitwise right shift a bint in one bit (in-place).
-function bint:_shrone()
- local wordbitsm1 = BINT_WORDBITS - 1
- for i=1,BINT_SIZE-1 do
- self[i] = ((self[i] >> 1) | (self[i+1] << wordbitsm1)) & BINT_WORDMAX
- end
- self[BINT_SIZE] = self[BINT_SIZE] >> 1
- return self
-end
-
--- Bitwise left shift words of a bint (in-place). Used only internally.
-function bint:_shlwords(n)
- for i=BINT_SIZE,n+1,-1 do
- self[i] = self[i - n]
- end
- for i=1,n do
- self[i] = 0
- end
- return self
-end
-
--- Bitwise right shift words of a bint (in-place). Used only internally.
-function bint:_shrwords(n)
- if n < BINT_SIZE then
- for i=1,BINT_SIZE-n do
- self[i] = self[i + n]
- end
- for i=BINT_SIZE-n+1,BINT_SIZE do
- self[i] = 0
- end
- else
- for i=1,BINT_SIZE do
- self[i] = 0
- end
- end
- return self
-end
-
---- Increment a bint by one (in-place).
-function bint:_inc()
- for i=1,BINT_SIZE do
- local tmp = self[i]
- local v = (tmp + 1) & BINT_WORDMAX
- self[i] = v
- if v > tmp then
- break
- end
- end
- return self
-end
-
---- Increment a number by one considering bints.
--- @param x A bint or a lua number to increment.
-function bint.inc(x)
- local ix = tobint(x, true)
- if ix then
- return ix:_inc()
- end
- return x + 1
-end
-
---- Decrement a bint by one (in-place).
-function bint:_dec()
- for i=1,BINT_SIZE do
- local tmp = self[i]
- local v = (tmp - 1) & BINT_WORDMAX
- self[i] = v
- if v <= tmp then
- break
- end
- end
- return self
-end
-
---- Decrement a number by one considering bints.
--- @param x A bint or a lua number to decrement.
-function bint.dec(x)
- local ix = tobint(x, true)
- if ix then
- return ix:_dec()
- end
- return x - 1
-end
-
---- Assign a bint to a new value (in-place).
--- @param y A value to be copied from.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint:_assign(y)
- y = bint_assert_convert(y)
- for i=1,BINT_SIZE do
- self[i] = y[i]
- end
- return self
-end
-
---- Take absolute of a bint (in-place).
-function bint:_abs()
- if self:isneg() then
- self:_unm()
- end
- return self
-end
-
---- Take absolute of a number considering bints.
--- @param x A bint or a lua number to take the absolute.
-function bint.abs(x)
- local ix = tobint(x, true)
- if ix then
- return ix:_abs()
- end
- return math_abs(x)
-end
-local bint_abs = bint.abs
-
---- Take the floor of a number considering bints.
--- @param x A bint or a lua number to perform the floor operation.
-function bint.floor(x)
- if getmetatable(x) == bint then
- return bint_new(x)
- end
- return bint_new(math_floor(tonumber(x)))
-end
-
---- Take ceil of a number considering bints.
--- @param x A bint or a lua number to perform the ceil operation.
-function bint.ceil(x)
- if getmetatable(x) == bint then
- return bint_new(x)
- end
- return bint_new(math_ceil(tonumber(x)))
-end
-
---- Wrap around bits of an integer (discarding left bits) considering bints.
--- @param x A bint or a lua integer.
--- @param y Number of right bits to preserve.
-function bint.bwrap(x, y)
- x = bint_assert_convert(x)
- if y <= 0 then
- return bint_zero()
- elseif y < BINT_BITS then
- return x & (bint_one() << y):_dec()
- end
- return bint_new(x)
-end
-
---- Rotate left integer x by y bits considering bints.
--- @param x A bint or a lua integer.
--- @param y Number of bits to rotate.
-function bint.brol(x, y)
- x, y = bint_assert_convert(x), bint_assert_tointeger(y)
- if y > 0 then
- return (x << y) | (x >> (BINT_BITS - y))
- elseif y < 0 then
- if y ~= math_mininteger then
- return x:bror(-y)
- else
- x:bror(-(y+1))
- x:bror(1)
- end
- end
- return x
-end
-
---- Rotate right integer x by y bits considering bints.
--- @param x A bint or a lua integer.
--- @param y Number of bits to rotate.
-function bint.bror(x, y)
- x, y = bint_assert_convert(x), bint_assert_tointeger(y)
- if y > 0 then
- return (x >> y) | (x << (BINT_BITS - y))
- elseif y < 0 then
- if y ~= math_mininteger then
- return x:brol(-y)
- else
- x:brol(-(y+1))
- x:brol(1)
- end
- end
- return x
-end
-
---- Truncate a number to a bint.
--- Floats numbers are truncated, that is, the fractional port is discarded.
--- @param x A number to truncate.
--- @return A new bint or nil in case the input does not fit in a bint or is not a number.
-function bint.trunc(x)
- if getmetatable(x) ~= bint then
- x = tonumber(x)
- if x then
- local ty = math_type(x)
- if ty == 'float' then
- -- truncate to integer
- x = math_modf(x)
- end
- return bint_frominteger(x)
- end
- return
- end
- return bint_new(x)
-end
-
---- Take maximum between two numbers considering bints.
--- @param x A bint or lua number to compare.
--- @param y A bint or lua number to compare.
--- @return A bint or a lua number. Guarantees to return a new bint for integer values.
-function bint.max(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- return bint_new(ix > iy and ix or iy)
- end
- return bint_parse(math_max(x, y))
-end
-
---- Take minimum between two numbers considering bints.
--- @param x A bint or lua number to compare.
--- @param y A bint or lua number to compare.
--- @return A bint or a lua number. Guarantees to return a new bint for integer values.
-function bint.min(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- return bint_new(ix < iy and ix or iy)
- end
- return bint_parse(math_min(x, y))
-end
-
---- Add an integer to a bint (in-place).
--- @param y An integer to be added.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint:_add(y)
- y = bint_assert_convert(y)
- local carry = 0
- for i=1,BINT_SIZE do
- local tmp = self[i] + y[i] + carry
- carry = tmp >> BINT_WORDBITS
- self[i] = tmp & BINT_WORDMAX
- end
- return self
-end
-
---- Add two numbers considering bints.
--- @param x A bint or a lua number to be added.
--- @param y A bint or a lua number to be added.
-function bint.__add(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- local z = setmetatable({}, bint)
- local carry = 0
- for i=1,BINT_SIZE do
- local tmp = ix[i] + iy[i] + carry
- carry = tmp >> BINT_WORDBITS
- z[i] = tmp & BINT_WORDMAX
- end
- return z
- end
- return bint_tonumber(x) + bint_tonumber(y)
-end
-
---- Subtract an integer from a bint (in-place).
--- @param y An integer to subtract.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint:_sub(y)
- y = bint_assert_convert(y)
- local borrow = 0
- local wordmaxp1 = BINT_WORDMAX + 1
- for i=1,BINT_SIZE do
- local res = self[i] + wordmaxp1 - y[i] - borrow
- self[i] = res & BINT_WORDMAX
- borrow = (res >> BINT_WORDBITS) ~ 1
- end
- return self
-end
-
---- Subtract two numbers considering bints.
--- @param x A bint or a lua number to be subtracted from.
--- @param y A bint or a lua number to subtract.
-function bint.__sub(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- local z = setmetatable({}, bint)
- local borrow = 0
- local wordmaxp1 = BINT_WORDMAX + 1
- for i=1,BINT_SIZE do
- local res = ix[i] + wordmaxp1 - iy[i] - borrow
- z[i] = res & BINT_WORDMAX
- borrow = (res >> BINT_WORDBITS) ~ 1
- end
- return z
- end
- return bint_tonumber(x) - bint_tonumber(y)
-end
-
---- Multiply two numbers considering bints.
--- @param x A bint or a lua number to multiply.
--- @param y A bint or a lua number to multiply.
-function bint.__mul(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- local z = bint_zero()
- local sizep1 = BINT_SIZE+1
- local s = sizep1
- local e = 0
- for i=1,BINT_SIZE do
- if ix[i] ~= 0 or iy[i] ~= 0 then
- e = math_max(e, i)
- s = math_min(s, i)
- end
- end
- for i=s,e do
- for j=s,math_min(sizep1-i,e) do
- local a = ix[i] * iy[j]
- if a ~= 0 then
- local carry = 0
- for k=i+j-1,BINT_SIZE do
- local tmp = z[k] + (a & BINT_WORDMAX) + carry
- carry = tmp >> BINT_WORDBITS
- z[k] = tmp & BINT_WORDMAX
- a = a >> BINT_WORDBITS
- end
- end
- end
- end
- return z
- end
- return bint_tonumber(x) * bint_tonumber(y)
-end
-
---- Check if bints are equal.
--- @param x A bint to compare.
--- @param y A bint to compare.
-function bint.__eq(x, y)
- for i=1,BINT_SIZE do
- if x[i] ~= y[i] then
- return false
- end
- end
- return true
-end
-
---- Check if numbers are equal considering bints.
--- @param x A bint or lua number to compare.
--- @param y A bint or lua number to compare.
-function bint.eq(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- return ix == iy
- end
- return x == y
-end
-local bint_eq = bint.eq
-
-local function findleftbit(x)
- for i=BINT_SIZE,1,-1 do
- local v = x[i]
- if v ~= 0 then
- local j = 0
- repeat
- v = v >> 1
- j = j + 1
- until v == 0
- return (i-1)*BINT_WORDBITS + j - 1, i
- end
- end
-end
-
--- Single word division modulus
-local function sudivmod(nume, deno)
- local rema
- local carry = 0
- for i=BINT_SIZE,1,-1 do
- carry = carry | nume[i]
- nume[i] = carry // deno
- rema = carry % deno
- carry = rema << BINT_WORDBITS
- end
- return rema
-end
-
---- Perform unsigned division and modulo operation between two integers considering bints.
--- This is effectively the same of @{bint.udiv} and @{bint.umod}.
--- @param x The numerator, must be a bint or a lua integer.
--- @param y The denominator, must be a bint or a lua integer.
--- @return The quotient following the remainder, both bints.
--- @raise Asserts on attempt to divide by zero
--- or if inputs are not convertible to integers.
--- @see bint.udiv
--- @see bint.umod
-function bint.udivmod(x, y)
- local nume = bint_new(x)
- local deno = bint_assert_convert(y)
- -- compute if high bits of denominator are all zeros
- local ishighzero = true
- for i=2,BINT_SIZE do
- if deno[i] ~= 0 then
- ishighzero = false
- break
- end
- end
- if ishighzero then
- -- try to divide by a single word (optimization)
- local low = deno[1]
- assert(low ~= 0, 'attempt to divide by zero')
- if low == 1 then
- -- denominator is one
- return nume, bint_zero()
- elseif low <= (BINT_WORDMSB - 1) then
- -- can do single word division
- local rema = sudivmod(nume, low)
- return nume, bint_fromuinteger(rema)
- end
- end
- if nume:ult(deno) then
- -- denominator is greater than numerator
- return bint_zero(), nume
- end
- -- align leftmost digits in numerator and denominator
- local denolbit = findleftbit(deno)
- local numelbit, numesize = findleftbit(nume)
- local bit = numelbit - denolbit
- deno = deno << bit
- local wordmaxp1 = BINT_WORDMAX + 1
- local wordbitsm1 = BINT_WORDBITS - 1
- local denosize = numesize
- local quot = bint_zero()
- while bit >= 0 do
- -- compute denominator <= numerator
- local le = true
- local size = math_max(numesize, denosize)
- for i=size,1,-1 do
- local a, b = deno[i], nume[i]
- if a ~= b then
- le = a < b
- break
- end
- end
- -- if the portion of the numerator above the denominator is greater or equal than to the denominator
- if le then
- -- subtract denominator from the portion of the numerator
- local borrow = 0
- for i=1,size do
- local res = nume[i] + wordmaxp1 - deno[i] - borrow
- nume[i] = res & BINT_WORDMAX
- borrow = (res >> BINT_WORDBITS) ~ 1
- end
- -- concatenate 1 to the right bit of the quotient
- local i = (bit // BINT_WORDBITS) + 1
- quot[i] = quot[i] | (1 << (bit % BINT_WORDBITS))
- end
- -- shift right the denominator in one bit
- for i=1,denosize-1 do
- deno[i] = ((deno[i] >> 1) | (deno[i+1] << wordbitsm1)) & BINT_WORDMAX
- end
- local lastdenoword = deno[denosize] >> 1
- deno[denosize] = lastdenoword
- -- recalculate denominator size (optimization)
- if lastdenoword == 0 then
- while deno[denosize] == 0 do
- denosize = denosize - 1
- end
- if denosize == 0 then
- break
- end
- end
- -- decrement current set bit for the quotient
- bit = bit - 1
- end
- -- the remaining numerator is the remainder
- return quot, nume
-end
-local bint_udivmod = bint.udivmod
-
---- Perform unsigned division between two integers considering bints.
--- @param x The numerator, must be a bint or a lua integer.
--- @param y The denominator, must be a bint or a lua integer.
--- @return The quotient, a bint.
--- @raise Asserts on attempt to divide by zero
--- or if inputs are not convertible to integers.
-function bint.udiv(x, y)
- return (bint_udivmod(x, y))
-end
-
---- Perform unsigned integer modulo operation between two integers considering bints.
--- @param x The numerator, must be a bint or a lua integer.
--- @param y The denominator, must be a bint or a lua integer.
--- @return The remainder, a bint.
--- @raise Asserts on attempt to divide by zero
--- or if the inputs are not convertible to integers.
-function bint.umod(x, y)
- local _, rema = bint_udivmod(x, y)
- return rema
-end
-local bint_umod = bint.umod
-
---- Perform integer truncate division and modulo operation between two numbers considering bints.
--- This is effectively the same of @{bint.tdiv} and @{bint.tmod}.
--- @param x The numerator, a bint or lua number.
--- @param y The denominator, a bint or lua number.
--- @return The quotient following the remainder, both bint or lua number.
--- @raise Asserts on attempt to divide by zero or on division overflow.
--- @see bint.tdiv
--- @see bint.tmod
-function bint.tdivmod(x, y)
- local ax, ay = bint_abs(x), bint_abs(y)
- local ix, iy = tobint(ax), tobint(ay)
- local quot, rema
- if ix and iy then
- assert(not (bint_eq(x, BINT_MININTEGER) and bint_isminusone(y)), 'division overflow')
- quot, rema = bint_udivmod(ix, iy)
- else
- quot, rema = ax // ay, ax % ay
- end
- local isxneg, isyneg = bint_isneg(x), bint_isneg(y)
- if isxneg ~= isyneg then
- quot = -quot
- end
- if isxneg then
- rema = -rema
- end
- return quot, rema
-end
-local bint_tdivmod = bint.tdivmod
-
---- Perform truncate division between two numbers considering bints.
--- Truncate division is a division that rounds the quotient towards zero.
--- @param x The numerator, a bint or lua number.
--- @param y The denominator, a bint or lua number.
--- @return The quotient, a bint or lua number.
--- @raise Asserts on attempt to divide by zero or on division overflow.
-function bint.tdiv(x, y)
- return (bint_tdivmod(x, y))
-end
-
---- Perform integer truncate modulo operation between two numbers considering bints.
--- The operation is defined as the remainder of the truncate division
--- (division that rounds the quotient towards zero).
--- @param x The numerator, a bint or lua number.
--- @param y The denominator, a bint or lua number.
--- @return The remainder, a bint or lua number.
--- @raise Asserts on attempt to divide by zero or on division overflow.
-function bint.tmod(x, y)
- local _, rema = bint_tdivmod(x, y)
- return rema
-end
-
---- Perform integer floor division and modulo operation between two numbers considering bints.
--- This is effectively the same of @{bint.__idiv} and @{bint.__mod}.
--- @param x The numerator, a bint or lua number.
--- @param y The denominator, a bint or lua number.
--- @return The quotient following the remainder, both bint or lua number.
--- @raise Asserts on attempt to divide by zero.
--- @see bint.__idiv
--- @see bint.__mod
-function bint.idivmod(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- local isnumeneg = ix[BINT_SIZE] & BINT_WORDMSB ~= 0
- local isdenoneg = iy[BINT_SIZE] & BINT_WORDMSB ~= 0
- if isnumeneg then
- ix = -ix
- end
- if isdenoneg then
- iy = -iy
- end
- local quot, rema = bint_udivmod(ix, iy)
- if isnumeneg ~= isdenoneg then
- quot:_unm()
- -- round quotient towards minus infinity
- if not rema:iszero() then
- quot:_dec()
- -- adjust the remainder
- if isnumeneg and not isdenoneg then
- rema:_unm():_add(y)
- elseif isdenoneg and not isnumeneg then
- rema:_add(y)
- end
- end
- elseif isnumeneg then
- -- adjust the remainder
- rema:_unm()
- end
- return quot, rema
- end
- local nx, ny = bint_tonumber(x), bint_tonumber(y)
- return nx // ny, nx % ny
-end
-local bint_idivmod = bint.idivmod
-
---- Perform floor division between two numbers considering bints.
--- Floor division is a division that rounds the quotient towards minus infinity,
--- resulting in the floor of the division of its operands.
--- @param x The numerator, a bint or lua number.
--- @param y The denominator, a bint or lua number.
--- @return The quotient, a bint or lua number.
--- @raise Asserts on attempt to divide by zero.
-function bint.__idiv(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- local isnumeneg = ix[BINT_SIZE] & BINT_WORDMSB ~= 0
- local isdenoneg = iy[BINT_SIZE] & BINT_WORDMSB ~= 0
- if isnumeneg then
- ix = -ix
- end
- if isdenoneg then
- iy = -iy
- end
- local quot, rema = bint_udivmod(ix, iy)
- if isnumeneg ~= isdenoneg then
- quot:_unm()
- -- round quotient towards minus infinity
- if not rema:iszero() then
- quot:_dec()
- end
- end
- return quot, rema
- end
- return bint_tonumber(x) // bint_tonumber(y)
-end
-
---- Perform division between two numbers considering bints.
--- This always casts inputs to floats, for integer division only use @{bint.__idiv}.
--- @param x The numerator, a bint or lua number.
--- @param y The denominator, a bint or lua number.
--- @return The quotient, a lua number.
-function bint.__div(x, y)
- return bint_tonumber(x) / bint_tonumber(y)
-end
-
---- Perform integer floor modulo operation between two numbers considering bints.
--- The operation is defined as the remainder of the floor division
--- (division that rounds the quotient towards minus infinity).
--- @param x The numerator, a bint or lua number.
--- @param y The denominator, a bint or lua number.
--- @return The remainder, a bint or lua number.
--- @raise Asserts on attempt to divide by zero.
-function bint.__mod(x, y)
- local _, rema = bint_idivmod(x, y)
- return rema
-end
-
---- Perform integer power between two integers considering bints.
--- If y is negative then pow is performed as an unsigned integer.
--- @param x The base, an integer.
--- @param y The exponent, an integer.
--- @return The result of the pow operation, a bint.
--- @raise Asserts in case inputs are not convertible to integers.
--- @see bint.__pow
--- @see bint.upowmod
-function bint.ipow(x, y)
- y = bint_assert_convert(y)
- if y:iszero() then
- return bint_one()
- elseif y:isone() then
- return bint_new(x)
- end
- -- compute exponentiation by squaring
- x, y = bint_new(x), bint_new(y)
- local z = bint_one()
- repeat
- if y:iseven() then
- x = x * x
- y:_shrone()
- else
- z = x * z
- x = x * x
- y:_dec():_shrone()
- end
- until y:isone()
- return x * z
-end
-
---- Perform integer power between two unsigned integers over a modulus considering bints.
--- @param x The base, an integer.
--- @param y The exponent, an integer.
--- @param m The modulus, an integer.
--- @return The result of the pow operation, a bint.
--- @raise Asserts in case inputs are not convertible to integers.
--- @see bint.__pow
--- @see bint.ipow
-function bint.upowmod(x, y, m)
- m = bint_assert_convert(m)
- if m:isone() then
- return bint_zero()
- end
- x, y = bint_new(x), bint_new(y)
- local z = bint_one()
- x = bint_umod(x, m)
- while not y:iszero() do
- if y:isodd() then
- z = bint_umod(z*x, m)
- end
- y:_shrone()
- x = bint_umod(x*x, m)
- end
- return z
-end
-
---- Perform numeric power between two numbers considering bints.
--- This always casts inputs to floats, for integer power only use @{bint.ipow}.
--- @param x The base, a bint or lua number.
--- @param y The exponent, a bint or lua number.
--- @return The result of the pow operation, a lua number.
--- @see bint.ipow
-function bint.__pow(x, y)
- return bint_tonumber(x) ^ bint_tonumber(y)
-end
-
---- Bitwise left shift integers considering bints.
--- @param x An integer to perform the bitwise shift.
--- @param y An integer with the number of bits to shift.
--- @return The result of shift operation, a bint.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint.__shl(x, y)
- x, y = bint_new(x), bint_assert_tointeger(y)
- if y == math_mininteger or math_abs(y) >= BINT_BITS then
- return bint_zero()
- end
- if y < 0 then
- return x >> -y
- end
- local nvals = y // BINT_WORDBITS
- if nvals ~= 0 then
- x:_shlwords(nvals)
- y = y - nvals * BINT_WORDBITS
- end
- if y ~= 0 then
- local wordbitsmy = BINT_WORDBITS - y
- for i=BINT_SIZE,2,-1 do
- x[i] = ((x[i] << y) | (x[i-1] >> wordbitsmy)) & BINT_WORDMAX
- end
- x[1] = (x[1] << y) & BINT_WORDMAX
- end
- return x
-end
-
---- Bitwise right shift integers considering bints.
--- @param x An integer to perform the bitwise shift.
--- @param y An integer with the number of bits to shift.
--- @return The result of shift operation, a bint.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint.__shr(x, y)
- x, y = bint_new(x), bint_assert_tointeger(y)
- if y == math_mininteger or math_abs(y) >= BINT_BITS then
- return bint_zero()
- end
- if y < 0 then
- return x << -y
- end
- local nvals = y // BINT_WORDBITS
- if nvals ~= 0 then
- x:_shrwords(nvals)
- y = y - nvals * BINT_WORDBITS
- end
- if y ~= 0 then
- local wordbitsmy = BINT_WORDBITS - y
- for i=1,BINT_SIZE-1 do
- x[i] = ((x[i] >> y) | (x[i+1] << wordbitsmy)) & BINT_WORDMAX
- end
- x[BINT_SIZE] = x[BINT_SIZE] >> y
- end
- return x
-end
-
---- Bitwise AND bints (in-place).
--- @param y An integer to perform bitwise AND.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint:_band(y)
- y = bint_assert_convert(y)
- for i=1,BINT_SIZE do
- self[i] = self[i] & y[i]
- end
- return self
-end
-
---- Bitwise AND two integers considering bints.
--- @param x An integer to perform bitwise AND.
--- @param y An integer to perform bitwise AND.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint.__band(x, y)
- return bint_new(x):_band(y)
-end
-
---- Bitwise OR bints (in-place).
--- @param y An integer to perform bitwise OR.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint:_bor(y)
- y = bint_assert_convert(y)
- for i=1,BINT_SIZE do
- self[i] = self[i] | y[i]
- end
- return self
-end
-
---- Bitwise OR two integers considering bints.
--- @param x An integer to perform bitwise OR.
--- @param y An integer to perform bitwise OR.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint.__bor(x, y)
- return bint_new(x):_bor(y)
-end
-
---- Bitwise XOR bints (in-place).
--- @param y An integer to perform bitwise XOR.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint:_bxor(y)
- y = bint_assert_convert(y)
- for i=1,BINT_SIZE do
- self[i] = self[i] ~ y[i]
- end
- return self
-end
-
---- Bitwise XOR two integers considering bints.
--- @param x An integer to perform bitwise XOR.
--- @param y An integer to perform bitwise XOR.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint.__bxor(x, y)
- return bint_new(x):_bxor(y)
-end
-
---- Bitwise NOT a bint (in-place).
-function bint:_bnot()
- for i=1,BINT_SIZE do
- self[i] = (~self[i]) & BINT_WORDMAX
- end
- return self
-end
-
---- Bitwise NOT a bint.
--- @param x An integer to perform bitwise NOT.
--- @raise Asserts in case inputs are not convertible to integers.
-function bint.__bnot(x)
- local y = setmetatable({}, bint)
- for i=1,BINT_SIZE do
- y[i] = (~x[i]) & BINT_WORDMAX
- end
- return y
-end
-
---- Negate a bint (in-place). This effectively applies two's complements.
-function bint:_unm()
- return self:_bnot():_inc()
-end
-
---- Negate a bint. This effectively applies two's complements.
--- @param x A bint to perform negation.
-function bint.__unm(x)
- return (~x):_inc()
-end
-
---- Compare if integer x is less than y considering bints (unsigned version).
--- @param x Left integer to compare.
--- @param y Right integer to compare.
--- @raise Asserts in case inputs are not convertible to integers.
--- @see bint.__lt
-function bint.ult(x, y)
- x, y = bint_assert_convert(x), bint_assert_convert(y)
- for i=BINT_SIZE,1,-1 do
- local a, b = x[i], y[i]
- if a ~= b then
- return a < b
- end
- end
- return false
-end
-
---- Compare if bint x is less or equal than y considering bints (unsigned version).
--- @param x Left integer to compare.
--- @param y Right integer to compare.
--- @raise Asserts in case inputs are not convertible to integers.
--- @see bint.__le
-function bint.ule(x, y)
- x, y = bint_assert_convert(x), bint_assert_convert(y)
- for i=BINT_SIZE,1,-1 do
- local a, b = x[i], y[i]
- if a ~= b then
- return a < b
- end
- end
- return true
-end
-
---- Compare if number x is less than y considering bints and signs.
--- @param x Left value to compare, a bint or lua number.
--- @param y Right value to compare, a bint or lua number.
--- @see bint.ult
-function bint.__lt(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- local xneg = ix[BINT_SIZE] & BINT_WORDMSB ~= 0
- local yneg = iy[BINT_SIZE] & BINT_WORDMSB ~= 0
- if xneg == yneg then
- for i=BINT_SIZE,1,-1 do
- local a, b = ix[i], iy[i]
- if a ~= b then
- return a < b
- end
- end
- return false
- end
- return xneg and not yneg
- end
- return bint_tonumber(x) < bint_tonumber(y)
-end
-
---- Compare if number x is less or equal than y considering bints and signs.
--- @param x Left value to compare, a bint or lua number.
--- @param y Right value to compare, a bint or lua number.
--- @see bint.ule
-function bint.__le(x, y)
- local ix, iy = tobint(x), tobint(y)
- if ix and iy then
- local xneg = ix[BINT_SIZE] & BINT_WORDMSB ~= 0
- local yneg = iy[BINT_SIZE] & BINT_WORDMSB ~= 0
- if xneg == yneg then
- for i=BINT_SIZE,1,-1 do
- local a, b = ix[i], iy[i]
- if a ~= b then
- return a < b
- end
- end
- return true
- end
- return xneg and not yneg
- end
- return bint_tonumber(x) <= bint_tonumber(y)
-end
-
---- Convert a bint to a string on base 10.
--- @see bint.tobase
-function bint:__tostring()
- return self:tobase(10)
-end
-
--- Allow creating bints by calling bint itself
-setmetatable(bint, {
- __call = function(_, x)
- return bint_new(x)
- end
-})
-
-BINT_MATHMININTEGER, BINT_MATHMAXINTEGER = bint_new(math.mininteger), bint_new(math.maxinteger)
-BINT_MININTEGER = bint.mininteger()
-memo[memoindex] = bint
-
-return bint
-
-end
-
-return newmodule
-end
-_G.package.loaded[".bint"] = load_bint()
-print("loaded bint")
-
-
-
local function load_pretty()
local pretty = { _version = "0.0.1" }
@@ -3659,20 +1914,20 @@ end
function ao.init(env)
if ao.id == "" then ao.id = getId(env.process) end
- -- if ao._module == "" then
- -- ao._module = env.Module.Id
- -- end
- -- TODO: need to deal with assignables
- if #ao.authorities < 1 then
- if type(env.process.authority) == 'string' then
- ao.authorities = { env.process.authority }
- else
- ao.authorities = env.process.authority
- end
- end
+ -- if ao._module == "" then
+ -- ao._module = env.Module.Id
+ -- end
+ -- TODO: need to deal with assignables
+ if #ao.authorities < 1 then
+ if type(env.process.authority) == 'string' then
+ ao.authorities = { env.process.authority }
+ else
+ ao.authorities = env.process.authority
+ end
+ end
- ao.outbox = {Output = {}, Messages = {}, Spawns = {}, Assignments = {}}
- ao.env = env
+ ao.outbox = {Output = {}, Messages = {}, Spawns = {}, Assignments = {}}
+ ao.env = env
end
@@ -3999,30 +2254,6 @@ function Prompt()
return "aos> "
end
--- global print function
-function print(a)
- if type(a) == "table" then
- a = stringify.format(a)
- end
-
- if type(a) == "boolean" then
- a = Colors.blue .. tostring(a) .. Colors.reset
- end
- if type(a) == "nil" then
- a = Colors.red .. tostring(a) .. Colors.reset
- end
- if type(a) == "number" then
- a = Colors.green .. tostring(a) .. Colors.reset
- end
-
- if HandlerPrintLogs then
- table.insert(HandlerPrintLogs, a)
- return nil
- end
-
- return tostring(a)
-end
-
local maxInboxCount = 10000
function state.insertInbox(msg)
@@ -4047,7 +2278,7 @@ end
local function isFromOwner(m)
local _owner = getOwnerAddress(m)
- local _fromProcess = m['from-process'] or nil
+ local _fromProcess = m['from-process'] or _owner
return _owner ~= nil and _fromProcess == _owner
end
@@ -4080,12 +2311,36 @@ function state.init(req, base)
-- if env.Process.Name then
-- Name = Name == "aos" and env.Process.Name
-- end
+ -- global print function
+ function print(a)
+ if type(a) == "table" then
+ a = stringify.format(a)
+ end
+
+ if type(a) == "boolean" then
+ a = Colors.blue .. tostring(a) .. Colors.reset
+ end
+ if type(a) == "nil" then
+ a = Colors.red .. tostring(a) .. Colors.reset
+ end
+ if type(a) == "number" then
+ a = Colors.green .. tostring(a) .. Colors.reset
+ end
+
+ if HandlerPrintLogs then
+ table.insert(HandlerPrintLogs, a)
+ return nil
+ end
+
+ return tostring(a)
+ end
+
Initialized = true
end
end
-function state.getFrom(req)
- return req.body['from-process'] or getOwner(req.body)
+function state.getFrom(req)
+ return getOwner(req.body)
end
function state.isTrusted(req)
@@ -4222,7 +2477,6 @@ function process.handle(req, base)
local printData = table.concat(HandlerPrintLogs, "\n")
if not status then
if req.body.action == "Eval" then
- table.insert(Errors, error)
return {
Error = table.concat({
printData,
@@ -4235,9 +2489,11 @@ function process.handle(req, base)
end
print(Colors.red .. "Error" .. Colors.gray .. " handling message " .. Colors.reset)
print(Colors.green .. error .. Colors.reset)
- print("\n" .. Colors.gray .. debug.traceback() .. Colors.reset)
+ -- print("\n" .. Colors.gray .. debug.traceback() .. Colors.reset)
return ao.result({
- Error = printData .. '\n\n' .. Colors.red .. 'error:\n' .. Colors.reset .. error,
+ Output = {
+ data = printData .. '\n\n' .. Colors.red .. 'error:\n' .. Colors.reset .. error
+ },
Messages = {},
Spawns = {},
Assignments = {}
@@ -4286,9 +2542,7 @@ function compute(base, req, opts)
local _results = _process.handle(req, base)
base.results = {
outbox = {},
- output = _results.Output,
- authority = base.process.authority,
- from = req.body['from-process']
+ output = _results.Output
}
for i=1,#_results.Messages do
base.results.outbox[tostring(i)] = _results.Messages[i]
@@ -4296,3 +2550,10 @@ function compute(base, req, opts)
return base
end
+
+print [[ _ ___ ____
+ / \ / _ \/ ___|
+ / _ \| | | \___ \
+ / ___ \ |_| |___) |
+ /_/ \_\___/|____/
+ ]]
diff --git a/test/large-message.eterm b/test/large-message.eterm
new file mode 100644
index 000000000..557ad0857
--- /dev/null
+++ b/test/large-message.eterm
@@ -0,0 +1,218 @@
+#{
+ <<"address">> =>
+ <<"XgN1kN-ZyAWtYvdUlPEM3EIIi-budUx81mjcHQ1mSNU">>,
+ <<"append">> =>
+ <<"aaf13c9ed2e821ea8c82fcc7981c73a14dc2d01c855f09262d42090fa0424422">>,
+ <<"commitments">> =>
+ #{<<"88VMQLqJxIwAT5PThjsKMTSVU56jEExem5hwQPbxcqc">> =>
+ #{<<"alg">> => <<"hmac-sha256">>,
+ <<"commitment-device">> => <<"httpsig@1.0">>,
+ <<"signature">> =>
+ <<"http-sig-037590df99c805ad=:N6AyqYlF04sxNcv+PZedgHW8TrL+9XqsQxQyDWDWywTDF6qeMR5IdokYGwqHRF1O5ygMvD7kqrlTCSJZSfwBiraOPaR3ewASGEn9v0D/ytRApN2rVMVrSVdWXM48L/YAwwYQj1AJ1B6l11GwB7tJQdnK5CjFWZg95KKsOf4cP8pUsNIeJMoOF8y4mcUVu9ncxz5xmCzNVAoLlCZC7LhOpAoQmejPWYlOURdao5bCLAHNgp0MwVVAIMbC7e0dryzIwkPKJ6b9dpdQQ8M3WKLw95BB4dBXCe4I6S8iUL6VhL9sSy9B0vZTax30ndgvh0f/QkObUwp3K41j5TEYhxLXMMgs/aEWzNawZzfeCLICuDd6XxQG0F2QS3UMOgtnZKoWfX2YBq7CS8lkaVrANkh5g2MFMlgJMEnyVFegsREUg8qqO0I6oLbECJsOfWUAe12pv3scMhF9ofi8TJpHuRUKdxhzOA9IWzLS6GO+wkNEQ3IAKCGdZUUci+ntoY5Hm1/R7lSoiEQSOWl41Mx6ZVSWNhZRbdqDOXhYTpTP6NU8Ql2st0vF0uefC5Zljd9cs9wk0D44XTSIZxLQao60j1CX0lLxW03Dh3eHD490/7T7/HVhayJF/Vx5u7evJCdReTNCxhyDCSnV5RKx6++6zX0B/VdxoTWyMO2XA5qG9v5Bs7o=:">>,
+ <<"signature-input">> =>
+ <<"http-sig-037590df99c805ad=(\"address\" \"ao-types\" \"append\" \"content-digest\" \"content-type\" \"firmware\" \"guest_features\" \"initrd\" \"kernel\" \"nonce\" \"public-key\" \"report\" \"vcpu_type\" \"vcpus\" \"vmm_type\");alg=\"rsa-pss-sha512\";keyid=\"o987RvpWhYgDA9rte9WJmVnws2z6YX_khhBrQOBCOyxQG5lj3i7aJIXfEwV-X3GPWRSMQ_7inIi4Fw7ocUDIXp4EWhiCSU4JTywxzbwcgRzH42bTk8AsEm7uQrIWqCYbinQ92Qsn3qnW4jhcpa1wPASRlv3vYAQIu6eXKuloXpIjx8rFOLTzTje9NFVWmjypP9oShtjI8Y7VdEjjl5ILMqu5ZfwlIsssbx9J6jDvN9nMCj8-j1YyKh-Uq3aJKeYxeytSSYvGU4mHrG3j2HYooct9LXOwTh89sXsc7gCvSafpI8j7ZFGRrP28FhRhCjzYhiCT88efsWxAjkv1C1JYf5jJLtLFfEl-Y39jpGPXnIaofeMJ9X2X_Xfx86KOKSqoOU-DmFKfPdt4JjqaafI1_Asvxp60enS1BdvgfLr34K8gq6Yyos47FrzhqCW_BWOF77vgp-w5PV-tcs2uO1oKpUghy1eaTeeNdKHjlQj9huOjv7kOMBGfOWtZDW1sO9ChYB3DDTT3AAwo6bVjj0iiiwWGbzWecwT9OPLetRCymVPEFvTQWXK6-1Qu3WNoYqFtC_pFqHo-owxaHS37ySTfB1A04C5a6m3TEP9hLWde52C5frMdfchYuy5pqoNqMb3sznGRQhOwmcWxb3unq9tytAYNmEpQhzZ-sHapTs1Y080\"">>},
+ <<"oRjdfP_V-aFCZgdDGmHhgijR4Y95nxbHJ_FwUgVYXQs">> =>
+ #{<<"alg">> => <<"rsa-pss-sha512">>,
+ <<"commitment-device">> => <<"httpsig@1.0">>,
+ <<"committer">> =>
+ <<"XgN1kN-ZyAWtYvdUlPEM3EIIi-budUx81mjcHQ1mSNU">>,
+ <<"signature">> =>
+ <<"http-sig-037590df99c805ad=:N6AyqYlF04sxNcv+PZedgHW8TrL+9XqsQxQyDWDWywTDF6qeMR5IdokYGwqHRF1O5ygMvD7kqrlTCSJZSfwBiraOPaR3ewASGEn9v0D/ytRApN2rVMVrSVdWXM48L/YAwwYQj1AJ1B6l11GwB7tJQdnK5CjFWZg95KKsOf4cP8pUsNIeJMoOF8y4mcUVu9ncxz5xmCzNVAoLlCZC7LhOpAoQmejPWYlOURdao5bCLAHNgp0MwVVAIMbC7e0dryzIwkPKJ6b9dpdQQ8M3WKLw95BB4dBXCe4I6S8iUL6VhL9sSy9B0vZTax30ndgvh0f/QkObUwp3K41j5TEYhxLXMMgs/aEWzNawZzfeCLICuDd6XxQG0F2QS3UMOgtnZKoWfX2YBq7CS8lkaVrANkh5g2MFMlgJMEnyVFegsREUg8qqO0I6oLbECJsOfWUAe12pv3scMhF9ofi8TJpHuRUKdxhzOA9IWzLS6GO+wkNEQ3IAKCGdZUUci+ntoY5Hm1/R7lSoiEQSOWl41Mx6ZVSWNhZRbdqDOXhYTpTP6NU8Ql2st0vF0uefC5Zljd9cs9wk0D44XTSIZxLQao60j1CX0lLxW03Dh3eHD490/7T7/HVhayJF/Vx5u7evJCdReTNCxhyDCSnV5RKx6++6zX0B/VdxoTWyMO2XA5qG9v5Bs7o=:">>,
+ <<"signature-input">> =>
+ <<"http-sig-037590df99c805ad=(\"address\" \"ao-types\" \"append\" \"content-digest\" \"content-type\" \"firmware\" \"guest_features\" \"initrd\" \"kernel\" \"nonce\" \"public-key\" \"report\" \"vcpu_type\" \"vcpus\" \"vmm_type\");alg=\"rsa-pss-sha512\";keyid=\"o987RvpWhYgDA9rte9WJmVnws2z6YX_khhBrQOBCOyxQG5lj3i7aJIXfEwV-X3GPWRSMQ_7inIi4Fw7ocUDIXp4EWhiCSU4JTywxzbwcgRzH42bTk8AsEm7uQrIWqCYbinQ92Qsn3qnW4jhcpa1wPASRlv3vYAQIu6eXKuloXpIjx8rFOLTzTje9NFVWmjypP9oShtjI8Y7VdEjjl5ILMqu5ZfwlIsssbx9J6jDvN9nMCj8-j1YyKh-Uq3aJKeYxeytSSYvGU4mHrG3j2HYooct9LXOwTh89sXsc7gCvSafpI8j7ZFGRrP28FhRhCjzYhiCT88efsWxAjkv1C1JYf5jJLtLFfEl-Y39jpGPXnIaofeMJ9X2X_Xfx86KOKSqoOU-DmFKfPdt4JjqaafI1_Asvxp60enS1BdvgfLr34K8gq6Yyos47FrzhqCW_BWOF77vgp-w5PV-tcs2uO1oKpUghy1eaTeeNdKHjlQj9huOjv7kOMBGfOWtZDW1sO9ChYB3DDTT3AAwo6bVjj0iiiwWGbzWecwT9OPLetRCymVPEFvTQWXK6-1Qu3WNoYqFtC_pFqHo-owxaHS37ySTfB1A04C5a6m3TEP9hLWde52C5frMdfchYuy5pqoNqMb3sznGRQhOwmcWxb3unq9tytAYNmEpQhzZ-sHapTs1Y080\"">>}},
+ <<"firmware">> =>
+ <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>,
+ <<"guest_features">> => 1,
+ <<"initrd">> =>
+ <<"da6dffff50373e1d393bf92cb9b552198b1930068176a046dda4e23bb725b3bb">>,
+ <<"kernel">> =>
+ <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
+ <<"node-message">> =>
+ #{<<"debug_metadata">> => true,
+ <<"gateway">> => <<"https://arweave.net">>,
+ <<"process_now_from_cache">> => false,
+ <<"debug_print_map_line_threshold">> => 30,
+ <<"http_client">> => gun,<<"http_keepalive">> => 120000,
+ <<"short_trace_len">> => 5,
+ <<"debug_print_trace">> => short,
+ <<"date">> => <<"Fri, 18 Apr 2025 19:01:31 GMT">>,
+ <<"scheduler_location_ttl">> => 604800000,
+ <<"access_control_allow_origin">> => <<"*">>,
+ <<"compute_mode">> => lazy,<<"preload_devices">> => [],
+ <<"hb_config_location">> => <<"config.flat">>,
+ <<"process_workers">> => false,
+ <<"access_control_allow_methods">> =>
+ <<"GET, POST, PUT, DELETE, OPTIONS">>,
+ <<"initialized">> => permanent,<<"only">> => local,
+ <<"trusted">> =>
+ #{<<"append">> =>
+ <<"aaf13c9ed2e821ea8c82fcc7981c73a14dc2d01c855f09262d42090fa0424422">>,
+ <<"firmware">> =>
+ <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>,
+ <<"guest_features">> => 1,
+ <<"initrd">> =>
+ <<"da6dffff50373e1d393bf92cb9b552198b1930068176a046dda4e23bb725b3bb">>,
+ <<"kernel">> =>
+ <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
+ <<"vcpu_type">> => 5,<<"vcpus">> => 1,
+ <<"vmm_type">> => 1},
+ <<"snp_hashes">> =>
+ #{<<"append">> =>
+ <<"aaf13c9ed2e821ea8c82fcc7981c73a14dc2d01c855f09262d42090fa0424422">>,
+ <<"firmware">> =>
+ <<"b8c5d4082d5738db6b0fb0294174992738645df70c44cdecf7fad3a62244b788e7e408c582ee48a74b289f3acec78510">>,
+ <<"guest_features">> => 1,
+ <<"initrd">> =>
+ <<"da6dffff50373e1d393bf92cb9b552198b1930068176a046dda4e23bb725b3bb">>,
+ <<"kernel">> =>
+ <<"69d0cd7d13858e4fcef6bc7797aebd258730f215bc5642c4ad8e4b893cc67576">>,
+ <<"vcpu_type">> => 5,<<"vcpus">> => 1,
+ <<"vmm_type">> => 1},
+ <<"cache_lookup_hueristics">> => false,
+ <<"address">> =>
+ <<"XgN1kN-ZyAWtYvdUlPEM3EIIi-budUx81mjcHQ1mSNU">>,
+ <<"store">> =>
+ [#{<<"prefix">> => <<"cache-mainnet">>,
+ <<"store-module">> => hb_store_fs},
+ #{<<"store">> =>
+ [#{<<"prefix">> => <<"cache-mainnet">>,
+ <<"store-module">> => hb_store_fs}],
+ <<"store-module">> => hb_store_gateway}],
+ <<"postprocessor">> => undefined,
+ <<"debug_show_priv">> => false,
+ <<"routes">> =>
+ [#{<<"node">> =>
+ #{<<"prefix">> => <<"http://localhost:6363">>},
+ <<"template">> => <<"/result/.*">>},
+ #{<<"nodes">> =>
+ [#{<<"opts">> =>
+ #{<<"http_client">> => httpc,
+ <<"protocol">> => http2},
+ <<"prefix">> =>
+ <<"https://arweave-search.goldsky.com">>},
+ #{<<"opts">> =>
+ #{<<"http_client">> => gun,
+ <<"protocol">> => http2},
+ <<"prefix">> => <<"https://arweave.net">>}],
+ <<"template">> => <<"/graphql">>},
+ #{<<"node">> =>
+ #{<<"opts">> =>
+ #{<<"http_client">> => gun,
+ <<"protocol">> => http2},
+ <<"prefix">> => <<"https://arweave.net">>},
+ <<"template">> => <<"/raw">>}],
+ <<"debug_print">> => false,
+ <<"bundler_ans104">> => <<"https://up.arweave.net:443">>,
+ <<"client_error_strategy">> => throw,
+ <<"http_request_send_timeout">> => 60000,
+ <<"http_extra_opts">> =>
+ #{<<"cache_control">> => [<<"always">>],
+ <<"force_message">> => true},
+ <<"force_signed">> => true,
+ <<"server">> => <<"nginx/1.18.0 (Ubuntu)">>,
+ <<"access_remote_cache_for_client">> => false,
+ <<"await_inprogress">> => named,
+ <<"content_type">> =>
+ <<"multipart/form-data; boundary=\"5zd5yKhUfgSnCzBVlt29-Vs4rQQeUBGuVqNJubt07jI\"">>,
+ <<"http_connect_timeout">> => 5000,<<"status">> => 200,
+ <<"body_keys">> =>
+ <<"\"routes\", \"routes\", \"routes\", \"routes\", \"routes\", \"routes\", \"routes\", \"routes\", \"routes\", \"routes\"">>,
+ <<"stack_print_prefixes">> => ["hb","dev","ar"],
+ <<"scheduling_mode">> => disabled,
+ <<"http_server">> =>
+ <<"XgN1kN-ZyAWtYvdUlPEM3EIIi-budUx81mjcHQ1mSNU">>,
+ <<"debug_print_binary_max">> => 60,
+ <<"debug_ids">> => false,<<"relay_http_client">> => httpc,
+ <<"debug_stack_depth">> => 40,
+ <<"preloaded_devices">> =>
+ [#{<<"module">> => dev_codec_ans104,
+ <<"name">> => <<"ans104@1.0">>},
+ #{<<"module">> => dev_cu,
+ <<"name">> => <<"compute@1.0">>},
+ #{<<"module">> => dev_cache,
+ <<"name">> => <<"cache@1.0">>},
+ #{<<"module">> => dev_cacheviz,
+ <<"name">> => <<"cacheviz@1.0">>},
+ #{<<"module">> => dev_cron,
+ <<"name">> => <<"cron@1.0">>},
+ #{<<"module">> => dev_dedup,
+ <<"name">> => <<"dedup@1.0">>},
+ #{<<"module">> => dev_delegated_compute,
+ <<"name">> => <<"delegated-compute@1.0">>},
+ #{<<"module">> => dev_faff,
+ <<"name">> => <<"faff@1.0">>},
+ #{<<"module">> => dev_codec_flat,
+ <<"name">> => <<"flat@1.0">>},
+ #{<<"module">> => dev_genesis_wasm,
+ <<"name">> => <<"genesis-wasm@1.0">>},
+ #{<<"module">> => dev_green_zone,
+ <<"name">> => <<"greenzone@1.0">>},
+ #{<<"module">> => dev_codec_httpsig,
+ <<"name">> => <<"httpsig@1.0">>},
+ #{<<"module">> => dev_hyperbuddy,
+ <<"name">> => <<"hyperbuddy@1.0">>},
+ #{<<"module">> => dev_codec_json,
+ <<"name">> => <<"json@1.0">>},
+ #{<<"module">> => dev_json_iface,
+ <<"name">> => <<"json-iface@1.0">>},
+ #{<<"module">> => dev_lookup,
+ <<"name">> => <<"lookup@1.0">>},
+ #{<<"module">> => dev_lua,
+ <<"name">> => <<"lua@5.3a">>},
+ #{<<"module">> => dev_message,
+ <<"name">> => <<"message@1.0">>},
+ #{<<"module">> => dev_meta,
+ <<"name">> => <<"meta@1.0">>},
+ #{<<"module">> => dev_monitor,
+ <<"name">> => <<"monitor@1.0">>},
+ #{<<"module">> => dev_multipass,
+ <<"name">> => <<"multipass@1.0">>},
+ #{<<"module">> => dev_p4,<<"name">> => <<"p4@1.0">>},
+ #{<<"module">> => dev_patch,
+ <<"name">> => <<"patch@1.0">>},
+ #{<<"module">> => dev_poda,
+ <<"name">> => <<"poda@1.0">>},
+ #{<<"module">> => dev_process,
+ <<"name">> => <<"process@1.0">>},
+ #{<<"module">> => dev_push,
+ <<"name">> => <<"push@1.0">>},
+ #{<<"module">> => dev_relay,
+ <<"name">> => <<"relay@1.0">>},
+ #{<<"module">> => dev_router,
+ <<"name">> => <<"router@1.0">>},
+ #{<<"module">> => dev_scheduler,
+ <<"name">> => <<"scheduler@1.0">>},
+ #{<<"module">> => dev_simple_pay,
+ <<"name">> => <<"simple-pay@1.0">>},
+ #{<<"module">> => dev_snp,
+ <<"name">> => <<"snp@1.0">>},
+ #{<<"module">> => dev_stack,
+ <<"name">> => <<"stack@1.0">>},
+ #{<<"module">> => dev_codec_structured,
+ <<"name">> => <<"structured@1.0">>},
+ #{<<"module">> => dev_test,
+ <<"name">> => <<"test-device@1.0">>},
+ #{<<"module">> => dev_wasi,
+ <<"name">> => <<"wasi@1.0">>},
+ #{<<"module">> => dev_wasm,
+ <<"name">> => <<"wasm-64@1.0">>}],
+ <<"cache_writers">> =>
+ [<<"XgN1kN-ZyAWtYvdUlPEM3EIIi-budUx81mjcHQ1mSNU">>],
+ <<"wasm_allow_aot">> => false,
+ <<"store_all_signed">> => true,
+ <<"ans104_trust_gql">> => true,
+ <<"commitment_device">> => <<"httpsig@1.0">>,
+ <<"preprocessor">> => undefined,
+ <<"debug_committers">> => false,<<"port">> => 10000,
+ <<"load_remote_devices">> => false,
+ <<"node_history">> => [],<<"mode">> => debug,
+ <<"trusted_device_signers">> => [],
+ <<"debug_print_indent">> => 2,
+ <<"host">> => <<"localhost">>},
+ <<"nonce">> =>
+ <<"XgN1kN-ZyAWtYvdUlPEM3EIIi-budUx81mjcHQ1mSNU4bfGIgeSRG0fgGtFEdPTXAcI6szcsMPvbZmwMWraKxA">>,
+ <<"public-key">> =>
+ <<"g2gCaAJ3A3JzYWIAAQABbQAAAgCj3ztG+laFiAMD2u171YmZWfCzbPphf+SGEGtA4EI7LFAbmWPeLtokhd8TBX5fcY9ZFIxD/uKciLgXDuhxQMhengRaGIJJTglPLDHNvByBHMfjZtOTwCwSbu5CshaoJhuKdD3ZCyfeqdbiOFylrXA8BJGW/e9gBAi7p5cq6WhekiPHysU4tPNON700VVaaPKk/2hKG2MjxjtV0SOOXkgsyq7ll/CUiyyxvH0nqMO832cwKPz6PVjIqH5Srdokp5jF7K1JJi8ZTiYesbePYdiihy30tc7BOHz2xexzuAK9Jp+kjyPtkUZGs/bwWFGEKPNiGIJPzx5+xbECOS/ULUlh/mMku0sV8SX5jf2OkY9echqh94wn1fZf9d/Hzoo4pKqg5T4OYUp8923gmOppp8jX8Cy/GnrR6dLUF2+B8uvfgryCrpjKizjsWvOGoJb8FY4Xvu+Cn7Dk9X61yza47WgqlSCHLV5pN5410oeOVCP2G46O/uQ4wEZ85a1kNbWw70KFgHcMNNPcADCjptWOPSKKLBYZvNZ5zBP048t61ELKZU8QW9NBZcrr7VC7dY2hioW0L+kWoej6jDFodLfvJJN8HUDTgLlrqbdMQ/2EtZ17nYLl+sx19yFi7Lmmqg2oxvezOcZFCE7CZxbFve6er23K0Bg2YSlCHNn6wdqlOzVjTzQ==">>,
+ <<"report">> =>
+ <<"{\"version\":2,\"guest_svn\":0,\"policy\":196608,\"family_id\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"image_id\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"vmpl\":1,\"sig_algo\":1,\"current_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"plat_info\":3,\"_author_key_en\":0,\"_reserved_0\":0,\"report_data\":[94,3,117,144,223,153,200,5,173,98,247,84,148,241,12,220,66,8,139,230,238,117,76,124,214,104,220,29,13,102,72,213,56,109,241,136,129,228,145,27,71,224,26,209,68,116,244,215,1,194,58,179,55,44,48,251,219,102,108,12,90,182,138,196],\"measurement\":[57,145,156,83,216,77,75,73,242,17,131,95,71,138,47,87,91,44,72,35,26,227,241,105,23,212,36,168,251,224,172,31,33,175,179,197,217,106,227,52,65,65,10,56,83,147,108,11],\"host_data\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"id_key_digest\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"author_key_digest\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"report_id\":[76,118,205,58,46,223,75,32,141,9,113,137,69,229,230,102,49,239,64,145,130,106,21,147,154,141,222,210,40,40,121,150],\"report_id_ma\":[255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255],\"reported_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"_reserved_1\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"chip_id\":[6,154,118,21,12,115,53,47,251,19,195,100,155,12,239,102,116,131,103,189,106,202,153,3,114,175,16,182,24,166,229,214,231,126,164,15,129,52,233,142,196,43,43,8,89,238,118,246,21,144,209,16,165,197,134,105,214,250,155,148,50,78,87,203],\"committed_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"current_build\":20,\"current_minor\":55,\"current_major\":1,\"_reserved_2\":0,\"committed_build\":20,\"committed_minor\":55,\"committed
_major\":1,\"_reserved_3\":0,\"launch_tcb\":{\"bootloader\":4,\"tee\":0,\"_reserved\":[0,0,0,0],\"snp\":22,\"microcode\":213},\"_reserved_4\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"signature\":{\"r\":[208,7,136,60,224,138,74,102,17,217,164,154,114,117,150,30,151,247,93,90,52,122,13,58,58,169,124,13,168,74,187,144,221,165,218,93,104,175,212,214,248,23,142,13,132,232,208,5,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"s\":[67,238,106,198,241,37,137,212,124,29,212,20,95,170,10,247,160,213,57,13,223,123,32,246,242,28,76,121,72,170,113,199,14,128,130,162,198,59,138,105,227,27,136,207,243,245,201,196,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\"_reserved\":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}}">>,
+ <<"vcpu_type">> => 5,<<"vcpus">> => 1,<<"vmm_type">> => 1}.
\ No newline at end of file
diff --git a/test/test.lua b/test/test.lua
index 23addadb1..8614a3341 100644
--- a/test/test.lua
+++ b/test/test.lua
@@ -13,6 +13,10 @@ function assoctable()
}
end
+function error_response()
+ return "error", "Very bad, but Lua caught it."
+end
+
--- @function ListTable
--- @treturn table
--- @return a table with three elements. In Erlang, this will be
@@ -81,8 +85,8 @@ end
--- @tparam table base
--- @tparam table request
--- @return table an answer to every HTTP request with the words "i like turtles"
-function preprocess(base, req, opts)
- return { { body = "i like turtles" } }
+function request(base, req, opts)
+ return "ok", { body = { { body = "i like turtles" } } }
end
--- @function sandboxed_fail